diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f29e73f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+.idea
+vendor
+*.log
+rainbow-sync*
diff --git a/README.md b/README.md
index 75a561a..b1a1f46 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,67 @@
 # rainbow-sync
 A daemon that synchronizes IRIS hub data for the Rainbow wallet backend
+
+## Run
+- Cosmos
+```bash
+cd service/cosmos && make all
+nohup ./rainbow-sync > debug.log 2>&1 &
+```
+- Iris
+```bash
+cd service/iris && make all
+nohup ./rainbow-sync > debug.log 2>&1 &
+```
+
+## Run with Docker
+You can also run the application with Docker.
+### Iris
+- Build the rainbow-sync image
+```bash
+cd service/iris && docker build -t rainbow-sync:dev01 .
+```
+
+- Run the application
+```bash
+docker run --name rainbow-sync \
+-v /mnt/data/rainbow-sync/logs:/root/go/src/github.com/irisnet/rainbow-sync/logs \
+-e "DB_ADDR=127.0.0.1:27217" -e "DB_USER=user" \
+-e "DB_PASSWD=password" -e "DB_DATABASE=db_name" \
+-e "SER_BC_FULL_NODE=tcp://localhost:26657,..." rainbow-sync:dev01
+```
+### Cosmos
+- Build the rainbow-sync image
+```bash
+cd service/cosmos && docker build -t rainbow-sync:dev01 .
+```
+- Run the application
+```bash
+docker run --name rainbow-sync \
+-v /mnt/data/rainbow-sync/logs:/root/go/src/github.com/irisnet/rainbow-sync/logs \
+-e "DB_ADDR=127.0.0.1:27217" -e "DB_USER=user" \
+-e "DB_PASSWD=password" -e "DB_DATABASE=db_name" \
+-e "SER_BC_FULL_NODE_COSMOS=tcp://localhost:36657,..." rainbow-sync:dev01
+```
+
+
+## Environment variables
+
+| param | type | default | description | example |
+| :--- | :--- | :--- | :---: | :---: |
+| DB_ADDR | string | "" | database address | 127.0.0.1:27017,127.0.0.2:27017... |
+| DB_USER | string | "" | database user | user |
+| DB_PASSWD | string | "" | database password | password |
+| DB_DATABASE | string | "" | database name | db_name |
+| IRIS_NETWORK | string | "testnet" | IRIS network name | testnet or mainnet |
+| SER_BC_FULL_NODE | string | tcp://localhost:26657 | IRIS full node RPC URL | tcp://localhost:26657, tcp://127.0.0.2:26657 |
+| WORKER_NUM_CREATE_TASK | string | 2 | number of workers that create IRIS tag sync tasks | 2 |
+| WORKER_NUM_EXECUTE_TASK | string | 30 | number of workers that execute IRIS tag sync tasks | 30 |
+| WORKER_MAX_SLEEP_TIME | string | 120 | maximum idle time allowed for an IRIS tag sync worker (in seconds) | 120 |
+| BLOCK_NUM_PER_WORKER_HANDLE | string | 50 | number of IRIS blocks covered by each IRIS tag sync task | 50 |
+| SER_BC_FULL_NODE_COSMOS | string | tcp://localhost:36657 | Cosmos full node RPC URL | tcp://localhost:36657, tcp://127.0.0.2:36657 |
+| WORKER_NUM_CREATE_TASK_COSMOS | string | 2 | number of workers that create Cosmos tx sync tasks | 2 |
+| WORKER_NUM_EXECUTE_TASK_COSMOS | string | 30 | number of workers that execute Cosmos tx sync tasks | 30 |
+| WORKER_MAX_SLEEP_TIME_COSMOS | string | 120 | maximum idle time allowed for a Cosmos tx sync worker (in seconds) | 120 |
+| BLOCK_NUM_PER_WORKER_HANDLE_COSMOS | string | 50 | number of Cosmos blocks covered by each Cosmos tx sync task | 50 |
+
+
diff --git a/script/mongodb.js b/script/mongodb.js
new file mode 100644
index 0000000..e683922
--- /dev/null
+++ b/script/mongodb.js
@@ -0,0 +1,37 @@
+// create collections
+db.createCollection("sync_iris_asset_detail");
+db.sync_block.renameCollection("sync_iris_block");
+db.sync_task.renameCollection("sync_iris_task");
+db.createCollection("sync_iris_tx");
+db.createCollection("sync_cosmos_tx");
+db.createCollection("sync_cosmos_block");
+db.createCollection("sync_cosmos_task");
+
+
+// create indexes
+db.sync_iris_task.createIndex({"status": 1}, {"background": true});
+db.sync_iris_tx.createIndex({"to": -1, "height": -1});
+db.sync_iris_asset_detail.createIndex({"to": -1, "height": -1});
+db.sync_iris_asset_detail.createIndex({"to": -1, "subject": -1});
+db.sync_iris_block.createIndex({"height": -1}, {"unique": true});
+db.sync_iris_task.createIndex({"start_height": 1, "end_height": 1}, {"unique": true});
+
+db.sync_cosmos_task.createIndex({"status": 1}, {"background": true});
+db.sync_cosmos_tx.createIndex({"to": -1, "height": -1});
+db.sync_cosmos_block.createIndex({"height": -1}, {"unique": true});
+db.sync_cosmos_task.createIndex({"start_height": 1, "end_height": 1}, {"unique": true});
+
+db.sync_cosmos_tx.createIndex({"status": 1}, {"background": true});
+db.sync_cosmos_tx.createIndex({"type": 1}, {"background": true});
+db.sync_cosmos_tx.createIndex({'from': 1}, {'background': true});
+db.sync_cosmos_tx.createIndex({'initiator': 1}, {'background': true});
+
+db.sync_iris_tx.createIndex({'from': 1}, {'background': true});
+db.sync_iris_tx.createIndex({'initiator': 1}, {'background': true});
+db.sync_iris_tx.createIndex({"type": 1}, {"background": true});
+/*
+ * remove collection data
+ */
+// db.sync_iris_asset_detail.deleteMany({});
+// db.sync_block.deleteMany({});
+// db.sync_task.deleteMany({});
\ No newline at end of file
diff --git a/service/cosmos/Dockerfile b/service/cosmos/Dockerfile
new file mode 100644
index 0000000..0c6fdf2
--- /dev/null
+++ b/service/cosmos/Dockerfile
@@ -0,0 +1,27 @@
+FROM alpine:3.8
+
+# Set up dependencies
+ENV PACKAGES go make git libc-dev bash
+
+# Set up path
+ENV BINARY_NAME rainbow-sync
+ENV GOPATH /root/go
+ENV REPO_PATH $GOPATH/src/github.com/irisnet/rainbow-sync/service/cosmos
+ENV PATH $GOPATH/bin:$PATH
+
+RUN mkdir -p $GOPATH $REPO_PATH
+
+COPY . $REPO_PATH
+WORKDIR $REPO_PATH
+
+VOLUME $REPO_PATH/logs
+
+# Install minimum necessary dependencies, build binary
+RUN apk add --no-cache $PACKAGES && \
+    cd $REPO_PATH && make all && \
+    mv $REPO_PATH/$BINARY_NAME $GOPATH/bin && \
+    rm -rf $REPO_PATH/vendor && \
+    rm -rf $GOPATH/src/github.com/golang $GOPATH/bin/dep $GOPATH/pkg/* && \
+    apk del $PACKAGES
+
+CMD $BINARY_NAME
\ No newline at end of file
diff --git a/service/cosmos/Gopkg.lock b/service/cosmos/Gopkg.lock
new file mode 100644
index 0000000..73cc1d8
--- /dev/null
+++ b/service/cosmos/Gopkg.lock
@@ -0,0 +1,701 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "UT" + revision = "4b2b341e8d7715fae06375aa633dbb6e91b3fb46" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:9e7c5138114ff9c51a60731b3a425c319305013c6ea8b3f60fd2435baba1a0db" + name = "github.com/btcsuite/btcd" + packages = ["btcec"] + pruneopts = "UT" + revision = "a0d1e3e36d50f61ee6eaab26d7bd246aae1f9ece" + +[[projects]] + branch = "master" + digest = "1:386de157f7d19259a7f9c81f26ce011223ce0f090353c1152ffdf730d7d10ac2" + name = "github.com/btcsuite/btcutil" + packages = ["bech32"] + pruneopts = "UT" + revision = "9e5f4b9a998d263e3ce9c56664a7816001ac8000" + +[[projects]] + digest = "1:80fd39ec6e2c140540ec648b80adfdc23c28de1033af888ec23a3ff0a663da25" + name = "github.com/cosmos/cosmos-sdk" + packages = [ + "baseapp", + "cmd/gaia/app", + "codec", + "store", + "store/cachekv", + "store/cachemulti", + "store/dbadapter", + "store/errors", + "store/gaskv", + "store/iavl", + "store/prefix", + "store/rootmulti", + "store/tracekv", + "store/transient", + "store/types", + "types", + "version", + "x/auth", + "x/bank", + "x/crisis", + "x/distribution", + "x/distribution/keeper", + "x/distribution/tags", + "x/distribution/types", + "x/gov", + "x/gov/tags", + "x/mint", + "x/mock", + "x/params", + "x/params/subspace", + "x/slashing", + "x/slashing/tags", + "x/staking", + "x/staking/keeper", + "x/staking/querier", + "x/staking/tags", + "x/staking/types", + ] + pruneopts = "UT" + revision = "b85371ae512da9b0f035587ea977d728ff5fdfdc" + source = "https://github.com/cosmos/cosmos-sdk.git" + version = "v0.34.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" + name = "github.com/fsnotify/fsnotify" + packages = ["."] + pruneopts = "UT" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + version = "v1.4.7" + +[[projects]] + digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11" + name = "github.com/go-kit/kit" + packages = [ + "log", + "log/level", + "log/term", + "metrics", + "metrics/discard", + "metrics/internal/lv", + "metrics/prometheus", + ] + pruneopts = "UT" + revision = "4dc7be5d2d12881735283bcab7352178e190fc71" + version = "v0.6.0" + +[[projects]] + digest = "1:4062bc6de62d73e2be342243cf138cf499b34d558876db8d9430e2149388a4d8" + name = "github.com/go-logfmt/logfmt" + packages = ["."] + pruneopts = "UT" + revision = "07c9b44f60d7ffdfb7d8efe1ad539965737836dc" + version = "v0.4.0" + +[[projects]] + digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d" + name = "github.com/go-stack/stack" + packages = ["."] + pruneopts = "UT" + revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a" + version = "v1.8.0" + +[[projects]] + digest = "1:95e1006e41c641abd2f365dfa0f1213c04da294e7cd5f0bf983af234b775db64" + name = "github.com/gogo/protobuf" + packages = [ + "gogoproto", + "jsonpb", + "proto", + "protoc-gen-gogo/descriptor", + "sortkeys", + "types", + ] + pruneopts = "UT" + revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" + version = "v1.2.1" + +[[projects]] + digest = "1:239c4c7fd2159585454003d9be7207167970194216193a8a210b8d29576f19c9" + name = "github.com/golang/protobuf" 
+ packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "UT" + revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" + version = "v1.3.1" + +[[projects]] + digest = "1:e4f5819333ac698d294fe04dbf640f84719658d5c7ce195b10060cc37292ce79" + name = "github.com/golang/snappy" + packages = ["."] + pruneopts = "UT" + revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a" + version = "v0.0.1" + +[[projects]] + digest = "1:7b5c6e2eeaa9ae5907c391a91c132abfd5c9e8a784a341b5625e750c67e6825d" + name = "github.com/gorilla/websocket" + packages = ["."] + pruneopts = "UT" + revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" + version = "v1.4.0" + +[[projects]] + digest = "1:c0d19ab64b32ce9fe5cf4ddceba78d5bc9807f0016db6b1183599da3dcc24d10" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/printer", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token", + ] + pruneopts = "UT" + revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" + version = "v1.0.0" + +[[projects]] + digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + pruneopts = "UT" + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + digest = "1:a74b5a8e34ee5843cd6e65f698f3e75614f812ff170c2243425d75bc091e9af2" + name = "github.com/jmhodges/levigo" + packages = ["."] + pruneopts = "UT" + revision = "853d788c5c416eaaee5b044570784a96c7a26975" + version = "v1.0.0" + +[[projects]] + digest = "1:d622b76d4ca8cebd9fd06fc5983edd55c75fa06d6bb91c4a8fb38e24fc60d0d6" + name = "github.com/jolestar/go-commons-pool" + packages = [ + ".", + "collections", + "concurrent", + ] + pruneopts = "UT" + revision = "3f5d5f81046da81d73466f44fe6e0ac36ff304bd" + version = "v2.0.0" + +[[projects]] + branch = "master" + digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72" + name = "github.com/kr/logfmt" + packages = ["."] + pruneopts = "UT" + revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" + +[[projects]] + digest = "1:5a0ef768465592efca0412f7e838cdc0826712f8447e70e6ccc52eb441e9ab13" + name = "github.com/magiconair/properties" + packages = ["."] + pruneopts = "UT" + revision = "de8848e004dd33dc07a2947b3d76f618a7fc7ef1" + version = "v1.8.1" + +[[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + pruneopts = "UT" + revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" + version = "v1.1.2" + +[[projects]] + digest = "1:93131d8002d7025da13582877c32d1fc302486775a1b06f62241741006428c5e" + name = "github.com/pelletier/go-toml" + packages = ["."] + pruneopts = "UT" + revision = "728039f679cbcd4f6a54e080d2219a4c4928c546" + version = "v1.4.0" + +[[projects]] + digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "UT" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = 
"github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:e89f2cdede55684adbe44b5566f55838ad2aee1dff348d14b73ccf733607b671" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = "UT" + revision = "2641b987480bca71fb39738eb8c8b0d577cb1d76" + version = "v0.9.4" + +[[projects]] + branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "UT" + revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" + +[[projects]] + digest = "1:8dcedf2e8f06c7f94e48267dea0bc0be261fa97b377f3ae3e87843a92a549481" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "UT" + revision = "17f5ca1748182ddf24fc33a5a7caaaf790a52fcc" + version = "v0.4.1" + +[[projects]] + digest = "1:403b810b43500b5b0a9a24a47347e31dc2783ccae8cf97c891b46f5b0496fa1a" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/fs", + ] + pruneopts = "UT" + revision = "833678b5bb319f2d20a475cb165c6cc59c2cc77c" + version = "v0.0.2" + +[[projects]] + branch = "master" + digest = "1:d38f81081a389f1466ec98192cf9115a82158854d6f01e1c23e2e7554b97db71" + name = "github.com/rcrowley/go-metrics" + packages = ["."] + pruneopts = "UT" + revision = "3113b8401b8a98917cde58f8bbd42a1b1c03b1fd" + +[[projects]] + digest = "1:b0c25f00bad20d783d259af2af8666969e2fc343fa0dc9efe52936bbd67fb758" + name = "github.com/rs/cors" + packages = ["."] + pruneopts = "UT" + revision = "9a47f48565a795472d43519dd49aac781f3034fb" + version = "v1.6.0" + +[[projects]] + digest = "1:bb495ec276ab82d3dd08504bbc0594a65de8c3b22c6f2aaa92d05b73fbf3a82e" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem", + ] + pruneopts = "UT" + revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424" + version = "v1.2.2" + +[[projects]] + digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc" + name = "github.com/spf13/cast" + packages = ["."] + pruneopts = "UT" + revision = "8c9545af88b134710ab1cd196795e7f2388358d7" + version = "v1.3.0" + +[[projects]] + digest = "1:e096613fb7cf34743d49af87d197663cfccd61876e2219853005a57baedfa562" + name = "github.com/spf13/cobra" + packages = ["."] + pruneopts = "UT" + revision = "f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5" + version = "v0.0.5" + +[[projects]] + digest = "1:1b753ec16506f5864d26a28b43703c58831255059644351bbcb019b843950900" + name = "github.com/spf13/jwalterweatherman" + packages = ["."] + pruneopts = "UT" + revision = "94f6ae3ed3bceceafa716478c5fbf8d29ca601a1" + version = "v1.1.0" + +[[projects]] + digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "UT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + digest = "1:11118bd196646c6515fea3d6c43f66162833c6ae4939bfb229b9956d91c6cf17" + name = "github.com/spf13/viper" + packages = ["."] + pruneopts = "UT" + revision = "b5bf975e5823809fb22c7644d008757f78a4259e" + version = "v1.4.0" + +[[projects]] + digest = "1:5da8ce674952566deae4dbc23d07c85caafc6cfa815b0b3e03e41979cedb8750" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = 
"ffdc059bfe9ce6a4e144ba849dbedead332c6053" + version = "v1.3.0" + +[[projects]] + digest = "1:5b180f17d5bc50b765f4dcf0d126c72979531cbbd7f7929bf3edd87fb801ea2d" + name = "github.com/syndtr/goleveldb" + packages = [ + "leveldb", + "leveldb/cache", + "leveldb/comparer", + "leveldb/errors", + "leveldb/filter", + "leveldb/iterator", + "leveldb/journal", + "leveldb/memdb", + "leveldb/opt", + "leveldb/storage", + "leveldb/table", + "leveldb/util", + ] + pruneopts = "UT" + revision = "9d007e481048296f09f59bd19bb7ae584563cd95" + version = "v1.0.0" + +[[projects]] + digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a" + name = "github.com/tendermint/go-amino" + packages = ["."] + pruneopts = "UT" + revision = "dc14acf9ef15f85828bfbc561ed9dd9d2a284885" + version = "v0.14.1" + +[[projects]] + digest = "1:1bb088f6291e5426e3874a60bca0e481a91a5633395d7e0c427ec3e49b626e7b" + name = "github.com/tendermint/iavl" + packages = ["."] + pruneopts = "UT" + revision = "ac7c35c12e8633a1e9fd0b52a00b900b40f32cd3" + version = "v0.12.1" + +[[projects]] + digest = "1:c06da383dd57271707507fe337c09fa2f7862de5ead0c0a0af146f4785601e7b" + name = "github.com/tendermint/tendermint" + packages = [ + "abci/client", + "abci/example/code", + "abci/example/counter", + "abci/example/kvstore", + "abci/types", + "blockchain", + "config", + "consensus", + "consensus/types", + "crypto", + "crypto/ed25519", + "crypto/encoding/amino", + "crypto/merkle", + "crypto/multisig", + "crypto/multisig/bitarray", + "crypto/secp256k1", + "crypto/secp256k1/internal/secp256k1", + "crypto/tmhash", + "evidence", + "libs/autofile", + "libs/bech32", + "libs/cli", + "libs/clist", + "libs/common", + "libs/db", + "libs/events", + "libs/fail", + "libs/flowrate", + "libs/log", + "libs/pubsub", + "libs/pubsub/query", + "mempool", + "node", + "p2p", + "p2p/conn", + "p2p/pex", + "privval", + "proxy", + "rpc/client", + "rpc/core", + "rpc/core/types", + "rpc/grpc", + "rpc/lib/client", + "rpc/lib/server", + "rpc/lib/types", + "state", + "state/txindex", + "state/txindex/kv", + "state/txindex/null", + "types", + "types/time", + "version", + ] + pruneopts = "UT" + revision = "4695414393a3e0bc3836fb1cece7f1f3768d3311" + version = "v0.31.4" + +[[projects]] + digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81" + name = "go.uber.org/atomic" + packages = ["."] + pruneopts = "UT" + revision = "df976f2515e274675050de7b3f42545de80594fd" + version = "v1.4.0" + +[[projects]] + digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" + name = "go.uber.org/multierr" + packages = ["."] + pruneopts = "UT" + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + digest = "1:676160e6a4722b08e0e26b11521d575c2cb2b6f0c679e1ee6178c5d8dee51e5e" + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore", + ] + pruneopts = "UT" + revision = "27376062155ad36be76b0f12cf1572a221d3a48c" + version = "v1.10.0" + +[[projects]] + branch = "master" + digest = "1:ac4cae5ba81720e88d4821ebbeae974934fa7271676f58b58a22b2c362b4d12c" + name = "golang.org/x/crypto" + packages = [ + "chacha20poly1305", + "curve25519", + "ed25519", + "ed25519/internal/edwards25519", + "hkdf", + "internal/chacha20", + "internal/subtle", + "nacl/box", + "nacl/secretbox", + "poly1305", + "ripemd160", + "salsa20/salsa", + ] + pruneopts = "UT" + revision = "f99c8df09eb5bff426315721bfa5f16a99cad32c" + +[[projects]] + branch = "master" + digest = 
"1:308b068fba27216784b512b9ea0492a4b5c6b4346c2f4c619a2b03d8af143a9c" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "netutil", + "trace", + ] + pruneopts = "UT" + revision = "461777fb6f67e8cb9d70cda16573678d085a74cf" + +[[projects]] + branch = "master" + digest = "1:0beb3839ca69f4a32c40d753a8cd60b273c2ba266329d1a9957731fc4cdb5478" + name = "golang.org/x/sys" + packages = [ + "cpu", + "unix", + ] + pruneopts = "UT" + revision = "93c9922d18aeb82498a065f07aec7ad7fa60dfb7" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:583a0c80f5e3a9343d33aea4aead1e1afcc0043db66fdf961ddd1fe8cd3a4faf" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "eb0b1bdb6ae60fcfc41b8d907b50dfb346112301" + +[[projects]] + digest = "1:e8800ddadd6bce3bc0c5ffd7bc55dbdddc6e750956c10cc10271cade542fccbe" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "501c41df7f472c740d0674ff27122f3f48c80ce7" + version = "v1.21.1" + +[[projects]] + branch = "v2" + digest = "1:df1ffb6d59bacf4d162d65d50acaf21a16c8947086c638e86aaa01c1ae59f5ac" + name = "gopkg.in/mgo.v2" + packages = [ + ".", + "bson", + "internal/json", + "internal/sasl", + "internal/scram", + "txn", + ] + pruneopts = "UT" + revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5" + +[[projects]] + digest = "1:c805e517269b0ba4c21ded5836019ed7d16953d4026cb7d00041d039c7906be9" + name = "gopkg.in/natefinch/lumberjack.v2" + packages = ["."] + pruneopts = "UT" + revision = "a96e63847dc3c67d17befa69c303767e2f84e54f" + version = "v2.1" + +[[projects]] + digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/cosmos/cosmos-sdk/cmd/gaia/app", + "github.com/cosmos/cosmos-sdk/codec", + "github.com/cosmos/cosmos-sdk/types", + "github.com/cosmos/cosmos-sdk/x/auth", + "github.com/cosmos/cosmos-sdk/x/bank", + "github.com/cosmos/cosmos-sdk/x/crisis", + "github.com/cosmos/cosmos-sdk/x/distribution/types", + "github.com/cosmos/cosmos-sdk/x/gov", + "github.com/cosmos/cosmos-sdk/x/slashing", + "github.com/cosmos/cosmos-sdk/x/staking/types", + 
"github.com/jolestar/go-commons-pool", + "github.com/tendermint/tendermint/abci/types", + "github.com/tendermint/tendermint/rpc/client", + "github.com/tendermint/tendermint/types", + "go.uber.org/zap", + "go.uber.org/zap/zapcore", + "gopkg.in/mgo.v2", + "gopkg.in/mgo.v2/bson", + "gopkg.in/mgo.v2/txn", + "gopkg.in/natefinch/lumberjack.v2", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/service/cosmos/Gopkg.toml b/service/cosmos/Gopkg.toml new file mode 100644 index 0000000..907fa17 --- /dev/null +++ b/service/cosmos/Gopkg.toml @@ -0,0 +1,62 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +[[override]] + name = "github.com/tendermint/tendermint" + version = "=v0.31.4" + + [[override]] + name = "github.com/tendermint/iavl" + version = "=v0.12.1" + + [[override]] + name = "github.com/cosmos/cosmos-sdk" + source = "https://github.com/cosmos/cosmos-sdk.git" + version = "=v0.34.1" + +[[constraint]] + name = "github.com/syndtr/goleveldb" + version = "v0.0.0-20180708030551-c4c61651e9e3" + +[[constraint]] + branch = "v2" + name = "gopkg.in/mgo.v2" + +[[constraint]] + name = "go.uber.org/zap" + version = "1.9.1" + +[[constraint]] + name = "gopkg.in/natefinch/lumberjack.v2" + version = "2.1.0" + +[[constraint]] + version = "v2.0.0" + name = "github.com/jolestar/go-commons-pool" + +[prune] + go-tests = true + unused-packages = true diff --git a/service/cosmos/Makefile b/service/cosmos/Makefile new file mode 100644 index 0000000..ffc4092 --- /dev/null +++ b/service/cosmos/Makefile @@ -0,0 +1,43 @@ +GOCMD=go +GOBUILD=$(GOCMD) build +GOCLEAN=$(GOCMD) clean +GOTEST=$(GOCMD) test +GOGET=$(GOCMD) get +BINARY_NAME=rainbow-sync +BINARY_UNIX=$(BINARY_NAME)-unix + +all: get_tools get_deps build + +get_deps: + @rm -rf vendor/ + @echo "--> Running dep ensure" + @dep ensure -v + +build: + $(GOBUILD) -o $(BINARY_NAME) -v + +clean: + $(GOCLEAN) + rm -f $(BINARY_NAME) + rm -f $(BINARY_UNIX) + +run: + $(GOBUILD) -o $(BINARY_NAME) -v + ./$(BINARY_NAME) + + +# Cross compilation +build-linux: + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GOBUILD) -o $(BINARY_UNIX) -v + +###################################### +## Tools + +check_tools: + cd tools && $(MAKE) check_tools + +get_tools: + cd tools && $(MAKE) get_tools + +update_tools: + cd tools && $(MAKE) update_tools \ No newline at end of file diff --git a/service/cosmos/block/parse_tx.go b/service/cosmos/block/parse_tx.go new file mode 100644 index 0000000..eae22c7 --- /dev/null +++ b/service/cosmos/block/parse_tx.go @@ -0,0 +1,418 @@ +package block + +import ( + model "github.com/irisnet/rainbow-sync/service/cosmos/db" + cmodel "github.com/irisnet/rainbow-sync/service/cosmos/model" + "github.com/irisnet/rainbow-sync/service/cosmos/helper" + "github.com/irisnet/rainbow-sync/service/cosmos/logger" + "github.com/irisnet/rainbow-sync/service/cosmos/constant" + "github.com/tendermint/tendermint/types" + "github.com/cosmos/cosmos-sdk/x/auth" + abci "github.com/tendermint/tendermint/abci/types" + "fmt" + "time" + 
"gopkg.in/mgo.v2/txn" + "gopkg.in/mgo.v2/bson" + dtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + "github.com/cosmos/cosmos-sdk/x/bank" + "github.com/cosmos/cosmos-sdk/x/gov" + "github.com/cosmos/cosmos-sdk/x/slashing" + stypes "github.com/cosmos/cosmos-sdk/x/staking/types" + sdk "github.com/cosmos/cosmos-sdk/types" + cutils "github.com/irisnet/rainbow-sync/service/cosmos/utils" + "github.com/cosmos/cosmos-sdk/x/crisis" + "encoding/json" +) + +const ( + COSMOS = "Cosmos" +) + +type ( + MsgTransfer = bank.MsgSend + MsgMultiSend = bank.MsgMultiSend + + MsgUnjail = slashing.MsgUnjail + MsgSetWithdrawAddress = dtypes.MsgSetWithdrawAddress + MsgWithdrawDelegatorReward = dtypes.MsgWithdrawDelegatorReward + MsgWithdrawValidatorCommission = dtypes.MsgWithdrawValidatorCommission + + MsgDeposit = gov.MsgDeposit + MsgSubmitProposal = gov.MsgSubmitProposal + MsgVote = gov.MsgVote + Proposal = gov.Proposal + + MsgVerifyInvariant = crisis.MsgVerifyInvariant + + MsgDelegate = stypes.MsgDelegate + MsgUndelegate = stypes.MsgUndelegate + MsgBeginRedelegate = stypes.MsgBeginRedelegate + MsgCreateValidator = stypes.MsgCreateValidator + MsgEditValidator = stypes.MsgEditValidator +) + +type Cosmos_Block struct{} + +func (cosmos *Cosmos_Block) Name() string { + return COSMOS +} + +func (cosmos *Cosmos_Block) SaveDocsWithTxn(blockDoc *cmodel.Block, cosmosTxs []cmodel.CosmosTx, taskDoc cmodel.SyncCosmosTask) error { + var ( + ops, cosmosTxsOps []txn.Op + ) + + if blockDoc.Height == 0 { + return fmt.Errorf("invalid block, height equal 0") + } + + blockOp := txn.Op{ + C: cmodel.CollectionNameBlock, + Id: bson.NewObjectId(), + Insert: blockDoc, + } + + if length := len(cosmosTxs); length > 0 { + + cosmosTxsOps = make([]txn.Op, 0, length) + for _, v := range cosmosTxs { + op := txn.Op{ + C: cmodel.CollectionNameCosmosTx, + Id: bson.NewObjectId(), + Insert: v, + } + cosmosTxsOps = append(cosmosTxsOps, op) + } + } + + updateOp := txn.Op{ + C: cmodel.CollectionNameSyncCosmosTask, + Id: taskDoc.ID, + Assert: txn.DocExists, + Update: bson.M{ + "$set": bson.M{ + "current_height": taskDoc.CurrentHeight, + "status": taskDoc.Status, + "last_update_time": taskDoc.LastUpdateTime, + }, + }, + } + + ops = make([]txn.Op, 0, len(cosmosTxsOps)+2) + ops = append(append(ops, blockOp, updateOp), cosmosTxsOps...) 
+ + if len(ops) > 0 { + err := model.Txn(ops) + if err != nil { + return err + } + } + + return nil +} + +func (cosmos *Cosmos_Block) ParseBlock(b int64, client *cosmoshelper.CosmosClient) (resBlock *cmodel.Block, cosmosTxs []cmodel.CosmosTx, resErr error) { + + defer func() { + if err := recover(); err != nil { + logger.Error("parse cosmos block fail", logger.Int64("height", b), + logger.Any("err", err), logger.String("Chain Block", cosmos.Name())) + + resBlock = &cmodel.Block{} + cosmosTxs = nil + resErr = fmt.Errorf("%v", err) + } + }() + + cosmosTxsdata, err := cosmos.ParseCosmosTxs(b, client) + if err != nil { + logger.Error("parse cosmos asset error", logger.String("error", err.Error()), + logger.String("Chain Block", cosmos.Name())) + } + + resBlock = &cmodel.Block{ + Height: b, + CreateTime: time.Now().Unix(), + } + cosmosTxs = cosmosTxsdata + resErr = err + return +} + +// parse cosmos txs from block result txs +func (cosmos *Cosmos_Block) ParseCosmosTxs(b int64, client *cosmoshelper.CosmosClient) ([]cmodel.CosmosTx, error) { + resblock, err := client.Block(&b) + if err != nil { + logger.Warn("get block result err, now try again", logger.String("err", err.Error()), + logger.String("Chain Block", cosmos.Name())) + // there is possible parse block fail when in iterator + var err2 error + client2 := cosmoshelper.GetCosmosClient() + resblock, err2 = client2.Block(&b) + client2.Release() + if err2 != nil { + return nil, err2 + } + } + + //fmt.Printf("======>>resblock.Block:%+v\n",resblock.Block) + //fmt.Println("length:",len(resblock.Block.Txs)) + + cosmosTxs := make([]cmodel.CosmosTx, 0, len(resblock.Block.Txs)) + for _, tx := range resblock.Block.Txs { + if cosmostx := cosmos.ParseCosmosTxModel(tx, resblock.Block); len(cosmostx) > 0 { + cosmosTxs = append(cosmosTxs, cosmostx...) 
+ } + } + + return cosmosTxs, nil +} + +func (cosmos *Cosmos_Block) ParseCosmosTxModel(txBytes types.Tx, block *types.Block) []cmodel.CosmosTx { + var ( + authTx auth.StdTx + methodName = "parseCosmosTxModel" + txdetail cmodel.CosmosTx + ) + + cdc := cutils.GetCodec() + err := cdc.UnmarshalBinaryLengthPrefixed(txBytes, &authTx) + if err != nil { + logger.Error(err.Error()) + return nil + } + //fmt.Printf("=====>authTx:%+v\n",authTx) + status, result, err := QueryTxResult(txBytes.Hash()) + if err != nil { + logger.Error("get txResult err", logger.String("method", methodName), + logger.String("err", err.Error()), + logger.String("Chain Block", cosmos.Name())) + } + msgStat, err := parseRawlog(result.Log) + if err != nil { + logger.Error("get parseRawlog err", logger.String("method", methodName), + logger.String("err", err.Error()), + logger.String("Chain Block", cosmos.Name())) + } + + fee := cutils.BuildFee(authTx.Fee) + txdetail.TxHash = cutils.BuildHex(txBytes.Hash()) + txdetail.Height = block.Height + txdetail.Memo = authTx.Memo + txdetail.Fee = &fee + txdetail.Time = block.Time + txdetail.Status = status + txdetail.Code = result.Code + Tags := parseTags(result) + + length_msgStat := len(msgStat) + length_Tags := len(Tags) + + msgs := authTx.GetMsgs() + len_msgs := len(msgs) + if len_msgs <= 0 { + logger.Error("can't get msgs", logger.String("method", methodName), + logger.String("Chain Block", cosmos.Name())) + return nil + } + txs := make([]cmodel.CosmosTx, 0, len_msgs) + for i, msg := range msgs { + txdetail.Initiator = "" + txdetail.From = "" + txdetail.To = "" + txdetail.Amount = nil + txdetail.Type = "" + if length_Tags > i { + txdetail.Tags = Tags[i] + } + if length_msgStat > i { + txdetail.Status = msgStat[i] + } + switch msg.(type) { + case MsgDelegate: + msg := msg.(MsgDelegate) + txdetail.Initiator = msg.DelegatorAddress.String() + txdetail.From = msg.DelegatorAddress.String() + txdetail.To = msg.ValidatorAddress.String() + txdetail.Amount = cutils.ParseCoins(sdk.Coins{msg.Amount}) + txdetail.Type = constant.Cosmos_TxTypeStakeDelegate + txs = append(txs, txdetail) + + case MsgUndelegate: + msg := msg.(MsgUndelegate) + txdetail.Initiator = msg.DelegatorAddress.String() + txdetail.From = msg.DelegatorAddress.String() + txdetail.To = msg.ValidatorAddress.String() + txdetail.Amount = cutils.ParseCoins(sdk.Coins{msg.Amount}) + txdetail.Type = constant.Cosmos_TxTypeStakeUnDelegate + txs = append(txs, txdetail) + + case MsgEditValidator: + msg := msg.(MsgEditValidator) + txdetail.Initiator = msg.ValidatorAddress.String() + txdetail.From = msg.ValidatorAddress.String() + txdetail.To = "" + txdetail.Amount = []*cmodel.Coin{} + txdetail.Type = constant.Cosmos_TxTypeStakeEditValidator + txs = append(txs, txdetail) + + case MsgCreateValidator: + msg := msg.(MsgCreateValidator) + txdetail.Initiator = msg.DelegatorAddress.String() + txdetail.From = msg.DelegatorAddress.String() + txdetail.To = msg.ValidatorAddress.String() + txdetail.Amount = cutils.ParseCoins(sdk.Coins{msg.Value}) + txdetail.Type = constant.Cosmos_TxTypeStakeCreateValidator + txs = append(txs, txdetail) + + case MsgBeginRedelegate: + msg := msg.(MsgBeginRedelegate) + txdetail.Initiator = msg.DelegatorAddress.String() + txdetail.From = msg.ValidatorSrcAddress.String() + txdetail.To = msg.ValidatorDstAddress.String() + txdetail.Amount = cutils.ParseCoins(sdk.Coins{msg.Amount}) + txdetail.Type = constant.Cosmos_TxTypeBeginRedelegate + txs = append(txs, txdetail) + + case MsgTransfer: + msg := msg.(MsgTransfer) + 
txdetail.Initiator = msg.FromAddress.String() + txdetail.From = msg.FromAddress.String() + txdetail.To = msg.ToAddress.String() + txdetail.Amount = cutils.ParseCoins(msg.Amount) + txdetail.Type = constant.Cosmos_TxTypeTransfer + txs = append(txs, txdetail) + + case MsgMultiSend: + msg := msg.(MsgMultiSend) + txdetail.Initiator = msg.Inputs[0].Address.String() + txdetail.From = msg.Inputs[0].Address.String() + txdetail.To = msg.Outputs[0].Address.String() + txdetail.Amount = cutils.ParseCoins(msg.Inputs[0].Coins) + txdetail.Type = constant.Cosmos_TxTypeMultiSend + txs = append(txs, txdetail) + + case MsgVerifyInvariant: + msg := msg.(MsgVerifyInvariant) + txdetail.Initiator = msg.Sender.String() + txdetail.From = msg.Sender.String() + txdetail.To = "" + txdetail.Amount = []*cmodel.Coin{} + txdetail.Type = constant.Cosmos_TxTypeVerifyInvariant + txs = append(txs, txdetail) + + case MsgUnjail: + msg := msg.(MsgUnjail) + txdetail.Initiator = msg.ValidatorAddr.String() + txdetail.From = msg.ValidatorAddr.String() + txdetail.Type = constant.Cosmos_TxTypeUnjail + txs = append(txs, txdetail) + case MsgSetWithdrawAddress: + msg := msg.(MsgSetWithdrawAddress) + txdetail.Initiator = msg.DelegatorAddress.String() + txdetail.From = msg.DelegatorAddress.String() + txdetail.To = msg.WithdrawAddress.String() + txdetail.Type = constant.Cosmos_TxTypeSetWithdrawAddress + txs = append(txs, txdetail) + + case MsgWithdrawDelegatorReward: + msg := msg.(MsgWithdrawDelegatorReward) + txdetail.Initiator = msg.DelegatorAddress.String() + txdetail.From = msg.DelegatorAddress.String() + txdetail.To = msg.ValidatorAddress.String() + txdetail.Type = constant.Cosmos_TxTypeWithdrawDelegatorReward + txs = append(txs, txdetail) + + case MsgWithdrawValidatorCommission: + msg := msg.(MsgWithdrawValidatorCommission) + txdetail.Initiator = msg.ValidatorAddress.String() + txdetail.From = msg.ValidatorAddress.String() + txdetail.Type = constant.Cosmos_TxTypeWithdrawDelegatorRewardsAll + txs = append(txs, txdetail) + + case MsgSubmitProposal: + msg := msg.(MsgSubmitProposal) + txdetail.Initiator = msg.Proposer.String() + txdetail.From = msg.Proposer.String() + txdetail.To = "" + txdetail.Amount = cutils.ParseCoins(msg.InitialDeposit) + txdetail.Type = constant.Cosmos_TxTypeSubmitProposal + txs = append(txs, txdetail) + + case MsgDeposit: + msg := msg.(MsgDeposit) + txdetail.Initiator = msg.Depositor.String() + txdetail.From = msg.Depositor.String() + txdetail.Amount = cutils.ParseCoins(msg.Amount) + txdetail.Type = constant.Cosmos_TxTypeDeposit + txs = append(txs, txdetail) + case MsgVote: + msg := msg.(MsgVote) + txdetail.Initiator = msg.Voter.String() + txdetail.From = msg.Voter.String() + txdetail.Amount = []*cmodel.Coin{} + txdetail.Type = constant.Cosmos_TxTypeVote + txs = append(txs, txdetail) + + default: + logger.Warn("unknown msg type") + } + } + + return txs +} + +// get tx status and log by query txHash +func QueryTxResult(txHash []byte) (string, *abci.ResponseDeliverTx, error) { + status := constant.TxStatusSuccess + + client := cosmoshelper.GetCosmosClient() + defer client.Release() + + res, err := client.Tx(txHash, false) + if err != nil { + return "unknown", nil, err + } + result := res.TxResult + if result.Code != 0 { + status = constant.TxStatusFail + } + + return status, &result, nil +} + +func parseTags(result *abci.ResponseDeliverTx) []cmodel.Tag { + var tags []cmodel.Tag + tags_opt := make(cmodel.Tag, 0) + for i, tag := range result.Tags { + key := string(tag.Key) + value := string(tag.Value) + tags_opt[key] = 
value + if i > 0 && string(result.Tags[i].Key) == "action" { + tags = append(tags, tags_opt) + tags_opt = make(cmodel.Tag, 0) + } else if i == len(result.Tags)-1 { + tags = append(tags, tags_opt) + } + } + return tags +} + +func parseRawlog(rawlog string) (map[int]string, error) { + + var Stats []cmodel.RawLog + if err := json.Unmarshal([]byte(rawlog), &Stats); err != nil { + return nil, err + } + + msgStat := make(map[int]string, len(Stats)) + for _, stat := range Stats { + if stat.Success { + msgStat[stat.MsgIndex] = constant.TxStatusSuccess + } else { + msgStat[stat.MsgIndex] = constant.TxStatusFail + } + + } + return msgStat, nil +} diff --git a/service/cosmos/block/parse_tx_test.go b/service/cosmos/block/parse_tx_test.go new file mode 100644 index 0000000..9f3e097 --- /dev/null +++ b/service/cosmos/block/parse_tx_test.go @@ -0,0 +1,56 @@ +package block + +import ( + "testing" + "github.com/irisnet/rainbow-sync/service/cosmos/logger" + cosmosConf "github.com/irisnet/rainbow-sync/service/cosmos/conf" + "github.com/irisnet/rainbow-sync/service/cosmos/helper" + "encoding/json" +) + +func TestParseCosmosTxModel(t *testing.T) { + cosmoshelper.Init(cosmosConf.BlockChainMonitorUrl, cosmosConf.MaxConnectionNum, cosmosConf.InitConnectionNum) + client := cosmoshelper.GetCosmosClient() + defer func() { + client.Release() + logger.Info("Release tm client") + }() + type args struct { + b int64 + client *cosmoshelper.CosmosClient + } + tests := []struct { + name string + args args + }{ + { + name: "test parse cosmos tx", + args: args{ + b: 545165, + client: client, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cosmos := Cosmos_Block{} + res, err := cosmos.ParseCosmosTxs(tt.args.b, tt.args.client) + if err != nil { + t.Fatal(err) + } + resBytes, _ := json.MarshalIndent(res, "", "\t") + t.Log(string(resBytes)) + }) + } +} + +func Test_parseRawlog(t *testing.T) { + rawlog := "[{\"msg_index\":\"0\",\"success\":false,\"log\":\"\"}," + + "{\"msg_index\":\"1\",\"success\":true,\"log\":\"\"}," + + "{\"msg_index\":\"2\",\"success\":true,\"log\":\"\"}]" + ret, err := parseRawlog(rawlog) + if err != nil { + t.Error(err) + } + t.Log(ret) +} diff --git a/service/cosmos/conf/db/types.go b/service/cosmos/conf/db/types.go new file mode 100644 index 0000000..33226f6 --- /dev/null +++ b/service/cosmos/conf/db/types.go @@ -0,0 +1,41 @@ +package db + +import ( + "github.com/irisnet/rainbow-sync/service/cosmos/logger" + constant "github.com/irisnet/rainbow-sync/service/cosmos/conf" + "os" +) + +var ( + Addrs = "localhost:27019" + User = "iris" + Passwd = "irispassword" + Database = "rainbow-server" +) + +// get value of env var +func init() { + addrs, found := os.LookupEnv(constant.EnvNameDbAddr) + if found { + Addrs = addrs + } + + user, found := os.LookupEnv(constant.EnvNameDbUser) + if found { + User = user + } + + passwd, found := os.LookupEnv(constant.EnvNameDbPassWd) + if found { + Passwd = passwd + } + + database, found := os.LookupEnv(constant.EnvNameDbDataBase) + if found { + Database = database + } + + logger.Debug("init db config", logger.String("addrs", Addrs), + logger.Bool("userIsEmpty", User == ""), logger.Bool("passwdIsEmpty", Passwd == ""), + logger.String("database", Database)) +} diff --git a/service/cosmos/conf/types.go b/service/cosmos/conf/types.go new file mode 100644 index 0000000..34fa9fd --- /dev/null +++ b/service/cosmos/conf/types.go @@ -0,0 +1,81 @@ +package conf + +import ( + "os" + "strings" + "strconv" + 
"github.com/irisnet/rainbow-sync/service/cosmos/logger" +) + +var ( + BlockChainMonitorUrl = []string{"tcp://34.80.141.14:26667"} + + WorkerNumCreateTask = 2 + WorkerNumExecuteTask = 30 + WorkerMaxSleepTime = 2 * 60 + BlockNumPerWorkerHandle = 50 + + InitConnectionNum = 50 // fast init num of tendermint client pool + MaxConnectionNum = 100 // max size of tendermint client pool + +) + +const ( + EnvNameSerNetworkFullNode_COSMOS = "SER_BC_FULL_NODE_COSMOS" + EnvNameWorkerNumCreateTask_COSMOS = "WORKER_NUM_CREATE_TASK_COSMOS" + EnvNameWorkerNumExecuteTask_COSMOS = "WORKER_NUM_EXECUTE_TASK_COSMOS" + EnvNameWorkerMaxSleepTime_COSMOS = "WORKER_MAX_SLEEP_TIME_COSMOS" + EnvNameBlockNumPerWorkerHandle_COSMOS = "BLOCK_NUM_PER_WORKER_HANDLE_COSMOS" + + EnvNameDbAddr = "DB_ADDR" + EnvNameDbUser = "DB_USER" + EnvNameDbPassWd = "DB_PASSWD" + EnvNameDbDataBase = "DB_DATABASE" +) + +// get value of env var +func init() { + var err error + + nodeUrl, found := os.LookupEnv(EnvNameSerNetworkFullNode_COSMOS) + if found { + BlockChainMonitorUrl = strings.Split(nodeUrl, ",") + } + logger.Info("Env Value", logger.Any(EnvNameSerNetworkFullNode_COSMOS, BlockChainMonitorUrl)) + + workerNumCreateTask, found := os.LookupEnv(EnvNameWorkerNumCreateTask_COSMOS) + if found { + WorkerNumCreateTask, err = strconv.Atoi(workerNumCreateTask) + if err != nil { + logger.Fatal("Can't convert str to int", logger.String(EnvNameWorkerNumCreateTask_COSMOS, workerNumCreateTask)) + } + } + logger.Info("Env Value", logger.Int(EnvNameWorkerNumCreateTask_COSMOS, WorkerNumCreateTask)) + + workerNumExecuteTask, found := os.LookupEnv(EnvNameWorkerNumExecuteTask_COSMOS) + if found { + WorkerNumExecuteTask, err = strconv.Atoi(workerNumExecuteTask) + if err != nil { + logger.Fatal("Can't convert str to int", logger.String(EnvNameWorkerNumExecuteTask_COSMOS, workerNumCreateTask)) + } + } + logger.Info("Env Value", logger.Int(EnvNameWorkerNumExecuteTask_COSMOS, WorkerNumExecuteTask)) + + workerMaxSleepTime, found := os.LookupEnv(EnvNameWorkerMaxSleepTime_COSMOS) + if found { + WorkerMaxSleepTime, err = strconv.Atoi(workerMaxSleepTime) + if err != nil { + logger.Fatal("Can't convert str to int", logger.String(EnvNameWorkerMaxSleepTime_COSMOS, workerMaxSleepTime)) + } + } + logger.Info("Env Value", logger.Int(EnvNameWorkerMaxSleepTime_COSMOS, WorkerMaxSleepTime)) + + blockNumPerWorkerHandle, found := os.LookupEnv(EnvNameBlockNumPerWorkerHandle_COSMOS) + if found { + BlockNumPerWorkerHandle, err = strconv.Atoi(blockNumPerWorkerHandle) + if err != nil { + logger.Fatal("Can't convert str to int", logger.String(EnvNameBlockNumPerWorkerHandle_COSMOS, blockNumPerWorkerHandle)) + } + } + logger.Info("Env Value", logger.Int(EnvNameBlockNumPerWorkerHandle_COSMOS, BlockNumPerWorkerHandle)) +} diff --git a/service/cosmos/constant/types.go b/service/cosmos/constant/types.go new file mode 100644 index 0000000..622b593 --- /dev/null +++ b/service/cosmos/constant/types.go @@ -0,0 +1,22 @@ +package constant + +const ( + Cosmos_TxTypeMultiSend = "MultiSend" + Cosmos_TxTypeVerifyInvariant = "VerifyInvariant" + Cosmos_TxTypeTransfer = "Transfer" + Cosmos_TxTypeStakeCreateValidator = "CreateValidator" + Cosmos_TxTypeStakeEditValidator = "EditValidator" + Cosmos_TxTypeStakeDelegate = "Delegate" + Cosmos_TxTypeStakeUnDelegate = "UnDelegate" + Cosmos_TxTypeBeginRedelegate = "BeginRedelegate" + Cosmos_TxTypeUnjail = "Unjail" + Cosmos_TxTypeSetWithdrawAddress = "SetWithdrawAddress" + Cosmos_TxTypeWithdrawDelegatorReward = "WithdrawDelegatorReward" + 
Cosmos_TxTypeWithdrawDelegatorRewardsAll = "WithdrawDelegatorRewardsAll" + Cosmos_TxTypeSubmitProposal = "SubmitProposal" + Cosmos_TxTypeDeposit = "Deposit" + Cosmos_TxTypeVote = "Vote" + + TxStatusSuccess = "success" + TxStatusFail = "fail" +) diff --git a/service/cosmos/db/const.go b/service/cosmos/db/const.go new file mode 100644 index 0000000..278dd6f --- /dev/null +++ b/service/cosmos/db/const.go @@ -0,0 +1,17 @@ +package db + +const ( + // value of status + SyncTaskStatusUnHandled = "unhandled" + SyncTaskStatusUnderway = "underway" + SyncTaskStatusCompleted = "completed" + + // only for follow task + // when current_height of follow task add blockNumPerWorkerHandle + // less than blockchain current_height, this follow task's status should be set invalid + FollowTaskStatusInvalid = "invalid" + + // taskType + SyncTaskTypeCatchUp = "catch_up" + SyncTaskTypeFollow = "follow" +) diff --git a/service/cosmos/db/db.go b/service/cosmos/db/db.go new file mode 100644 index 0000000..93c8d4d --- /dev/null +++ b/service/cosmos/db/db.go @@ -0,0 +1,108 @@ +package db + +import ( + "fmt" + conf "github.com/irisnet/rainbow-sync/service/cosmos/conf/db" + "github.com/irisnet/rainbow-sync/service/cosmos/logger" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" + "strings" + "time" +) + +var ( + session *mgo.Session +) + +func Start() { + addrs := strings.Split(conf.Addrs, ",") + dialInfo := &mgo.DialInfo{ + Addrs: addrs, + Database: conf.Database, + Username: conf.User, + Password: conf.Passwd, + Direct: true, + Timeout: time.Second * 10, + PoolLimit: 4096, // Session.SetPoolLimit + } + + var err error + session, err = mgo.DialWithInfo(dialInfo) + if err != nil { + logger.Fatal("connect db fail", logger.String("err", err.Error())) + } + session.SetMode(mgo.Strong, true) + logger.Info("init db success") +} + +func Stop() { + logger.Info("release resource :mongoDb") + session.Close() +} + +func getSession() *mgo.Session { + // max session num is 4096 + return session.Clone() +} + +// get collection object +func ExecCollection(collectionName string, s func(*mgo.Collection) error) error { + session := getSession() + defer session.Close() + c := session.DB(conf.Database).C(collectionName) + return s(c) +} + +func Save(h Docs) error { + save := func(c *mgo.Collection) error { + pk := h.PkKvPair() + n, _ := c.Find(pk).Count() + if n >= 1 { + return fmt.Errorf("record exist") + } + return c.Insert(h) + } + return ExecCollection(h.Name(), save) +} + +func Update(h Docs) error { + update := func(c *mgo.Collection) error { + key := h.PkKvPair() + return c.Update(key, h) + } + return ExecCollection(h.Name(), update) +} + +func Delete(h Docs) error { + remove := func(c *mgo.Collection) error { + key := h.PkKvPair() + return c.Remove(key) + } + return ExecCollection(h.Name(), remove) +} + +//mgo transaction method +//detail to see: https://godoc.org/gopkg.in/mgo.v2/txn +func Txn(ops []txn.Op) error { + session := getSession() + defer session.Close() + + c := session.DB(conf.Database).C(CollectionNameTxn) + runner := txn.NewRunner(c) + + txObjectId := bson.NewObjectId() + err := runner.Run(ops, txObjectId, nil) + if err != nil { + if err == txn.ErrAborted { + err = runner.Resume(txObjectId) + if err != nil { + return err + } + } else { + return err + } + } + + return nil +} diff --git a/service/cosmos/db/types.go b/service/cosmos/db/types.go new file mode 100644 index 0000000..be3809b --- /dev/null +++ b/service/cosmos/db/types.go @@ -0,0 +1,16 @@ +// interface for a document + +package db + +const 
( + CollectionNameTxn = "sync_mgo_txn" +) + +type ( + Docs interface { + // collection name + Name() string + // primary key pair(used to find a unique record) + PkKvPair() map[string]interface{} + } +) diff --git a/service/cosmos/helper/cosmos_client.go b/service/cosmos/helper/cosmos_client.go new file mode 100644 index 0000000..af098fd --- /dev/null +++ b/service/cosmos/helper/cosmos_client.go @@ -0,0 +1,50 @@ +package cosmoshelper + +import ( + "fmt" + rpcClient "github.com/tendermint/tendermint/rpc/client" + "github.com/irisnet/rainbow-sync/service/cosmos/logger" + "time" +) + +type CosmosClient struct { + Id string + rpcClient.Client +} + +func newClient(addr string) *CosmosClient { + return &CosmosClient{ + Id: generateId(addr), + Client: rpcClient.NewHTTP(addr, "/websocket"), + } +} + +// get client from pool +func GetCosmosClient() *CosmosClient { + c, err := cosmos_pool.BorrowObject(ctx) + for err != nil { + logger.Error("GetClient failed,will try again after 3 seconds", logger.String("err", err.Error())) + time.Sleep(3 * time.Second) + c, err = cosmos_pool.BorrowObject(ctx) + } + + return c.(*CosmosClient) +} + +// release client +func (c *CosmosClient) Release() { + err := cosmos_pool.ReturnObject(ctx, c) + if err != nil { + logger.Error(err.Error()) + } +} + +func (c *CosmosClient) HeartBeat() error { + http := c.Client.(*rpcClient.HTTP) + _, err := http.Health() + return err +} + +func generateId(address string) string { + return fmt.Sprintf("peer[%s]", address) +} diff --git a/service/cosmos/helper/cosmos_pool.go b/service/cosmos/helper/cosmos_pool.go new file mode 100644 index 0000000..11dd340 --- /dev/null +++ b/service/cosmos/helper/cosmos_pool.go @@ -0,0 +1,124 @@ +package cosmoshelper + +import ( + "context" + commonPool "github.com/jolestar/go-commons-pool" + "github.com/irisnet/rainbow-sync/service/cosmos/logger" + "math/rand" + "sync" +) + +type ( + PoolFactory struct { + peersMap sync.Map + } + EndPoint struct { + Address string + Available bool + } +) + +var ( + cosmos_poolFactory PoolFactory + cosmos_pool *commonPool.ObjectPool + ctx = context.Background() +) + +func Init(BlockChainMonitorUrl []string, MaxConnectionNum, InitConnectionNum int) { + var syncMap sync.Map + for _, url := range BlockChainMonitorUrl { + key := generateId(url) + endPoint := EndPoint{ + Address: url, + Available: true, + } + + syncMap.Store(key, endPoint) + } + cosmos_poolFactory = PoolFactory{ + peersMap: syncMap, + } + + config := commonPool.NewDefaultPoolConfig() + config.MaxTotal = MaxConnectionNum + config.MaxIdle = InitConnectionNum + config.MinIdle = InitConnectionNum + config.TestOnBorrow = true + config.TestOnCreate = true + config.TestWhileIdle = true + + logger.Info("PoolConfig", logger.Int("config.MaxTotal", config.MaxTotal), + logger.Int("config.MaxIdle", config.MaxIdle)) + cosmos_pool = commonPool.NewObjectPool(ctx, &cosmos_poolFactory, config) + cosmos_pool.PreparePool(ctx) +} + +func ClosePool() { + cosmos_pool.Close(ctx) +} + +func (f *PoolFactory) MakeObject(ctx context.Context) (*commonPool.PooledObject, error) { + endpoint := f.GetEndPoint() + return commonPool.NewPooledObject(newClient(endpoint.Address)), nil +} + +func (f *PoolFactory) DestroyObject(ctx context.Context, object *commonPool.PooledObject) error { + c := object.Object.(*CosmosClient) + if c.IsRunning() { + c.Stop() + } + return nil +} + +func (f *PoolFactory) ValidateObject(ctx context.Context, object *commonPool.PooledObject) bool { + // do validate + c := object.Object.(*CosmosClient) + if c.HeartBeat() 
!= nil { + value, ok := f.peersMap.Load(c.Id) + if ok { + endPoint := value.(EndPoint) + endPoint.Available = true + f.peersMap.Store(c.Id, endPoint) + } + return false + } + return true +} + +func (f *PoolFactory) ActivateObject(ctx context.Context, object *commonPool.PooledObject) error { + return nil +} + +func (f *PoolFactory) PassivateObject(ctx context.Context, object *commonPool.PooledObject) error { + return nil +} + +func (f *PoolFactory) GetEndPoint() EndPoint { + var ( + keys []string + selectedKey string + ) + + f.peersMap.Range(func(k, value interface{}) bool { + key := k.(string) + endPoint := value.(EndPoint) + if endPoint.Available { + keys = append(keys, key) + } + selectedKey = key + + return true + }) + + if len(keys) > 0 { + index := rand.Intn(len(keys)) + selectedKey = keys[index] + } + value, ok := f.peersMap.Load(selectedKey) + if ok { + return value.(EndPoint) + } else { + logger.Error("Can't get selected end point", logger.String("selectedKey", selectedKey)) + } + return EndPoint{} +} diff --git a/service/cosmos/logger/zap_logger.go b/service/cosmos/logger/zap_logger.go new file mode 100644 index 0000000..7194ffc --- /dev/null +++ b/service/cosmos/logger/zap_logger.go @@ -0,0 +1,114 @@ +package logger + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" + "os" +) + +type ( + Field = zap.Field +) + +var ( + zapLogger *zap.Logger + + // zap method + Binary = zap.Binary + Bool = zap.Bool + Complex128 = zap.Complex128 + Complex64 = zap.Complex64 + Float64 = zap.Float64 + Float32 = zap.Float32 + Int = zap.Int + Int64 = zap.Int64 + Int32 = zap.Int32 + Int16 = zap.Int16 + Int8 = zap.Int8 + String = zap.String + Uint = zap.Uint + Uint64 = zap.Uint64 + Uint32 = zap.Uint32 + Uint16 = zap.Uint16 + Uint8 = zap.Uint8 + Time = zap.Time + Any = zap.Any + Duration = zap.Duration +) + +func Debug(msg string, fields ...Field) { + defer sync() + zapLogger.Debug(msg, fields...) +} + +func Info(msg string, fields ...Field) { + defer sync() + zapLogger.Info(msg, fields...) +} + +func Warn(msg string, fields ...Field) { + defer sync() + zapLogger.Warn(msg, fields...) +} + +func Error(msg string, fields ...Field) { + defer sync() + zapLogger.Error(msg, fields...) +} + +func Panic(msg string, fields ...Field) { + defer sync() + zapLogger.Panic(msg, fields...) +} + +func Fatal(msg string, fields ...Field) { + defer sync() + zapLogger.Fatal(msg, fields...) +} + +func With(fields ...Field) { + defer sync() + zapLogger.With(fields...) +} + +func sync() { + zapLogger.Sync() +} + +func init() { + var core zapcore.Core + hook := lumberjack.Logger{ + Filename: "./logs/sync.log", + MaxSize: 100, // megabytes + MaxBackups: 3, + MaxAge: 7, //days + Compress: true, // disabled by default + LocalTime: true, + } + + fileWriter := zapcore.AddSync(&hook) + consoleDebugging := zapcore.Lock(os.Stdout) + + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + encoder := zapcore.NewJSONEncoder(encoderConfig) + + // Join the outputs, encoders, and level-handling functions into + // zapcore.Cores, then tee the four cores together. 
+ highPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool { + return lvl >= zapcore.InfoLevel + }) + lowPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool { + return lvl >= zapcore.DebugLevel + }) + + core = zapcore.NewTee( + zapcore.NewCore(encoder, consoleDebugging, lowPriority), + zapcore.NewCore(encoder, fileWriter, highPriority), + ) + caller := zap.AddCaller() + callerSkipOpt := zap.AddCallerSkip(1) + // From a zapcore.Core, it's easy to construct a Logger. + zapLogger = zap.New(core, caller, callerSkipOpt, zap.AddStacktrace(zap.ErrorLevel)) +} diff --git a/service/cosmos/main.go b/service/cosmos/main.go new file mode 100644 index 0000000..6bb01e4 --- /dev/null +++ b/service/cosmos/main.go @@ -0,0 +1,37 @@ +package main + +import ( + "os" + "os/signal" + "syscall" + + "github.com/irisnet/rainbow-sync/service/cosmos/logger" + model "github.com/irisnet/rainbow-sync/service/cosmos/db" + "github.com/irisnet/rainbow-sync/service/cosmos/task" + "runtime" +) + +func main() { + runtime.GOMAXPROCS(runtime.NumCPU() / 2) + c := make(chan os.Signal) + + defer func() { + logger.Info("System Exit") + + model.Stop() + + if err := recover(); err != nil { + logger.Error("", logger.Any("err", err)) + os.Exit(1) + } + }() + + signal.Notify(c, os.Interrupt, os.Kill, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + logger.Info("Start sync Program") + + model.Start() + task.Start() + + <-c +} diff --git a/service/cosmos/model/block.go b/service/cosmos/model/block.go new file mode 100644 index 0000000..d5e0e73 --- /dev/null +++ b/service/cosmos/model/block.go @@ -0,0 +1,12 @@ +package cosmos + +const ( + CollectionNameBlock = "sync_cosmos_block" +) + +type ( + Block struct { + Height int64 `bson:"height"` + CreateTime int64 `bson:"create_time"` + } +) diff --git a/service/cosmos/model/cosmos_tx.go b/service/cosmos/model/cosmos_tx.go new file mode 100644 index 0000000..d23c8cd --- /dev/null +++ b/service/cosmos/model/cosmos_tx.go @@ -0,0 +1,57 @@ +package cosmos + +import ( + "gopkg.in/mgo.v2/bson" + "time" +) + +type ( + CosmosTx struct { + Time time.Time `bson:"time"` + Height int64 `bson:"height"` + TxHash string `bson:"tx_hash"` + From string `bson:"from"` + To string `bson:"to"` + Initiator string `bson:"initiator"` + Amount []*Coin `bson:"amount"` + Type string `bson:"type"` + Fee *Fee `bson:"fee"` + Memo string `bson:"memo"` + Status string `bson:"status"` + Code uint32 `bson:"code"` + Tags map[string]string `bson:"tags"` + //Msg Msg `bson:"msg"` + } +) + +const ( + CollectionNameCosmosTx = "sync_cosmos_tx" +) + +func (d CosmosTx) Name() string { + return CollectionNameCosmosTx +} + +func (d CosmosTx) PkKvPair() map[string]interface{} { + return bson.M{} +} + +type Coin struct { + Denom string `bson:"denom" json:"denom"` + Amount int64 `bson:"amount" json:"amount"` +} + +type Coins []*Coin + +type Fee struct { + Amount []*Coin `bson:"amount" json:"amount"` + Gas int64 `bson:"gas" json:"gas"` +} + +type Tag map[string]string + +type RawLog struct { + MsgIndex int `json:"msg_index,string"` + Success bool `json:"success"` + Log string `json:"log"` +} diff --git a/service/cosmos/model/sync_task.go b/service/cosmos/model/sync_task.go new file mode 100644 index 0000000..2b46c95 --- /dev/null +++ b/service/cosmos/model/sync_task.go @@ -0,0 +1,214 @@ +package cosmos + +import ( + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + model "github.com/irisnet/rainbow-sync/service/cosmos/db" + "time" +) + +const ( + CollectionNameSyncCosmosTask = "sync_cosmos_task" +) + +type ( 
+ SyncCosmosTask struct { + ID bson.ObjectId `bson:"_id"` + StartHeight int64 `bson:"start_height"` // task start height + EndHeight int64 `bson:"end_height"` // task end height + CurrentHeight int64 `bson:"current_height"` // task current height + Status string `bson:"status"` // task status + WorkerId string `bson:"worker_id"` // worker id + WorkerLogs []WorkerLog `bson:"worker_logs"` // worker logs + LastUpdateTime int64 `bson:"last_update_time"` // unix timestamp + } + + WorkerLog struct { + WorkerId string `bson:"worker_id"` // worker id + BeginTime time.Time `bson:"begin_time"` // time which worker begin handle this task + } +) + +func (d *SyncCosmosTask) Name() string { + return CollectionNameSyncCosmosTask +} + +func (d *SyncCosmosTask) PkKvPair() map[string]interface{} { + return bson.M{"start_height": d.CurrentHeight, "end_height": d.EndHeight} +} + +// get max block height in sync task +func (d *SyncCosmosTask) GetMaxBlockHeight() (int64, error) { + type maxHeightRes struct { + MaxHeight int64 `bson:"max"` + } + var res []maxHeightRes + + q := []bson.M{ + { + "$group": bson.M{ + "_id": nil, + "max": bson.M{"$max": "$end_height"}, + }, + }, + } + + getMaxBlockHeightFn := func(c *mgo.Collection) error { + return c.Pipe(q).All(&res) + } + err := model.ExecCollection(d.Name(), getMaxBlockHeightFn) + + if err != nil { + return 0, err + } + if len(res) > 0 { + return res[0].MaxHeight, nil + } + + return 0, nil +} + +// query record by status +func (d *SyncCosmosTask) QueryAll(status []string, taskType string) ([]SyncCosmosTask, error) { + var syncTasks []SyncCosmosTask + q := bson.M{} + + if len(status) > 0 { + q["status"] = bson.M{ + "$in": status, + } + } + + switch taskType { + case model.SyncTaskTypeCatchUp: + q["end_height"] = bson.M{ + "$ne": 0, + } + break + case model.SyncTaskTypeFollow: + q["end_height"] = bson.M{ + "$eq": 0, + } + break + } + + fn := func(c *mgo.Collection) error { + return c.Find(q).All(&syncTasks) + } + + err := model.ExecCollection(d.Name(), fn) + + if err != nil { + return syncTasks, err + } + + return syncTasks, nil +} + +func (d *SyncCosmosTask) GetExecutableTask(maxWorkerSleepTime int64) ([]SyncCosmosTask, error) { + var tasks []SyncCosmosTask + + t := time.Now().Add(time.Duration(-maxWorkerSleepTime) * time.Second).Unix() + q := bson.M{ + "status": bson.M{ + "$in": []string{model.SyncTaskStatusUnHandled, model.SyncTaskStatusUnderway}, + }, + } + + fn := func(c *mgo.Collection) error { + return c.Find(q).Sort("-status").Limit(1000).All(&tasks) + } + + err := model.ExecCollection(d.Name(), fn) + + if err != nil { + return tasks, err + } + + ret := make([]SyncCosmosTask, 0, len(tasks)) + //filter the task which last_update_time >= now + for _, task := range tasks { + if task.LastUpdateTime >= t && task.Status == model.SyncTaskStatusUnderway { + continue + } + ret = append(ret, task) + } + //fmt.Println("SyncCosmosTask GetExecutableTask ret:",ret) + //fmt.Println("SyncCosmosTask GetExecutableTask:",tasks) + + return ret, nil +} + +func (d *SyncCosmosTask) GetTaskById(id bson.ObjectId) (SyncCosmosTask, error) { + var task SyncCosmosTask + + fn := func(c *mgo.Collection) error { + return c.FindId(id).One(&task) + } + + err := model.ExecCollection(d.Name(), fn) + if err != nil { + return task, err + } + return task, nil +} + +func (d *SyncCosmosTask) GetTaskByIdAndWorker(id bson.ObjectId, worker string) (SyncCosmosTask, error) { + var task SyncCosmosTask + + fn := func(c *mgo.Collection) error { + q := bson.M{ + "_id": id, + "worker_id": worker, + } + + 
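+ // matching on both _id and worker_id means a task that has been taken over by another
+ // worker comes back as mgo.ErrNotFound, which callers treat as loss of ownership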
return c.Find(q).One(&task) + } + + err := model.ExecCollection(d.Name(), fn) + if err != nil { + return task, err + } + return task, nil +} + +// take over a task +// update status, worker_id, worker_logs and last_update_time +func (d *SyncCosmosTask) TakeOverTask(task SyncCosmosTask, workerId string) error { + // multiple goroutine attempt to update same record, + // use this selector to ensure only one goroutine can update success at same time + fn := func(c *mgo.Collection) error { + selector := bson.M{ + "_id": task.ID, + "last_update_time": task.LastUpdateTime, + } + + task.Status = model.SyncTaskStatusUnderway + task.WorkerId = workerId + task.LastUpdateTime = time.Now().Unix() + task.WorkerLogs = append(task.WorkerLogs, WorkerLog{ + WorkerId: workerId, + BeginTime: time.Now(), + }) + + return c.Update(selector, task) + } + + return model.ExecCollection(d.Name(), fn) +} + +// update task last update time +func (d *SyncCosmosTask) UpdateLastUpdateTime(task SyncCosmosTask) error { + fn := func(c *mgo.Collection) error { + selector := bson.M{ + "_id": task.ID, + "worker_id": task.WorkerId, + } + + task.LastUpdateTime = time.Now().Unix() + + return c.Update(selector, task) + } + + return model.ExecCollection(d.Name(), fn) +} diff --git a/service/cosmos/task/create.go b/service/cosmos/task/create.go new file mode 100644 index 0000000..b48c721 --- /dev/null +++ b/service/cosmos/task/create.go @@ -0,0 +1,232 @@ +package task + +import ( + "gopkg.in/mgo.v2/txn" + "fmt" + "gopkg.in/mgo.v2/bson" + "time" + model "github.com/irisnet/rainbow-sync/service/cosmos/db" + "github.com/irisnet/rainbow-sync/service/cosmos/logger" + cmodel "github.com/irisnet/rainbow-sync/service/cosmos/model" + "github.com/irisnet/rainbow-sync/service/cosmos/conf" + "github.com/irisnet/rainbow-sync/service/cosmos/block" +) + +type TaskCosmosService struct { + blockType block.Cosmos_Block + syncCosmosModel cmodel.SyncCosmosTask +} + +func (s *TaskCosmosService) StartCreateTask() { + blockNumPerWorkerHandle := int64(conf.BlockNumPerWorkerHandle) + + logger.Info("Start create task", logger.String("Chain Block", s.blockType.Name())) + + // buffer channel to limit goroutine num + chanLimit := make(chan bool, conf.WorkerNumCreateTask) + + for { + chanLimit <- true + go s.createTask(blockNumPerWorkerHandle, chanLimit) + time.Sleep(time.Duration(1) * time.Minute) + } +} + +func (s *TaskCosmosService) createTask(blockNumPerWorkerHandle int64, chanLimit chan bool) { + var ( + syncCosmosTasks []*cmodel.SyncCosmosTask + ops []txn.Op + invalidFollowTask cmodel.SyncCosmosTask + logMsg string + ) + + defer func() { + if err := recover(); err != nil { + logger.Error("Create cosmos task failed", logger.Any("err", err), + logger.String("Chain Block", s.blockType.Name())) + } + <-chanLimit + }() + + // check valid follow task if exist + // status of valid follow task is unhandled or underway + validFollowTasks, err := s.syncCosmosModel.QueryAll( + []string{ + model.SyncTaskStatusUnHandled, + model.SyncTaskStatusUnderway, + }, model.SyncTaskTypeFollow) + if err != nil { + logger.Error("Query sync cosmos task failed", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + return + } + if len(validFollowTasks) == 0 { + // get max end_height from sync_task + maxEndHeight, err := s.syncCosmosModel.GetMaxBlockHeight() + if err != nil { + logger.Error("Get Cosmos max endBlock failed", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + return + } + + blockChainLatestHeight, 
err := getCosmosBlockChainLatestHeight() + if err != nil { + logger.Error("Get Cosmos current block height failed", logger.String("err", err.Error())) + return + } + + if maxEndHeight+blockNumPerWorkerHandle <= blockChainLatestHeight { + syncCosmosTasks = createCosmosCatchUpTask(maxEndHeight, blockNumPerWorkerHandle, blockChainLatestHeight) + logMsg = fmt.Sprintf("Create cosmos catch up task during follow task not exist,from-to:%v-%v,Chain Block:%v", + maxEndHeight+1, blockChainLatestHeight, s.blockType.Name()) + } else { + finished, err := s.assertAllCatchUpCosmosTaskFinished() + if err != nil { + logger.Error("AssertAllCatchUpTaskFinished failed", logger.String("err", err.Error())) + return + } + if finished { + syncCosmosTasks = createFollowCosmosTask(maxEndHeight, blockNumPerWorkerHandle, blockChainLatestHeight) + logMsg = fmt.Sprintf("Create follow cosmos task during follow task not exist,from-to:%v-%v,Chain Block:%v", + maxEndHeight+1, blockChainLatestHeight, s.blockType.Name()) + } + } + } else { + followTask := validFollowTasks[0] + followedHeight := followTask.CurrentHeight + if followedHeight == 0 { + followedHeight = followTask.StartHeight - 1 + } + + blockChainLatestHeight, err := getCosmosBlockChainLatestHeight() + if err != nil { + logger.Error("Get Cosmos blockChain latest height failed", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + return + } + + if followedHeight+blockNumPerWorkerHandle <= blockChainLatestHeight { + syncCosmosTasks = createCosmosCatchUpTask(followedHeight, blockNumPerWorkerHandle, blockChainLatestHeight) + + invalidFollowTask = followTask + logMsg = fmt.Sprintf("Create cosmos catch up task during follow task exist,from-to:%v-%v,invalidFollowTaskId:%v,invalidFollowTaskCurHeight:%v,Chain Block:%v", + followedHeight+1, blockChainLatestHeight, invalidFollowTask.ID.Hex(), invalidFollowTask.CurrentHeight, s.blockType.Name()) + } + } + + // bulk insert or remove use transaction + ops = make([]txn.Op, 0, len(syncCosmosTasks)+1) + if len(syncCosmosTasks) > 0 { + for _, v := range syncCosmosTasks { + objectId := bson.NewObjectId() + v.ID = objectId + op := txn.Op{ + C: cmodel.CollectionNameSyncCosmosTask, + Id: objectId, + Assert: nil, + Insert: v, + } + + ops = append(ops, op) + } + } + + if invalidFollowTask.ID.Valid() { + op := txn.Op{ + C: cmodel.CollectionNameSyncCosmosTask, + Id: invalidFollowTask.ID, + Assert: bson.M{ + "current_height": invalidFollowTask.CurrentHeight, + "last_update_time": invalidFollowTask.LastUpdateTime, + }, + Update: bson.M{ + "$set": bson.M{ + "status": model.FollowTaskStatusInvalid, + "last_update_time": time.Now().Unix(), + }, + }, + } + ops = append(ops, op) + } + + if len(ops) > 0 { + err := model.Txn(ops) + if err != nil { + logger.Warn("Create Cosmos sync task fail", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + } else { + logger.Info(fmt.Sprintf("Create sync Cosmos task success,%v", logMsg), logger.String("Chain Block", s.blockType.Name())) + } + } + + time.Sleep(1 * time.Second) +} + +func createCosmosCatchUpTask(maxEndHeight, blockNumPerWorker, currentBlockHeight int64) []*cmodel.SyncCosmosTask { + var ( + syncTasks []*cmodel.SyncCosmosTask + ) + logger.Info("createCosmosCatchUpTask", logger.Int64("maxEndHeight", maxEndHeight), + logger.Int64("blockNumPerWorker", blockNumPerWorker), logger.Int64("currentBlockHeight", currentBlockHeight)) + + if length := currentBlockHeight - (maxEndHeight + blockNumPerWorker); length > 0 { + syncTasks = 
make([]*cmodel.SyncCosmosTask, 0, length+1) + } + + for maxEndHeight+blockNumPerWorker <= currentBlockHeight { + syncTask := cmodel.SyncCosmosTask{ + StartHeight: maxEndHeight + 1, + EndHeight: maxEndHeight + blockNumPerWorker, + Status: model.SyncTaskStatusUnHandled, + LastUpdateTime: time.Now().Unix(), + } + syncTasks = append(syncTasks, &syncTask) + + maxEndHeight += blockNumPerWorker + } + + return syncTasks +} + +func (s *TaskCosmosService) assertAllCatchUpCosmosTaskFinished() (bool, error) { + var ( + allCatchUpTaskFinished = false + ) + + // assert all catch up task whether finished + tasks, err := s.syncCosmosModel.QueryAll( + []string{ + model.SyncTaskStatusUnHandled, + model.SyncTaskStatusUnderway, + }, + model.SyncTaskTypeCatchUp) + if err != nil { + return false, err + } + + if len(tasks) == 0 { + allCatchUpTaskFinished = true + } + + return allCatchUpTaskFinished, nil +} + +func createFollowCosmosTask(maxEndHeight, blockNumPerWorker, currentBlockHeight int64) []*cmodel.SyncCosmosTask { + var ( + syncCosmosTasks []*cmodel.SyncCosmosTask + ) + syncCosmosTasks = make([]*cmodel.SyncCosmosTask, 0, 1) + + if maxEndHeight+blockNumPerWorker > currentBlockHeight { + syncTask := cmodel.SyncCosmosTask{ + StartHeight: maxEndHeight + 1, + EndHeight: 0, + Status: model.SyncTaskStatusUnHandled, + LastUpdateTime: time.Now().Unix(), + } + + syncCosmosTasks = append(syncCosmosTasks, &syncTask) + } + + return syncCosmosTasks +} diff --git a/service/cosmos/task/execute.go b/service/cosmos/task/execute.go new file mode 100644 index 0000000..52b45e0 --- /dev/null +++ b/service/cosmos/task/execute.go @@ -0,0 +1,287 @@ +package task + +import ( + "github.com/irisnet/rainbow-sync/service/cosmos/helper" + "github.com/irisnet/rainbow-sync/service/cosmos/logger" + model "github.com/irisnet/rainbow-sync/service/cosmos/db" + cmodel "github.com/irisnet/rainbow-sync/service/cosmos/model" + "github.com/irisnet/rainbow-sync/service/cosmos/conf" + "time" + "os" + "fmt" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" +) + +func (s *TaskCosmosService) StartExecuteTask() { + var ( + blockNumPerWorkerHandle = int64(conf.BlockNumPerWorkerHandle) + workerMaxSleepTime = int64(conf.WorkerMaxSleepTime) + ) + if workerMaxSleepTime <= 1*60 { + logger.Fatal("workerMaxSleepTime shouldn't less than 1 minute") + } + + logger.Info("Start execute task", logger.String("Chain Block", s.blockType.Name())) + + // buffer channel to limit goroutine num + chanLimit := make(chan bool, conf.WorkerNumExecuteTask) + + cosmoshelper.Init(conf.BlockChainMonitorUrl, conf.MaxConnectionNum, conf.InitConnectionNum) + defer func() { + cosmoshelper.ClosePool() + }() + + for { + chanLimit <- true + go s.executeTask(blockNumPerWorkerHandle, workerMaxSleepTime, chanLimit) + time.Sleep(time.Duration(1) * time.Second) + } +} + +func (s *TaskCosmosService) executeTask(blockNumPerWorkerHandle, maxWorkerSleepTime int64, chanLimit chan bool) { + var ( + //syncTaskModel imodel.SyncTask + workerId, taskType string + blockChainLatestHeight int64 + ) + genWorkerId := func() string { + // generate worker id use hostname@xxx + hostname, _ := os.Hostname() + return fmt.Sprintf("%v@%v", hostname, bson.NewObjectId().Hex()) + } + + healthCheckQuit := make(chan bool) + workerId = genWorkerId() + client := cosmoshelper.GetCosmosClient() + + defer func() { + if r := recover(); r != nil { + logger.Error("execute Cosmos task fail", logger.Any("err", r)) + } + close(healthCheckQuit) + <-chanLimit + client.Release() + }() + + // check whether exist executable task + // status 
= unhandled or + // status = underway and now - lastUpdateTime > confTime + tasks, err := s.syncCosmosModel.GetExecutableTask(maxWorkerSleepTime) + if err != nil { + logger.Error("Get Cosmos executable task fail", logger.String("err", err.Error()), logger.String("Chain Block", s.blockType.Name())) + } + if len(tasks) == 0 { + // there is no executable tasks + //logger.Info("there is no executable tasks",logger.String("Chain Block",s.blockType.Name())) + return + } + + // take over sync task + // attempt to update status, worker_id and worker_logs + task := tasks[0] + err = s.syncCosmosModel.TakeOverTask(task, workerId) + if err != nil { + if err == mgo.ErrNotFound { + // this task has been take over by other goroutine + logger.Info("Task has been take over by other goroutine", logger.String("Chain Block", s.blockType.Name())) + } else { + logger.Error("Take over task fail", logger.String("err", err.Error()), logger.String("Chain Block", s.blockType.Name())) + } + return + } else { + // task over task success, update task worker to current worker + task.WorkerId = workerId + } + + if task.EndHeight != 0 { + taskType = model.SyncTaskTypeCatchUp + } else { + taskType = model.SyncTaskTypeFollow + } + logger.Info("worker begin execute Cosmos task", logger.String("Chain Block", s.blockType.Name()), + logger.String("curWorker", workerId), logger.Any("taskId", task.ID), + logger.String("from-to", fmt.Sprintf("%v-%v", task.StartHeight, task.EndHeight))) + + // worker health check, if worker is alive, then update last update time every minute. + // health check will exit in follow conditions: + // 1. task is not owned by current worker + // 2. task is invalid + workerHealthCheck := func(taskId bson.ObjectId, currentWorker string) { + defer func() { + if r := recover(); r != nil { + logger.Error("Cosmos worker health check err", logger.Any("err", r), logger.String("Chain Block", s.blockType.Name())) + } + }() + + func() { + for { + select { + case <-healthCheckQuit: + logger.Info("Cosmos get health check quit signal, now exit health check", logger.String("Chain Block", s.blockType.Name())) + return + default: + task, err := s.syncCosmosModel.GetTaskByIdAndWorker(taskId, workerId) + if err == nil { + if _, valid := s.assertCosmosTaskValid(task, blockNumPerWorkerHandle); valid { + // update task last update time + if err := s.syncCosmosModel.UpdateLastUpdateTime(task); err != nil { + logger.Error("update last update time fail", logger.String("err", err.Error()), logger.String("Chain Block", s.blockType.Name())) + } + } else { + logger.Info("Cosmos task is invalid, exit health check", logger.String("taskId", taskId.Hex()), logger.String("Chain Block", s.blockType.Name())) + return + } + } else { + if err == mgo.ErrNotFound { + logger.Info("Cosmos task may be task over by other goroutine, exit health check", + logger.String("taskId", taskId.Hex()), logger.String("curWorker", workerId), logger.String("Chain Block", s.blockType.Name())) + return + } else { + logger.Error("Cosmos get task by id and worker fail", logger.String("taskId", taskId.Hex()), + logger.String("curWorker", workerId), logger.String("Chain Block", s.blockType.Name())) + } + } + } + time.Sleep(1 * time.Minute) + } + }() + } + go workerHealthCheck(task.ID, workerId) + + // check task is valid + // valid catch up task: current_height < end_height + // valid follow task: current_height + blockNumPerWorkerHandle > blockChainLatestHeight + blockChainLatestHeight, isValid := s.assertCosmosTaskValid(task, blockNumPerWorkerHandle) + for isValid 
{ + var inProcessBlock int64 + if task.CurrentHeight == 0 { + inProcessBlock = task.StartHeight + } else { + inProcessBlock = task.CurrentHeight + 1 + } + + // if inProcessBlock > blockChainLatestHeight, should wait blockChainLatestHeight update + if taskType == model.SyncTaskTypeFollow && inProcessBlock > blockChainLatestHeight { + logger.Info("wait Cosmos blockChain latest height update", + logger.Int64("curSyncedHeight", inProcessBlock-1), + logger.Int64("blockChainLatestHeight", blockChainLatestHeight), + logger.String("Chain Block", s.blockType.Name())) + time.Sleep(2 * time.Second) + // continue to assert task is valid + blockChainLatestHeight, isValid = s.assertCosmosTaskValid(task, blockNumPerWorkerHandle) + continue + } + + // parse data from block + blockDoc, BlockTypeChainDocs, err := s.blockType.ParseBlock(inProcessBlock, client) + if err != nil { + logger.Error("Parse Cosmos block fail", logger.Int64("block", inProcessBlock), + logger.String("err", err.Error()), logger.String("Chain Block", s.blockType.Name())) + } + //logger.Info("ParseBlock success",logger.String("Chain Block",s.blockType.Name())) + + // check task owner + workerUnchanged, err := assertCosmosTaskWorkerUnchanged(task.ID, task.WorkerId) + if err != nil { + logger.Error("assert Cosmos task worker is unchanged fail", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + } + if workerUnchanged { + // save data and update sync task + taskDoc := task + taskDoc.CurrentHeight = inProcessBlock + taskDoc.LastUpdateTime = time.Now().Unix() + taskDoc.Status = model.SyncTaskStatusUnderway + if inProcessBlock == task.EndHeight { + taskDoc.Status = model.SyncTaskStatusCompleted + } + + err := s.blockType.SaveDocsWithTxn(blockDoc, BlockTypeChainDocs, taskDoc) + if err != nil { + logger.Error("save docs fail", logger.String("err", err.Error())) + } else { + task.CurrentHeight = inProcessBlock + //logger.Info("SaveDocsWithTxn success",logger.String("Chain Block",s.blockType.Name())) + } + + // continue to assert task is valid + blockChainLatestHeight, isValid = s.assertCosmosTaskValid(task, blockNumPerWorkerHandle) + } else { + logger.Info("Cosmos task worker changed", logger.Any("task_id", task.ID), + logger.String("origin worker", workerId), logger.String("current worker", task.WorkerId), + logger.String("Chain Block", s.blockType.Name())) + return + } + } + + logger.Info("Cosmos worker finish execute task", logger.String("Chain Block", s.blockType.Name()), + logger.String("task_worker", task.WorkerId), logger.Any("task_id", task.ID), + logger.String("from-to-current", fmt.Sprintf("%v-%v-%v", task.StartHeight, task.EndHeight, task.CurrentHeight))) +} + +func (s *TaskCosmosService) assertCosmosTaskValid(task cmodel.SyncCosmosTask, blockNumPerWorkerHandle int64) (int64, bool) { + var ( + taskType string + flag = false + blockChainLatestHeight int64 + err error + ) + if task.EndHeight != 0 { + taskType = model.SyncTaskTypeCatchUp + } else { + taskType = model.SyncTaskTypeFollow + } + currentHeight := task.CurrentHeight + if currentHeight == 0 { + currentHeight = task.StartHeight - 1 + } + + switch taskType { + case model.SyncTaskTypeCatchUp: + if currentHeight < task.EndHeight { + flag = true + } + break + case model.SyncTaskTypeFollow: + blockChainLatestHeight, err = getCosmosBlockChainLatestHeight() + if err != nil { + logger.Error("getCosmos blockChain latest height err", logger.String("err", err.Error())) + return blockChainLatestHeight, flag + } + if currentHeight+blockNumPerWorkerHandle 
> blockChainLatestHeight { + flag = true + } + break + } + return blockChainLatestHeight, flag +} +func assertCosmosTaskWorkerUnchanged(taskId bson.ObjectId, workerId string) (bool, error) { + var ( + syncTaskModel cmodel.SyncCosmosTask + ) + // check task owner + task, err := syncTaskModel.GetTaskById(taskId) + if err != nil { + return false, err + } + + if task.WorkerId == workerId { + return true, nil + } else { + return false, nil + } +} + +func getCosmosBlockChainLatestHeight() (int64, error) { + client := cosmoshelper.GetCosmosClient() + defer func() { + client.Release() + }() + status, err := client.Status() + if err != nil { + return 0, err + } + + return status.SyncInfo.LatestBlockHeight, nil +} diff --git a/service/cosmos/task/start.go b/service/cosmos/task/start.go new file mode 100644 index 0000000..5d8bfa8 --- /dev/null +++ b/service/cosmos/task/start.go @@ -0,0 +1,7 @@ +package task + +func Start() { + synctask := new(TaskCosmosService) + go synctask.StartCreateTask() + go synctask.StartExecuteTask() +} diff --git a/service/cosmos/tools/Makefile b/service/cosmos/tools/Makefile new file mode 100644 index 0000000..08e0cbc --- /dev/null +++ b/service/cosmos/tools/Makefile @@ -0,0 +1,33 @@ +######################################## +### DEP + +DEP = github.com/golang/dep/cmd/dep +DEP_CHECK := $(shell command -v dep 2> /dev/null) + +check_tools: +ifndef DEP_CHECK + @echo "No dep in path. Install with 'make get_tools'." +else + @echo "Found dep in path." +endif + +get_tools: +ifdef DEP_CHECK + @echo "Dep is already installed. Run 'make update_tools' to update." +else + @echo "$(ansi_grn)Installing dep$(ansi_end)" + go get -v $(DEP) +endif + +update_tools: + @echo "$(ansi_grn)Updating dep$(ansi_end)" + go get -u -v $(DEP) + + +######################################## +# ANSI colors + +ansi_red=\033[0;31m +ansi_grn=\033[0;32m +ansi_yel=\033[0;33m +ansi_end=\033[0m \ No newline at end of file diff --git a/service/cosmos/utils/utils.go b/service/cosmos/utils/utils.go new file mode 100644 index 0000000..bafac7d --- /dev/null +++ b/service/cosmos/utils/utils.go @@ -0,0 +1,59 @@ +package cosmos + +import ( + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/x/auth" + sdk "github.com/cosmos/cosmos-sdk/types" + "encoding/hex" + "strings" + "github.com/cosmos/cosmos-sdk/cmd/gaia/app" + cmodel "github.com/irisnet/rainbow-sync/service/cosmos/model" + "strconv" + "github.com/irisnet/rainbow-sync/service/cosmos/logger" +) + +var ( + cdc *codec.Codec +) + +// 初始化账户地址前缀 +func init() { + cdc = app.MakeCodec() +} + +func GetCodec() *codec.Codec { + return cdc +} + +func BuildHex(bytes []byte) string { + return strings.ToUpper(hex.EncodeToString(bytes)) +} + +func BuildFee(fee auth.StdFee) cmodel.Fee { + return cmodel.Fee{ + Amount: ParseCoins(fee.Amount), + Gas: int64(fee.Gas), + } +} + +func ParseCoins(coinsStr sdk.Coins) (coins []*cmodel.Coin) { + + coins = make([]*cmodel.Coin, 0, len(coinsStr)) + for _, coinStr := range coinsStr { + coin := ParseCoin(coinStr) + coins = append(coins, &coin) + } + return coins +} + +func ParseCoin(sdkcoin sdk.Coin) (coin cmodel.Coin) { + amount, err := strconv.ParseInt(sdkcoin.Amount.String(), 10, 64) + if err != nil { + logger.Error("ParseCoin have error", logger.String("error", err.Error())) + } + return cmodel.Coin{ + Denom: sdkcoin.Denom, + Amount: amount, + } + +} diff --git a/service/iris/Dockerfile b/service/iris/Dockerfile new file mode 100644 index 0000000..e9f5548 --- /dev/null +++ b/service/iris/Dockerfile @@ -0,0 +1,27 @@ +FROM 
alpine:3.8 + +# Set up dependencies +ENV PACKAGES go make git libc-dev bash + +# Set up path +ENV BINARY_NAME rainbow-sync +ENV GOPATH /root/go +ENV REPO_PATH $GOPATH/src/github.com/irisnet/rainbow-sync/service/iris +ENV PATH $GOPATH/bin:$PATH + +RUN mkdir -p $GOPATH $REPO_PATH + +COPY . $REPO_PATH +WORKDIR $REPO_PATH + +VOLUME $REPO_PATH/logs + +# Install minimum necessary dependencies, build binary +RUN apk add --no-cache $PACKAGES && \ + cd $REPO_PATH && make all && \ + mv $REPO_PATH/$BINARY_NAME $GOPATH/bin && \ + rm -rf $REPO_PATH/vendor && \ + rm -rf $GOPATH/src/github.com/golang $GOPATH/bin/dep $GOPATH/pkg/* && \ + apk del $PACKAGES + +CMD $BINARY_NAME \ No newline at end of file diff --git a/service/iris/Gopkg.lock b/service/iris/Gopkg.lock new file mode 100644 index 0000000..23be999 --- /dev/null +++ b/service/iris/Gopkg.lock @@ -0,0 +1,787 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "UT" + revision = "4b2b341e8d7715fae06375aa633dbb6e91b3fb46" + version = "v1.0.0" + +[[projects]] + digest = "1:1343a2963481a305ca4d051e84bc2abd16b601ee22ed324f8d605de1adb291b0" + name = "github.com/bgentry/speakeasy" + packages = ["."] + pruneopts = "UT" + revision = "4aabc24848ce5fd31929f7d1e4ea74d3709c14cd" + version = "v0.1.0" + +[[projects]] + branch = "master" + digest = "1:9e7c5138114ff9c51a60731b3a425c319305013c6ea8b3f60fd2435baba1a0db" + name = "github.com/btcsuite/btcd" + packages = ["btcec"] + pruneopts = "UT" + revision = "a0d1e3e36d50f61ee6eaab26d7bd246aae1f9ece" + +[[projects]] + digest = "1:386de157f7d19259a7f9c81f26ce011223ce0f090353c1152ffdf730d7d10ac2" + name = "github.com/btcsuite/btcutil" + packages = ["bech32"] + pruneopts = "UT" + revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" + +[[projects]] + branch = "master" + digest = "1:0427cfa11785c899dfe0ea6987bc0c9dea61647f6bb3987163d62c867f1cad6a" + name = "github.com/cosmos/go-bip39" + packages = ["."] + pruneopts = "UT" + revision = "555e2067c45d9fcd7292bf3b8e732bc10ac8c58e" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:5991d93f13f971f4090db8b7dbbd8709210baa23df89701a51c6658ab4646678" + name = "github.com/emicklei/proto" + packages = ["."] + pruneopts = "UT" + revision = "0a9409d9194451bb7ef74edd3694cccfd316b3b1" + version = "v1.6.13" + +[[projects]] + digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" + name = "github.com/fsnotify/fsnotify" + packages = ["."] + pruneopts = "UT" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + version = "v1.4.7" + +[[projects]] + digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11" + name = "github.com/go-kit/kit" + packages = [ + "log", + "log/level", + "log/term", + "metrics", + "metrics/discard", + "metrics/internal/lv", + "metrics/prometheus", + ] + pruneopts = "UT" + revision = "4dc7be5d2d12881735283bcab7352178e190fc71" + version = "v0.6.0" + +[[projects]] + digest = "1:4062bc6de62d73e2be342243cf138cf499b34d558876db8d9430e2149388a4d8" + name = "github.com/go-logfmt/logfmt" + packages = ["."] + pruneopts = "UT" + revision = "07c9b44f60d7ffdfb7d8efe1ad539965737836dc" + version = 
"v0.4.0" + +[[projects]] + digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d" + name = "github.com/go-stack/stack" + packages = ["."] + pruneopts = "UT" + revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a" + version = "v1.8.0" + +[[projects]] + digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e" + name = "github.com/gogo/protobuf" + packages = [ + "gogoproto", + "jsonpb", + "proto", + "protoc-gen-gogo/descriptor", + "sortkeys", + "types", + ] + pruneopts = "UT" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" + +[[projects]] + digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "UT" + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" + +[[projects]] + digest = "1:e4f5819333ac698d294fe04dbf640f84719658d5c7ce195b10060cc37292ce79" + name = "github.com/golang/snappy" + packages = ["."] + pruneopts = "UT" + revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a" + version = "v0.0.1" + +[[projects]] + digest = "1:7b5c6e2eeaa9ae5907c391a91c132abfd5c9e8a784a341b5625e750c67e6825d" + name = "github.com/gorilla/websocket" + packages = ["."] + pruneopts = "UT" + revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" + version = "v1.4.0" + +[[projects]] + digest = "1:c0d19ab64b32ce9fe5cf4ddceba78d5bc9807f0016db6b1183599da3dcc24d10" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/printer", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token", + ] + pruneopts = "UT" + revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" + version = "v1.0.0" + +[[projects]] + digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + pruneopts = "UT" + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + digest = "1:8a3253c9cc527db52a8ed0bbfc034e30e690e830b01e9251424944026fb5ac26" + name = "github.com/irisnet/irishub" + packages = [ + "app", + "app/protocol", + "app/v0", + "client", + "client/keys", + "codec", + "crypto", + "crypto/keys", + "crypto/keys/hd", + "crypto/keys/keyerror", + "crypto/keys/mintkey", + "mock", + "mock/baseapp", + "modules/auth", + "modules/bank", + "modules/distribution", + "modules/distribution/keeper", + "modules/distribution/tags", + "modules/distribution/types", + "modules/gov", + "modules/gov/tags", + "modules/guardian", + "modules/mint", + "modules/mint/tags", + "modules/params", + "modules/params/subspace", + "modules/service", + "modules/service/tags", + "modules/slashing", + "modules/stake", + "modules/stake/keeper", + "modules/stake/querier", + "modules/stake/tags", + "modules/stake/types", + "modules/upgrade", + "server", + "server/config", + "store", + "tools/protoidl", + "types", + "version", + ] + pruneopts = "UT" + revision = "3f353ab8da3588684075c5d1cdaa65d9125477ac" + version = "v0.14.1" + +[[projects]] + digest = "1:a74b5a8e34ee5843cd6e65f698f3e75614f812ff170c2243425d75bc091e9af2" + name = "github.com/jmhodges/levigo" + packages = ["."] + pruneopts = "UT" + revision = "853d788c5c416eaaee5b044570784a96c7a26975" + version = "v1.0.0" + +[[projects]] + digest = "1:d622b76d4ca8cebd9fd06fc5983edd55c75fa06d6bb91c4a8fb38e24fc60d0d6" + name = "github.com/jolestar/go-commons-pool" + packages = [ 
+ ".", + "collections", + "concurrent", + ] + pruneopts = "UT" + revision = "3f5d5f81046da81d73466f44fe6e0ac36ff304bd" + version = "v2.0.0" + +[[projects]] + branch = "master" + digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72" + name = "github.com/kr/logfmt" + packages = ["."] + pruneopts = "UT" + revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" + +[[projects]] + digest = "1:5a0ef768465592efca0412f7e838cdc0826712f8447e70e6ccc52eb441e9ab13" + name = "github.com/magiconair/properties" + packages = ["."] + pruneopts = "UT" + revision = "de8848e004dd33dc07a2947b3d76f618a7fc7ef1" + version = "v1.8.1" + +[[projects]] + digest = "1:9b90c7639a41697f3d4ad12d7d67dfacc9a7a4a6e0bbfae4fc72d0da57c28871" + name = "github.com/mattn/go-isatty" + packages = ["."] + pruneopts = "UT" + revision = "1311e847b0cb909da63b5fecfb5370aa66236465" + version = "v0.0.8" + +[[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + pruneopts = "UT" + revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" + version = "v1.1.2" + +[[projects]] + digest = "1:93131d8002d7025da13582877c32d1fc302486775a1b06f62241741006428c5e" + name = "github.com/pelletier/go-toml" + packages = ["."] + pruneopts = "UT" + revision = "728039f679cbcd4f6a54e080d2219a4c4928c546" + version = "v1.4.0" + +[[projects]] + digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "UT" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:e89f2cdede55684adbe44b5566f55838ad2aee1dff348d14b73ccf733607b671" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = "UT" + revision = "2641b987480bca71fb39738eb8c8b0d577cb1d76" + version = "v0.9.4" + +[[projects]] + branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "UT" + revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8" + +[[projects]] + digest = "1:8dcedf2e8f06c7f94e48267dea0bc0be261fa97b377f3ae3e87843a92a549481" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "UT" + revision = "17f5ca1748182ddf24fc33a5a7caaaf790a52fcc" + version = "v0.4.1" + +[[projects]] + digest = "1:403b810b43500b5b0a9a24a47347e31dc2783ccae8cf97c891b46f5b0496fa1a" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/fs", + ] + pruneopts = "UT" + revision = 
"833678b5bb319f2d20a475cb165c6cc59c2cc77c" + version = "v0.0.2" + +[[projects]] + digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c" + name = "github.com/rcrowley/go-metrics" + packages = ["."] + pruneopts = "UT" + revision = "e2704e165165ec55d062f5919b4b29494e9fa790" + +[[projects]] + digest = "1:b0c25f00bad20d783d259af2af8666969e2fc343fa0dc9efe52936bbd67fb758" + name = "github.com/rs/cors" + packages = ["."] + pruneopts = "UT" + revision = "9a47f48565a795472d43519dd49aac781f3034fb" + version = "v1.6.0" + +[[projects]] + digest = "1:bb495ec276ab82d3dd08504bbc0594a65de8c3b22c6f2aaa92d05b73fbf3a82e" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem", + ] + pruneopts = "UT" + revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424" + version = "v1.2.2" + +[[projects]] + digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc" + name = "github.com/spf13/cast" + packages = ["."] + pruneopts = "UT" + revision = "8c9545af88b134710ab1cd196795e7f2388358d7" + version = "v1.3.0" + +[[projects]] + digest = "1:e096613fb7cf34743d49af87d197663cfccd61876e2219853005a57baedfa562" + name = "github.com/spf13/cobra" + packages = ["."] + pruneopts = "UT" + revision = "f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5" + version = "v0.0.5" + +[[projects]] + digest = "1:1b753ec16506f5864d26a28b43703c58831255059644351bbcb019b843950900" + name = "github.com/spf13/jwalterweatherman" + packages = ["."] + pruneopts = "UT" + revision = "94f6ae3ed3bceceafa716478c5fbf8d29ca601a1" + version = "v1.1.0" + +[[projects]] + digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "UT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + digest = "1:11118bd196646c6515fea3d6c43f66162833c6ae4939bfb229b9956d91c6cf17" + name = "github.com/spf13/viper" + packages = ["."] + pruneopts = "UT" + revision = "b5bf975e5823809fb22c7644d008757f78a4259e" + version = "v1.4.0" + +[[projects]] + digest = "1:5da8ce674952566deae4dbc23d07c85caafc6cfa815b0b3e03e41979cedb8750" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053" + version = "v1.3.0" + +[[projects]] + digest = "1:5b180f17d5bc50b765f4dcf0d126c72979531cbbd7f7929bf3edd87fb801ea2d" + name = "github.com/syndtr/goleveldb" + packages = [ + "leveldb", + "leveldb/cache", + "leveldb/comparer", + "leveldb/errors", + "leveldb/filter", + "leveldb/iterator", + "leveldb/journal", + "leveldb/memdb", + "leveldb/opt", + "leveldb/storage", + "leveldb/table", + "leveldb/util", + ] + pruneopts = "UT" + revision = "9d007e481048296f09f59bd19bb7ae584563cd95" + version = "v1.0.0" + +[[projects]] + digest = "1:83f5e189eea2baad419a6a410984514266ff690075759c87e9ede596809bd0b8" + name = "github.com/tendermint/btcd" + packages = ["btcec"] + pruneopts = "UT" + revision = "80daadac05d1cd29571fccf27002d79667a88b58" + version = "v0.1.1" + +[[projects]] + digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a" + name = "github.com/tendermint/go-amino" + packages = ["."] + pruneopts = "UT" + revision = "dc14acf9ef15f85828bfbc561ed9dd9d2a284885" + version = "v0.14.1" + +[[projects]] + digest = "1:1bb088f6291e5426e3874a60bca0e481a91a5633395d7e0c427ec3e49b626e7b" + name = "github.com/tendermint/iavl" + packages = ["."] + pruneopts = "UT" + revision = "ac7c35c12e8633a1e9fd0b52a00b900b40f32cd3" + version = "v0.12.1" + 
+[[projects]] + digest = "1:ade23e23038ae0e1dd2add1fbbdbac457b6eb5694f541a75bdb73b607501e313" + name = "github.com/tendermint/tendermint" + packages = [ + "abci/client", + "abci/example/code", + "abci/example/kvstore", + "abci/server", + "abci/types", + "blockchain", + "cmd/tendermint/commands", + "config", + "consensus", + "consensus/types", + "crypto", + "crypto/armor", + "crypto/ed25519", + "crypto/encoding/amino", + "crypto/merkle", + "crypto/multisig", + "crypto/multisig/bitarray", + "crypto/secp256k1", + "crypto/tmhash", + "crypto/xsalsa20symmetric", + "evidence", + "libs/autofile", + "libs/bech32", + "libs/cli", + "libs/cli/flags", + "libs/clist", + "libs/common", + "libs/db", + "libs/events", + "libs/fail", + "libs/flowrate", + "libs/log", + "libs/pubsub", + "libs/pubsub/query", + "lite", + "lite/client", + "lite/errors", + "lite/proxy", + "mempool", + "node", + "p2p", + "p2p/conn", + "p2p/pex", + "p2p/upnp", + "privval", + "proxy", + "rpc/client", + "rpc/core", + "rpc/core/types", + "rpc/grpc", + "rpc/lib/client", + "rpc/lib/server", + "rpc/lib/types", + "state", + "state/txindex", + "state/txindex/kv", + "state/txindex/null", + "types", + "types/time", + "version", + ] + pruneopts = "UT" + revision = "45e49670ac6cd99b420f0f4aebf9b8c6808c98a2" + source = "https://github.com/irisnet/tendermint.git" + version = "v0.28.0" + +[[projects]] + digest = "1:5a736f4722932d9a45d28852e85b83f68292d9af2ef0be29d4e5fe6fd89d5d96" + name = "github.com/zondax/hid" + packages = ["."] + pruneopts = "UT" + revision = "302fd402163c34626286195dfa9adac758334acc" + version = "v0.9.0" + +[[projects]] + digest = "1:c14bdb83fff0655bd6d3958aacb8a5cb147c9081761d86023ba936dae7014458" + name = "github.com/zondax/ledger-cosmos-go" + packages = ["."] + pruneopts = "UT" + revision = "af771e468374cca2f6b3309ecc548ee8c70efb17" + version = "v0.9.9" + +[[projects]] + digest = "1:c450d0b3b9217e3926714751c2034a9171e2d769c254ef3c74e5795ad74e1b04" + name = "github.com/zondax/ledger-go" + packages = ["."] + pruneopts = "UT" + revision = "94455688b6fac63ee05a4a61f44d5a4095317f74" + version = "v0.9.0" + +[[projects]] + digest = "1:a5158647b553c61877aa9ae74f4015000294e47981e6b8b07525edcbb0747c81" + name = "go.uber.org/atomic" + packages = ["."] + pruneopts = "UT" + revision = "df976f2515e274675050de7b3f42545de80594fd" + version = "v1.4.0" + +[[projects]] + digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" + name = "go.uber.org/multierr" + packages = ["."] + pruneopts = "UT" + revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" + version = "v1.1.0" + +[[projects]] + digest = "1:676160e6a4722b08e0e26b11521d575c2cb2b6f0c679e1ee6178c5d8dee51e5e" + name = "go.uber.org/zap" + packages = [ + ".", + "buffer", + "internal/bufferpool", + "internal/color", + "internal/exit", + "zapcore", + ] + pruneopts = "UT" + revision = "27376062155ad36be76b0f12cf1572a221d3a48c" + version = "v1.10.0" + +[[projects]] + digest = "1:6f6dc6060c4e9ba73cf28aa88f12a69a030d3d19d518ef8e931879eaa099628d" + name = "golang.org/x/crypto" + packages = [ + "bcrypt", + "blowfish", + "chacha20poly1305", + "curve25519", + "ed25519", + "ed25519/internal/edwards25519", + "hkdf", + "internal/chacha20", + "internal/subtle", + "nacl/box", + "nacl/secretbox", + "openpgp/armor", + "openpgp/errors", + "pbkdf2", + "poly1305", + "ripemd160", + "salsa20/salsa", + ] + pruneopts = "UT" + revision = "3764759f34a542a3aef74d6b02e35be7ab893bba" + source = "https://github.com/tendermint/crypto" + +[[projects]] + digest = 
"1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "netutil", + "trace", + ] + pruneopts = "UT" + revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f" + +[[projects]] + branch = "master" + digest = "1:0beb3839ca69f4a32c40d753a8cd60b273c2ba266329d1a9957731fc4cdb5478" + name = "golang.org/x/sys" + packages = [ + "cpu", + "unix", + ] + pruneopts = "UT" + revision = "93c9922d18aeb82498a065f07aec7ad7fa60dfb7" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "383e8b2c3b9e36c4076b235b32537292176bae20" + +[[projects]] + digest = "1:e8800ddadd6bce3bc0c5ffd7bc55dbdddc6e750956c10cc10271cade542fccbe" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "501c41df7f472c740d0674ff27122f3f48c80ce7" + version = "v1.21.1" + +[[projects]] + branch = "v2" + digest = "1:df1ffb6d59bacf4d162d65d50acaf21a16c8947086c638e86aaa01c1ae59f5ac" + name = "gopkg.in/mgo.v2" + packages = [ + ".", + "bson", + "internal/json", + "internal/sasl", + "internal/scram", + "txn", + ] + pruneopts = "UT" + revision = "9856a29383ce1c59f308dd1cf0363a79b5bef6b5" + +[[projects]] + digest = "1:c805e517269b0ba4c21ded5836019ed7d16953d4026cb7d00041d039c7906be9" + name = "gopkg.in/natefinch/lumberjack.v2" + packages = ["."] + pruneopts = "UT" + revision = "a96e63847dc3c67d17befa69c303767e2f84e54f" + version = "v2.1" + +[[projects]] + digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/irisnet/irishub/app", + "github.com/irisnet/irishub/codec", + "github.com/irisnet/irishub/modules/auth", + "github.com/irisnet/irishub/modules/bank", + "github.com/irisnet/irishub/modules/distribution", + "github.com/irisnet/irishub/modules/distribution/tags", + "github.com/irisnet/irishub/modules/distribution/types", + "github.com/irisnet/irishub/modules/gov", + "github.com/irisnet/irishub/modules/slashing", + 
"github.com/irisnet/irishub/modules/stake", + "github.com/irisnet/irishub/types", + "github.com/jolestar/go-commons-pool", + "github.com/tendermint/tendermint/abci/types", + "github.com/tendermint/tendermint/rpc/client", + "github.com/tendermint/tendermint/types", + "go.uber.org/zap", + "go.uber.org/zap/zapcore", + "gopkg.in/mgo.v2", + "gopkg.in/mgo.v2/bson", + "gopkg.in/mgo.v2/txn", + "gopkg.in/natefinch/lumberjack.v2", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/service/iris/Gopkg.toml b/service/iris/Gopkg.toml new file mode 100644 index 0000000..a21042b --- /dev/null +++ b/service/iris/Gopkg.toml @@ -0,0 +1,72 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +[[override]] + name = "github.com/tendermint/tendermint" + source = "https://github.com/irisnet/tendermint.git" + version = "=v0.28.0" + +[[override]] + name = "github.com/irisnet/irishub" + version = "=v0.14.1" + +[[override]] + name = "google.golang.org/genproto" + revision = "383e8b2c3b9e36c4076b235b32537292176bae20" + +[[override]] + name = "golang.org/x/crypto" + source = "https://github.com/tendermint/crypto" + revision = "3764759f34a542a3aef74d6b02e35be7ab893bba" + + [[override]] + name = "github.com/tendermint/iavl" + version = "=v0.12.1" + + +[[constraint]] + name = "github.com/syndtr/goleveldb" + version = "v0.0.0-20180708030551-c4c61651e9e3" + +[[constraint]] + branch = "v2" + name = "gopkg.in/mgo.v2" + +[[constraint]] + name = "go.uber.org/zap" + version = "1.9.1" + +[[constraint]] + name = "gopkg.in/natefinch/lumberjack.v2" + version = "2.1.0" + +[[constraint]] + version = "v2.0.0" + name = "github.com/jolestar/go-commons-pool" + +[prune] + go-tests = true + unused-packages = true diff --git a/service/iris/Makefile b/service/iris/Makefile new file mode 100644 index 0000000..ffc4092 --- /dev/null +++ b/service/iris/Makefile @@ -0,0 +1,43 @@ +GOCMD=go +GOBUILD=$(GOCMD) build +GOCLEAN=$(GOCMD) clean +GOTEST=$(GOCMD) test +GOGET=$(GOCMD) get +BINARY_NAME=rainbow-sync +BINARY_UNIX=$(BINARY_NAME)-unix + +all: get_tools get_deps build + +get_deps: + @rm -rf vendor/ + @echo "--> Running dep ensure" + @dep ensure -v + +build: + $(GOBUILD) -o $(BINARY_NAME) -v + +clean: + $(GOCLEAN) + rm -f $(BINARY_NAME) + rm -f $(BINARY_UNIX) + +run: + $(GOBUILD) -o $(BINARY_NAME) -v + ./$(BINARY_NAME) + + +# Cross compilation +build-linux: + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GOBUILD) -o $(BINARY_UNIX) -v + +###################################### +## Tools + +check_tools: + cd tools && $(MAKE) check_tools + +get_tools: + cd tools && $(MAKE) get_tools + +update_tools: + cd tools && $(MAKE) update_tools \ No newline at end of file diff --git a/service/iris/block/parse_asset_detail.go b/service/iris/block/parse_asset_detail.go new file mode 100644 index 0000000..8b313e6 --- /dev/null +++ b/service/iris/block/parse_asset_detail.go @@ -0,0 +1,222 @@ +package block + +import ( + "github.com/irisnet/rainbow-sync/service/iris/logger" + model 
"github.com/irisnet/rainbow-sync/service/iris/db" + imodel "github.com/irisnet/rainbow-sync/service/iris/model" + "github.com/irisnet/rainbow-sync/service/iris/helper" + "strings" + "gopkg.in/mgo.v2/txn" + "gopkg.in/mgo.v2/bson" + "time" + "fmt" +) + +var ( + assetDetailTriggers = map[string]bool{ + "stakeEndBlocker": true, + "slashBeginBlocker": true, + "slashEndBlocker": true, + "govEndBlocker": true, + } + + // adapt multiple asset + assetDenoms = []string{"iris-atto"} +) + +const ( + triggerTxHashLength = 64 + separator = "::" // tag value separator + triggerTx = "tx" + unDelegationSubject = "Undelegation" + IRIS = "Iris" +) + +type Iris_Block struct{} + +func (iris *Iris_Block) Name() string { + return IRIS +} + +func (iris *Iris_Block) SaveDocsWithTxn(blockDoc *imodel.Block, irisAssetDetail []*imodel.IrisAssetDetail, irisTxs []*imodel.IrisTx, taskDoc imodel.SyncTask) error { + var ( + ops, irisAssetDetailOps, irisTxsOps []txn.Op + ) + + if blockDoc.Height == 0 { + return fmt.Errorf("invalid block, height equal 0") + } + + blockOp := txn.Op{ + C: imodel.CollectionNameBlock, + Id: bson.NewObjectId(), + Insert: blockDoc, + } + + length_assetdetail := len(irisAssetDetail) + if length_assetdetail > 0 { + irisAssetDetailOps = make([]txn.Op, 0, length_assetdetail) + for _, v := range irisAssetDetail { + op := txn.Op{ + C: imodel.CollectionNameAssetDetail, + Id: bson.NewObjectId(), + Insert: v, + } + irisAssetDetailOps = append(irisAssetDetailOps, op) + } + } + length_txs := len(irisTxs) + if length_txs > 0 { + irisTxsOps = make([]txn.Op, 0, length_txs) + for _, v := range irisTxs { + op := txn.Op{ + C: imodel.CollectionNameIrisTx, + Id: bson.NewObjectId(), + Insert: v, + } + irisTxsOps = append(irisTxsOps, op) + } + } + + updateOp := txn.Op{ + C: imodel.CollectionNameSyncTask, + Id: taskDoc.ID, + Assert: txn.DocExists, + Update: bson.M{ + "$set": bson.M{ + "current_height": taskDoc.CurrentHeight, + "status": taskDoc.Status, + "last_update_time": taskDoc.LastUpdateTime, + }, + }, + } + + ops = make([]txn.Op, 0, length_assetdetail+length_txs+2) + ops = append(append(ops, blockOp, updateOp), irisAssetDetailOps...) + ops = append(ops, irisTxsOps...) 
+ + if len(ops) > 0 { + err := model.Txn(ops) + if err != nil { + return err + } + } + + return nil +} + +func (iris *Iris_Block) ParseBlock(b int64, client *helper.Client) (resBlock *imodel.Block, resIrisAssetDetails []*imodel.IrisAssetDetail, resIrisTxs []*imodel.IrisTx, resErr error) { + + defer func() { + if err := recover(); err != nil { + logger.Error("parse iris block fail", logger.Int64("height", b), + logger.Any("err", err), logger.String("Chain Block", iris.Name())) + + resBlock = &imodel.Block{} + resIrisAssetDetails = nil + resIrisTxs = nil + resErr = fmt.Errorf("%v", err) + } + }() + + irisAssetDetails, err := iris.ParseIrisAssetDetail(b, client) + if err != nil { + logger.Error("parse iris asset detail error", logger.String("error", err.Error()), logger.String("Chain Block", iris.Name())) + } + + irisTxs, err := iris.ParseIrisTxs(b, client) + if err != nil { + logger.Error("parse iris txs", logger.String("error", err.Error()), logger.String("Chain Block", iris.Name())) + } + + resBlock = &imodel.Block{ + Height: b, + CreateTime: time.Now().Unix(), + } + resIrisAssetDetails = irisAssetDetails + resIrisTxs = irisTxs + resErr = err + + return +} + +// parse iris asset detail from block result tags +func (iris *Iris_Block) ParseIrisAssetDetail(b int64, client *helper.Client) ([]*imodel.IrisAssetDetail, error) { + var irisAssetDetails []*imodel.IrisAssetDetail + res, err := client.BlockResults(&b) + if err != nil { + logger.Warn("get block result err, now try again", logger.String("err", err.Error()), + logger.String("Chain Block", iris.Name())) + // there is possible parse block fail when in iterator + var err2 error + client2 := helper.GetClient() + res, err2 = client2.BlockResults(&b) + client2.Release() + if err2 != nil { + return nil, err2 + } + } + + tags := res.Results.EndBlock.Tags + //fmt.Printf("======>>tags:%+v\n",tags) + + // filter asset detail trigger from tags and build asset detail model + irisAssetDetails = make([]*imodel.IrisAssetDetail, 0, len(tags)) + for _, t := range tags { + tagKey := string(t.Key) + tagValue := string(t.Value) + + if assetDetailTriggers[tagKey] || len(tagKey) == triggerTxHashLength { + values := strings.Split(tagValue, separator) + if len(values) != 6 { + logger.Warn("struct of iris asset detail changed in block result, skip parse this asset detail", + logger.Int64("height", b), logger.String("tagKey", tagKey), + logger.String("Chain Block", iris.Name())) + continue + } + + irisAssetDetails = append(irisAssetDetails, buildIrisAssetDetailFromTag(tagKey, values, b)) + } + } + + return irisAssetDetails, nil +} + +// get asset detail info by parse tag key and values +func buildIrisAssetDetailFromTag(tagKey string, keyValues []string, height int64) *imodel.IrisAssetDetail { + values := keyValues + coinAmount, coinUnit := parseCoinAmountAndUnitFromStr(values[2]) + + irisAssetDetail := &imodel.IrisAssetDetail{ + From: values[0], + To: values[1], + CoinAmount: coinAmount, + CoinUnit: coinUnit, + Trigger: tagKey, + Subject: values[3], + Description: values[4], + Timestamp: values[5], + Height: height, + } + + if len(tagKey) == triggerTxHashLength { + irisAssetDetail.TxHash = tagKey + irisAssetDetail.Trigger = triggerTx + } + + if irisAssetDetail.Subject == unDelegationSubject { + irisAssetDetail.TxHash = irisAssetDetail.Description + } + + irisAssetDetail.TxHash = strings.ToUpper(irisAssetDetail.TxHash) + return irisAssetDetail +} + +func parseCoinAmountAndUnitFromStr(s string) (string, string) { + for _, denom := range assetDenoms { + if 
strings.HasSuffix(s, denom) { + return strings.Replace(s, denom, "", -1), denom + } + } + return "", "" +} diff --git a/service/iris/block/parse_asset_detail_test.go b/service/iris/block/parse_asset_detail_test.go new file mode 100644 index 0000000..b94d3a5 --- /dev/null +++ b/service/iris/block/parse_asset_detail_test.go @@ -0,0 +1,45 @@ +package block + +import ( + "testing" + irisConf "github.com/irisnet/rainbow-sync/service/iris/conf" + "encoding/json" + "github.com/irisnet/rainbow-sync/service/iris/logger" + "github.com/irisnet/rainbow-sync/service/iris/helper" +) + +func TestIris_Block_ParseIrisAssetDetail(t *testing.T) { + helper.Init(irisConf.BlockChainMonitorUrl, irisConf.MaxConnectionNum, irisConf.InitConnectionNum) + client := helper.GetClient() + defer func() { + client.Release() + logger.Info("Release tm client") + }() + type args struct { + b int64 + client *helper.Client + } + tests := []struct { + name string + args args + }{ + { + name: "test parse asset detail", + args: args{ + b: 19301, + client: client, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + biris := Iris_Block{} + res, err := biris.ParseIrisAssetDetail(tt.args.b, tt.args.client) + if err != nil { + t.Fatal(err) + } + resBytes, _ := json.MarshalIndent(res, "", "\t") + t.Log(string(resBytes)) + }) + } +} diff --git a/service/iris/block/parse_tx.go b/service/iris/block/parse_tx.go new file mode 100644 index 0000000..f336656 --- /dev/null +++ b/service/iris/block/parse_tx.go @@ -0,0 +1,285 @@ +package block + +import ( + "github.com/irisnet/rainbow-sync/service/iris/logger" + imodel "github.com/irisnet/rainbow-sync/service/iris/model" + "github.com/irisnet/rainbow-sync/service/iris/utils" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/irisnet/irishub/modules/auth" + "github.com/tendermint/tendermint/types" + "github.com/irisnet/rainbow-sync/service/iris/helper" + "github.com/irisnet/rainbow-sync/service/iris/constant" +) + +// parse iris txs from block result txs +func (iris *Iris_Block) ParseIrisTxs(b int64, client *helper.Client) ([]*imodel.IrisTx, error) { + resblock, err := client.Block(&b) + if err != nil { + logger.Warn("get block result err, now try again", logger.String("err", err.Error()), + logger.String("Chain Block", iris.Name()), logger.Any("height", b)) + // there is possible parse block fail when in iterator + var err2 error + client2 := helper.GetClient() + resblock, err2 = client2.Block(&b) + client2.Release() + if err2 != nil { + return nil, err2 + } + } + + irisTxs := make([]*imodel.IrisTx, 0, len(resblock.Block.Txs)) + for _, tx := range resblock.Block.Txs { + iristx := iris.ParseIrisTxModel(tx, resblock.Block) + irisTxs = append(irisTxs, &iristx) + } + + return irisTxs, nil +} + +// parse iris tx from iris block result tx +func (iris *Iris_Block) ParseIrisTxModel(txBytes types.Tx, block *types.Block) imodel.IrisTx { + + var ( + authTx auth.StdTx + methodName = "ParseTx" + docTx imodel.IrisTx + actualFee *imodel.ActualFee + ) + + cdc := utils.GetCodec() + + err := cdc.UnmarshalBinaryLengthPrefixed(txBytes, &authTx) + if err != nil { + logger.Error(err.Error()) + return docTx + } + + height := block.Height + time := block.Time + txHash := utils.BuildHex(txBytes.Hash()) + fee := utils.BuildFee(authTx.Fee) + memo := authTx.Memo + + // get tx status, gasUsed, gasPrice and actualFee from tx result + status, result, err := utils.QueryTxResult(txBytes.Hash()) + if err != nil { + logger.Error("get txResult err", logger.String("method", methodName), 
logger.String("err", err.Error())) + } + gasUsed := Min(result.GasUsed, fee.Gas) + if len(fee.Amount) > 0 { + gasPrice := fee.Amount[0].Amount / float64(fee.Gas) + actualFee = &imodel.ActualFee{ + Denom: fee.Amount[0].Denom, + Amount: float64(gasUsed) * gasPrice, + } + } else { + actualFee = &imodel.ActualFee{} + } + msgs := authTx.GetMsgs() + if len(msgs) <= 0 { + logger.Error("can't get msgs", logger.String("method", methodName)) + return docTx + } + msg := msgs[0] + + docTx = imodel.IrisTx{ + Height: height, + Time: time, + TxHash: txHash, + Fee: fee, + ActualFee: actualFee, + Memo: memo, + Status: status, + Code: result.Code, + Tags: parseTags(result), + } + switch msg.(type) { + case imodel.MsgTransfer: + msg := msg.(imodel.MsgTransfer) + + docTx.From = msg.Inputs[0].Address.String() + docTx.To = msg.Outputs[0].Address.String() + docTx.Initiator = msg.Inputs[0].Address.String() + docTx.Amount = utils.ParseCoins(msg.Inputs[0].Coins.String()) + docTx.Type = constant.Iris_TxTypeTransfer + case imodel.MsgBurn: + msg := msg.(imodel.MsgBurn) + docTx.From = msg.Owner.String() + docTx.To = "" + docTx.Initiator = msg.Owner.String() + docTx.Amount = utils.ParseCoins(msg.Coins.String()) + docTx.Type = constant.Iris_TxTypeBurn + + case imodel.MsgStakeCreate: + msg := msg.(imodel.MsgStakeCreate) + docTx.From = msg.DelegatorAddr.String() + docTx.To = msg.ValidatorAddr.String() + docTx.Initiator = msg.DelegatorAddr.String() + docTx.Amount = []*imodel.Coin{utils.ParseCoin(msg.Delegation.String())} + docTx.Type = constant.Iris_TxTypeStakeCreateValidator + + case imodel.MsgStakeEdit: + msg := msg.(imodel.MsgStakeEdit) + + docTx.From = msg.ValidatorAddr.String() + docTx.To = "" + docTx.Initiator = msg.ValidatorAddr.String() + docTx.Amount = []*imodel.Coin{} + docTx.Type = constant.Iris_TxTypeStakeEditValidator + + case imodel.MsgStakeDelegate: + msg := msg.(imodel.MsgStakeDelegate) + + docTx.From = msg.DelegatorAddr.String() + docTx.To = msg.ValidatorAddr.String() + docTx.Initiator = msg.DelegatorAddr.String() + docTx.Amount = []*imodel.Coin{utils.ParseCoin(msg.Delegation.String())} + docTx.Type = constant.Iris_TxTypeStakeDelegate + + case imodel.MsgStakeBeginUnbonding: + msg := msg.(imodel.MsgStakeBeginUnbonding) + + shares := utils.ParseFloat(msg.SharesAmount.String()) + docTx.From = msg.DelegatorAddr.String() + docTx.To = msg.ValidatorAddr.String() + docTx.Initiator = msg.DelegatorAddr.String() + coin := imodel.Coin{ + Amount: shares, + } + docTx.Amount = []*imodel.Coin{&coin} + docTx.Type = constant.Iris_TxTypeStakeBeginUnbonding + case imodel.MsgBeginRedelegate: + msg := msg.(imodel.MsgBeginRedelegate) + + shares := utils.ParseFloat(msg.SharesAmount.String()) + docTx.From = msg.ValidatorSrcAddr.String() + docTx.To = msg.ValidatorDstAddr.String() + docTx.Initiator = msg.DelegatorAddr.String() + coin := imodel.Coin{ + Amount: shares, + } + docTx.Amount = []*imodel.Coin{&coin} + docTx.Type = constant.Iris_TxTypeBeginRedelegate + case imodel.MsgUnjail: + msg := msg.(imodel.MsgUnjail) + + docTx.From = msg.ValidatorAddr.String() + docTx.Initiator = msg.ValidatorAddr.String() + docTx.Type = constant.Iris_TxTypeUnjail + case imodel.MsgSetWithdrawAddress: + msg := msg.(imodel.MsgSetWithdrawAddress) + + docTx.From = msg.DelegatorAddr.String() + docTx.To = msg.WithdrawAddr.String() + docTx.Initiator = msg.DelegatorAddr.String() + docTx.Type = constant.Iris_TxTypeSetWithdrawAddress + case imodel.MsgWithdrawDelegatorReward: + msg := msg.(imodel.MsgWithdrawDelegatorReward) + + docTx.From = 
msg.DelegatorAddr.String() + docTx.To = msg.ValidatorAddr.String() + docTx.Initiator = msg.DelegatorAddr.String() + docTx.Type = constant.Iris_TxTypeWithdrawDelegatorReward + + for _, tag := range result.Tags { + key := string(tag.Key) + if key == imodel.TagDistributionReward { + reward := string(tag.Value) + docTx.Amount = utils.ParseCoins(reward) + break + } + } + case imodel.MsgWithdrawDelegatorRewardsAll: + msg := msg.(imodel.MsgWithdrawDelegatorRewardsAll) + + docTx.From = msg.DelegatorAddr.String() + docTx.Initiator = msg.DelegatorAddr.String() + docTx.Type = constant.Iris_TxTypeWithdrawDelegatorRewardsAll + for _, tag := range result.Tags { + key := string(tag.Key) + if key == imodel.TagDistributionReward { + reward := string(tag.Value) + docTx.Amount = utils.ParseCoins(reward) + break + } + } + case imodel.MsgWithdrawValidatorRewardsAll: + msg := msg.(imodel.MsgWithdrawValidatorRewardsAll) + + docTx.From = msg.ValidatorAddr.String() + docTx.Initiator = msg.ValidatorAddr.String() + docTx.Type = constant.Iris_TxTypeWithdrawValidatorRewardsAll + for _, tag := range result.Tags { + key := string(tag.Key) + if key == imodel.TagDistributionReward { + reward := string(tag.Value) + docTx.Amount = utils.ParseCoins(reward) + break + } + } + case imodel.MsgSubmitProposal: + msg := msg.(imodel.MsgSubmitProposal) + + docTx.From = msg.Proposer.String() + docTx.To = "" + docTx.Initiator = msg.Proposer.String() + docTx.Amount = utils.ParseCoins(msg.InitialDeposit.String()) + docTx.Type = constant.Iris_TxTypeSubmitProposal + + case imodel.MsgSubmitSoftwareUpgradeProposal: + msg := msg.(imodel.MsgSubmitSoftwareUpgradeProposal) + + docTx.From = msg.Proposer.String() + docTx.To = "" + docTx.Initiator = msg.Proposer.String() + docTx.Amount = utils.ParseCoins(msg.InitialDeposit.String()) + docTx.Type = constant.Iris_TxTypeSubmitProposal + + case imodel.MsgSubmitTaxUsageProposal: + msg := msg.(imodel.MsgSubmitTaxUsageProposal) + + docTx.From = msg.Proposer.String() + docTx.To = "" + docTx.Initiator = msg.Proposer.String() + docTx.Amount = utils.ParseCoins(msg.InitialDeposit.String()) + docTx.Type = constant.Iris_TxTypeSubmitProposal + + case imodel.MsgDeposit: + msg := msg.(imodel.MsgDeposit) + + docTx.From = msg.Depositor.String() + docTx.Initiator = msg.Depositor.String() + docTx.Amount = utils.ParseCoins(msg.Amount.String()) + docTx.Type = constant.Iris_TxTypeDeposit + case imodel.MsgVote: + msg := msg.(imodel.MsgVote) + + docTx.From = msg.Voter.String() + docTx.Initiator = msg.Voter.String() + docTx.Amount = []*imodel.Coin{} + docTx.Type = constant.Iris_TxTypeVote + + default: + logger.Warn("unknown msg type") + } + + return docTx + +} + +func parseTags(result abci.ResponseDeliverTx) map[string]string { + tags := make(map[string]string, 0) + for _, tag := range result.Tags { + key := string(tag.Key) + value := string(tag.Value) + tags[key] = value + } + return tags +} + +func Min(x, y int64) int64 { + if x < y { + return x + } + return y +} diff --git a/service/iris/block/parse_tx_test.go b/service/iris/block/parse_tx_test.go new file mode 100644 index 0000000..f868036 --- /dev/null +++ b/service/iris/block/parse_tx_test.go @@ -0,0 +1,43 @@ +package block + +import ( + "testing" + "github.com/irisnet/rainbow-sync/service/iris/helper" + irisConf "github.com/irisnet/rainbow-sync/service/iris/conf" + "encoding/json" +) + +func TestIris_Block_ParseIrisTx(t *testing.T) { + helper.Init(irisConf.BlockChainMonitorUrl, irisConf.MaxConnectionNum, irisConf.InitConnectionNum) + client := helper.GetClient() + 
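// hand the borrowed tendermint client back to the pool when the test finishes +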
defer func() { + client.Release() + }() + type args struct { + b int64 + client *helper.Client + } + tests := []struct { + name string + args args + }{ + { + name: "test parse iris tx", + args: args{ + b: 408960, + client: client, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + iris := Iris_Block{} + res, err := iris.ParseIrisTxs(tt.args.b, tt.args.client) + if err != nil { + t.Fatal(err) + } + resBytes, _ := json.MarshalIndent(res, "", "\t") + t.Log(string(resBytes)) + }) + } +} diff --git a/service/iris/conf/db/types.go b/service/iris/conf/db/types.go new file mode 100644 index 0000000..1fda759 --- /dev/null +++ b/service/iris/conf/db/types.go @@ -0,0 +1,41 @@ +package db + +import ( + "github.com/irisnet/rainbow-sync/service/iris/logger" + constant "github.com/irisnet/rainbow-sync/service/iris/conf" + "os" +) + +var ( + Addrs = "localhost:27019" + User = "iris" + Passwd = "irispassword" + Database = "rainbow-server" +) + +// get value of env var +func init() { + addrs, found := os.LookupEnv(constant.EnvNameDbAddr) + if found { + Addrs = addrs + } + + user, found := os.LookupEnv(constant.EnvNameDbUser) + if found { + User = user + } + + passwd, found := os.LookupEnv(constant.EnvNameDbPassWd) + if found { + Passwd = passwd + } + + database, found := os.LookupEnv(constant.EnvNameDbDataBase) + if found { + Database = database + } + + logger.Debug("init db config", logger.String("addrs", Addrs), + logger.Bool("userIsEmpty", User == ""), logger.Bool("passwdIsEmpty", Passwd == ""), + logger.String("database", Database)) +} diff --git a/service/iris/conf/types.go b/service/iris/conf/types.go new file mode 100644 index 0000000..6ff16b9 --- /dev/null +++ b/service/iris/conf/types.go @@ -0,0 +1,87 @@ +package conf + +import ( + "github.com/irisnet/rainbow-sync/service/iris/logger" + "os" + "strconv" + "strings" +) + +var ( + BlockChainMonitorUrl = []string{"tcp://192.168.150.31:26657"} + + IrisNetwork = "testnet" + WorkerNumCreateTask = 2 + WorkerNumExecuteTask = 30 + WorkerMaxSleepTime = 2 * 60 + BlockNumPerWorkerHandle = 50 + + InitConnectionNum = 50 // fast init num of tendermint client pool + MaxConnectionNum = 100 // max size of tendermint client pool +) + +const ( + EnvNameDbAddr = "DB_ADDR" + EnvNameDbUser = "DB_USER" + EnvNameDbPassWd = "DB_PASSWD" + EnvNameDbDataBase = "DB_DATABASE" + + EnvNameSerNetworkFullNode = "SER_BC_FULL_NODE" + EnvNameWorkerNumCreateTask = "WORKER_NUM_CREATE_TASK" + EnvNameWorkerNumExecuteTask = "WORKER_NUM_EXECUTE_TASK" + EnvNameWorkerMaxSleepTime = "WORKER_MAX_SLEEP_TIME" + EnvNameBlockNumPerWorkerHandle = "BLOCK_NUM_PER_WORKER_HANDLE" + EnvNameIrisNetwork = "IRIS_NETWORK" +) + +// get value of env var +func init() { + var err error + + nodeUrl, found := os.LookupEnv(EnvNameSerNetworkFullNode) + if found { + BlockChainMonitorUrl = strings.Split(nodeUrl, ",") + } + logger.Info("Env Value", logger.Any(EnvNameSerNetworkFullNode, BlockChainMonitorUrl)) + + workerNumCreateTask, found := os.LookupEnv(EnvNameWorkerNumCreateTask) + if found { + WorkerNumCreateTask, err = strconv.Atoi(workerNumCreateTask) + if err != nil { + logger.Fatal("Can't convert str to int", logger.String(EnvNameWorkerNumCreateTask, workerNumCreateTask)) + } + } + logger.Info("Env Value", logger.Int(EnvNameWorkerNumCreateTask, WorkerNumCreateTask)) + + workerNumExecuteTask, found := os.LookupEnv(EnvNameWorkerNumExecuteTask) + if found { + WorkerNumExecuteTask, err = strconv.Atoi(workerNumExecuteTask) + if err != nil { + logger.Fatal("Can't convert str to int", 
logger.String(EnvNameWorkerNumExecuteTask, workerNumCreateTask)) + } + } + logger.Info("Env Value", logger.Int(EnvNameWorkerNumExecuteTask, WorkerNumExecuteTask)) + + workerMaxSleepTime, found := os.LookupEnv(EnvNameWorkerMaxSleepTime) + if found { + WorkerMaxSleepTime, err = strconv.Atoi(workerMaxSleepTime) + if err != nil { + logger.Fatal("Can't convert str to int", logger.String(EnvNameWorkerMaxSleepTime, workerMaxSleepTime)) + } + } + logger.Info("Env Value", logger.Int(EnvNameWorkerMaxSleepTime, WorkerMaxSleepTime)) + + blockNumPerWorkerHandle, found := os.LookupEnv(EnvNameBlockNumPerWorkerHandle) + if found { + BlockNumPerWorkerHandle, err = strconv.Atoi(blockNumPerWorkerHandle) + if err != nil { + logger.Fatal("Can't convert str to int", logger.String(EnvNameBlockNumPerWorkerHandle, blockNumPerWorkerHandle)) + } + } + logger.Info("Env Value", logger.Int(EnvNameBlockNumPerWorkerHandle, BlockNumPerWorkerHandle)) + network, found := os.LookupEnv(EnvNameIrisNetwork) + if found { + IrisNetwork = network + } + logger.Info("Env Value", logger.String(EnvNameIrisNetwork, IrisNetwork)) +} diff --git a/service/iris/constant/types.go b/service/iris/constant/types.go new file mode 100644 index 0000000..6450f91 --- /dev/null +++ b/service/iris/constant/types.go @@ -0,0 +1,22 @@ +package constant + +const ( + Iris_TxTypeTransfer = "Transfer" + Iris_TxTypeBurn = "Burn" + Iris_TxTypeStakeCreateValidator = "CreateValidator" + Iris_TxTypeStakeEditValidator = "EditValidator" + Iris_TxTypeStakeDelegate = "Delegate" + Iris_TxTypeStakeBeginUnbonding = "BeginUnbonding" + Iris_TxTypeBeginRedelegate = "BeginRedelegate" + Iris_TxTypeUnjail = "Unjail" + Iris_TxTypeSetWithdrawAddress = "SetWithdrawAddress" + Iris_TxTypeWithdrawDelegatorReward = "WithdrawDelegatorReward" + Iris_TxTypeWithdrawDelegatorRewardsAll = "WithdrawDelegatorRewardsAll" + Iris_TxTypeWithdrawValidatorRewardsAll = "WithdrawValidatorRewardsAll" + Iris_TxTypeSubmitProposal = "SubmitProposal" + Iris_TxTypeDeposit = "Deposit" + Iris_TxTypeVote = "Vote" + + TxStatusSuccess = "success" + TxStatusFail = "fail" +) diff --git a/service/iris/db/const.go b/service/iris/db/const.go new file mode 100644 index 0000000..278dd6f --- /dev/null +++ b/service/iris/db/const.go @@ -0,0 +1,17 @@ +package db + +const ( + // value of status + SyncTaskStatusUnHandled = "unhandled" + SyncTaskStatusUnderway = "underway" + SyncTaskStatusCompleted = "completed" + + // only for follow task + // when current_height of follow task add blockNumPerWorkerHandle + // less than blockchain current_height, this follow task's status should be set invalid + FollowTaskStatusInvalid = "invalid" + + // taskType + SyncTaskTypeCatchUp = "catch_up" + SyncTaskTypeFollow = "follow" +) diff --git a/service/iris/db/db.go b/service/iris/db/db.go new file mode 100644 index 0000000..101c80c --- /dev/null +++ b/service/iris/db/db.go @@ -0,0 +1,108 @@ +package db + +import ( + "fmt" + conf "github.com/irisnet/rainbow-sync/service/iris/conf/db" + "github.com/irisnet/rainbow-sync/service/iris/logger" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" + "strings" + "time" +) + +var ( + session *mgo.Session +) + +func Start() { + addrs := strings.Split(conf.Addrs, ",") + dialInfo := &mgo.DialInfo{ + Addrs: addrs, + Database: conf.Database, + Username: conf.User, + Password: conf.Passwd, + Direct: true, + Timeout: time.Second * 10, + PoolLimit: 4096, // Session.SetPoolLimit + } + + var err error + session, err = mgo.DialWithInfo(dialInfo) + if err != nil { + logger.Fatal("connect db 
fail", logger.String("err", err.Error())) + } + session.SetMode(mgo.Strong, true) + logger.Info("init db success") +} + +func Stop() { + logger.Info("release resource :mongoDb") + session.Close() +} + +func getSession() *mgo.Session { + // max session num is 4096 + return session.Clone() +} + +// get collection object +func ExecCollection(collectionName string, s func(*mgo.Collection) error) error { + session := getSession() + defer session.Close() + c := session.DB(conf.Database).C(collectionName) + return s(c) +} + +func Save(h Docs) error { + save := func(c *mgo.Collection) error { + pk := h.PkKvPair() + n, _ := c.Find(pk).Count() + if n >= 1 { + return fmt.Errorf("record exist") + } + return c.Insert(h) + } + return ExecCollection(h.Name(), save) +} + +func Update(h Docs) error { + update := func(c *mgo.Collection) error { + key := h.PkKvPair() + return c.Update(key, h) + } + return ExecCollection(h.Name(), update) +} + +func Delete(h Docs) error { + remove := func(c *mgo.Collection) error { + key := h.PkKvPair() + return c.Remove(key) + } + return ExecCollection(h.Name(), remove) +} + +//mgo transaction method +//detail to see: https://godoc.org/gopkg.in/mgo.v2/txn +func Txn(ops []txn.Op) error { + session := getSession() + defer session.Close() + + c := session.DB(conf.Database).C(CollectionNameTxn) + runner := txn.NewRunner(c) + + txObjectId := bson.NewObjectId() + err := runner.Run(ops, txObjectId, nil) + if err != nil { + if err == txn.ErrAborted { + err = runner.Resume(txObjectId) + if err != nil { + return err + } + } else { + return err + } + } + + return nil +} diff --git a/service/iris/db/types.go b/service/iris/db/types.go new file mode 100644 index 0000000..be3809b --- /dev/null +++ b/service/iris/db/types.go @@ -0,0 +1,16 @@ +// interface for a document + +package db + +const ( + CollectionNameTxn = "sync_mgo_txn" +) + +type ( + Docs interface { + // collection name + Name() string + // primary key pair(used to find a unique record) + PkKvPair() map[string]interface{} + } +) diff --git a/service/iris/helper/pool_client.go b/service/iris/helper/pool_client.go new file mode 100644 index 0000000..d0dd316 --- /dev/null +++ b/service/iris/helper/pool_client.go @@ -0,0 +1,53 @@ +//init client from clientPool. 
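+//each pooled Client is identified by an id generated from the node address it connects to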
+//client is httpClient of tendermint + +package helper + +import ( + "fmt" + rpcClient "github.com/tendermint/tendermint/rpc/client" + "github.com/irisnet/rainbow-sync/service/iris/logger" + "time" +) + +type Client struct { + Id string + rpcClient.Client +} + +func newClient(addr string) *Client { + return &Client{ + Id: generateId(addr), + Client: rpcClient.NewHTTP(addr, "/websocket"), + } +} + +// get client from pool +func GetClient() *Client { + c, err := pool.BorrowObject(ctx) + for err != nil { + logger.Error("GetClient failed,will try again after 3 seconds", logger.String("err", err.Error())) + time.Sleep(3 * time.Second) + c, err = pool.BorrowObject(ctx) + } + + return c.(*Client) +} + +// release client +func (c *Client) Release() { + err := pool.ReturnObject(ctx, c) + if err != nil { + logger.Error(err.Error()) + } +} + +func (c *Client) HeartBeat() error { + http := c.Client.(*rpcClient.HTTP) + _, err := http.Health() + return err +} + +func generateId(address string) string { + return fmt.Sprintf("peer[%s]", address) +} diff --git a/service/iris/helper/pool_factory.go b/service/iris/helper/pool_factory.go new file mode 100644 index 0000000..74df986 --- /dev/null +++ b/service/iris/helper/pool_factory.go @@ -0,0 +1,124 @@ +package helper + +import ( + "context" + commonPool "github.com/jolestar/go-commons-pool" + "github.com/irisnet/rainbow-sync/service/iris/logger" + "math/rand" + "sync" +) + +type ( + PoolFactory struct { + peersMap sync.Map + } + EndPoint struct { + Address string + Available bool + } +) + +var ( + poolFactory PoolFactory + pool *commonPool.ObjectPool + ctx = context.Background() +) + +func Init(BlockChainMonitorUrl []string, MaxConnectionNum, InitConnectionNum int) { + var syncMap sync.Map + for _, url := range BlockChainMonitorUrl { + key := generateId(url) + endPoint := EndPoint{ + Address: url, + Available: true, + } + + syncMap.Store(key, endPoint) + } + poolFactory = PoolFactory{ + peersMap: syncMap, + } + + config := commonPool.NewDefaultPoolConfig() + config.MaxTotal = MaxConnectionNum + config.MaxIdle = InitConnectionNum + config.MinIdle = InitConnectionNum + config.TestOnBorrow = true + config.TestOnCreate = true + config.TestWhileIdle = true + + logger.Info("PoolConfig", logger.Int("config.MaxTotal", config.MaxTotal), + logger.Int("config.MaxIdle", config.MaxIdle)) + pool = commonPool.NewObjectPool(ctx, &poolFactory, config) + pool.PreparePool(ctx) +} + +func ClosePool() { + pool.Close(ctx) +} + +func (f *PoolFactory) MakeObject(ctx context.Context) (*commonPool.PooledObject, error) { + endpoint := f.GetEndPoint() + return commonPool.NewPooledObject(newClient(endpoint.Address)), nil +} + +func (f *PoolFactory) DestroyObject(ctx context.Context, object *commonPool.PooledObject) error { + c := object.Object.(*Client) + if c.IsRunning() { + c.Stop() + } + return nil +} + +func (f *PoolFactory) ValidateObject(ctx context.Context, object *commonPool.PooledObject) bool { + // do validate + c := object.Object.(*Client) + if c.HeartBeat() != nil { + value, ok := f.peersMap.Load(c.Id) + if ok { + endPoint := value.(EndPoint) + endPoint.Available = true + f.peersMap.Store(c.Id, endPoint) + } + return false + } + return true +} + +func (f *PoolFactory) ActivateObject(ctx context.Context, object *commonPool.PooledObject) error { + return nil +} + +func (f *PoolFactory) PassivateObject(ctx context.Context, object *commonPool.PooledObject) error { + return nil +} + +func (f *PoolFactory) GetEndPoint() EndPoint { + var ( + keys []string + selectedKey string + ) 
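+ // Walk the peer map: collect the keys of endpoints still marked Available and remember the last key visited as a fallback. + // If any endpoints are available, one of them is chosen at random; otherwise the fallback key from the walk is used.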
+ + f.peersMap.Range(func(k, value interface{}) bool { + key := k.(string) + endPoint := value.(EndPoint) + if endPoint.Available { + keys = append(keys, key) + } + selectedKey = key + + return true + }) + + if len(keys) > 0 { + index := rand.Intn(len(keys)) + selectedKey = keys[index] + } + value, ok := f.peersMap.Load(selectedKey) + if ok { + return value.(EndPoint) + } else { + logger.Error("Can't get selected end point", logger.String("selectedKey", selectedKey)) + } + return EndPoint{} +} diff --git a/service/iris/logger/zap_logger.go b/service/iris/logger/zap_logger.go new file mode 100644 index 0000000..7194ffc --- /dev/null +++ b/service/iris/logger/zap_logger.go @@ -0,0 +1,114 @@ +package logger + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" + "os" +) + +type ( + Field = zap.Field +) + +var ( + zapLogger *zap.Logger + + // zap method + Binary = zap.Binary + Bool = zap.Bool + Complex128 = zap.Complex128 + Complex64 = zap.Complex64 + Float64 = zap.Float64 + Float32 = zap.Float32 + Int = zap.Int + Int64 = zap.Int64 + Int32 = zap.Int32 + Int16 = zap.Int16 + Int8 = zap.Int8 + String = zap.String + Uint = zap.Uint + Uint64 = zap.Uint64 + Uint32 = zap.Uint32 + Uint16 = zap.Uint16 + Uint8 = zap.Uint8 + Time = zap.Time + Any = zap.Any + Duration = zap.Duration +) + +func Debug(msg string, fields ...Field) { + defer sync() + zapLogger.Debug(msg, fields...) +} + +func Info(msg string, fields ...Field) { + defer sync() + zapLogger.Info(msg, fields...) +} + +func Warn(msg string, fields ...Field) { + defer sync() + zapLogger.Warn(msg, fields...) +} + +func Error(msg string, fields ...Field) { + defer sync() + zapLogger.Error(msg, fields...) +} + +func Panic(msg string, fields ...Field) { + defer sync() + zapLogger.Panic(msg, fields...) +} + +func Fatal(msg string, fields ...Field) { + defer sync() + zapLogger.Fatal(msg, fields...) +} + +func With(fields ...Field) { + defer sync() + zapLogger.With(fields...) +} + +func sync() { + zapLogger.Sync() +} + +func init() { + var core zapcore.Core + hook := lumberjack.Logger{ + Filename: "./logs/sync.log", + MaxSize: 100, // megabytes + MaxBackups: 3, + MaxAge: 7, //days + Compress: true, // disabled by default + LocalTime: true, + } + + fileWriter := zapcore.AddSync(&hook) + consoleDebugging := zapcore.Lock(os.Stdout) + + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + encoder := zapcore.NewJSONEncoder(encoderConfig) + + // Join the outputs, encoders, and level-handling functions into + // zapcore.Cores, then tee the four cores together. + highPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool { + return lvl >= zapcore.InfoLevel + }) + lowPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool { + return lvl >= zapcore.DebugLevel + }) + + core = zapcore.NewTee( + zapcore.NewCore(encoder, consoleDebugging, lowPriority), + zapcore.NewCore(encoder, fileWriter, highPriority), + ) + caller := zap.AddCaller() + callerSkipOpt := zap.AddCallerSkip(1) + // From a zapcore.Core, it's easy to construct a Logger. 
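+ // The teed core writes debug-and-above to stdout and info-and-above to the rotating ./logs/sync.log file; AddCallerSkip(1) keeps the reported caller pointing at the code that calls these package-level wrappers rather than the wrappers themselves.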
+ zapLogger = zap.New(core, caller, callerSkipOpt, zap.AddStacktrace(zap.ErrorLevel)) +} diff --git a/service/iris/main.go b/service/iris/main.go new file mode 100644 index 0000000..b8603a2 --- /dev/null +++ b/service/iris/main.go @@ -0,0 +1,36 @@ +package main + +import ( + "os" + "os/signal" + "syscall" + "github.com/irisnet/rainbow-sync/service/iris/logger" + model "github.com/irisnet/rainbow-sync/service/iris/db" + "github.com/irisnet/rainbow-sync/service/iris/task" + "runtime" +) + +func main() { + runtime.GOMAXPROCS(runtime.NumCPU() / 2) + c := make(chan os.Signal) + + defer func() { + logger.Info("System Exit") + + model.Stop() + + if err := recover(); err != nil { + logger.Error("", logger.Any("err", err)) + os.Exit(1) + } + }() + + signal.Notify(c, os.Interrupt, os.Kill, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + logger.Info("Start sync Program") + + model.Start() + task.Start() + + <-c +} diff --git a/service/iris/model/block.go b/service/iris/model/block.go new file mode 100644 index 0000000..2660121 --- /dev/null +++ b/service/iris/model/block.go @@ -0,0 +1,12 @@ +package iris + +const ( + CollectionNameBlock = "sync_iris_block" +) + +type ( + Block struct { + Height int64 `bson:"height"` + CreateTime int64 `bson:"create_time"` + } +) diff --git a/service/iris/model/iris_asset_detail.go b/service/iris/model/iris_asset_detail.go new file mode 100644 index 0000000..d5312d0 --- /dev/null +++ b/service/iris/model/iris_asset_detail.go @@ -0,0 +1,31 @@ +package iris + +import "gopkg.in/mgo.v2/bson" + +type ( + IrisAssetDetail struct { + From string `bson:"from"` + To string `bson:"to"` + CoinAmount string `bson:"coin_amount"` + CoinUnit string `bson:"coin_unit"` + Trigger string `bson:"trigger"` + Subject string `bson:"subject"` + Description string `bson:"description"` + Timestamp string `bson:"timestamp"` + Height int64 `bson:"height"` + TxHash string `bson:"tx_hash"` + Ext string `bson:"ext"` + } +) + +const ( + CollectionNameAssetDetail = "sync_iris_asset_detail" +) + +func (d IrisAssetDetail) Name() string { + return CollectionNameAssetDetail +} + +func (d IrisAssetDetail) PkKvPair() map[string]interface{} { + return bson.M{} +} diff --git a/service/iris/model/sync_task.go b/service/iris/model/sync_task.go new file mode 100644 index 0000000..9f2a53b --- /dev/null +++ b/service/iris/model/sync_task.go @@ -0,0 +1,212 @@ +package iris + +import ( + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + model "github.com/irisnet/rainbow-sync/service/iris/db" + "time" +) + +const ( + CollectionNameSyncTask = "sync_iris_task" +) + +type ( + SyncTask struct { + ID bson.ObjectId `bson:"_id"` + StartHeight int64 `bson:"start_height"` // task start height + EndHeight int64 `bson:"end_height"` // task end height + CurrentHeight int64 `bson:"current_height"` // task current height + Status string `bson:"status"` // task status + WorkerId string `bson:"worker_id"` // worker id + WorkerLogs []WorkerLog `bson:"worker_logs"` // worker logs + LastUpdateTime int64 `bson:"last_update_time"` // unix timestamp + } + + WorkerLog struct { + WorkerId string `bson:"worker_id"` // worker id + BeginTime time.Time `bson:"begin_time"` // time which worker begin handle this task + } +) + +func (d SyncTask) Name() string { + return CollectionNameSyncTask +} + +func (d SyncTask) PkKvPair() map[string]interface{} { + return bson.M{"start_height": d.CurrentHeight, "end_height": d.EndHeight} +} + +// get max block height in sync task +func (d SyncTask) GetMaxBlockHeight() (int64, error) { + type 
maxHeightRes struct { + MaxHeight int64 `bson:"max"` + } + var res []maxHeightRes + + q := []bson.M{ + { + "$group": bson.M{ + "_id": nil, + "max": bson.M{"$max": "$end_height"}, + }, + }, + } + + getMaxBlockHeightFn := func(c *mgo.Collection) error { + return c.Pipe(q).All(&res) + } + err := model.ExecCollection(d.Name(), getMaxBlockHeightFn) + + if err != nil { + return 0, err + } + if len(res) > 0 { + return res[0].MaxHeight, nil + } + + return 0, nil +} + +// query record by status +func (d SyncTask) QueryAll(status []string, taskType string) ([]SyncTask, error) { + var syncTasks []SyncTask + q := bson.M{} + + if len(status) > 0 { + q["status"] = bson.M{ + "$in": status, + } + } + + switch taskType { + case model.SyncTaskTypeCatchUp: + q["end_height"] = bson.M{ + "$ne": 0, + } + break + case model.SyncTaskTypeFollow: + q["end_height"] = bson.M{ + "$eq": 0, + } + break + } + + fn := func(c *mgo.Collection) error { + return c.Find(q).All(&syncTasks) + } + + err := model.ExecCollection(d.Name(), fn) + + if err != nil { + return syncTasks, err + } + + return syncTasks, nil +} + +func (d SyncTask) GetExecutableTask(maxWorkerSleepTime int64) ([]SyncTask, error) { + var tasks []SyncTask + + t := time.Now().Add(time.Duration(-maxWorkerSleepTime) * time.Second).Unix() + q := bson.M{ + "status": bson.M{ + "$in": []string{model.SyncTaskStatusUnHandled, model.SyncTaskStatusUnderway}, + }, + } + + fn := func(c *mgo.Collection) error { + return c.Find(q).Sort("-status").Limit(1000).All(&tasks) + } + + err := model.ExecCollection(d.Name(), fn) + + if err != nil { + return tasks, err + } + + ret := make([]SyncTask, 0, len(tasks)) + //filter the task which last_update_time >= now + for _, task := range tasks { + if task.LastUpdateTime >= t && task.Status == model.SyncTaskStatusUnderway { + continue + } + ret = append(ret, task) + } + + return ret, nil +} + +func (d SyncTask) GetTaskById(id bson.ObjectId) (SyncTask, error) { + var task SyncTask + + fn := func(c *mgo.Collection) error { + return c.FindId(id).One(&task) + } + + err := model.ExecCollection(d.Name(), fn) + if err != nil { + return task, err + } + return task, nil +} + +func (d SyncTask) GetTaskByIdAndWorker(id bson.ObjectId, worker string) (SyncTask, error) { + var task SyncTask + + fn := func(c *mgo.Collection) error { + q := bson.M{ + "_id": id, + "worker_id": worker, + } + + return c.Find(q).One(&task) + } + + err := model.ExecCollection(d.Name(), fn) + if err != nil { + return task, err + } + return task, nil +} + +// take over a task +// update status, worker_id, worker_logs and last_update_time +func (d SyncTask) TakeOverTask(task SyncTask, workerId string) error { + // multiple goroutine attempt to update same record, + // use this selector to ensure only one goroutine can update success at same time + fn := func(c *mgo.Collection) error { + selector := bson.M{ + "_id": task.ID, + "last_update_time": task.LastUpdateTime, + } + + task.Status = model.SyncTaskStatusUnderway + task.WorkerId = workerId + task.LastUpdateTime = time.Now().Unix() + task.WorkerLogs = append(task.WorkerLogs, WorkerLog{ + WorkerId: workerId, + BeginTime: time.Now(), + }) + + return c.Update(selector, task) + } + + return model.ExecCollection(d.Name(), fn) +} + +// update task last update time +func (d SyncTask) UpdateLastUpdateTime(task SyncTask) error { + fn := func(c *mgo.Collection) error { + selector := bson.M{ + "_id": task.ID, + "worker_id": task.WorkerId, + } + + task.LastUpdateTime = time.Now().Unix() + + return c.Update(selector, task) + } + + return 
model.ExecCollection(d.Name(), fn) +} diff --git a/service/iris/model/sync_task_test.go b/service/iris/model/sync_task_test.go new file mode 100644 index 0000000..15facf5 --- /dev/null +++ b/service/iris/model/sync_task_test.go @@ -0,0 +1,23 @@ +package iris + +import ( + "testing" + "encoding/json" + model "github.com/irisnet/rainbow-sync/service/iris/db" +) + +func TestMain(m *testing.M) { + model.Start() + m.Run() +} + +func TestSyncTask_GetExecutableTask(t *testing.T) { + d := SyncTask{} + + if res, err := d.GetExecutableTask(120); err != nil { + t.Fatal(err) + } else { + resBytes, _ := json.Marshal(res) + t.Log(string(resBytes)) + } +} diff --git a/service/iris/model/tx.go b/service/iris/model/tx.go new file mode 100644 index 0000000..af54220 --- /dev/null +++ b/service/iris/model/tx.go @@ -0,0 +1,95 @@ +package iris + +import ( + "gopkg.in/mgo.v2/bson" + "time" + "github.com/irisnet/irishub/modules/stake" + "github.com/irisnet/irishub/modules/distribution" + "github.com/irisnet/irishub/modules/gov" + "github.com/irisnet/irishub/modules/bank" + "github.com/irisnet/irishub/modules/slashing" + dtypes "github.com/irisnet/irishub/modules/distribution/types" + dtags "github.com/irisnet/irishub/modules/distribution/tags" + "github.com/irisnet/irishub/types" +) + +type IrisTx struct { + Time time.Time `json:"time" bson:"time"` + Height int64 `json:"height" bson:"height"` + TxHash string `json:"tx_hash" bson:"tx_hash"` + From string `json:"from" bson:"from"` + To string `json:"to" bson:"to"` + Initiator string `json:"initiator" bson:"initiator"` + Amount []*Coin `json:"amount" bson:"amount"` + Type string `json:"type" bson:"type"` + Fee *Fee `json:"fee" bson:"fee"` + ActualFee *ActualFee `json:"actual_fee" bson:"actual_fee"` + Memo string `json:"memo" bson:"memo"` + Status string `json:"status" bson:"status"` + Code uint32 `json:"code" bson:"code"` + Tags map[string]string `json:"tags" bson:"tags"` + //Msg Msg `bson:"msg"` +} + +const ( + CollectionNameIrisTx = "sync_iris_tx" +) + +func (d IrisTx) Name() string { + return CollectionNameIrisTx +} + +func (d IrisTx) PkKvPair() map[string]interface{} { + return bson.M{} +} + +type ( + MsgTransfer = bank.MsgSend + MsgBurn = bank.MsgBurn + + MsgStakeCreate = stake.MsgCreateValidator + MsgStakeEdit = stake.MsgEditValidator + MsgStakeDelegate = stake.MsgDelegate + MsgStakeBeginUnbonding = stake.MsgBeginUnbonding + MsgBeginRedelegate = stake.MsgBeginRedelegate + MsgUnjail = slashing.MsgUnjail + MsgSetWithdrawAddress = dtypes.MsgSetWithdrawAddress + MsgWithdrawDelegatorReward = distribution.MsgWithdrawDelegatorReward + MsgWithdrawDelegatorRewardsAll = distribution.MsgWithdrawDelegatorRewardsAll + MsgWithdrawValidatorRewardsAll = distribution.MsgWithdrawValidatorRewardsAll + StakeValidator = stake.Validator + Delegation = stake.Delegation + UnbondingDelegation = stake.UnbondingDelegation + + MsgDeposit = gov.MsgDeposit + MsgSubmitProposal = gov.MsgSubmitProposal + MsgSubmitSoftwareUpgradeProposal = gov.MsgSubmitSoftwareUpgradeProposal + MsgSubmitTaxUsageProposal = gov.MsgSubmitTxTaxUsageProposal + MsgVote = gov.MsgVote + Proposal = gov.Proposal + SdkVote = gov.Vote + + SdkCoins = types.Coins + KVPair = types.KVPair +) + +var ( + TagDistributionReward = dtags.Reward +) + +type Coin struct { + Denom string `bson:"denom" json:"denom"` + Amount float64 `bson:"amount" json:"amount"` +} + +type Coins []*Coin + +type Fee struct { + Amount Coins `bson:"amount" json:"amount"` + Gas int64 `bson:"gas" json:"gas"` +} + +type ActualFee struct { + Denom string 
`json:"denom"` + Amount float64 `json:"amount"` +} diff --git a/service/iris/task/create.go b/service/iris/task/create.go new file mode 100644 index 0000000..019b34c --- /dev/null +++ b/service/iris/task/create.go @@ -0,0 +1,232 @@ +package task + +import ( + "fmt" + "github.com/irisnet/rainbow-sync/service/iris/logger" + imodel "github.com/irisnet/rainbow-sync/service/iris/model" + "github.com/irisnet/rainbow-sync/service/iris/conf" + model "github.com/irisnet/rainbow-sync/service/iris/db" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" + "time" + "github.com/irisnet/rainbow-sync/service/iris/block" +) + +type TaskIrisService struct { + blockType block.Iris_Block + syncIrisModel imodel.SyncTask +} + +func (s *TaskIrisService) StartCreateTask() { + blockNumPerWorkerHandle := int64(conf.BlockNumPerWorkerHandle) + + logger.Info("Start create task", logger.String("Chain Block", s.blockType.Name())) + + // buffer channel to limit goroutine num + chanLimit := make(chan bool, conf.WorkerNumCreateTask) + + for { + chanLimit <- true + go s.createTask(blockNumPerWorkerHandle, chanLimit) + time.Sleep(time.Duration(1) * time.Minute) + } +} + +func (s *TaskIrisService) createTask(blockNumPerWorkerHandle int64, chanLimit chan bool) { + var ( + syncIrisTasks []*imodel.SyncTask + ops []txn.Op + invalidFollowTask imodel.SyncTask + logMsg string + ) + + defer func() { + if err := recover(); err != nil { + logger.Error("Create task failed", logger.Any("err", err), + logger.String("Chain Block", s.blockType.Name())) + } + <-chanLimit + }() + + // check valid follow task if exist + // status of valid follow task is unhandled or underway + validFollowTasks, err := s.syncIrisModel.QueryAll( + []string{ + model.SyncTaskStatusUnHandled, + model.SyncTaskStatusUnderway, + }, model.SyncTaskTypeFollow) + if err != nil { + logger.Error("Query sync task failed", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + return + } + if len(validFollowTasks) == 0 { + // get max end_height from sync_task + maxEndHeight, err := s.syncIrisModel.GetMaxBlockHeight() + if err != nil { + logger.Error("Get max endBlock failed", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + return + } + + blockChainLatestHeight, err := getBlockChainLatestHeight() + if err != nil { + logger.Error("Get current block height failed", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + return + } + + if maxEndHeight+blockNumPerWorkerHandle <= blockChainLatestHeight { + syncIrisTasks = createCatchUpTask(maxEndHeight, blockNumPerWorkerHandle, blockChainLatestHeight) + logMsg = fmt.Sprintf("Create catch up task during follow task not exist,from-to:%v-%v,Chain Block:%v", + maxEndHeight+1, blockChainLatestHeight, s.blockType.Name()) + } else { + finished, err := s.assertAllCatchUpTaskFinished() + if err != nil { + logger.Error("AssertAllCatchUpTaskFinished failed", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + return + } + if finished { + syncIrisTasks = createFollowTask(maxEndHeight, blockNumPerWorkerHandle, blockChainLatestHeight) + logMsg = fmt.Sprintf("Create follow task during follow task not exist,from-to:%v-%v,Chain Block:%v", + maxEndHeight+1, blockChainLatestHeight, s.blockType.Name()) + } + } + } else { + followTask := validFollowTasks[0] + followedHeight := followTask.CurrentHeight + if followedHeight == 0 { + followedHeight = followTask.StartHeight - 1 + } + + blockChainLatestHeight, err := 
getBlockChainLatestHeight() + if err != nil { + logger.Error("Get blockChain latest height failed", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + return + } + + if followedHeight+blockNumPerWorkerHandle <= blockChainLatestHeight { + syncIrisTasks = createCatchUpTask(followedHeight, blockNumPerWorkerHandle, blockChainLatestHeight) + + invalidFollowTask = followTask + logMsg = fmt.Sprintf("Create catch up task during follow task exist,from-to:%v-%v,invalidFollowTaskId:%v,invalidFollowTaskCurHeight:%v,Chain Block:%v", + followedHeight+1, blockChainLatestHeight, invalidFollowTask.ID.Hex(), invalidFollowTask.CurrentHeight, s.blockType.Name()) + + } + } + + // bulk insert or remove use transaction + ops = make([]txn.Op, 0, len(syncIrisTasks)+1) + if len(syncIrisTasks) > 0 { + for _, v := range syncIrisTasks { + objectId := bson.NewObjectId() + v.ID = objectId + op := txn.Op{ + C: imodel.CollectionNameSyncTask, + Id: objectId, + Assert: nil, + Insert: v, + } + + ops = append(ops, op) + } + } + + if invalidFollowTask.ID.Valid() { + op := txn.Op{ + C: imodel.CollectionNameSyncTask, + Id: invalidFollowTask.ID, + Assert: bson.M{ + "current_height": invalidFollowTask.CurrentHeight, + "last_update_time": invalidFollowTask.LastUpdateTime, + }, + Update: bson.M{ + "$set": bson.M{ + "status": model.FollowTaskStatusInvalid, + "last_update_time": time.Now().Unix(), + }, + }, + } + ops = append(ops, op) + } + + if len(ops) > 0 { + err := model.Txn(ops) + if err != nil { + logger.Warn("Create sync task fail", logger.String("err", err.Error()), + logger.String("Chain Block", s.blockType.Name())) + } else { + logger.Info(fmt.Sprintf("Create sync task success,%v", logMsg), logger.String("Chain Block", s.blockType.Name())) + } + } + + time.Sleep(1 * time.Second) +} + +func createCatchUpTask(maxEndHeight, blockNumPerWorker, currentBlockHeight int64) []*imodel.SyncTask { + var ( + syncTasks []*imodel.SyncTask + ) + if length := currentBlockHeight - (maxEndHeight + blockNumPerWorker); length > 0 { + syncTasks = make([]*imodel.SyncTask, 0, length+1) + } + + for maxEndHeight+blockNumPerWorker <= currentBlockHeight { + syncTask := imodel.SyncTask{ + StartHeight: maxEndHeight + 1, + EndHeight: maxEndHeight + blockNumPerWorker, + Status: model.SyncTaskStatusUnHandled, + LastUpdateTime: time.Now().Unix(), + } + syncTasks = append(syncTasks, &syncTask) + + maxEndHeight += blockNumPerWorker + } + + return syncTasks +} + +func (s *TaskIrisService) assertAllCatchUpTaskFinished() (bool, error) { + var ( + allCatchUpTaskFinished = false + ) + + // assert all catch up task whether finished + tasks, err := s.syncIrisModel.QueryAll( + []string{ + model.SyncTaskStatusUnHandled, + model.SyncTaskStatusUnderway, + }, + model.SyncTaskTypeCatchUp) + if err != nil { + return false, err + } + + if len(tasks) == 0 { + allCatchUpTaskFinished = true + } + + return allCatchUpTaskFinished, nil +} + +func createFollowTask(maxEndHeight, blockNumPerWorker, currentBlockHeight int64) []*imodel.SyncTask { + var ( + syncIrisTasks []*imodel.SyncTask + ) + syncIrisTasks = make([]*imodel.SyncTask, 0, 1) + + if maxEndHeight+blockNumPerWorker > currentBlockHeight { + syncTask := imodel.SyncTask{ + StartHeight: maxEndHeight + 1, + EndHeight: 0, + Status: model.SyncTaskStatusUnHandled, + LastUpdateTime: time.Now().Unix(), + } + + syncIrisTasks = append(syncIrisTasks, &syncTask) + } + + return syncIrisTasks +} diff --git a/service/iris/task/execute.go b/service/iris/task/execute.go new file mode 100644 index 
0000000..d016138 --- /dev/null +++ b/service/iris/task/execute.go @@ -0,0 +1,285 @@ +package task + +import ( + "fmt" + "github.com/irisnet/rainbow-sync/service/iris/logger" + imodel "github.com/irisnet/rainbow-sync/service/iris/model" + "github.com/irisnet/rainbow-sync/service/iris/conf" + model "github.com/irisnet/rainbow-sync/service/iris/db" + "github.com/irisnet/rainbow-sync/service/iris/helper" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "os" + "time" +) + +func (s *TaskIrisService) StartExecuteTask() { + var ( + blockNumPerWorkerHandle = int64(conf.BlockNumPerWorkerHandle) + workerMaxSleepTime = int64(conf.WorkerMaxSleepTime) + ) + if workerMaxSleepTime <= 1*60 { + logger.Fatal("workerMaxSleepTime shouldn't less than 1 minute") + } + + logger.Info("Start execute task", logger.String("Chain Block", s.blockType.Name())) + + // buffer channel to limit goroutine num + chanLimit := make(chan bool, conf.WorkerNumExecuteTask) + helper.Init(conf.BlockChainMonitorUrl, conf.MaxConnectionNum, conf.InitConnectionNum) + defer func() { + helper.ClosePool() + }() + + for { + chanLimit <- true + go s.executeTask(blockNumPerWorkerHandle, workerMaxSleepTime, chanLimit) + time.Sleep(time.Duration(1) * time.Second) + } +} + +func (s *TaskIrisService) executeTask(blockNumPerWorkerHandle, maxWorkerSleepTime int64, chanLimit chan bool) { + var ( + workerId, taskType string + blockChainLatestHeight int64 + ) + genWorkerId := func() string { + // generate worker id use hostname@xxx + hostname, _ := os.Hostname() + return fmt.Sprintf("%v@%v", hostname, bson.NewObjectId().Hex()) + } + + healthCheckQuit := make(chan bool) + workerId = genWorkerId() + client := helper.GetClient() + + defer func() { + if r := recover(); r != nil { + logger.Error("execute task fail", logger.Any("err", r)) + } + close(healthCheckQuit) + <-chanLimit + client.Release() + }() + + // check whether exist executable task + // status = unhandled or + // status = underway and now - lastUpdateTime > confTime + tasks, err := s.syncIrisModel.GetExecutableTask(maxWorkerSleepTime) + if err != nil { + logger.Error("Get executable task fail", logger.String("err", err.Error())) + } + if len(tasks) == 0 { + // there is no executable tasks + return + } + + // take over sync task + // attempt to update status, worker_id and worker_logs + task := tasks[0] + err = s.syncIrisModel.TakeOverTask(task, workerId) + if err != nil { + if err == mgo.ErrNotFound { + // this task has been take over by other goroutine + logger.Info("Task has been take over by other goroutine", logger.String("Chain Block", s.blockType.Name())) + } else { + logger.Error("Take over task fail", logger.String("Chain Block", s.blockType.Name()), logger.String("err", err.Error())) + } + return + } else { + // task over task success, update task worker to current worker + task.WorkerId = workerId + } + + if task.EndHeight != 0 { + taskType = model.SyncTaskTypeCatchUp + } else { + taskType = model.SyncTaskTypeFollow + } + logger.Info("worker begin execute task", logger.String("Chain Block", s.blockType.Name()), + logger.String("curWorker", workerId), logger.Any("taskId", task.ID), + logger.String("from-to", fmt.Sprintf("%v-%v", task.StartHeight, task.EndHeight))) + + // worker health check, if worker is alive, then update last update time every minute. + // health check will exit in follow conditions: + // 1. task is not owned by current worker + // 2. 
task is invalid + workerHealthCheck := func(taskId bson.ObjectId, currentWorker string) { + defer func() { + if r := recover(); r != nil { + logger.Error("worker health check err", logger.String("Chain Block", s.blockType.Name()), logger.Any("err", r)) + } + }() + + func() { + for { + select { + case <-healthCheckQuit: + logger.Info("get health check quit signal, now exit health check", logger.String("Chain Block", s.blockType.Name())) + return + default: + task, err := s.syncIrisModel.GetTaskByIdAndWorker(taskId, workerId) + if err == nil { + if _, valid := assertTaskValid(task, blockNumPerWorkerHandle); valid { + // update task last update time + if err := s.syncIrisModel.UpdateLastUpdateTime(task); err != nil { + logger.Error("update last update time fail", logger.String("Chain Block", s.blockType.Name()), logger.String("err", err.Error())) + } + } else { + logger.Info("task is invalid, exit health check", logger.String("Chain Block", s.blockType.Name()), logger.String("taskId", taskId.Hex())) + return + } + } else { + if err == mgo.ErrNotFound { + logger.Info("task may be task over by other goroutine, exit health check", logger.String("Chain Block", s.blockType.Name()), + logger.String("taskId", taskId.Hex()), logger.String("curWorker", workerId)) + return + } else { + logger.Error("get task by id and worker fail", logger.String("Chain Block", s.blockType.Name()), logger.String("taskId", taskId.Hex()), + logger.String("curWorker", workerId)) + } + } + } + time.Sleep(1 * time.Minute) + } + }() + } + go workerHealthCheck(task.ID, workerId) + + // check task is valid + // valid catch up task: current_height < end_height + // valid follow task: current_height + blockNumPerWorkerHandle > blockChainLatestHeight + blockChainLatestHeight, isValid := assertTaskValid(task, blockNumPerWorkerHandle) + for isValid { + var inProcessBlock int64 + if task.CurrentHeight == 0 { + inProcessBlock = task.StartHeight + } else { + inProcessBlock = task.CurrentHeight + 1 + } + + // if inProcessBlock > blockChainLatestHeight, should wait blockChainLatestHeight update + if taskType == model.SyncTaskTypeFollow && inProcessBlock > blockChainLatestHeight { + logger.Info("wait blockChain latest height update", logger.String("Chain Block", s.blockType.Name()), + logger.Int64("curSyncedHeight", inProcessBlock-1), + logger.Int64("blockChainLatestHeight", blockChainLatestHeight)) + time.Sleep(2 * time.Second) + // continue to assert task is valid + blockChainLatestHeight, isValid = assertTaskValid(task, blockNumPerWorkerHandle) + continue + } + + // parse data from block + blockDoc, assetDetailDocs, txDocs, err := s.blockType.ParseBlock(inProcessBlock, client) + if err != nil { + logger.Error("Parse block fail", logger.String("Chain Block", s.blockType.Name()), logger.Int64("block", inProcessBlock), + logger.String("err", err.Error())) + } + + // check task owner + workerUnchanged, err := assertTaskWorkerUnchanged(task.ID, task.WorkerId) + if err != nil { + logger.Error("assert task worker is unchanged fail", logger.String("Chain Block", s.blockType.Name()), logger.String("err", err.Error())) + } + if workerUnchanged { + // save data and update sync task + taskDoc := task + taskDoc.CurrentHeight = inProcessBlock + taskDoc.LastUpdateTime = time.Now().Unix() + taskDoc.Status = model.SyncTaskStatusUnderway + if inProcessBlock == task.EndHeight { + taskDoc.Status = model.SyncTaskStatusCompleted + } + + err := s.blockType.SaveDocsWithTxn(blockDoc, assetDetailDocs, txDocs, taskDoc) + if err != nil { + logger.Error("save docs 
fail", logger.String("Chain Block", s.blockType.Name()), logger.String("err", err.Error())) + } else { + task.CurrentHeight = inProcessBlock + } + + // continue to assert task is valid + blockChainLatestHeight, isValid = assertTaskValid(task, blockNumPerWorkerHandle) + } else { + logger.Info("task worker changed", logger.String("Chain Block", s.blockType.Name()), logger.Any("task_id", task.ID), + logger.String("origin worker", workerId), logger.String("current worker", task.WorkerId)) + return + } + } + + logger.Info("worker finish execute task", logger.String("Chain Block", s.blockType.Name()), + logger.String("task_worker", task.WorkerId), logger.Any("task_id", task.ID), + logger.String("from-to-current", fmt.Sprintf("%v-%v-%v", task.StartHeight, task.EndHeight, task.CurrentHeight))) +} + +// assert task is valid +// valid catch up task: current_height < end_height +// valid follow task: current_height + blockNumPerWorkerHandle > blockChainLatestHeight +func assertTaskValid(task imodel.SyncTask, blockNumPerWorkerHandle int64) (int64, bool) { + var ( + taskType string + flag = false + blockChainLatestHeight int64 + err error + ) + if task.EndHeight != 0 { + taskType = model.SyncTaskTypeCatchUp + } else { + taskType = model.SyncTaskTypeFollow + } + currentHeight := task.CurrentHeight + if currentHeight == 0 { + currentHeight = task.StartHeight - 1 + } + + switch taskType { + case model.SyncTaskTypeCatchUp: + if currentHeight < task.EndHeight { + flag = true + } + break + case model.SyncTaskTypeFollow: + blockChainLatestHeight, err = getBlockChainLatestHeight() + if err != nil { + logger.Error("get blockChain latest height err", logger.String("err", err.Error())) + return blockChainLatestHeight, flag + } + if currentHeight+blockNumPerWorkerHandle > blockChainLatestHeight { + flag = true + } + break + } + return blockChainLatestHeight, flag +} + +// assert task worker unchanged +func assertTaskWorkerUnchanged(taskId bson.ObjectId, workerId string) (bool, error) { + var ( + syncTaskModel imodel.SyncTask + ) + // check task owner + task, err := syncTaskModel.GetTaskById(taskId) + if err != nil { + return false, err + } + + if task.WorkerId == workerId { + return true, nil + } else { + return false, nil + } +} + +// get current block height +func getBlockChainLatestHeight() (int64, error) { + client := helper.GetClient() + defer func() { + client.Release() + }() + status, err := client.Status() + if err != nil { + return 0, err + } + + return status.SyncInfo.LatestBlockHeight, nil +} diff --git a/service/iris/task/start.go b/service/iris/task/start.go new file mode 100644 index 0000000..7528655 --- /dev/null +++ b/service/iris/task/start.go @@ -0,0 +1,7 @@ +package task + +func Start() { + synctask := new(TaskIrisService) + go synctask.StartCreateTask() + go synctask.StartExecuteTask() +} diff --git a/service/iris/tools/Makefile b/service/iris/tools/Makefile new file mode 100644 index 0000000..08e0cbc --- /dev/null +++ b/service/iris/tools/Makefile @@ -0,0 +1,33 @@ +######################################## +### DEP + +DEP = github.com/golang/dep/cmd/dep +DEP_CHECK := $(shell command -v dep 2> /dev/null) + +check_tools: +ifndef DEP_CHECK + @echo "No dep in path. Install with 'make get_tools'." +else + @echo "Found dep in path." +endif + +get_tools: +ifdef DEP_CHECK + @echo "Dep is already installed. Run 'make update_tools' to update." 
+else + @echo "$(ansi_grn)Installing dep$(ansi_end)" + go get -v $(DEP) +endif + +update_tools: + @echo "$(ansi_grn)Updating dep$(ansi_end)" + go get -u -v $(DEP) + + +######################################## +# ANSI colors + +ansi_red=\033[0;31m +ansi_grn=\033[0;32m +ansi_yel=\033[0;33m +ansi_end=\033[0m \ No newline at end of file diff --git a/service/iris/utils/utils.go b/service/iris/utils/utils.go new file mode 100644 index 0000000..3d75757 --- /dev/null +++ b/service/iris/utils/utils.go @@ -0,0 +1,140 @@ +package utils + +import ( + "strings" + "encoding/hex" + "strconv" + "github.com/irisnet/irishub/codec" + "github.com/irisnet/irishub/modules/auth" + abci "github.com/tendermint/tendermint/abci/types" + imodel "github.com/irisnet/rainbow-sync/service/iris/model" + "github.com/irisnet/rainbow-sync/service/iris/helper" + "github.com/irisnet/rainbow-sync/service/iris/constant" + "github.com/irisnet/rainbow-sync/service/iris/logger" + "fmt" + "regexp" + "github.com/irisnet/irishub/app" + "github.com/irisnet/irishub/types" + "github.com/irisnet/rainbow-sync/service/iris/conf" +) + +var ( + cdc *codec.Codec +) + +// 初始化账户地址前缀 +func init() { + if conf.IrisNetwork == types.Mainnet { + types.SetNetworkType(types.Mainnet) + } + cdc = app.MakeLatestCodec() +} + +func GetCodec() *codec.Codec { + return cdc +} + +func BuildHex(bytes []byte) string { + return strings.ToUpper(hex.EncodeToString(bytes)) +} + +func ParseCoins(coinsStr string) (coins imodel.Coins) { + coinsStr = strings.TrimSpace(coinsStr) + if len(coinsStr) == 0 { + return + } + + coinStrs := strings.Split(coinsStr, ",") + for _, coinStr := range coinStrs { + coin := ParseCoin(coinStr) + coins = append(coins, coin) + } + return coins +} + +func ParseCoin(coinStr string) (coin *imodel.Coin) { + var ( + reDnm = `[A-Za-z\-]{2,15}` + reAmt = `[0-9]+[.]?[0-9]*` + reSpc = `[[:space:]]*` + reCoin = regexp.MustCompile(fmt.Sprintf(`^(%s)%s(%s)$`, reAmt, reSpc, reDnm)) + ) + + coinStr = strings.TrimSpace(coinStr) + + matches := reCoin.FindStringSubmatch(coinStr) + if matches == nil { + logger.Error("invalid coin expression", logger.Any("coin", coinStr)) + return coin + } + denom, amount := matches[2], matches[1] + + amt, err := strconv.ParseFloat(amount, 64) + if err != nil { + logger.Error("Convert str to int failed", logger.Any("amount", amount)) + return coin + } + + return &imodel.Coin{ + Denom: denom, + Amount: amt, + } +} + +func BuildFee(fee auth.StdFee) *imodel.Fee { + return &imodel.Fee{ + Amount: ParseCoins(fee.Amount.String()), + Gas: int64(fee.Gas), + } +} + +// get tx status and log by query txHash +func QueryTxResult(txHash []byte) (string, abci.ResponseDeliverTx, error) { + var resDeliverTx abci.ResponseDeliverTx + status := constant.TxStatusSuccess + + client := helper.GetClient() + defer client.Release() + + res, err := client.Tx(txHash, false) + if err != nil { + return "unknown", resDeliverTx, err + } + result := res.TxResult + if result.Code != 0 { + status = constant.TxStatusFail + } + + return status, result, nil +} + +func Min(x, y int64) int64 { + if x < y { + return x + } + return y +} + +func ParseFloat(s string, bit ...int) float64 { + f, err := strconv.ParseFloat(s, 64) + if err != nil { + logger.Error("common.ParseFloat error", logger.String("value", s)) + return 0 + } + + if len(bit) > 0 { + return RoundFloat(f, bit[0]) + } + return f +} + +func RoundFloat(num float64, bit int) (i float64) { + format := "%" + fmt.Sprintf("0.%d", bit) + "f" + s := fmt.Sprintf(format, num) + i, err := strconv.ParseFloat(s, 0) + if 
err != nil { + logger.Error("common.RoundFloat error", logger.String("format", format)) + return 0 + } + return i +}
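
Below is a minimal usage sketch of the document helpers added in `service/iris/db`, assuming the repository's vendored dependencies are in place; the `demoDoc` type and its `sync_iris_demo` collection name are hypothetical and exist only to show how a `Docs` implementation plugs into `db.Start`/`db.Save`.

```go
package main

import (
	"github.com/irisnet/rainbow-sync/service/iris/db"
	"github.com/irisnet/rainbow-sync/service/iris/logger"
	"gopkg.in/mgo.v2/bson"
)

// demoDoc is a hypothetical document type used only for illustration.
type demoDoc struct {
	Height int64 `bson:"height"`
}

// Name returns the collection the document is written to (hypothetical name).
func (d demoDoc) Name() string { return "sync_iris_demo" }

// PkKvPair is the selector db.Save uses to detect an already existing record.
func (d demoDoc) PkKvPair() map[string]interface{} { return bson.M{"height": d.Height} }

func main() {
	// db.Start connects using DB_ADDR/DB_USER/DB_PASSWD/DB_DATABASE
	// (or the defaults in conf/db/types.go) and must run before any query.
	db.Start()
	defer db.Stop()

	if err := db.Save(demoDoc{Height: 1}); err != nil {
		logger.Error("save demo doc failed", logger.String("err", err.Error()))
	}
}
```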