diff --git a/.cicd/defaults.json b/.cicd/defaults.json
new file mode 100644
index 0000000000..fd637bc48a
--- /dev/null
+++ b/.cicd/defaults.json
@@ -0,0 +1,9 @@
+{
+  "cdt":{
+    "target":"4",
+    "prerelease":false
+  },
+  "eossystemcontracts":{
+    "ref":"release/3.1"
+  }
+}
diff --git a/.cicd/platforms/ubuntu20.Dockerfile b/.cicd/platforms/ubuntu20.Dockerfile
index baccb7c937..e9c3a1d4b6
--- a/.cicd/platforms/ubuntu20.Dockerfile
+++ b/.cicd/platforms/ubuntu20.Dockerfile
@@ -6,11 +6,12 @@ RUN apt-get update && apt-get upgrade -y && \
     cmake \
     git \
     jq \
-    libboost-all-dev \
     libcurl4-openssl-dev \
     libgmp-dev \
     libssl-dev \
     llvm-11-dev \
     ninja-build \
     python3-numpy \
+    file \
+    zlib1g-dev \
     zstd
diff --git a/.cicd/platforms/ubuntu22.Dockerfile b/.cicd/platforms/ubuntu22.Dockerfile
index 1e5a936a4d..57d49fe026
--- a/.cicd/platforms/ubuntu22.Dockerfile
+++ b/.cicd/platforms/ubuntu22.Dockerfile
@@ -6,11 +6,12 @@ RUN apt-get update && apt-get upgrade -y && \
    cmake \
    git \
    jq \
-   libboost-all-dev \
    libcurl4-openssl-dev \
    libgmp-dev \
    libssl-dev \
    llvm-11-dev \
    ninja-build \
    python3-numpy \
+   file \
+   zlib1g-dev \
    zstd
diff --git a/.github/actions/parallel-ctest-containers/dist/index.mjs b/.github/actions/parallel-ctest-containers/dist/index.mjs
index 7ebdf382a7..9ace17b974
--- a/.github/actions/parallel-ctest-containers/dist/index.mjs
+++ b/.github/actions/parallel-ctest-containers/dist/index.mjs
@@ -1,3 +1,3 @@
-import{createRequire as e}from"module";var t={7351:function(e,t,r){var n=this&&this.__createBinding||(Object.create?function(e,t,r,n){if(n===undefined)n=r;Object.defineProperty(e,n,{enumerable:true,get:function(){return t[r]}})}:function(e,t,r,n){if(n===undefined)n=r;e[n]=t[r]});var i=this&&this.__setModuleDefault||(Object.create?function(e,t){Object.defineProperty(e,"default",{enumerable:true,value:t})}:function(e,t){e["default"]=t});var a=this&&this.__importStar||function(e){if(e&&e.__esModule)return e;var t={};if(e!=null)for(var r in e)if(r!=="default"&&Object.hasOwnProperty.call(e,r))n(t,e,r);i(t,e);return t};Object.defineProperty(t,"__esModule",{value:true});t.issue=t.issueCommand=void 0;const o=a(r(2037));const s=r(6321);function issueCommand(e,t,r){const n=new Command(e,t,r);process.stdout.write(n.toString()+o.EOL)}t.issueCommand=issueCommand;function issue(e,t=""){issueCommand(e,{},t)}t.issue=issue;const u="::";class Command{constructor(e,t,r){if(!e){e="missing.command"}this.command=e;this.properties=t;this.message=r}toString(){let e=u+this.command;if(this.properties&&Object.keys(this.properties).length>0){e+=" ";let t=true;for(const r in this.properties){if(this.properties.hasOwnProperty(r)){const n=this.properties[r];if(n){if(t){t=false}else{e+=","}e+=`${r}=${escapeProperty(n)}`}}}}e+=`${u}${escapeData(this.message)}`;return e}}function escapeData(e){return s.toCommandValue(e).replace(/%/g,"%25").replace(/\r/g,"%0D").replace(/\n/g,"%0A")}function escapeProperty(e){return s.toCommandValue(e).replace(/%/g,"%25").replace(/\r/g,"%0D").replace(/\n/g,"%0A").replace(/:/g,"%3A").replace(/,/g,"%2C")}},2186:function(e,t,r){var n=this&&this.__createBinding||(Object.create?function(e,t,r,n){if(n===undefined)n=r;Object.defineProperty(e,n,{enumerable:true,get:function(){return t[r]}})}:function(e,t,r,n){if(n===undefined)n=r;e[n]=t[r]});var i=this&&this.__setModuleDefault||(Object.create?function(e,t){Object.defineProperty(e,"default",{enumerable:true,value:t})}:function(e,t){e["default"]=t});var a=this&&this.__importStar||function(e){if(e&&e.__esModule)return e;var
t={};if(e!=null)for(var r in e)if(r!=="default"&&Object.hasOwnProperty.call(e,r))n(t,e,r);i(t,e);return t};var o=this&&this.__awaiter||function(e,t,r,n){function adopt(e){return e instanceof r?e:new r((function(t){t(e)}))}return new(r||(r=Promise))((function(r,i){function fulfilled(e){try{step(n.next(e))}catch(e){i(e)}}function rejected(e){try{step(n["throw"](e))}catch(e){i(e)}}function step(e){e.done?r(e.value):adopt(e.value).then(fulfilled,rejected)}step((n=n.apply(e,t||[])).next())}))};Object.defineProperty(t,"__esModule",{value:true});t.getIDToken=t.getState=t.saveState=t.group=t.endGroup=t.startGroup=t.info=t.notice=t.warning=t.error=t.debug=t.isDebug=t.setFailed=t.setCommandEcho=t.setOutput=t.getBooleanInput=t.getMultilineInput=t.getInput=t.addPath=t.setSecret=t.exportVariable=t.ExitCode=void 0;const s=r(7351);const u=r(717);const l=r(6321);const c=a(r(2037));const d=a(r(1017));const h=r(5840);const p=r(8041);var g;(function(e){e[e["Success"]=0]="Success";e[e["Failure"]=1]="Failure"})(g=t.ExitCode||(t.ExitCode={}));function exportVariable(e,t){const r=l.toCommandValue(t);process.env[e]=r;const n=process.env["GITHUB_ENV"]||"";if(n){const t=`ghadelimiter_${h.v4()}`;if(e.includes(t)){throw new Error(`Unexpected input: name should not contain the delimiter "${t}"`)}if(r.includes(t)){throw new Error(`Unexpected input: value should not contain the delimiter "${t}"`)}const n=`${e}<<${t}${c.EOL}${r}${c.EOL}${t}`;u.issueCommand("ENV",n)}else{s.issueCommand("set-env",{name:e},r)}}t.exportVariable=exportVariable;function setSecret(e){s.issueCommand("add-mask",{},e)}t.setSecret=setSecret;function addPath(e){const t=process.env["GITHUB_PATH"]||"";if(t){u.issueCommand("PATH",e)}else{s.issueCommand("add-path",{},e)}process.env["PATH"]=`${e}${d.delimiter}${process.env["PATH"]}`}t.addPath=addPath;function getInput(e,t){const r=process.env[`INPUT_${e.replace(/ /g,"_").toUpperCase()}`]||"";if(t&&t.required&&!r){throw new Error(`Input required and not supplied: ${e}`)}if(t&&t.trimWhitespace===false){return r}return r.trim()}t.getInput=getInput;function getMultilineInput(e,t){const r=getInput(e,t).split("\n").filter((e=>e!==""));return r}t.getMultilineInput=getMultilineInput;function getBooleanInput(e,t){const r=["true","True","TRUE"];const n=["false","False","FALSE"];const i=getInput(e,t);if(r.includes(i))return true;if(n.includes(i))return false;throw new TypeError(`Input does not meet YAML 1.2 "Core Schema" specification: ${e}\n`+`Support boolean input list: \`true | True | TRUE | false | False | FALSE\``)}t.getBooleanInput=getBooleanInput;function setOutput(e,t){process.stdout.write(c.EOL);s.issueCommand("set-output",{name:e},t)}t.setOutput=setOutput;function setCommandEcho(e){s.issue("echo",e?"on":"off")}t.setCommandEcho=setCommandEcho;function setFailed(e){process.exitCode=g.Failure;error(e)}t.setFailed=setFailed;function isDebug(){return process.env["RUNNER_DEBUG"]==="1"}t.isDebug=isDebug;function debug(e){s.issueCommand("debug",{},e)}t.debug=debug;function error(e,t={}){s.issueCommand("error",l.toCommandProperties(t),e instanceof Error?e.toString():e)}t.error=error;function warning(e,t={}){s.issueCommand("warning",l.toCommandProperties(t),e instanceof Error?e.toString():e)}t.warning=warning;function notice(e,t={}){s.issueCommand("notice",l.toCommandProperties(t),e instanceof Error?e.toString():e)}t.notice=notice;function info(e){process.stdout.write(e+c.EOL)}t.info=info;function startGroup(e){s.issue("group",e)}t.startGroup=startGroup;function 
endGroup(){s.issue("endgroup")}t.endGroup=endGroup;function group(e,t){return o(this,void 0,void 0,(function*(){startGroup(e);let r;try{r=yield t()}finally{endGroup()}return r}))}t.group=group;function saveState(e,t){s.issueCommand("save-state",{name:e},t)}t.saveState=saveState;function getState(e){return process.env[`STATE_${e}`]||""}t.getState=getState;function getIDToken(e){return o(this,void 0,void 0,(function*(){return yield p.OidcClient.getIDToken(e)}))}t.getIDToken=getIDToken;var m=r(1327);Object.defineProperty(t,"summary",{enumerable:true,get:function(){return m.summary}});var b=r(1327);Object.defineProperty(t,"markdownSummary",{enumerable:true,get:function(){return b.markdownSummary}});var v=r(2981);Object.defineProperty(t,"toPosixPath",{enumerable:true,get:function(){return v.toPosixPath}});Object.defineProperty(t,"toWin32Path",{enumerable:true,get:function(){return v.toWin32Path}});Object.defineProperty(t,"toPlatformPath",{enumerable:true,get:function(){return v.toPlatformPath}})},717:function(e,t,r){var n=this&&this.__createBinding||(Object.create?function(e,t,r,n){if(n===undefined)n=r;Object.defineProperty(e,n,{enumerable:true,get:function(){return t[r]}})}:function(e,t,r,n){if(n===undefined)n=r;e[n]=t[r]});var i=this&&this.__setModuleDefault||(Object.create?function(e,t){Object.defineProperty(e,"default",{enumerable:true,value:t})}:function(e,t){e["default"]=t});var a=this&&this.__importStar||function(e){if(e&&e.__esModule)return e;var t={};if(e!=null)for(var r in e)if(r!=="default"&&Object.hasOwnProperty.call(e,r))n(t,e,r);i(t,e);return t};Object.defineProperty(t,"__esModule",{value:true});t.issueCommand=void 0;const o=a(r(7147));const s=a(r(2037));const u=r(6321);function issueCommand(e,t){const r=process.env[`GITHUB_${e}`];if(!r){throw new Error(`Unable to find environment variable for file command ${e}`)}if(!o.existsSync(r)){throw new Error(`Missing file at path: ${r}`)}o.appendFileSync(r,`${u.toCommandValue(t)}${s.EOL}`,{encoding:"utf8"})}t.issueCommand=issueCommand},8041:function(e,t,r){var n=this&&this.__awaiter||function(e,t,r,n){function adopt(e){return e instanceof r?e:new r((function(t){t(e)}))}return new(r||(r=Promise))((function(r,i){function fulfilled(e){try{step(n.next(e))}catch(e){i(e)}}function rejected(e){try{step(n["throw"](e))}catch(e){i(e)}}function step(e){e.done?r(e.value):adopt(e.value).then(fulfilled,rejected)}step((n=n.apply(e,t||[])).next())}))};Object.defineProperty(t,"__esModule",{value:true});t.OidcClient=void 0;const i=r(6255);const a=r(5526);const o=r(2186);class OidcClient{static createHttpClient(e=true,t=10){const r={allowRetries:e,maxRetries:t};return new i.HttpClient("actions/oidc-client",[new a.BearerCredentialHandler(OidcClient.getRequestToken())],r)}static getRequestToken(){const e=process.env["ACTIONS_ID_TOKEN_REQUEST_TOKEN"];if(!e){throw new Error("Unable to get ACTIONS_ID_TOKEN_REQUEST_TOKEN env variable")}return e}static getIDTokenUrl(){const e=process.env["ACTIONS_ID_TOKEN_REQUEST_URL"];if(!e){throw new Error("Unable to get ACTIONS_ID_TOKEN_REQUEST_URL env variable")}return e}static getCall(e){var t;return n(this,void 0,void 0,(function*(){const r=OidcClient.createHttpClient();const n=yield r.getJson(e).catch((e=>{throw new Error(`Failed to get ID Token. 
\n \n Error Code : ${e.statusCode}\n \n Error Message: ${e.result.message}`)}));const i=(t=n.result)===null||t===void 0?void 0:t.value;if(!i){throw new Error("Response json body do not have ID Token field")}return i}))}static getIDToken(e){return n(this,void 0,void 0,(function*(){try{let t=OidcClient.getIDTokenUrl();if(e){const r=encodeURIComponent(e);t=`${t}&audience=${r}`}o.debug(`ID token url is ${t}`);const r=yield OidcClient.getCall(t);o.setSecret(r);return r}catch(e){throw new Error(`Error message: ${e.message}`)}}))}}t.OidcClient=OidcClient},2981:function(e,t,r){var n=this&&this.__createBinding||(Object.create?function(e,t,r,n){if(n===undefined)n=r;Object.defineProperty(e,n,{enumerable:true,get:function(){return t[r]}})}:function(e,t,r,n){if(n===undefined)n=r;e[n]=t[r]});var i=this&&this.__setModuleDefault||(Object.create?function(e,t){Object.defineProperty(e,"default",{enumerable:true,value:t})}:function(e,t){e["default"]=t});var a=this&&this.__importStar||function(e){if(e&&e.__esModule)return e;var t={};if(e!=null)for(var r in e)if(r!=="default"&&Object.hasOwnProperty.call(e,r))n(t,e,r);i(t,e);return t};Object.defineProperty(t,"__esModule",{value:true});t.toPlatformPath=t.toWin32Path=t.toPosixPath=void 0;const o=a(r(1017));function toPosixPath(e){return e.replace(/[\\]/g,"/")}t.toPosixPath=toPosixPath;function toWin32Path(e){return e.replace(/[/]/g,"\\")}t.toWin32Path=toWin32Path;function toPlatformPath(e){return e.replace(/[/\\]/g,o.sep)}t.toPlatformPath=toPlatformPath},1327:function(e,t,r){var n=this&&this.__awaiter||function(e,t,r,n){function adopt(e){return e instanceof r?e:new r((function(t){t(e)}))}return new(r||(r=Promise))((function(r,i){function fulfilled(e){try{step(n.next(e))}catch(e){i(e)}}function rejected(e){try{step(n["throw"](e))}catch(e){i(e)}}function step(e){e.done?r(e.value):adopt(e.value).then(fulfilled,rejected)}step((n=n.apply(e,t||[])).next())}))};Object.defineProperty(t,"__esModule",{value:true});t.summary=t.markdownSummary=t.SUMMARY_DOCS_URL=t.SUMMARY_ENV_VAR=void 0;const i=r(2037);const a=r(7147);const{access:o,appendFile:s,writeFile:u}=a.promises;t.SUMMARY_ENV_VAR="GITHUB_STEP_SUMMARY";t.SUMMARY_DOCS_URL="https://docs.github.com/actions/using-workflows/workflow-commands-for-github-actions#adding-a-job-summary";class Summary{constructor(){this._buffer=""}filePath(){return n(this,void 0,void 0,(function*(){if(this._filePath){return this._filePath}const e=process.env[t.SUMMARY_ENV_VAR];if(!e){throw new Error(`Unable to find environment variable for $${t.SUMMARY_ENV_VAR}. Check if your runtime environment supports job summaries.`)}try{yield o(e,a.constants.R_OK|a.constants.W_OK)}catch(t){throw new Error(`Unable to access summary file: '${e}'. 
Check if the file has correct read/write permissions.`)}this._filePath=e;return this._filePath}))}wrap(e,t,r={}){const n=Object.entries(r).map((([e,t])=>` ${e}="${t}"`)).join("");if(!t){return`<${e}${n}>`}return`<${e}${n}>${t}`}write(e){return n(this,void 0,void 0,(function*(){const t=!!(e===null||e===void 0?void 0:e.overwrite);const r=yield this.filePath();const n=t?u:s;yield n(r,this._buffer,{encoding:"utf8"});return this.emptyBuffer()}))}clear(){return n(this,void 0,void 0,(function*(){return this.emptyBuffer().write({overwrite:true})}))}stringify(){return this._buffer}isEmptyBuffer(){return this._buffer.length===0}emptyBuffer(){this._buffer="";return this}addRaw(e,t=false){this._buffer+=e;return t?this.addEOL():this}addEOL(){return this.addRaw(i.EOL)}addCodeBlock(e,t){const r=Object.assign({},t&&{lang:t});const n=this.wrap("pre",this.wrap("code",e),r);return this.addRaw(n).addEOL()}addList(e,t=false){const r=t?"ol":"ul";const n=e.map((e=>this.wrap("li",e))).join("");const i=this.wrap(r,n);return this.addRaw(i).addEOL()}addTable(e){const t=e.map((e=>{const t=e.map((e=>{if(typeof e==="string"){return this.wrap("td",e)}const{header:t,data:r,colspan:n,rowspan:i}=e;const a=t?"th":"td";const o=Object.assign(Object.assign({},n&&{colspan:n}),i&&{rowspan:i});return this.wrap(a,r,o)})).join("");return this.wrap("tr",t)})).join("");const r=this.wrap("table",t);return this.addRaw(r).addEOL()}addDetails(e,t){const r=this.wrap("details",this.wrap("summary",e)+t);return this.addRaw(r).addEOL()}addImage(e,t,r){const{width:n,height:i}=r||{};const a=Object.assign(Object.assign({},n&&{width:n}),i&&{height:i});const o=this.wrap("img",null,Object.assign({src:e,alt:t},a));return this.addRaw(o).addEOL()}addHeading(e,t){const r=`h${t}`;const n=["h1","h2","h3","h4","h5","h6"].includes(r)?r:"h1";const i=this.wrap(n,e);return this.addRaw(i).addEOL()}addSeparator(){const e=this.wrap("hr",null);return this.addRaw(e).addEOL()}addBreak(){const e=this.wrap("br",null);return this.addRaw(e).addEOL()}addQuote(e,t){const r=Object.assign({},t&&{cite:t});const n=this.wrap("blockquote",e,r);return this.addRaw(n).addEOL()}addLink(e,t){const r=this.wrap("a",e,{href:t});return this.addRaw(r).addEOL()}}const l=new Summary;t.markdownSummary=l;t.summary=l},6321:(e,t)=>{Object.defineProperty(t,"__esModule",{value:true});t.toCommandProperties=t.toCommandValue=void 0;function toCommandValue(e){if(e===null||e===undefined){return""}else if(typeof e==="string"||e instanceof String){return e}return JSON.stringify(e)}t.toCommandValue=toCommandValue;function toCommandProperties(e){if(!Object.keys(e).length){return{}}return{title:e.title,file:e.file,line:e.startLine,endLine:e.endLine,col:e.startColumn,endColumn:e.endColumn}}t.toCommandProperties=toCommandProperties},5526:function(e,t){var r=this&&this.__awaiter||function(e,t,r,n){function adopt(e){return e instanceof r?e:new r((function(t){t(e)}))}return new(r||(r=Promise))((function(r,i){function fulfilled(e){try{step(n.next(e))}catch(e){i(e)}}function rejected(e){try{step(n["throw"](e))}catch(e){i(e)}}function step(e){e.done?r(e.value):adopt(e.value).then(fulfilled,rejected)}step((n=n.apply(e,t||[])).next())}))};Object.defineProperty(t,"__esModule",{value:true});t.PersonalAccessTokenCredentialHandler=t.BearerCredentialHandler=t.BasicCredentialHandler=void 0;class BasicCredentialHandler{constructor(e,t){this.username=e;this.password=t}prepareRequest(e){if(!e.headers){throw Error("The request has no headers")}e.headers["Authorization"]=`Basic 
${Buffer.from(`${this.username}:${this.password}`).toString("base64")}`}canHandleAuthentication(){return false}handleAuthentication(){return r(this,void 0,void 0,(function*(){throw new Error("not implemented")}))}}t.BasicCredentialHandler=BasicCredentialHandler;class BearerCredentialHandler{constructor(e){this.token=e}prepareRequest(e){if(!e.headers){throw Error("The request has no headers")}e.headers["Authorization"]=`Bearer ${this.token}`}canHandleAuthentication(){return false}handleAuthentication(){return r(this,void 0,void 0,(function*(){throw new Error("not implemented")}))}}t.BearerCredentialHandler=BearerCredentialHandler;class PersonalAccessTokenCredentialHandler{constructor(e){this.token=e}prepareRequest(e){if(!e.headers){throw Error("The request has no headers")}e.headers["Authorization"]=`Basic ${Buffer.from(`PAT:${this.token}`).toString("base64")}`}canHandleAuthentication(){return false}handleAuthentication(){return r(this,void 0,void 0,(function*(){throw new Error("not implemented")}))}}t.PersonalAccessTokenCredentialHandler=PersonalAccessTokenCredentialHandler},6255:function(e,t,r){var n=this&&this.__createBinding||(Object.create?function(e,t,r,n){if(n===undefined)n=r;Object.defineProperty(e,n,{enumerable:true,get:function(){return t[r]}})}:function(e,t,r,n){if(n===undefined)n=r;e[n]=t[r]});var i=this&&this.__setModuleDefault||(Object.create?function(e,t){Object.defineProperty(e,"default",{enumerable:true,value:t})}:function(e,t){e["default"]=t});var a=this&&this.__importStar||function(e){if(e&&e.__esModule)return e;var t={};if(e!=null)for(var r in e)if(r!=="default"&&Object.hasOwnProperty.call(e,r))n(t,e,r);i(t,e);return t};var o=this&&this.__awaiter||function(e,t,r,n){function adopt(e){return e instanceof r?e:new r((function(t){t(e)}))}return new(r||(r=Promise))((function(r,i){function fulfilled(e){try{step(n.next(e))}catch(e){i(e)}}function rejected(e){try{step(n["throw"](e))}catch(e){i(e)}}function step(e){e.done?r(e.value):adopt(e.value).then(fulfilled,rejected)}step((n=n.apply(e,t||[])).next())}))};Object.defineProperty(t,"__esModule",{value:true});t.HttpClient=t.isHttps=t.HttpClientResponse=t.HttpClientError=t.getProxyUrl=t.MediaTypes=t.Headers=t.HttpCodes=void 0;const s=a(r(3685));const u=a(r(5687));const l=a(r(9835));const c=a(r(4294));var d;(function(e){e[e["OK"]=200]="OK";e[e["MultipleChoices"]=300]="MultipleChoices";e[e["MovedPermanently"]=301]="MovedPermanently";e[e["ResourceMoved"]=302]="ResourceMoved";e[e["SeeOther"]=303]="SeeOther";e[e["NotModified"]=304]="NotModified";e[e["UseProxy"]=305]="UseProxy";e[e["SwitchProxy"]=306]="SwitchProxy";e[e["TemporaryRedirect"]=307]="TemporaryRedirect";e[e["PermanentRedirect"]=308]="PermanentRedirect";e[e["BadRequest"]=400]="BadRequest";e[e["Unauthorized"]=401]="Unauthorized";e[e["PaymentRequired"]=402]="PaymentRequired";e[e["Forbidden"]=403]="Forbidden";e[e["NotFound"]=404]="NotFound";e[e["MethodNotAllowed"]=405]="MethodNotAllowed";e[e["NotAcceptable"]=406]="NotAcceptable";e[e["ProxyAuthenticationRequired"]=407]="ProxyAuthenticationRequired";e[e["RequestTimeout"]=408]="RequestTimeout";e[e["Conflict"]=409]="Conflict";e[e["Gone"]=410]="Gone";e[e["TooManyRequests"]=429]="TooManyRequests";e[e["InternalServerError"]=500]="InternalServerError";e[e["NotImplemented"]=501]="NotImplemented";e[e["BadGateway"]=502]="BadGateway";e[e["ServiceUnavailable"]=503]="ServiceUnavailable";e[e["GatewayTimeout"]=504]="GatewayTimeout"})(d=t.HttpCodes||(t.HttpCodes={}));var 
h;(function(e){e["Accept"]="accept";e["ContentType"]="content-type"})(h=t.Headers||(t.Headers={}));var p;(function(e){e["ApplicationJson"]="application/json"})(p=t.MediaTypes||(t.MediaTypes={}));function getProxyUrl(e){const t=l.getProxyUrl(new URL(e));return t?t.href:""}t.getProxyUrl=getProxyUrl;const g=[d.MovedPermanently,d.ResourceMoved,d.SeeOther,d.TemporaryRedirect,d.PermanentRedirect];const m=[d.BadGateway,d.ServiceUnavailable,d.GatewayTimeout];const b=["OPTIONS","GET","DELETE","HEAD"];const v=10;const _=5;class HttpClientError extends Error{constructor(e,t){super(e);this.name="HttpClientError";this.statusCode=t;Object.setPrototypeOf(this,HttpClientError.prototype)}}t.HttpClientError=HttpClientError;class HttpClientResponse{constructor(e){this.message=e}readBody(){return o(this,void 0,void 0,(function*(){return new Promise((e=>o(this,void 0,void 0,(function*(){let t=Buffer.alloc(0);this.message.on("data",(e=>{t=Buffer.concat([t,e])}));this.message.on("end",(()=>{e(t.toString())}))}))))}))}}t.HttpClientResponse=HttpClientResponse;function isHttps(e){const t=new URL(e);return t.protocol==="https:"}t.isHttps=isHttps;class HttpClient{constructor(e,t,r){this._ignoreSslError=false;this._allowRedirects=true;this._allowRedirectDowngrade=false;this._maxRedirects=50;this._allowRetries=false;this._maxRetries=1;this._keepAlive=false;this._disposed=false;this.userAgent=e;this.handlers=t||[];this.requestOptions=r;if(r){if(r.ignoreSslError!=null){this._ignoreSslError=r.ignoreSslError}this._socketTimeout=r.socketTimeout;if(r.allowRedirects!=null){this._allowRedirects=r.allowRedirects}if(r.allowRedirectDowngrade!=null){this._allowRedirectDowngrade=r.allowRedirectDowngrade}if(r.maxRedirects!=null){this._maxRedirects=Math.max(r.maxRedirects,0)}if(r.keepAlive!=null){this._keepAlive=r.keepAlive}if(r.allowRetries!=null){this._allowRetries=r.allowRetries}if(r.maxRetries!=null){this._maxRetries=r.maxRetries}}}options(e,t){return o(this,void 0,void 0,(function*(){return this.request("OPTIONS",e,null,t||{})}))}get(e,t){return o(this,void 0,void 0,(function*(){return this.request("GET",e,null,t||{})}))}del(e,t){return o(this,void 0,void 0,(function*(){return this.request("DELETE",e,null,t||{})}))}post(e,t,r){return o(this,void 0,void 0,(function*(){return this.request("POST",e,t,r||{})}))}patch(e,t,r){return o(this,void 0,void 0,(function*(){return this.request("PATCH",e,t,r||{})}))}put(e,t,r){return o(this,void 0,void 0,(function*(){return this.request("PUT",e,t,r||{})}))}head(e,t){return o(this,void 0,void 0,(function*(){return this.request("HEAD",e,null,t||{})}))}sendStream(e,t,r,n){return o(this,void 0,void 0,(function*(){return this.request(e,t,r,n)}))}getJson(e,t={}){return o(this,void 0,void 0,(function*(){t[h.Accept]=this._getExistingOrDefaultHeader(t,h.Accept,p.ApplicationJson);const r=yield this.get(e,t);return this._processResponse(r,this.requestOptions)}))}postJson(e,t,r={}){return o(this,void 0,void 0,(function*(){const n=JSON.stringify(t,null,2);r[h.Accept]=this._getExistingOrDefaultHeader(r,h.Accept,p.ApplicationJson);r[h.ContentType]=this._getExistingOrDefaultHeader(r,h.ContentType,p.ApplicationJson);const i=yield this.post(e,n,r);return this._processResponse(i,this.requestOptions)}))}putJson(e,t,r={}){return o(this,void 0,void 0,(function*(){const n=JSON.stringify(t,null,2);r[h.Accept]=this._getExistingOrDefaultHeader(r,h.Accept,p.ApplicationJson);r[h.ContentType]=this._getExistingOrDefaultHeader(r,h.ContentType,p.ApplicationJson);const i=yield this.put(e,n,r);return 
this._processResponse(i,this.requestOptions)}))}patchJson(e,t,r={}){return o(this,void 0,void 0,(function*(){const n=JSON.stringify(t,null,2);r[h.Accept]=this._getExistingOrDefaultHeader(r,h.Accept,p.ApplicationJson);r[h.ContentType]=this._getExistingOrDefaultHeader(r,h.ContentType,p.ApplicationJson);const i=yield this.patch(e,n,r);return this._processResponse(i,this.requestOptions)}))}request(e,t,r,n){return o(this,void 0,void 0,(function*(){if(this._disposed){throw new Error("Client has already been disposed.")}const i=new URL(t);let a=this._prepareRequest(e,i,n);const o=this._allowRetries&&b.includes(e)?this._maxRetries+1:1;let s=0;let u;do{u=yield this.requestRaw(a,r);if(u&&u.message&&u.message.statusCode===d.Unauthorized){let e;for(const t of this.handlers){if(t.canHandleAuthentication(u)){e=t;break}}if(e){return e.handleAuthentication(this,a,r)}else{return u}}let t=this._maxRedirects;while(u.message.statusCode&&g.includes(u.message.statusCode)&&this._allowRedirects&&t>0){const o=u.message.headers["location"];if(!o){break}const s=new URL(o);if(i.protocol==="https:"&&i.protocol!==s.protocol&&!this._allowRedirectDowngrade){throw new Error("Redirect from HTTPS to HTTP protocol. This downgrade is not allowed for security reasons. If you want to allow this behavior, set the allowRedirectDowngrade option to true.")}yield u.readBody();if(s.hostname!==i.hostname){for(const e in n){if(e.toLowerCase()==="authorization"){delete n[e]}}}a=this._prepareRequest(e,s,n);u=yield this.requestRaw(a,r);t--}if(!u.message.statusCode||!m.includes(u.message.statusCode)){return u}s+=1;if(s{function callbackForResult(e,t){if(e){n(e)}else if(!t){n(new Error("Unknown error"))}else{r(t)}}this.requestRawWithCallback(e,t,callbackForResult)}))}))}requestRawWithCallback(e,t,r){if(typeof t==="string"){if(!e.options.headers){e.options.headers={}}e.options.headers["Content-Length"]=Buffer.byteLength(t,"utf8")}let n=false;function handleResult(e,t){if(!n){n=true;r(e,t)}}const i=e.httpModule.request(e.options,(e=>{const t=new HttpClientResponse(e);handleResult(undefined,t)}));let a;i.on("socket",(e=>{a=e}));i.setTimeout(this._socketTimeout||3*6e4,(()=>{if(a){a.end()}handleResult(new Error(`Request timeout: ${e.options.path}`))}));i.on("error",(function(e){handleResult(e)}));if(t&&typeof t==="string"){i.write(t,"utf8")}if(t&&typeof t!=="string"){t.on("close",(function(){i.end()}));t.pipe(i)}else{i.end()}}getAgent(e){const t=new URL(e);return this._getAgent(t)}_prepareRequest(e,t,r){const n={};n.parsedUrl=t;const i=n.parsedUrl.protocol==="https:";n.httpModule=i?u:s;const a=i?443:80;n.options={};n.options.host=n.parsedUrl.hostname;n.options.port=n.parsedUrl.port?parseInt(n.parsedUrl.port):a;n.options.path=(n.parsedUrl.pathname||"")+(n.parsedUrl.search||"");n.options.method=e;n.options.headers=this._mergeHeaders(r);if(this.userAgent!=null){n.options.headers["user-agent"]=this.userAgent}n.options.agent=this._getAgent(n.parsedUrl);if(this.handlers){for(const e of this.handlers){e.prepareRequest(n.options)}}return n}_mergeHeaders(e){if(this.requestOptions&&this.requestOptions.headers){return Object.assign({},lowercaseKeys(this.requestOptions.headers),lowercaseKeys(e||{}))}return lowercaseKeys(e||{})}_getExistingOrDefaultHeader(e,t,r){let n;if(this.requestOptions&&this.requestOptions.headers){n=lowercaseKeys(this.requestOptions.headers)[t]}return e[t]||n||r}_getAgent(e){let t;const r=l.getProxyUrl(e);const n=r&&r.hostname;if(this._keepAlive&&n){t=this._proxyAgent}if(this._keepAlive&&!n){t=this._agent}if(t){return t}const 
i=e.protocol==="https:";let a=100;if(this.requestOptions){a=this.requestOptions.maxSockets||s.globalAgent.maxSockets}if(r&&r.hostname){const e={maxSockets:a,keepAlive:this._keepAlive,proxy:Object.assign(Object.assign({},(r.username||r.password)&&{proxyAuth:`${r.username}:${r.password}`}),{host:r.hostname,port:r.port})};let n;const o=r.protocol==="https:";if(i){n=o?c.httpsOverHttps:c.httpsOverHttp}else{n=o?c.httpOverHttps:c.httpOverHttp}t=n(e);this._proxyAgent=t}if(this._keepAlive&&!t){const e={keepAlive:this._keepAlive,maxSockets:a};t=i?new u.Agent(e):new s.Agent(e);this._agent=t}if(!t){t=i?u.globalAgent:s.globalAgent}if(i&&this._ignoreSslError){t.options=Object.assign(t.options||{},{rejectUnauthorized:false})}return t}_performExponentialBackoff(e){return o(this,void 0,void 0,(function*(){e=Math.min(v,e);const t=_*Math.pow(2,e);return new Promise((e=>setTimeout((()=>e()),t)))}))}_processResponse(e,t){return o(this,void 0,void 0,(function*(){return new Promise(((r,n)=>o(this,void 0,void 0,(function*(){const i=e.message.statusCode||0;const a={statusCode:i,result:null,headers:{}};if(i===d.NotFound){r(a)}function dateTimeDeserializer(e,t){if(typeof t==="string"){const e=new Date(t);if(!isNaN(e.valueOf())){return e}}return t}let o;let s;try{s=yield e.readBody();if(s&&s.length>0){if(t&&t.deserializeDates){o=JSON.parse(s,dateTimeDeserializer)}else{o=JSON.parse(s)}a.result=o}a.headers=e.message.headers}catch(e){}if(i>299){let e;if(o&&o.message){e=o.message}else if(s&&s.length>0){e=s}else{e=`Failed request: (${i})`}const t=new HttpClientError(e,i);t.result=a.result;n(t)}else{r(a)}}))))}))}}t.HttpClient=HttpClient;const lowercaseKeys=e=>Object.keys(e).reduce(((t,r)=>(t[r.toLowerCase()]=e[r],t)),{})},9835:(e,t)=>{Object.defineProperty(t,"__esModule",{value:true});t.checkBypass=t.getProxyUrl=void 0;function getProxyUrl(e){const t=e.protocol==="https:";if(checkBypass(e)){return undefined}const r=(()=>{if(t){return process.env["https_proxy"]||process.env["HTTPS_PROXY"]}else{return process.env["http_proxy"]||process.env["HTTP_PROXY"]}})();if(r){return new URL(r)}else{return undefined}}t.getProxyUrl=getProxyUrl;function checkBypass(e){if(!e.hostname){return false}const t=process.env["no_proxy"]||process.env["NO_PROXY"]||"";if(!t){return false}let r;if(e.port){r=Number(e.port)}else if(e.protocol==="http:"){r=80}else if(e.protocol==="https:"){r=443}const n=[e.hostname.toUpperCase()];if(typeof r==="number"){n.push(`${n[0]}:${r}`)}for(const e of t.split(",").map((e=>e.trim().toUpperCase())).filter((e=>e))){if(n.some((t=>t===e))){return true}}return false}t.checkBypass=checkBypass},3664:(e,t,r)=>{const{Buffer:n}=r(4300);const i=Symbol.for("BufferList");function BufferList(e){if(!(this instanceof BufferList)){return new BufferList(e)}BufferList._init.call(this,e)}BufferList._init=function _init(e){Object.defineProperty(this,i,{value:true});this._bufs=[];this.length=0;if(e){this.append(e)}};BufferList.prototype._new=function _new(e){return new BufferList(e)};BufferList.prototype._offset=function _offset(e){if(e===0){return[0,0]}let t=0;for(let r=0;rthis.length||e<0){return undefined}const t=this._offset(e);return this._bufs[t[0]][t[1]]};BufferList.prototype.slice=function slice(e,t){if(typeof e==="number"&&e<0){e+=this.length}if(typeof t==="number"&&t<0){t+=this.length}return this.copy(null,0,e,t)};BufferList.prototype.copy=function copy(e,t,r,i){if(typeof r!=="number"||r<0){r=0}if(typeof i!=="number"||i>this.length){i=this.length}if(r>=this.length){return e||n.alloc(0)}if(i<=0){return e||n.alloc(0)}const 
copy=!!e;const a=this._offset(r);const o=i-r;let s=o;let u=copy&&t||0;let l=a[1];if(r===0&&i===this.length){if(!copy){return this._bufs.length===1?this._bufs[0]:n.concat(this._bufs,this.length)}for(let t=0;tr){this._bufs[t].copy(e,u,l);u+=r}else{this._bufs[t].copy(e,u,l,l+s);u+=r;break}s-=r;if(l){l=0}}if(e.length>u)return e.slice(0,u);return e};BufferList.prototype.shallowSlice=function shallowSlice(e,t){e=e||0;t=typeof t!=="number"?this.length:t;if(e<0){e+=this.length}if(t<0){t+=this.length}if(e===t){return this._new()}const r=this._offset(e);const n=this._offset(t);const i=this._bufs.slice(r[0],n[0]+1);if(n[1]===0){i.pop()}else{i[i.length-1]=i[i.length-1].slice(0,n[1])}if(r[1]!==0){i[0]=i[0].slice(r[1])}return this._new(i)};BufferList.prototype.toString=function toString(e,t,r){return this.slice(t,r).toString(e)};BufferList.prototype.consume=function consume(e){e=Math.trunc(e);if(Number.isNaN(e)||e<=0)return this;while(this._bufs.length){if(e>=this._bufs[0].length){e-=this._bufs[0].length;this.length-=this._bufs[0].length;this._bufs.shift()}else{this._bufs[0]=this._bufs[0].slice(e);this.length-=e;break}}return this};BufferList.prototype.duplicate=function duplicate(){const e=this._new();for(let t=0;tthis.length?this.length:t}const i=this._offset(t);let a=i[0];let o=i[1];for(;a=e.length){const r=t.indexOf(e,o);if(r!==-1){return this._reverseOffset([a,r])}o=t.length-e.length+1}else{const t=this._reverseOffset([a,o]);if(this._match(t,e)){return t}o++}}o=0}return-1};BufferList.prototype._match=function(e,t){if(this.length-e{const n=r(1642).Duplex;const i=r(4124);const a=r(3664);function BufferListStream(e){if(!(this instanceof BufferListStream)){return new BufferListStream(e)}if(typeof e==="function"){this._callback=e;const t=function piper(e){if(this._callback){this._callback(e);this._callback=null}}.bind(this);this.on("pipe",(function onPipe(e){e.on("error",t)}));this.on("unpipe",(function onUnpipe(e){e.removeListener("error",t)}));e=null}a._init.call(this,e);n.call(this)}i(BufferListStream,n);Object.assign(BufferListStream.prototype,a.prototype);BufferListStream.prototype._new=function _new(e){return new BufferListStream(e)};BufferListStream.prototype._write=function _write(e,t,r){this._appendBuffer(e);if(typeof r==="function"){r()}};BufferListStream.prototype._read=function _read(e){if(!this.length){return this.push(null)}e=Math.min(e,this.length);this.push(this.slice(0,e));this.consume(e)};BufferListStream.prototype.end=function end(e){n.prototype.end.call(this,e);if(this._callback){this._callback(null,this.slice());this._callback=null}};BufferListStream.prototype._destroy=function _destroy(e,t){this._bufs.length=0;this.length=0;t(e)};BufferListStream.prototype._isBufferList=function _isBufferList(e){return e instanceof BufferListStream||e instanceof a||BufferListStream.isBufferList(e)};BufferListStream.isBufferList=a.isBufferList;e.exports=BufferListStream;e.exports.BufferListStream=BufferListStream;e.exports.BufferList=a},1205:(e,t,r)=>{var n=r(1223);var noop=function(){};var isRequest=function(e){return e.setHeader&&typeof e.abort==="function"};var isChildProcess=function(e){return e.stdio&&Array.isArray(e.stdio)&&e.stdio.length===3};var eos=function(e,t,r){if(typeof t==="function")return eos(e,null,t);if(!t)t={};r=n(r||noop);var i=e._writableState;var a=e._readableState;var o=t.readable||t.readable!==false&&e.readable;var s=t.writable||t.writable!==false&&e.writable;var u=false;var onlegacyfinish=function(){if(!e.writable)onfinish()};var 
onfinish=function(){s=false;if(!o)r.call(e)};var onend=function(){o=false;if(!s)r.call(e)};var onexit=function(t){r.call(e,t?new Error("exited with error code: "+t):null)};var onerror=function(t){r.call(e,t)};var onclose=function(){process.nextTick(onclosenexttick)};var onclosenexttick=function(){if(u)return;if(o&&!(a&&(a.ended&&!a.destroyed)))return r.call(e,new Error("premature close"));if(s&&!(i&&(i.ended&&!i.destroyed)))return r.call(e,new Error("premature close"))};var onrequest=function(){e.req.on("finish",onfinish)};if(isRequest(e)){e.on("complete",onfinish);e.on("abort",onclose);if(e.req)onrequest();else e.on("request",onrequest)}else if(s&&!i){e.on("end",onlegacyfinish);e.on("close",onlegacyfinish)}if(isChildProcess(e))e.on("exit",onexit);e.on("end",onend);e.on("finish",onfinish);if(t.error!==false)e.on("error",onerror);e.on("close",onclose);return function(){u=true;e.removeListener("complete",onfinish);e.removeListener("abort",onclose);e.removeListener("request",onrequest);if(e.req)e.req.removeListener("finish",onfinish);e.removeListener("end",onlegacyfinish);e.removeListener("close",onlegacyfinish);e.removeListener("finish",onfinish);e.removeListener("exit",onexit);e.removeListener("end",onend);e.removeListener("error",onerror);e.removeListener("close",onclose)}};e.exports=eos},3186:(e,t,r)=>{e.exports=r(7147).constants||r(2057)},4124:(e,t,r)=>{try{var n=r(3837);if(typeof n.inherits!=="function")throw"";e.exports=n.inherits}catch(t){e.exports=r(8544)}},8544:e=>{if(typeof Object.create==="function"){e.exports=function inherits(e,t){if(t){e.super_=t;e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:false,writable:true,configurable:true}})}}}else{e.exports=function inherits(e,t){if(t){e.super_=t;var TempCtor=function(){};TempCtor.prototype=t.prototype;e.prototype=new TempCtor;e.prototype.constructor=e}}}},1223:(e,t,r)=>{var n=r(2940);e.exports=n(once);e.exports.strict=n(onceStrict);once.proto=once((function(){Object.defineProperty(Function.prototype,"once",{value:function(){return once(this)},configurable:true});Object.defineProperty(Function.prototype,"onceStrict",{value:function(){return onceStrict(this)},configurable:true})}));function once(e){var f=function(){if(f.called)return f.value;f.called=true;return f.value=e.apply(this,arguments)};f.called=false;return f}function onceStrict(e){var f=function(){if(f.called)throw new Error(f.onceError);f.called=true;return f.value=e.apply(this,arguments)};var t=e.name||"Function wrapped with `once`";f.onceError=t+" shouldn't be called more than once";f.called=false;return f}},7214:e=>{const t={};function createErrorType(e,r,n){if(!n){n=Error}function getMessage(e,t,n){if(typeof r==="string"){return r}else{return r(e,t,n)}}class NodeError extends n{constructor(e,t,r){super(getMessage(e,t,r))}}NodeError.prototype.name=n.name;NodeError.prototype.code=e;t[e]=NodeError}function oneOf(e,t){if(Array.isArray(e)){const r=e.length;e=e.map((e=>String(e)));if(r>2){return`one of ${t} ${e.slice(0,r-1).join(", ")}, or `+e[r-1]}else if(r===2){return`one of ${t} ${e[0]} or ${e[1]}`}else{return`of ${t} ${e[0]}`}}else{return`of ${t} ${String(e)}`}}function startsWith(e,t,r){return e.substr(!r||r<0?0:+r,t.length)===t}function endsWith(e,t,r){if(r===undefined||r>e.length){r=e.length}return e.substring(r-t.length,r)===t}function includes(e,t,r){if(typeof r!=="number"){r=0}if(r+t.length>e.length){return false}else{return e.indexOf(t,r)!==-1}}createErrorType("ERR_INVALID_OPT_VALUE",(function(e,t){return'The value "'+t+'" is invalid for 
option "'+e+'"'}),TypeError);createErrorType("ERR_INVALID_ARG_TYPE",(function(e,t,r){let n;if(typeof t==="string"&&startsWith(t,"not ")){n="must not be";t=t.replace(/^not /,"")}else{n="must be"}let i;if(endsWith(e," argument")){i=`The ${e} ${n} ${oneOf(t,"type")}`}else{const r=includes(e,".")?"property":"argument";i=`The "${e}" ${r} ${n} ${oneOf(t,"type")}`}i+=`. Received type ${typeof r}`;return i}),TypeError);createErrorType("ERR_STREAM_PUSH_AFTER_EOF","stream.push() after EOF");createErrorType("ERR_METHOD_NOT_IMPLEMENTED",(function(e){return"The "+e+" method is not implemented"}));createErrorType("ERR_STREAM_PREMATURE_CLOSE","Premature close");createErrorType("ERR_STREAM_DESTROYED",(function(e){return"Cannot call "+e+" after a stream was destroyed"}));createErrorType("ERR_MULTIPLE_CALLBACK","Callback called multiple times");createErrorType("ERR_STREAM_CANNOT_PIPE","Cannot pipe, not readable");createErrorType("ERR_STREAM_WRITE_AFTER_END","write after end");createErrorType("ERR_STREAM_NULL_VALUES","May not write null values to stream",TypeError);createErrorType("ERR_UNKNOWN_ENCODING",(function(e){return"Unknown encoding: "+e}),TypeError);createErrorType("ERR_STREAM_UNSHIFT_AFTER_END_EVENT","stream.unshift() after end event");e.exports.q=t},1359:(e,t,r)=>{var n=Object.keys||function(e){var t=[];for(var r in e){t.push(r)}return t};e.exports=Duplex;var i=r(1433);var a=r(6993);r(4124)(Duplex,i);{var o=n(a.prototype);for(var s=0;s{e.exports=PassThrough;var n=r(4415);r(4124)(PassThrough,n);function PassThrough(e){if(!(this instanceof PassThrough))return new PassThrough(e);n.call(this,e)}PassThrough.prototype._transform=function(e,t,r){r(null,e)}},1433:(e,t,r)=>{e.exports=Readable;var n;Readable.ReadableState=ReadableState;var i=r(2361).EventEmitter;var a=function EElistenerCount(e,t){return e.listeners(t).length};var o=r(2387);var s=r(4300).Buffer;var u=global.Uint8Array||function(){};function _uint8ArrayToBuffer(e){return s.from(e)}function _isUint8Array(e){return s.isBuffer(e)||e instanceof u}var l=r(3837);var c;if(l&&l.debuglog){c=l.debuglog("stream")}else{c=function debug(){}}var d=r(2746);var h=r(7049);var p=r(9948),g=p.getHighWaterMark;var m=r(7214).q,b=m.ERR_INVALID_ARG_TYPE,v=m.ERR_STREAM_PUSH_AFTER_EOF,_=m.ERR_METHOD_NOT_IMPLEMENTED,y=m.ERR_STREAM_UNSHIFT_AFTER_END_EVENT;var w;var R;var S;r(4124)(Readable,o);var E=h.errorOrDestroy;var k=["error","close","destroy","pause","resume"];function prependListener(e,t,r){if(typeof e.prependListener==="function")return e.prependListener(t,r);if(!e._events||!e._events[t])e.on(t,r);else if(Array.isArray(e._events[t]))e._events[t].unshift(r);else e._events[t]=[r,e._events[t]]}function ReadableState(e,t,i){n=n||r(1359);e=e||{};if(typeof i!=="boolean")i=t instanceof n;this.objectMode=!!e.objectMode;if(i)this.objectMode=this.objectMode||!!e.readableObjectMode;this.highWaterMark=g(this,e,"readableHighWaterMark",i);this.buffer=new d;this.length=0;this.pipes=null;this.pipesCount=0;this.flowing=null;this.ended=false;this.endEmitted=false;this.reading=false;this.sync=true;this.needReadable=false;this.emittedReadable=false;this.readableListening=false;this.resumeScheduled=false;this.paused=true;this.emitClose=e.emitClose!==false;this.autoDestroy=!!e.autoDestroy;this.destroyed=false;this.defaultEncoding=e.defaultEncoding||"utf8";this.awaitDrain=0;this.readingMore=false;this.decoder=null;this.encoding=null;if(e.encoding){if(!w)w=r(4841).s;this.decoder=new w(e.encoding);this.encoding=e.encoding}}function Readable(e){n=n||r(1359);if(!(this instanceof 
Readable))return new Readable(e);var t=this instanceof n;this._readableState=new ReadableState(e,this,t);this.readable=true;if(e){if(typeof e.read==="function")this._read=e.read;if(typeof e.destroy==="function")this._destroy=e.destroy}o.call(this)}Object.defineProperty(Readable.prototype,"destroyed",{enumerable:false,get:function get(){if(this._readableState===undefined){return false}return this._readableState.destroyed},set:function set(e){if(!this._readableState){return}this._readableState.destroyed=e}});Readable.prototype.destroy=h.destroy;Readable.prototype._undestroy=h.undestroy;Readable.prototype._destroy=function(e,t){t(e)};Readable.prototype.push=function(e,t){var r=this._readableState;var n;if(!r.objectMode){if(typeof e==="string"){t=t||r.defaultEncoding;if(t!==r.encoding){e=s.from(e,t);t=""}n=true}}else{n=true}return readableAddChunk(this,e,t,false,n)};Readable.prototype.unshift=function(e){return readableAddChunk(this,e,null,true,false)};function readableAddChunk(e,t,r,n,i){c("readableAddChunk",t);var a=e._readableState;if(t===null){a.reading=false;onEofChunk(e,a)}else{var o;if(!i)o=chunkInvalid(a,t);if(o){E(e,o)}else if(a.objectMode||t&&t.length>0){if(typeof t!=="string"&&!a.objectMode&&Object.getPrototypeOf(t)!==s.prototype){t=_uint8ArrayToBuffer(t)}if(n){if(a.endEmitted)E(e,new y);else addChunk(e,a,t,true)}else if(a.ended){E(e,new v)}else if(a.destroyed){return false}else{a.reading=false;if(a.decoder&&!r){t=a.decoder.write(t);if(a.objectMode||t.length!==0)addChunk(e,a,t,false);else maybeReadMore(e,a)}else{addChunk(e,a,t,false)}}}else if(!n){a.reading=false;maybeReadMore(e,a)}}return!a.ended&&(a.length=O){e=O}else{e--;e|=e>>>1;e|=e>>>2;e|=e>>>4;e|=e>>>8;e|=e>>>16;e++}return e}function howMuchToRead(e,t){if(e<=0||t.length===0&&t.ended)return 0;if(t.objectMode)return 1;if(e!==e){if(t.flowing&&t.length)return t.buffer.head.data.length;else return t.length}if(e>t.highWaterMark)t.highWaterMark=computeNewHighWaterMark(e);if(e<=t.length)return e;if(!t.ended){t.needReadable=true;return 0}return t.length}Readable.prototype.read=function(e){c("read",e);e=parseInt(e,10);var t=this._readableState;var r=e;if(e!==0)t.emittedReadable=false;if(e===0&&t.needReadable&&((t.highWaterMark!==0?t.length>=t.highWaterMark:t.length>0)||t.ended)){c("read: emitReadable",t.length,t.ended);if(t.length===0&&t.ended)endReadable(this);else emitReadable(this);return null}e=howMuchToRead(e,t);if(e===0&&t.ended){if(t.length===0)endReadable(this);return null}var n=t.needReadable;c("need readable",n);if(t.length===0||t.length-e0)i=fromList(e,t);else i=null;if(i===null){t.needReadable=t.length<=t.highWaterMark;e=0}else{t.length-=e;t.awaitDrain=0}if(t.length===0){if(!t.ended)t.needReadable=true;if(r!==e&&t.ended)endReadable(this)}if(i!==null)this.emit("data",i);return i};function onEofChunk(e,t){c("onEofChunk");if(t.ended)return;if(t.decoder){var r=t.decoder.end();if(r&&r.length){t.buffer.push(r);t.length+=t.objectMode?1:r.length}}t.ended=true;if(t.sync){emitReadable(e)}else{t.needReadable=false;if(!t.emittedReadable){t.emittedReadable=true;emitReadable_(e)}}}function emitReadable(e){var t=e._readableState;c("emitReadable",t.needReadable,t.emittedReadable);t.needReadable=false;if(!t.emittedReadable){c("emitReadable",t.flowing);t.emittedReadable=true;process.nextTick(emitReadable_,e)}}function emitReadable_(e){var 
t=e._readableState;c("emitReadable_",t.destroyed,t.length,t.ended);if(!t.destroyed&&(t.length||t.ended)){e.emit("readable");t.emittedReadable=false}t.needReadable=!t.flowing&&!t.ended&&t.length<=t.highWaterMark;flow(e)}function maybeReadMore(e,t){if(!t.readingMore){t.readingMore=true;process.nextTick(maybeReadMore_,e,t)}}function maybeReadMore_(e,t){while(!t.reading&&!t.ended&&(t.length1&&indexOf(n.pipes,e)!==-1)&&!u){c("false write response, pause",n.awaitDrain);n.awaitDrain++}r.pause()}}function onerror(t){c("onerror",t);unpipe();e.removeListener("error",onerror);if(a(e,"error")===0)E(e,t)}prependListener(e,"error",onerror);function onclose(){e.removeListener("finish",onfinish);unpipe()}e.once("close",onclose);function onfinish(){c("onfinish");e.removeListener("close",onclose);unpipe()}e.once("finish",onfinish);function unpipe(){c("unpipe");r.unpipe(e)}e.emit("pipe",r);if(!n.flowing){c("pipe resume");r.resume()}return e};function pipeOnDrain(e){return function pipeOnDrainFunctionResult(){var t=e._readableState;c("pipeOnDrain",t.awaitDrain);if(t.awaitDrain)t.awaitDrain--;if(t.awaitDrain===0&&a(e,"data")){t.flowing=true;flow(e)}}}Readable.prototype.unpipe=function(e){var t=this._readableState;var r={hasUnpiped:false};if(t.pipesCount===0)return this;if(t.pipesCount===1){if(e&&e!==t.pipes)return this;if(!e)e=t.pipes;t.pipes=null;t.pipesCount=0;t.flowing=false;if(e)e.emit("unpipe",this,r);return this}if(!e){var n=t.pipes;var i=t.pipesCount;t.pipes=null;t.pipesCount=0;t.flowing=false;for(var a=0;a0;if(n.flowing!==false)this.resume()}else if(e==="readable"){if(!n.endEmitted&&!n.readableListening){n.readableListening=n.needReadable=true;n.flowing=false;n.emittedReadable=false;c("on readable",n.length,n.reading);if(n.length){emitReadable(this)}else if(!n.reading){process.nextTick(nReadingNextTick,this)}}}return r};Readable.prototype.addListener=Readable.prototype.on;Readable.prototype.removeListener=function(e,t){var r=o.prototype.removeListener.call(this,e,t);if(e==="readable"){process.nextTick(updateReadableListening,this)}return r};Readable.prototype.removeAllListeners=function(e){var t=o.prototype.removeAllListeners.apply(this,arguments);if(e==="readable"||e===undefined){process.nextTick(updateReadableListening,this)}return t};function updateReadableListening(e){var t=e._readableState;t.readableListening=e.listenerCount("readable")>0;if(t.resumeScheduled&&!t.paused){t.flowing=true}else if(e.listenerCount("data")>0){e.resume()}}function nReadingNextTick(e){c("readable nexttick read 0");e.read(0)}Readable.prototype.resume=function(){var e=this._readableState;if(!e.flowing){c("resume");e.flowing=!e.readableListening;resume(this,e)}e.paused=false;return this};function resume(e,t){if(!t.resumeScheduled){t.resumeScheduled=true;process.nextTick(resume_,e,t)}}function resume_(e,t){c("resume",t.reading);if(!t.reading){e.read(0)}t.resumeScheduled=false;e.emit("resume");flow(e);if(t.flowing&&!t.reading)e.read(0)}Readable.prototype.pause=function(){c("call pause flowing=%j",this._readableState.flowing);if(this._readableState.flowing!==false){c("pause");this._readableState.flowing=false;this.emit("pause")}this._readableState.paused=true;return this};function flow(e){var t=e._readableState;c("flow",t.flowing);while(t.flowing&&e.read()!==null){}}Readable.prototype.wrap=function(e){var t=this;var r=this._readableState;var n=false;e.on("end",(function(){c("wrapped end");if(r.decoder&&!r.ended){var e=r.decoder.end();if(e&&e.length)t.push(e)}t.push(null)}));e.on("data",(function(i){c("wrapped 
data");if(r.decoder)i=r.decoder.write(i);if(r.objectMode&&(i===null||i===undefined))return;else if(!r.objectMode&&(!i||!i.length))return;var a=t.push(i);if(!a){n=true;e.pause()}}));for(var i in e){if(this[i]===undefined&&typeof e[i]==="function"){this[i]=function methodWrap(t){return function methodWrapReturnFunction(){return e[t].apply(e,arguments)}}(i)}}for(var a=0;a=t.length){if(t.decoder)r=t.buffer.join("");else if(t.buffer.length===1)r=t.buffer.first();else r=t.buffer.concat(t.length);t.buffer.clear()}else{r=t.buffer.consume(e,t.decoder)}return r}function endReadable(e){var t=e._readableState;c("endReadable",t.endEmitted);if(!t.endEmitted){t.ended=true;process.nextTick(endReadableNT,t,e)}}function endReadableNT(e,t){c("endReadableNT",e.endEmitted,e.length);if(!e.endEmitted&&e.length===0){e.endEmitted=true;t.readable=false;t.emit("end");if(e.autoDestroy){var r=t._writableState;if(!r||r.autoDestroy&&r.finished){t.destroy()}}}}if(typeof Symbol==="function"){Readable.from=function(e,t){if(S===undefined){S=r(9082)}return S(Readable,e,t)}}function indexOf(e,t){for(var r=0,n=e.length;r{e.exports=Transform;var n=r(7214).q,i=n.ERR_METHOD_NOT_IMPLEMENTED,a=n.ERR_MULTIPLE_CALLBACK,o=n.ERR_TRANSFORM_ALREADY_TRANSFORMING,s=n.ERR_TRANSFORM_WITH_LENGTH_0;var u=r(1359);r(4124)(Transform,u);function afterTransform(e,t){var r=this._transformState;r.transforming=false;var n=r.writecb;if(n===null){return this.emit("error",new a)}r.writechunk=null;r.writecb=null;if(t!=null)this.push(t);n(e);var i=this._readableState;i.reading=false;if(i.needReadable||i.length{e.exports=Writable;function WriteReq(e,t,r){this.chunk=e;this.encoding=t;this.callback=r;this.next=null}function CorkedRequest(e){var t=this;this.next=null;this.entry=null;this.finish=function(){onCorkedFinish(t,e)}}var n;Writable.WritableState=WritableState;var i={deprecate:r(5278)};var a=r(2387);var o=r(4300).Buffer;var s=global.Uint8Array||function(){};function _uint8ArrayToBuffer(e){return o.from(e)}function _isUint8Array(e){return o.isBuffer(e)||e instanceof s}var u=r(7049);var l=r(9948),c=l.getHighWaterMark;var d=r(7214).q,h=d.ERR_INVALID_ARG_TYPE,p=d.ERR_METHOD_NOT_IMPLEMENTED,g=d.ERR_MULTIPLE_CALLBACK,m=d.ERR_STREAM_CANNOT_PIPE,b=d.ERR_STREAM_DESTROYED,v=d.ERR_STREAM_NULL_VALUES,_=d.ERR_STREAM_WRITE_AFTER_END,y=d.ERR_UNKNOWN_ENCODING;var w=u.errorOrDestroy;r(4124)(Writable,a);function nop(){}function WritableState(e,t,i){n=n||r(1359);e=e||{};if(typeof i!=="boolean")i=t instanceof n;this.objectMode=!!e.objectMode;if(i)this.objectMode=this.objectMode||!!e.writableObjectMode;this.highWaterMark=c(this,e,"writableHighWaterMark",i);this.finalCalled=false;this.needDrain=false;this.ending=false;this.ended=false;this.finished=false;this.destroyed=false;var a=e.decodeStrings===false;this.decodeStrings=!a;this.defaultEncoding=e.defaultEncoding||"utf8";this.length=0;this.writing=false;this.corked=0;this.sync=true;this.bufferProcessing=false;this.onwrite=function(e){onwrite(t,e)};this.writecb=null;this.writelen=0;this.bufferedRequest=null;this.lastBufferedRequest=null;this.pendingcb=0;this.prefinished=false;this.errorEmitted=false;this.emitClose=e.emitClose!==false;this.autoDestroy=!!e.autoDestroy;this.bufferedRequestCount=0;this.corkedRequestsFree=new CorkedRequest(this)}WritableState.prototype.getBuffer=function getBuffer(){var e=this.bufferedRequest;var t=[];while(e){t.push(e);e=e.next}return t};(function(){try{Object.defineProperty(WritableState.prototype,"buffer",{get:i.deprecate((function writableStateBufferGetter(){return 
[... remainder of the removed single-line minified bundle not shown ...]
+import{createRequire as e}from"module";[... regenerated single-line minified bundle continues: vendored @actions/core (workflow commands, file commands, environment and path helpers, the job-summary builder, and the OIDC client), @actions/http-client (basic/bearer/PAT credential handlers, proxy detection and bypass, redirect and retry handling), and a stream stack of readable-stream (Readable, Writable, Duplex, Transform, PassThrough, finished, pipeline), bl/BufferList, once, end-of-stream, and inherits ...]
destroyer(e,a,s,(function(e){if(!i)i=e;if(e)o.forEach(call);if(a)return;o.forEach(call);n(i)}))}));return t.reduce(pipe)}e.exports=pipeline},9948:(e,t,r)=>{var n=r(7214).q.ERR_INVALID_OPT_VALUE;function highWaterMarkFrom(e,t,r){return e.highWaterMark!=null?e.highWaterMark:t?e[r]:null}function getHighWaterMark(e,t,r,i){var a=highWaterMarkFrom(t,i,r);if(a!=null){if(!(isFinite(a)&&Math.floor(a)===a)||a<0){var o=i?r:"highWaterMark";throw new n(o,a)}return Math.floor(a)}return e.objectMode?16:16*1024}e.exports={getHighWaterMark:getHighWaterMark}},2387:(e,t,r)=>{e.exports=r(2781)},1642:(e,t,r)=>{var n=r(2781);if(process.env.READABLE_STREAM==="disable"&&n){e.exports=n.Readable;Object.assign(e.exports,n);e.exports.Stream=n}else{t=e.exports=r(1433);t.Stream=n||t;t.Readable=t;t.Writable=r(6993);t.Duplex=r(1359);t.Transform=r(4415);t.PassThrough=r(1542);t.finished=r(6080);t.pipeline=r(6989)}},1867:(e,t,r)=>{ /*! safe-buffer. MIT License. Feross Aboukhadijeh */ -var n=r(4300);var i=n.Buffer;function copyProps(e,t){for(var r in e){t[r]=e[r]}}if(i.from&&i.alloc&&i.allocUnsafe&&i.allocUnsafeSlow){e.exports=n}else{copyProps(n,t);t.Buffer=SafeBuffer}function SafeBuffer(e,t,r){return i(e,t,r)}SafeBuffer.prototype=Object.create(i.prototype);copyProps(i,SafeBuffer);SafeBuffer.from=function(e,t,r){if(typeof e==="number"){throw new TypeError("Argument must not be a number")}return i(e,t,r)};SafeBuffer.alloc=function(e,t,r){if(typeof e!=="number"){throw new TypeError("Argument must be a number")}var n=i(e);if(t!==undefined){if(typeof r==="string"){n.fill(t,r)}else{n.fill(t)}}else{n.fill(0)}return n};SafeBuffer.allocUnsafe=function(e){if(typeof e!=="number"){throw new TypeError("Argument must be a number")}return i(e)};SafeBuffer.allocUnsafeSlow=function(e){if(typeof e!=="number"){throw new TypeError("Argument must be a number")}return n.SlowBuffer(e)}},4841:(e,t,r)=>{var n=r(1867).Buffer;var i=n.isEncoding||function(e){e=""+e;switch(e&&e.toLowerCase()){case"hex":case"utf8":case"utf-8":case"ascii":case"binary":case"base64":case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":case"raw":return true;default:return false}};function _normalizeEncoding(e){if(!e)return"utf8";var t;while(true){switch(e){case"utf8":case"utf-8":return"utf8";case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return"utf16le";case"latin1":case"binary":return"latin1";case"base64":case"ascii":case"hex":return e;default:if(t)return;e=(""+e).toLowerCase();t=true}}}function normalizeEncoding(e){var t=_normalizeEncoding(e);if(typeof t!=="string"&&(n.isEncoding===i||!i(e)))throw new Error("Unknown encoding: "+e);return t||e}t.s=StringDecoder;function StringDecoder(e){this.encoding=normalizeEncoding(e);var t;switch(this.encoding){case"utf16le":this.text=utf16Text;this.end=utf16End;t=4;break;case"utf8":this.fillLast=utf8FillLast;t=4;break;case"base64":this.text=base64Text;this.end=base64End;t=3;break;default:this.write=simpleWrite;this.end=simpleEnd;return}this.lastNeed=0;this.lastTotal=0;this.lastChar=n.allocUnsafe(t)}StringDecoder.prototype.write=function(e){if(e.length===0)return"";var t;var r;if(this.lastNeed){t=this.fillLast(e);if(t===undefined)return"";r=this.lastNeed;this.lastNeed=0}else{r=0}if(r>5===6)return 2;else if(e>>4===14)return 3;else if(e>>3===30)return 4;return e>>6===2?-1:-2}function utf8CheckIncomplete(e,t,r){var n=t.length-1;if(n=0){if(i>0)e.lastNeed=i-1;return i}if(--n=0){if(i>0)e.lastNeed=i-2;return i}if(--n=0){if(i>0){if(i===2)i=0;else e.lastNeed=i-3}return i}return 0}function 
utf8CheckExtraBytes(e,t,r){if((t[0]&192)!==128){e.lastNeed=0;return"�"}if(e.lastNeed>1&&t.length>1){if((t[1]&192)!==128){e.lastNeed=1;return"�"}if(e.lastNeed>2&&t.length>2){if((t[2]&192)!==128){e.lastNeed=2;return"�"}}}}function utf8FillLast(e){var t=this.lastTotal-this.lastNeed;var r=utf8CheckExtraBytes(this,e,t);if(r!==undefined)return r;if(this.lastNeed<=e.length){e.copy(this.lastChar,t,0,this.lastNeed);return this.lastChar.toString(this.encoding,0,this.lastTotal)}e.copy(this.lastChar,t,0,e.length);this.lastNeed-=e.length}function utf8Text(e,t){var r=utf8CheckIncomplete(this,e,t);if(!this.lastNeed)return e.toString("utf8",t);this.lastTotal=r;var n=e.length-(r-this.lastNeed);e.copy(this.lastChar,0,n);return e.toString("utf8",t,n)}function utf8End(e){var t=e&&e.length?this.write(e):"";if(this.lastNeed)return t+"�";return t}function utf16Text(e,t){if((e.length-t)%2===0){var r=e.toString("utf16le",t);if(r){var n=r.charCodeAt(r.length-1);if(n>=55296&&n<=56319){this.lastNeed=2;this.lastTotal=4;this.lastChar[0]=e[e.length-2];this.lastChar[1]=e[e.length-1];return r.slice(0,-1)}}return r}this.lastNeed=1;this.lastTotal=2;this.lastChar[0]=e[e.length-1];return e.toString("utf16le",t,e.length-1)}function utf16End(e){var t=e&&e.length?this.write(e):"";if(this.lastNeed){var r=this.lastTotal-this.lastNeed;return t+this.lastChar.toString("utf16le",0,r)}return t}function base64Text(e,t){var r=(e.length-t)%3;if(r===0)return e.toString("base64",t);this.lastNeed=3-r;this.lastTotal=3;if(r===1){this.lastChar[0]=e[e.length-1]}else{this.lastChar[0]=e[e.length-2];this.lastChar[1]=e[e.length-1]}return e.toString("base64",t,e.length-r)}function base64End(e){var t=e&&e.length?this.write(e):"";if(this.lastNeed)return t+this.lastChar.toString("base64",0,3-this.lastNeed);return t}function simpleWrite(e){return e.toString(this.encoding)}function simpleEnd(e){return e&&e.length?this.write(e):""}},7882:(e,t,r)=>{var n=r(3837);var i=r(336);var a=r(8860);var o=r(1642).Writable;var s=r(1642).PassThrough;var noop=function(){};var overflow=function(e){e&=511;return e&&512-e};var emptyStream=function(e,t){var r=new Source(e,t);r.end();return r};var mixinPax=function(e,t){if(t.path)e.name=t.path;if(t.linkpath)e.linkname=t.linkpath;if(t.size)e.size=parseInt(t.size,10);e.pax=t;return e};var Source=function(e,t){this._parent=e;this.offset=t;s.call(this,{autoDestroy:false})};n.inherits(Source,s);Source.prototype.destroy=function(e){this._parent.destroy(e)};var Extract=function(e){if(!(this instanceof Extract))return new Extract(e);o.call(this,e);e=e||{};this._offset=0;this._buffer=i();this._missing=0;this._partial=false;this._onparse=noop;this._header=null;this._stream=null;this._overflow=null;this._cb=null;this._locked=false;this._destroyed=false;this._pax=null;this._paxGlobal=null;this._gnuLongPath=null;this._gnuLongLinkPath=null;var t=this;var r=t._buffer;var oncontinue=function(){t._continue()};var onunlock=function(e){t._locked=false;if(e)return t.destroy(e);if(!t._stream)oncontinue()};var onstreamend=function(){t._stream=null;var e=overflow(t._header.size);if(e)t._parse(e,ondrain);else t._parse(512,onheader);if(!t._locked)oncontinue()};var ondrain=function(){t._buffer.consume(overflow(t._header.size));t._parse(512,onheader);oncontinue()};var onpaxglobalheader=function(){var e=t._header.size;t._paxGlobal=a.decodePax(r.slice(0,e));r.consume(e);onstreamend()};var onpaxheader=function(){var 
e=t._header.size;t._pax=a.decodePax(r.slice(0,e));if(t._paxGlobal)t._pax=Object.assign({},t._paxGlobal,t._pax);r.consume(e);onstreamend()};var ongnulongpath=function(){var n=t._header.size;this._gnuLongPath=a.decodeLongPath(r.slice(0,n),e.filenameEncoding);r.consume(n);onstreamend()};var ongnulonglinkpath=function(){var n=t._header.size;this._gnuLongLinkPath=a.decodeLongPath(r.slice(0,n),e.filenameEncoding);r.consume(n);onstreamend()};var onheader=function(){var n=t._offset;var i;try{i=t._header=a.decode(r.slice(0,512),e.filenameEncoding,e.allowUnknownFormat)}catch(e){t.emit("error",e)}r.consume(512);if(!i){t._parse(512,onheader);oncontinue();return}if(i.type==="gnu-long-path"){t._parse(i.size,ongnulongpath);oncontinue();return}if(i.type==="gnu-long-link-path"){t._parse(i.size,ongnulonglinkpath);oncontinue();return}if(i.type==="pax-global-header"){t._parse(i.size,onpaxglobalheader);oncontinue();return}if(i.type==="pax-header"){t._parse(i.size,onpaxheader);oncontinue();return}if(t._gnuLongPath){i.name=t._gnuLongPath;t._gnuLongPath=null}if(t._gnuLongLinkPath){i.linkname=t._gnuLongLinkPath;t._gnuLongLinkPath=null}if(t._pax){t._header=i=mixinPax(i,t._pax);t._pax=null}t._locked=true;if(!i.size||i.type==="directory"){t._parse(512,onheader);t.emit("entry",i,emptyStream(t,n),onunlock);return}t._stream=new Source(t,n);t.emit("entry",i,t._stream,onunlock);t._parse(i.size,onstreamend);oncontinue()};this._onheader=onheader;this._parse(512,onheader)};n.inherits(Extract,o);Extract.prototype.destroy=function(e){if(this._destroyed)return;this._destroyed=true;if(e)this.emit("error",e);this.emit("close");if(this._stream)this._stream.emit("close")};Extract.prototype._parse=function(e,t){if(this._destroyed)return;this._offset+=e;this._missing=e;if(t===this._onheader)this._partial=false;this._onparse=t};Extract.prototype._continue=function(){if(this._destroyed)return;var e=this._cb;this._cb=noop;if(this._overflow)this._write(this._overflow,undefined,e);else e()};Extract.prototype._write=function(e,t,r){if(this._destroyed)return;var n=this._stream;var i=this._buffer;var a=this._missing;if(e.length)this._partial=true;if(e.lengtha){o=e.slice(a);e=e.slice(0,a)}if(n)n.end(e);else i.append(e);this._overflow=o;this._onparse()};Extract.prototype._final=function(e){if(this._partial)return this.destroy(new Error("Unexpected end of data"));e()};e.exports=Extract},8860:(e,t)=>{var r=Buffer.alloc;var n="0000000000000000000";var i="7777777777777777777";var a="0".charCodeAt(0);var o=Buffer.from("ustar\0","binary");var s=Buffer.from("00","binary");var u=Buffer.from("ustar ","binary");var l=Buffer.from(" \0","binary");var c=parseInt("7777",8);var d=257;var h=263;var clamp=function(e,t,r){if(typeof e!=="number")return r;e=~~e;if(e>=t)return t;if(e>=0)return e;e+=t;if(e>=0)return e;return 0};var toType=function(e){switch(e){case 0:return"file";case 1:return"link";case 2:return"symlink";case 3:return"character-device";case 4:return"block-device";case 5:return"directory";case 6:return"fifo";case 7:return"contiguous-file";case 72:return"pax-header";case 55:return"pax-global-header";case 27:return"gnu-long-link-path";case 28:case 30:return"gnu-long-path"}return null};var toTypeflag=function(e){switch(e){case"file":return 0;case"link":return 1;case"symlink":return 2;case"character-device":return 3;case"block-device":return 4;case"directory":return 5;case"fifo":return 6;case"contiguous-file":return 7;case"pax-header":return 72}return 0};var indexOf=function(e,t,r,n){for(;rt)return i.slice(0,t)+" ";else return n.slice(0,t-e.length)+e+" 
"};function parse256(e){var t;if(e[0]===128)t=true;else if(e[0]===255)t=false;else return null;var r=[];for(var n=e.length-1;n>0;n--){var i=e[n];if(t)r.push(i);else r.push(255-i)}var a=0;var o=r.length;for(n=0;n=Math.pow(10,r))r++;return t+r+e};t.decodeLongPath=function(e,t){return decodeStr(e,0,e.length,t)};t.encodePax=function(e){var t="";if(e.name)t+=addLength(" path="+e.name+"\n");if(e.linkname)t+=addLength(" linkpath="+e.linkname+"\n");var r=e.pax;if(r){for(var n in r){t+=addLength(" "+n+"="+r[n]+"\n")}}return Buffer.from(t)};t.decodePax=function(e){var t={};while(e.length){var r=0;while(r100){var u=n.indexOf("/");if(u===-1)return null;i+=i?"/"+n.slice(0,u):n.slice(0,u);n=n.slice(u+1)}if(Buffer.byteLength(n)>100||Buffer.byteLength(i)>155)return null;if(e.linkname&&Buffer.byteLength(e.linkname)>100)return null;t.write(n);t.write(encodeOct(e.mode&c,6),100);t.write(encodeOct(e.uid,6),108);t.write(encodeOct(e.gid,6),116);t.write(encodeOct(e.size,11),124);t.write(encodeOct(e.mtime.getTime()/1e3|0,11),136);t[156]=a+toTypeflag(e.type);if(e.linkname)t.write(e.linkname,157);o.copy(t,d);s.copy(t,h);if(e.uname)t.write(e.uname,265);if(e.gname)t.write(e.gname,297);t.write(encodeOct(e.devmajor||0,6),329);t.write(encodeOct(e.devminor||0,6),337);if(i)t.write(i,345);t.write(encodeOct(cksum(t),6),148);return t};t.decode=function(e,t,r){var n=e[156]===0?0:e[156]-a;var i=decodeStr(e,0,100,t);var s=decodeOct(e,100,8);var c=decodeOct(e,108,8);var p=decodeOct(e,116,8);var g=decodeOct(e,124,12);var m=decodeOct(e,136,12);var b=toType(n);var v=e[157]===0?null:decodeStr(e,157,100,t);var _=decodeStr(e,265,32);var y=decodeStr(e,297,32);var w=decodeOct(e,329,8);var R=decodeOct(e,337,8);var S=cksum(e);if(S===8*32)return null;if(S!==decodeOct(e,148,8))throw new Error("Invalid tar header. 
Maybe the tar is corrupted or it needs to be gunzipped?");if(o.compare(e,d,d+6)===0){if(e[345])i=decodeStr(e,345,155,t)+"/"+i}else if(u.compare(e,d,d+6)===0&&l.compare(e,h,h+2)===0){}else{if(!r){throw new Error("Invalid tar header: unknown format.")}}if(n===0&&i&&i[i.length-1]==="/")n=5;return{name:i,mode:s,uid:c,gid:p,size:g,mtime:new Date(1e3*m),type:b,linkname:v,uname:_,gname:y,devmajor:w,devminor:R}}},2283:(e,t,r)=>{t.extract=r(7882);t.pack=r(4930)},4930:(e,t,r)=>{var n=r(3186);var i=r(1205);var a=r(4124);var o=Buffer.alloc;var s=r(1642).Readable;var u=r(1642).Writable;var l=r(1576).StringDecoder;var c=r(8860);var d=parseInt("755",8);var h=parseInt("644",8);var p=o(1024);var noop=function(){};var overflow=function(e,t){t&=511;if(t)e.push(p.slice(0,512-t))};function modeToType(e){switch(e&n.S_IFMT){case n.S_IFBLK:return"block-device";case n.S_IFCHR:return"character-device";case n.S_IFDIR:return"directory";case n.S_IFIFO:return"fifo";case n.S_IFLNK:return"symlink"}return"file"}var Sink=function(e){u.call(this);this.written=0;this._to=e;this._destroyed=false};a(Sink,u);Sink.prototype._write=function(e,t,r){this.written+=e.length;if(this._to.push(e))return r();this._to._drain=r};Sink.prototype.destroy=function(){if(this._destroyed)return;this._destroyed=true;this.emit("close")};var LinkSink=function(){u.call(this);this.linkname="";this._decoder=new l("utf-8");this._destroyed=false};a(LinkSink,u);LinkSink.prototype._write=function(e,t,r){this.linkname+=this._decoder.write(e);r()};LinkSink.prototype.destroy=function(){if(this._destroyed)return;this._destroyed=true;this.emit("close")};var Void=function(){u.call(this);this._destroyed=false};a(Void,u);Void.prototype._write=function(e,t,r){r(new Error("No body allowed for this entry"))};Void.prototype.destroy=function(){if(this._destroyed)return;this._destroyed=true;this.emit("close")};var Pack=function(e){if(!(this instanceof Pack))return new Pack(e);s.call(this,e);this._drain=noop;this._finalized=false;this._finalizing=false;this._destroyed=false;this._stream=null};a(Pack,s);Pack.prototype.entry=function(e,t,r){if(this._stream)throw new Error("already piping an entry");if(this._finalized||this._destroyed)return;if(typeof t==="function"){r=t;t=null}if(!r)r=noop;var n=this;if(!e.size||e.type==="symlink")e.size=0;if(!e.type)e.type=modeToType(e.mode);if(!e.mode)e.mode=e.type==="directory"?d:h;if(!e.uid)e.uid=0;if(!e.gid)e.gid=0;if(!e.mtime)e.mtime=new Date;if(typeof t==="string")t=Buffer.from(t);if(Buffer.isBuffer(t)){e.size=t.length;this._encode(e);var a=this.push(t);overflow(n,e.size);if(a)process.nextTick(r);else this._drain=r;return new Void}if(e.type==="symlink"&&!e.linkname){var o=new LinkSink;i(o,(function(t){if(t){n.destroy();return r(t)}e.linkname=o.linkname;n._encode(e);r()}));return o}this._encode(e);if(e.type!=="file"&&e.type!=="contiguous-file"){process.nextTick(r);return new Void}var s=new Sink(this);this._stream=s;i(s,(function(t){n._stream=null;if(t){n.destroy();return r(t)}if(s.written!==e.size){n.destroy();return r(new Error("size mismatch"))}overflow(n,e.size);if(n._finalizing)n.finalize();r()}));return s};Pack.prototype.finalize=function(){if(this._stream){this._finalizing=true;return}if(this._finalized)return;this._finalized=true;this.push(p);this.push(null)};Pack.prototype.destroy=function(e){if(this._destroyed)return;this._destroyed=true;if(e)this.emit("error",e);this.emit("close");if(this._stream&&this._stream.destroy)this._stream.destroy()};Pack.prototype._encode=function(e){if(!e.pax){var 
t=c.encode(e);if(t){this.push(t);return}}this._encodePax(e)};Pack.prototype._encodePax=function(e){var t=c.encodePax({name:e.name,linkname:e.linkname,pax:e.pax});var r={name:"PaxHeader",mode:e.mode,uid:e.uid,gid:e.gid,size:t.length,mtime:e.mtime,type:"pax-header",linkname:e.linkname&&"PaxHeader",uname:e.uname,gname:e.gname,devmajor:e.devmajor,devminor:e.devminor};this.push(c.encode(r));this.push(t);overflow(this,t.length);r.size=e.size;r.type=e.type;this.push(c.encode(r))};Pack.prototype._read=function(e){var t=this._drain;this._drain=noop;t()};e.exports=Pack},4294:(e,t,r)=>{e.exports=r(4219)},4219:(e,t,r)=>{var n=r(1808);var i=r(4404);var a=r(3685);var o=r(5687);var s=r(2361);var u=r(9491);var l=r(3837);t.httpOverHttp=httpOverHttp;t.httpsOverHttp=httpsOverHttp;t.httpOverHttps=httpOverHttps;t.httpsOverHttps=httpsOverHttps;function httpOverHttp(e){var t=new TunnelingAgent(e);t.request=a.request;return t}function httpsOverHttp(e){var t=new TunnelingAgent(e);t.request=a.request;t.createSocket=createSecureSocket;t.defaultPort=443;return t}function httpOverHttps(e){var t=new TunnelingAgent(e);t.request=o.request;return t}function httpsOverHttps(e){var t=new TunnelingAgent(e);t.request=o.request;t.createSocket=createSecureSocket;t.defaultPort=443;return t}function TunnelingAgent(e){var t=this;t.options=e||{};t.proxyOptions=t.options.proxy||{};t.maxSockets=t.options.maxSockets||a.Agent.defaultMaxSockets;t.requests=[];t.sockets=[];t.on("free",(function onFree(e,r,n,i){var a=toOptions(r,n,i);for(var o=0,s=t.requests.length;o=this.maxSockets){i.requests.push(a);return}i.createSocket(a,(function(t){t.on("free",onFree);t.on("close",onCloseOrRemove);t.on("agentRemove",onCloseOrRemove);e.onSocket(t);function onFree(){i.emit("free",t,a)}function onCloseOrRemove(e){i.removeSocket(t);t.removeListener("free",onFree);t.removeListener("close",onCloseOrRemove);t.removeListener("agentRemove",onCloseOrRemove)}}))};TunnelingAgent.prototype.createSocket=function createSocket(e,t){var r=this;var n={};r.sockets.push(n);var i=mergeOptions({},r.proxyOptions,{method:"CONNECT",path:e.host+":"+e.port,agent:false,headers:{host:e.host+":"+e.port}});if(e.localAddress){i.localAddress=e.localAddress}if(i.proxyAuth){i.headers=i.headers||{};i.headers["Proxy-Authorization"]="Basic "+new Buffer(i.proxyAuth).toString("base64")}c("making CONNECT request");var a=r.request(i);a.useChunkedEncodingByDefault=false;a.once("response",onResponse);a.once("upgrade",onUpgrade);a.once("connect",onConnect);a.once("error",onError);a.end();function onResponse(e){e.upgrade=true}function onUpgrade(e,t,r){process.nextTick((function(){onConnect(e,t,r)}))}function onConnect(i,o,s){a.removeAllListeners();o.removeAllListeners();if(i.statusCode!==200){c("tunneling socket could not be established, statusCode=%d",i.statusCode);o.destroy();var u=new Error("tunneling socket could not be established, "+"statusCode="+i.statusCode);u.code="ECONNRESET";e.request.emit("error",u);r.removeSocket(n);return}if(s.length>0){c("got illegal response body from proxy");o.destroy();var u=new Error("got illegal response body from proxy");u.code="ECONNRESET";e.request.emit("error",u);r.removeSocket(n);return}c("tunneling connection has established");r.sockets[r.sockets.indexOf(n)]=o;return t(o)}function onError(t){a.removeAllListeners();c("tunneling socket could not be established, cause=%s\n",t.message,t.stack);var i=new Error("tunneling socket could not be established, 
"+"cause="+t.message);i.code="ECONNRESET";e.request.emit("error",i);r.removeSocket(n)}};TunnelingAgent.prototype.removeSocket=function removeSocket(e){var t=this.sockets.indexOf(e);if(t===-1){return}this.sockets.splice(t,1);var r=this.requests.shift();if(r){this.createSocket(r,(function(e){r.request.onSocket(e)}))}};function createSecureSocket(e,t){var r=this;TunnelingAgent.prototype.createSocket.call(r,e,(function(n){var a=e.request.getHeader("host");var o=mergeOptions({},r.options,{socket:n,servername:a?a.replace(/:.*$/,""):e.host});var s=i.connect(0,o);r.sockets[r.sockets.indexOf(n)]=s;t(s)}))}function toOptions(e,t,r){if(typeof e==="string"){return{host:e,port:t,localAddress:r}}return e}function mergeOptions(e){for(var t=1,r=arguments.length;t{e.exports=r(3837).deprecate},5840:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});Object.defineProperty(t,"v1",{enumerable:true,get:function(){return n.default}});Object.defineProperty(t,"v3",{enumerable:true,get:function(){return i.default}});Object.defineProperty(t,"v4",{enumerable:true,get:function(){return a.default}});Object.defineProperty(t,"v5",{enumerable:true,get:function(){return o.default}});Object.defineProperty(t,"NIL",{enumerable:true,get:function(){return s.default}});Object.defineProperty(t,"version",{enumerable:true,get:function(){return u.default}});Object.defineProperty(t,"validate",{enumerable:true,get:function(){return l.default}});Object.defineProperty(t,"stringify",{enumerable:true,get:function(){return c.default}});Object.defineProperty(t,"parse",{enumerable:true,get:function(){return d.default}});var n=_interopRequireDefault(r(8628));var i=_interopRequireDefault(r(6409));var a=_interopRequireDefault(r(5122));var o=_interopRequireDefault(r(9120));var s=_interopRequireDefault(r(5332));var u=_interopRequireDefault(r(1595));var l=_interopRequireDefault(r(6900));var c=_interopRequireDefault(r(8950));var d=_interopRequireDefault(r(4848));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}},4569:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6113));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function md5(e){if(Array.isArray(e)){e=Buffer.from(e)}else if(typeof e==="string"){e=Buffer.from(e,"utf8")}return n.default.createHash("md5").update(e).digest()}var i=md5;t["default"]=i},5332:(e,t)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var r="00000000-0000-0000-0000-000000000000";t["default"]=r},4848:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6900));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function parse(e){if(!(0,n.default)(e)){throw TypeError("Invalid UUID")}let t;const r=new Uint8Array(16);r[0]=(t=parseInt(e.slice(0,8),16))>>>24;r[1]=t>>>16&255;r[2]=t>>>8&255;r[3]=t&255;r[4]=(t=parseInt(e.slice(9,13),16))>>>8;r[5]=t&255;r[6]=(t=parseInt(e.slice(14,18),16))>>>8;r[7]=t&255;r[8]=(t=parseInt(e.slice(19,23),16))>>>8;r[9]=t&255;r[10]=(t=parseInt(e.slice(24,36),16))/1099511627776&255;r[11]=t/4294967296&255;r[12]=t>>>24&255;r[13]=t>>>16&255;r[14]=t>>>8&255;r[15]=t&255;return r}var i=parse;t["default"]=i},814:(e,t)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var 
r=/^(?:[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}|00000000-0000-0000-0000-000000000000)$/i;t["default"]=r},807:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=rng;var n=_interopRequireDefault(r(6113));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}const i=new Uint8Array(256);let a=i.length;function rng(){if(a>i.length-16){n.default.randomFillSync(i);a=0}return i.slice(a,a+=16)}},5274:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6113));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function sha1(e){if(Array.isArray(e)){e=Buffer.from(e)}else if(typeof e==="string"){e=Buffer.from(e,"utf8")}return n.default.createHash("sha1").update(e).digest()}var i=sha1;t["default"]=i},8950:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6900));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}const i=[];for(let e=0;e<256;++e){i.push((e+256).toString(16).substr(1))}function stringify(e,t=0){const r=(i[e[t+0]]+i[e[t+1]]+i[e[t+2]]+i[e[t+3]]+"-"+i[e[t+4]]+i[e[t+5]]+"-"+i[e[t+6]]+i[e[t+7]]+"-"+i[e[t+8]]+i[e[t+9]]+"-"+i[e[t+10]]+i[e[t+11]]+i[e[t+12]]+i[e[t+13]]+i[e[t+14]]+i[e[t+15]]).toLowerCase();if(!(0,n.default)(r)){throw TypeError("Stringified UUID is invalid")}return r}var a=stringify;t["default"]=a},8628:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(807));var i=_interopRequireDefault(r(8950));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}let a;let o;let s=0;let u=0;function v1(e,t,r){let l=t&&r||0;const c=t||new Array(16);e=e||{};let d=e.node||a;let h=e.clockseq!==undefined?e.clockseq:o;if(d==null||h==null){const t=e.random||(e.rng||n.default)();if(d==null){d=a=[t[0]|1,t[1],t[2],t[3],t[4],t[5]]}if(h==null){h=o=(t[6]<<8|t[7])&16383}}let p=e.msecs!==undefined?e.msecs:Date.now();let g=e.nsecs!==undefined?e.nsecs:u+1;const m=p-s+(g-u)/1e4;if(m<0&&e.clockseq===undefined){h=h+1&16383}if((m<0||p>s)&&e.nsecs===undefined){g=0}if(g>=1e4){throw new Error("uuid.v1(): Can't create more than 10M uuids/sec")}s=p;u=g;o=h;p+=122192928e5;const b=((p&268435455)*1e4+g)%4294967296;c[l++]=b>>>24&255;c[l++]=b>>>16&255;c[l++]=b>>>8&255;c[l++]=b&255;const v=p/4294967296*1e4&268435455;c[l++]=v>>>8&255;c[l++]=v&255;c[l++]=v>>>24&15|16;c[l++]=v>>>16&255;c[l++]=h>>>8|128;c[l++]=h&255;for(let e=0;e<6;++e){c[l+e]=d[e]}return t||(0,i.default)(c)}var l=v1;t["default"]=l},6409:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(5998));var i=_interopRequireDefault(r(4569));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}const a=(0,n.default)("v3",48,i.default);var o=a;t["default"]=o},5998:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=_default;t.URL=t.DNS=void 0;var n=_interopRequireDefault(r(8950));var i=_interopRequireDefault(r(4848));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function stringToBytes(e){e=unescape(encodeURIComponent(e));const t=[];for(let r=0;r{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(807));var i=_interopRequireDefault(r(8950));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function v4(e,t,r){e=e||{};const 
a=e.random||(e.rng||n.default)();a[6]=a[6]&15|64;a[8]=a[8]&63|128;if(t){r=r||0;for(let e=0;e<16;++e){t[r+e]=a[e]}return t}return(0,i.default)(a)}var a=v4;t["default"]=a},9120:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(5998));var i=_interopRequireDefault(r(5274));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}const a=(0,n.default)("v5",80,i.default);var o=a;t["default"]=o},6900:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(814));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function validate(e){return typeof e==="string"&&n.default.test(e)}var i=validate;t["default"]=i},1595:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6900));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function version(e){if(!(0,n.default)(e)){throw TypeError("Invalid UUID")}return parseInt(e.substr(14,1),16)}var i=version;t["default"]=i},2940:e=>{e.exports=wrappy;function wrappy(e,t){if(e&&t)return wrappy(e)(t);if(typeof e!=="function")throw new TypeError("need wrapper function");Object.keys(e).forEach((function(t){wrapper[t]=e[t]}));return wrapper;function wrapper(){var t=new Array(arguments.length);for(var r=0;r{t.exports=e(import.meta.url)("assert")},4300:t=>{t.exports=e(import.meta.url)("buffer")},2057:t=>{t.exports=e(import.meta.url)("constants")},6113:t=>{t.exports=e(import.meta.url)("crypto")},2361:t=>{t.exports=e(import.meta.url)("events")},7147:t=>{t.exports=e(import.meta.url)("fs")},3685:t=>{t.exports=e(import.meta.url)("http")},5687:t=>{t.exports=e(import.meta.url)("https")},1808:t=>{t.exports=e(import.meta.url)("net")},7718:t=>{t.exports=e(import.meta.url)("node:child_process")},7561:t=>{t.exports=e(import.meta.url)("node:fs")},7742:t=>{t.exports=e(import.meta.url)("node:process")},4492:t=>{t.exports=e(import.meta.url)("node:stream")},5628:t=>{t.exports=e(import.meta.url)("node:zlib")},2037:t=>{t.exports=e(import.meta.url)("os")},1017:t=>{t.exports=e(import.meta.url)("path")},2781:t=>{t.exports=e(import.meta.url)("stream")},1576:t=>{t.exports=e(import.meta.url)("string_decoder")},4404:t=>{t.exports=e(import.meta.url)("tls")},3837:t=>{t.exports=e(import.meta.url)("util")},6955:(e,t,r)=>{r.a(e,(async e=>{var t=r(7718);var n=r(7742);var i=r(4492);var a=r(7561);var o=r(5628);var s=r(2283);var u=r(2186);const l=u.getInput("container",{required:true});const c=JSON.parse(u.getInput("error-log-paths",{required:true}));const d=u.getInput("log-tarball-prefix",{required:true});const h=u.getInput("tests-label",{required:true});const p=u.getInput("test-timeout",{required:true});try{if(t.spawnSync("docker",["run","--name","base","-v",`${n.cwd()}/build.tar.zst:/build.tar.zst`,"--workdir","/__w/leap/leap",l,"sh","-c","zstdcat /build.tar.zst | tar x"],{stdio:"inherit"}).status)throw new Error("Failed to create base container");if(t.spawnSync("docker",["commit","base","baseimage"],{stdio:"inherit"}).status)throw new Error("Failed to create base image");if(t.spawnSync("docker",["rm","base"],{stdio:"inherit"}).status)throw new Error("Failed to remove base container");const e=t.spawnSync("docker",["run","--rm","baseimage","bash","-e","-o","pipefail","-c",`cd build; ctest -L '${h}' --show-only=json-v1`]);if(e.status)throw new Error("Failed to discover tests with label");const r=JSON.parse(e.stdout).tests;let g=[];r.forEach((e=>{g.push(new 
Promise((r=>{t.spawn("docker",["run","--security-opt","seccomp=unconfined","-e","GITHUB_ACTIONS=True","--name",e.name,"--init","baseimage","bash","-c",`cd build; ctest --output-on-failure -R '^${e.name}$' --timeout ${p}`],{stdio:"inherit"}).on("close",(e=>r(e)))})))}));const m=await Promise.all(g);for(let e=0;e{if(!e.name.startsWith(`__w/leap/leap/build`)){t.on("end",(()=>r()));t.resume();return}e.name=e.name.substring(`__w/leap/leap/`.length);if(e.name!=="build/"&&c.filter((t=>e.name.startsWith(t))).length===0){t.on("end",(()=>r()));t.resume();return}t.pipe(l.entry(e,r))})).on("finish",(()=>{l.finalize()}));t.spawn("docker",["export",r[e].name]).stdout.pipe(n);i.promises.pipeline(l,o.createGzip(),a.createWriteStream(`${d}-${r[e].name}-logs.tar.gz`))}}catch(e){u.setFailed(`Uncaught exception ${e.message}`)}e()}),1)}};var r={};function __nccwpck_require__(e){var n=r[e];if(n!==undefined){return n.exports}var i=r[e]={exports:{}};var a=true;try{t[e].call(i.exports,i,i.exports,__nccwpck_require__);a=false}finally{if(a)delete r[e]}return i.exports}(()=>{var e=typeof Symbol==="function"?Symbol("webpack then"):"__webpack_then__";var t=typeof Symbol==="function"?Symbol("webpack exports"):"__webpack_exports__";var completeQueue=e=>{if(e){e.forEach((e=>e.r--));e.forEach((e=>e.r--?e.r++:e()))}};var completeFunction=e=>!--e.r&&e();var queueFunction=(e,t)=>e?e.push(t):completeFunction(t);var wrapDeps=r=>r.map((r=>{if(r!==null&&typeof r==="object"){if(r[e])return r;if(r.then){var n=[];r.then((e=>{i[t]=e;completeQueue(n);n=0}));var i={};i[e]=(e,t)=>(queueFunction(n,e),r["catch"](t));return i}}var a={};a[e]=e=>completeFunction(e);a[t]=r;return a}));__nccwpck_require__.a=(r,n,i)=>{var a=i&&[];var o=r.exports;var s;var u;var l;var c=true;var d=false;var whenAll=(t,r,n)=>{if(d)return;d=true;r.r+=t.length;t.map(((t,i)=>t[e](r,n)));d=false};var h=new Promise(((e,t)=>{l=t;u=()=>(e(o),completeQueue(a),a=0)}));h[t]=o;h[e]=(e,t)=>{if(c){return completeFunction(e)}if(s)whenAll(s,e,t);queueFunction(a,e);h["catch"](t)};r.exports=h;n((e=>{if(!e)return u();s=wrapDeps(e);var r,n;var i=new Promise(((e,i)=>{r=()=>e(n=s.map((e=>e[t])));r.r=0;whenAll(s,r,i)}));return r.r?i:n})).then(u,l);c=false}})();if(typeof __nccwpck_require__!=="undefined")__nccwpck_require__.ab=new URL(".",import.meta.url).pathname.slice(import.meta.url.match(/^file:\/\/\/\w:/)?1:0,-1)+"/";var n=__nccwpck_require__(6955);n=await n; \ No newline at end of file +var n=r(4300);var i=n.Buffer;function copyProps(e,t){for(var r in e){t[r]=e[r]}}if(i.from&&i.alloc&&i.allocUnsafe&&i.allocUnsafeSlow){e.exports=n}else{copyProps(n,t);t.Buffer=SafeBuffer}function SafeBuffer(e,t,r){return i(e,t,r)}SafeBuffer.prototype=Object.create(i.prototype);copyProps(i,SafeBuffer);SafeBuffer.from=function(e,t,r){if(typeof e==="number"){throw new TypeError("Argument must not be a number")}return i(e,t,r)};SafeBuffer.alloc=function(e,t,r){if(typeof e!=="number"){throw new TypeError("Argument must be a number")}var n=i(e);if(t!==undefined){if(typeof r==="string"){n.fill(t,r)}else{n.fill(t)}}else{n.fill(0)}return n};SafeBuffer.allocUnsafe=function(e){if(typeof e!=="number"){throw new TypeError("Argument must be a number")}return i(e)};SafeBuffer.allocUnsafeSlow=function(e){if(typeof e!=="number"){throw new TypeError("Argument must be a number")}return n.SlowBuffer(e)}},4841:(e,t,r)=>{var n=r(1867).Buffer;var 
i=n.isEncoding||function(e){e=""+e;switch(e&&e.toLowerCase()){case"hex":case"utf8":case"utf-8":case"ascii":case"binary":case"base64":case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":case"raw":return true;default:return false}};function _normalizeEncoding(e){if(!e)return"utf8";var t;while(true){switch(e){case"utf8":case"utf-8":return"utf8";case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return"utf16le";case"latin1":case"binary":return"latin1";case"base64":case"ascii":case"hex":return e;default:if(t)return;e=(""+e).toLowerCase();t=true}}}function normalizeEncoding(e){var t=_normalizeEncoding(e);if(typeof t!=="string"&&(n.isEncoding===i||!i(e)))throw new Error("Unknown encoding: "+e);return t||e}t.s=StringDecoder;function StringDecoder(e){this.encoding=normalizeEncoding(e);var t;switch(this.encoding){case"utf16le":this.text=utf16Text;this.end=utf16End;t=4;break;case"utf8":this.fillLast=utf8FillLast;t=4;break;case"base64":this.text=base64Text;this.end=base64End;t=3;break;default:this.write=simpleWrite;this.end=simpleEnd;return}this.lastNeed=0;this.lastTotal=0;this.lastChar=n.allocUnsafe(t)}StringDecoder.prototype.write=function(e){if(e.length===0)return"";var t;var r;if(this.lastNeed){t=this.fillLast(e);if(t===undefined)return"";r=this.lastNeed;this.lastNeed=0}else{r=0}if(r>5===6)return 2;else if(e>>4===14)return 3;else if(e>>3===30)return 4;return e>>6===2?-1:-2}function utf8CheckIncomplete(e,t,r){var n=t.length-1;if(n=0){if(i>0)e.lastNeed=i-1;return i}if(--n=0){if(i>0)e.lastNeed=i-2;return i}if(--n=0){if(i>0){if(i===2)i=0;else e.lastNeed=i-3}return i}return 0}function utf8CheckExtraBytes(e,t,r){if((t[0]&192)!==128){e.lastNeed=0;return"�"}if(e.lastNeed>1&&t.length>1){if((t[1]&192)!==128){e.lastNeed=1;return"�"}if(e.lastNeed>2&&t.length>2){if((t[2]&192)!==128){e.lastNeed=2;return"�"}}}}function utf8FillLast(e){var t=this.lastTotal-this.lastNeed;var r=utf8CheckExtraBytes(this,e,t);if(r!==undefined)return r;if(this.lastNeed<=e.length){e.copy(this.lastChar,t,0,this.lastNeed);return this.lastChar.toString(this.encoding,0,this.lastTotal)}e.copy(this.lastChar,t,0,e.length);this.lastNeed-=e.length}function utf8Text(e,t){var r=utf8CheckIncomplete(this,e,t);if(!this.lastNeed)return e.toString("utf8",t);this.lastTotal=r;var n=e.length-(r-this.lastNeed);e.copy(this.lastChar,0,n);return e.toString("utf8",t,n)}function utf8End(e){var t=e&&e.length?this.write(e):"";if(this.lastNeed)return t+"�";return t}function utf16Text(e,t){if((e.length-t)%2===0){var r=e.toString("utf16le",t);if(r){var n=r.charCodeAt(r.length-1);if(n>=55296&&n<=56319){this.lastNeed=2;this.lastTotal=4;this.lastChar[0]=e[e.length-2];this.lastChar[1]=e[e.length-1];return r.slice(0,-1)}}return r}this.lastNeed=1;this.lastTotal=2;this.lastChar[0]=e[e.length-1];return e.toString("utf16le",t,e.length-1)}function utf16End(e){var t=e&&e.length?this.write(e):"";if(this.lastNeed){var r=this.lastTotal-this.lastNeed;return t+this.lastChar.toString("utf16le",0,r)}return t}function base64Text(e,t){var r=(e.length-t)%3;if(r===0)return e.toString("base64",t);this.lastNeed=3-r;this.lastTotal=3;if(r===1){this.lastChar[0]=e[e.length-1]}else{this.lastChar[0]=e[e.length-2];this.lastChar[1]=e[e.length-1]}return e.toString("base64",t,e.length-r)}function base64End(e){var t=e&&e.length?this.write(e):"";if(this.lastNeed)return t+this.lastChar.toString("base64",0,3-this.lastNeed);return t}function simpleWrite(e){return e.toString(this.encoding)}function simpleEnd(e){return e&&e.length?this.write(e):""}},7882:(e,t,r)=>{var n=r(3837);var i=r(336);var 
a=r(8860);var o=r(1642).Writable;var s=r(1642).PassThrough;var noop=function(){};var overflow=function(e){e&=511;return e&&512-e};var emptyStream=function(e,t){var r=new Source(e,t);r.end();return r};var mixinPax=function(e,t){if(t.path)e.name=t.path;if(t.linkpath)e.linkname=t.linkpath;if(t.size)e.size=parseInt(t.size,10);e.pax=t;return e};var Source=function(e,t){this._parent=e;this.offset=t;s.call(this,{autoDestroy:false})};n.inherits(Source,s);Source.prototype.destroy=function(e){this._parent.destroy(e)};var Extract=function(e){if(!(this instanceof Extract))return new Extract(e);o.call(this,e);e=e||{};this._offset=0;this._buffer=i();this._missing=0;this._partial=false;this._onparse=noop;this._header=null;this._stream=null;this._overflow=null;this._cb=null;this._locked=false;this._destroyed=false;this._pax=null;this._paxGlobal=null;this._gnuLongPath=null;this._gnuLongLinkPath=null;var t=this;var r=t._buffer;var oncontinue=function(){t._continue()};var onunlock=function(e){t._locked=false;if(e)return t.destroy(e);if(!t._stream)oncontinue()};var onstreamend=function(){t._stream=null;var e=overflow(t._header.size);if(e)t._parse(e,ondrain);else t._parse(512,onheader);if(!t._locked)oncontinue()};var ondrain=function(){t._buffer.consume(overflow(t._header.size));t._parse(512,onheader);oncontinue()};var onpaxglobalheader=function(){var e=t._header.size;t._paxGlobal=a.decodePax(r.slice(0,e));r.consume(e);onstreamend()};var onpaxheader=function(){var e=t._header.size;t._pax=a.decodePax(r.slice(0,e));if(t._paxGlobal)t._pax=Object.assign({},t._paxGlobal,t._pax);r.consume(e);onstreamend()};var ongnulongpath=function(){var n=t._header.size;this._gnuLongPath=a.decodeLongPath(r.slice(0,n),e.filenameEncoding);r.consume(n);onstreamend()};var ongnulonglinkpath=function(){var n=t._header.size;this._gnuLongLinkPath=a.decodeLongPath(r.slice(0,n),e.filenameEncoding);r.consume(n);onstreamend()};var onheader=function(){var n=t._offset;var i;try{i=t._header=a.decode(r.slice(0,512),e.filenameEncoding,e.allowUnknownFormat)}catch(e){t.emit("error",e)}r.consume(512);if(!i){t._parse(512,onheader);oncontinue();return}if(i.type==="gnu-long-path"){t._parse(i.size,ongnulongpath);oncontinue();return}if(i.type==="gnu-long-link-path"){t._parse(i.size,ongnulonglinkpath);oncontinue();return}if(i.type==="pax-global-header"){t._parse(i.size,onpaxglobalheader);oncontinue();return}if(i.type==="pax-header"){t._parse(i.size,onpaxheader);oncontinue();return}if(t._gnuLongPath){i.name=t._gnuLongPath;t._gnuLongPath=null}if(t._gnuLongLinkPath){i.linkname=t._gnuLongLinkPath;t._gnuLongLinkPath=null}if(t._pax){t._header=i=mixinPax(i,t._pax);t._pax=null}t._locked=true;if(!i.size||i.type==="directory"){t._parse(512,onheader);t.emit("entry",i,emptyStream(t,n),onunlock);return}t._stream=new Source(t,n);t.emit("entry",i,t._stream,onunlock);t._parse(i.size,onstreamend);oncontinue()};this._onheader=onheader;this._parse(512,onheader)};n.inherits(Extract,o);Extract.prototype.destroy=function(e){if(this._destroyed)return;this._destroyed=true;if(e)this.emit("error",e);this.emit("close");if(this._stream)this._stream.emit("close")};Extract.prototype._parse=function(e,t){if(this._destroyed)return;this._offset+=e;this._missing=e;if(t===this._onheader)this._partial=false;this._onparse=t};Extract.prototype._continue=function(){if(this._destroyed)return;var e=this._cb;this._cb=noop;if(this._overflow)this._write(this._overflow,undefined,e);else e()};Extract.prototype._write=function(e,t,r){if(this._destroyed)return;var n=this._stream;var i=this._buffer;var 
a=this._missing;if(e.length)this._partial=true;if(e.lengtha){o=e.slice(a);e=e.slice(0,a)}if(n)n.end(e);else i.append(e);this._overflow=o;this._onparse()};Extract.prototype._final=function(e){if(this._partial)return this.destroy(new Error("Unexpected end of data"));e()};e.exports=Extract},8860:(e,t)=>{var r=Buffer.alloc;var n="0000000000000000000";var i="7777777777777777777";var a="0".charCodeAt(0);var o=Buffer.from("ustar\0","binary");var s=Buffer.from("00","binary");var u=Buffer.from("ustar ","binary");var l=Buffer.from(" \0","binary");var c=parseInt("7777",8);var d=257;var h=263;var clamp=function(e,t,r){if(typeof e!=="number")return r;e=~~e;if(e>=t)return t;if(e>=0)return e;e+=t;if(e>=0)return e;return 0};var toType=function(e){switch(e){case 0:return"file";case 1:return"link";case 2:return"symlink";case 3:return"character-device";case 4:return"block-device";case 5:return"directory";case 6:return"fifo";case 7:return"contiguous-file";case 72:return"pax-header";case 55:return"pax-global-header";case 27:return"gnu-long-link-path";case 28:case 30:return"gnu-long-path"}return null};var toTypeflag=function(e){switch(e){case"file":return 0;case"link":return 1;case"symlink":return 2;case"character-device":return 3;case"block-device":return 4;case"directory":return 5;case"fifo":return 6;case"contiguous-file":return 7;case"pax-header":return 72}return 0};var indexOf=function(e,t,r,n){for(;rt)return i.slice(0,t)+" ";else return n.slice(0,t-e.length)+e+" "};function parse256(e){var t;if(e[0]===128)t=true;else if(e[0]===255)t=false;else return null;var r=[];for(var n=e.length-1;n>0;n--){var i=e[n];if(t)r.push(i);else r.push(255-i)}var a=0;var o=r.length;for(n=0;n=Math.pow(10,r))r++;return t+r+e};t.decodeLongPath=function(e,t){return decodeStr(e,0,e.length,t)};t.encodePax=function(e){var t="";if(e.name)t+=addLength(" path="+e.name+"\n");if(e.linkname)t+=addLength(" linkpath="+e.linkname+"\n");var r=e.pax;if(r){for(var n in r){t+=addLength(" "+n+"="+r[n]+"\n")}}return Buffer.from(t)};t.decodePax=function(e){var t={};while(e.length){var r=0;while(r100){var u=n.indexOf("/");if(u===-1)return null;i+=i?"/"+n.slice(0,u):n.slice(0,u);n=n.slice(u+1)}if(Buffer.byteLength(n)>100||Buffer.byteLength(i)>155)return null;if(e.linkname&&Buffer.byteLength(e.linkname)>100)return null;t.write(n);t.write(encodeOct(e.mode&c,6),100);t.write(encodeOct(e.uid,6),108);t.write(encodeOct(e.gid,6),116);t.write(encodeOct(e.size,11),124);t.write(encodeOct(e.mtime.getTime()/1e3|0,11),136);t[156]=a+toTypeflag(e.type);if(e.linkname)t.write(e.linkname,157);o.copy(t,d);s.copy(t,h);if(e.uname)t.write(e.uname,265);if(e.gname)t.write(e.gname,297);t.write(encodeOct(e.devmajor||0,6),329);t.write(encodeOct(e.devminor||0,6),337);if(i)t.write(i,345);t.write(encodeOct(cksum(t),6),148);return t};t.decode=function(e,t,r){var n=e[156]===0?0:e[156]-a;var i=decodeStr(e,0,100,t);var s=decodeOct(e,100,8);var c=decodeOct(e,108,8);var p=decodeOct(e,116,8);var g=decodeOct(e,124,12);var m=decodeOct(e,136,12);var b=toType(n);var v=e[157]===0?null:decodeStr(e,157,100,t);var _=decodeStr(e,265,32);var y=decodeStr(e,297,32);var w=decodeOct(e,329,8);var R=decodeOct(e,337,8);var S=cksum(e);if(S===8*32)return null;if(S!==decodeOct(e,148,8))throw new Error("Invalid tar header. 
Maybe the tar is corrupted or it needs to be gunzipped?");if(o.compare(e,d,d+6)===0){if(e[345])i=decodeStr(e,345,155,t)+"/"+i}else if(u.compare(e,d,d+6)===0&&l.compare(e,h,h+2)===0){}else{if(!r){throw new Error("Invalid tar header: unknown format.")}}if(n===0&&i&&i[i.length-1]==="/")n=5;return{name:i,mode:s,uid:c,gid:p,size:g,mtime:new Date(1e3*m),type:b,linkname:v,uname:_,gname:y,devmajor:w,devminor:R}}},2283:(e,t,r)=>{t.extract=r(7882);t.pack=r(4930)},4930:(e,t,r)=>{var n=r(3186);var i=r(1205);var a=r(4124);var o=Buffer.alloc;var s=r(1642).Readable;var u=r(1642).Writable;var l=r(1576).StringDecoder;var c=r(8860);var d=parseInt("755",8);var h=parseInt("644",8);var p=o(1024);var noop=function(){};var overflow=function(e,t){t&=511;if(t)e.push(p.slice(0,512-t))};function modeToType(e){switch(e&n.S_IFMT){case n.S_IFBLK:return"block-device";case n.S_IFCHR:return"character-device";case n.S_IFDIR:return"directory";case n.S_IFIFO:return"fifo";case n.S_IFLNK:return"symlink"}return"file"}var Sink=function(e){u.call(this);this.written=0;this._to=e;this._destroyed=false};a(Sink,u);Sink.prototype._write=function(e,t,r){this.written+=e.length;if(this._to.push(e))return r();this._to._drain=r};Sink.prototype.destroy=function(){if(this._destroyed)return;this._destroyed=true;this.emit("close")};var LinkSink=function(){u.call(this);this.linkname="";this._decoder=new l("utf-8");this._destroyed=false};a(LinkSink,u);LinkSink.prototype._write=function(e,t,r){this.linkname+=this._decoder.write(e);r()};LinkSink.prototype.destroy=function(){if(this._destroyed)return;this._destroyed=true;this.emit("close")};var Void=function(){u.call(this);this._destroyed=false};a(Void,u);Void.prototype._write=function(e,t,r){r(new Error("No body allowed for this entry"))};Void.prototype.destroy=function(){if(this._destroyed)return;this._destroyed=true;this.emit("close")};var Pack=function(e){if(!(this instanceof Pack))return new Pack(e);s.call(this,e);this._drain=noop;this._finalized=false;this._finalizing=false;this._destroyed=false;this._stream=null};a(Pack,s);Pack.prototype.entry=function(e,t,r){if(this._stream)throw new Error("already piping an entry");if(this._finalized||this._destroyed)return;if(typeof t==="function"){r=t;t=null}if(!r)r=noop;var n=this;if(!e.size||e.type==="symlink")e.size=0;if(!e.type)e.type=modeToType(e.mode);if(!e.mode)e.mode=e.type==="directory"?d:h;if(!e.uid)e.uid=0;if(!e.gid)e.gid=0;if(!e.mtime)e.mtime=new Date;if(typeof t==="string")t=Buffer.from(t);if(Buffer.isBuffer(t)){e.size=t.length;this._encode(e);var a=this.push(t);overflow(n,e.size);if(a)process.nextTick(r);else this._drain=r;return new Void}if(e.type==="symlink"&&!e.linkname){var o=new LinkSink;i(o,(function(t){if(t){n.destroy();return r(t)}e.linkname=o.linkname;n._encode(e);r()}));return o}this._encode(e);if(e.type!=="file"&&e.type!=="contiguous-file"){process.nextTick(r);return new Void}var s=new Sink(this);this._stream=s;i(s,(function(t){n._stream=null;if(t){n.destroy();return r(t)}if(s.written!==e.size){n.destroy();return r(new Error("size mismatch"))}overflow(n,e.size);if(n._finalizing)n.finalize();r()}));return s};Pack.prototype.finalize=function(){if(this._stream){this._finalizing=true;return}if(this._finalized)return;this._finalized=true;this.push(p);this.push(null)};Pack.prototype.destroy=function(e){if(this._destroyed)return;this._destroyed=true;if(e)this.emit("error",e);this.emit("close");if(this._stream&&this._stream.destroy)this._stream.destroy()};Pack.prototype._encode=function(e){if(!e.pax){var 
t=c.encode(e);if(t){this.push(t);return}}this._encodePax(e)};Pack.prototype._encodePax=function(e){var t=c.encodePax({name:e.name,linkname:e.linkname,pax:e.pax});var r={name:"PaxHeader",mode:e.mode,uid:e.uid,gid:e.gid,size:t.length,mtime:e.mtime,type:"pax-header",linkname:e.linkname&&"PaxHeader",uname:e.uname,gname:e.gname,devmajor:e.devmajor,devminor:e.devminor};this.push(c.encode(r));this.push(t);overflow(this,t.length);r.size=e.size;r.type=e.type;this.push(c.encode(r))};Pack.prototype._read=function(e){var t=this._drain;this._drain=noop;t()};e.exports=Pack},4294:(e,t,r)=>{e.exports=r(4219)},4219:(e,t,r)=>{var n=r(1808);var i=r(4404);var a=r(3685);var o=r(5687);var s=r(2361);var u=r(9491);var l=r(3837);t.httpOverHttp=httpOverHttp;t.httpsOverHttp=httpsOverHttp;t.httpOverHttps=httpOverHttps;t.httpsOverHttps=httpsOverHttps;function httpOverHttp(e){var t=new TunnelingAgent(e);t.request=a.request;return t}function httpsOverHttp(e){var t=new TunnelingAgent(e);t.request=a.request;t.createSocket=createSecureSocket;t.defaultPort=443;return t}function httpOverHttps(e){var t=new TunnelingAgent(e);t.request=o.request;return t}function httpsOverHttps(e){var t=new TunnelingAgent(e);t.request=o.request;t.createSocket=createSecureSocket;t.defaultPort=443;return t}function TunnelingAgent(e){var t=this;t.options=e||{};t.proxyOptions=t.options.proxy||{};t.maxSockets=t.options.maxSockets||a.Agent.defaultMaxSockets;t.requests=[];t.sockets=[];t.on("free",(function onFree(e,r,n,i){var a=toOptions(r,n,i);for(var o=0,s=t.requests.length;o=this.maxSockets){i.requests.push(a);return}i.createSocket(a,(function(t){t.on("free",onFree);t.on("close",onCloseOrRemove);t.on("agentRemove",onCloseOrRemove);e.onSocket(t);function onFree(){i.emit("free",t,a)}function onCloseOrRemove(e){i.removeSocket(t);t.removeListener("free",onFree);t.removeListener("close",onCloseOrRemove);t.removeListener("agentRemove",onCloseOrRemove)}}))};TunnelingAgent.prototype.createSocket=function createSocket(e,t){var r=this;var n={};r.sockets.push(n);var i=mergeOptions({},r.proxyOptions,{method:"CONNECT",path:e.host+":"+e.port,agent:false,headers:{host:e.host+":"+e.port}});if(e.localAddress){i.localAddress=e.localAddress}if(i.proxyAuth){i.headers=i.headers||{};i.headers["Proxy-Authorization"]="Basic "+new Buffer(i.proxyAuth).toString("base64")}c("making CONNECT request");var a=r.request(i);a.useChunkedEncodingByDefault=false;a.once("response",onResponse);a.once("upgrade",onUpgrade);a.once("connect",onConnect);a.once("error",onError);a.end();function onResponse(e){e.upgrade=true}function onUpgrade(e,t,r){process.nextTick((function(){onConnect(e,t,r)}))}function onConnect(i,o,s){a.removeAllListeners();o.removeAllListeners();if(i.statusCode!==200){c("tunneling socket could not be established, statusCode=%d",i.statusCode);o.destroy();var u=new Error("tunneling socket could not be established, "+"statusCode="+i.statusCode);u.code="ECONNRESET";e.request.emit("error",u);r.removeSocket(n);return}if(s.length>0){c("got illegal response body from proxy");o.destroy();var u=new Error("got illegal response body from proxy");u.code="ECONNRESET";e.request.emit("error",u);r.removeSocket(n);return}c("tunneling connection has established");r.sockets[r.sockets.indexOf(n)]=o;return t(o)}function onError(t){a.removeAllListeners();c("tunneling socket could not be established, cause=%s\n",t.message,t.stack);var i=new Error("tunneling socket could not be established, 
"+"cause="+t.message);i.code="ECONNRESET";e.request.emit("error",i);r.removeSocket(n)}};TunnelingAgent.prototype.removeSocket=function removeSocket(e){var t=this.sockets.indexOf(e);if(t===-1){return}this.sockets.splice(t,1);var r=this.requests.shift();if(r){this.createSocket(r,(function(e){r.request.onSocket(e)}))}};function createSecureSocket(e,t){var r=this;TunnelingAgent.prototype.createSocket.call(r,e,(function(n){var a=e.request.getHeader("host");var o=mergeOptions({},r.options,{socket:n,servername:a?a.replace(/:.*$/,""):e.host});var s=i.connect(0,o);r.sockets[r.sockets.indexOf(n)]=s;t(s)}))}function toOptions(e,t,r){if(typeof e==="string"){return{host:e,port:t,localAddress:r}}return e}function mergeOptions(e){for(var t=1,r=arguments.length;t{e.exports=r(3837).deprecate},5840:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});Object.defineProperty(t,"v1",{enumerable:true,get:function(){return n.default}});Object.defineProperty(t,"v3",{enumerable:true,get:function(){return i.default}});Object.defineProperty(t,"v4",{enumerable:true,get:function(){return a.default}});Object.defineProperty(t,"v5",{enumerable:true,get:function(){return o.default}});Object.defineProperty(t,"NIL",{enumerable:true,get:function(){return s.default}});Object.defineProperty(t,"version",{enumerable:true,get:function(){return u.default}});Object.defineProperty(t,"validate",{enumerable:true,get:function(){return l.default}});Object.defineProperty(t,"stringify",{enumerable:true,get:function(){return c.default}});Object.defineProperty(t,"parse",{enumerable:true,get:function(){return d.default}});var n=_interopRequireDefault(r(8628));var i=_interopRequireDefault(r(6409));var a=_interopRequireDefault(r(5122));var o=_interopRequireDefault(r(9120));var s=_interopRequireDefault(r(5332));var u=_interopRequireDefault(r(1595));var l=_interopRequireDefault(r(6900));var c=_interopRequireDefault(r(8950));var d=_interopRequireDefault(r(4848));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}},4569:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6113));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function md5(e){if(Array.isArray(e)){e=Buffer.from(e)}else if(typeof e==="string"){e=Buffer.from(e,"utf8")}return n.default.createHash("md5").update(e).digest()}var i=md5;t["default"]=i},5332:(e,t)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var r="00000000-0000-0000-0000-000000000000";t["default"]=r},4848:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6900));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function parse(e){if(!(0,n.default)(e)){throw TypeError("Invalid UUID")}let t;const r=new Uint8Array(16);r[0]=(t=parseInt(e.slice(0,8),16))>>>24;r[1]=t>>>16&255;r[2]=t>>>8&255;r[3]=t&255;r[4]=(t=parseInt(e.slice(9,13),16))>>>8;r[5]=t&255;r[6]=(t=parseInt(e.slice(14,18),16))>>>8;r[7]=t&255;r[8]=(t=parseInt(e.slice(19,23),16))>>>8;r[9]=t&255;r[10]=(t=parseInt(e.slice(24,36),16))/1099511627776&255;r[11]=t/4294967296&255;r[12]=t>>>24&255;r[13]=t>>>16&255;r[14]=t>>>8&255;r[15]=t&255;return r}var i=parse;t["default"]=i},814:(e,t)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var 
r=/^(?:[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}|00000000-0000-0000-0000-000000000000)$/i;t["default"]=r},807:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=rng;var n=_interopRequireDefault(r(6113));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}const i=new Uint8Array(256);let a=i.length;function rng(){if(a>i.length-16){n.default.randomFillSync(i);a=0}return i.slice(a,a+=16)}},5274:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6113));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function sha1(e){if(Array.isArray(e)){e=Buffer.from(e)}else if(typeof e==="string"){e=Buffer.from(e,"utf8")}return n.default.createHash("sha1").update(e).digest()}var i=sha1;t["default"]=i},8950:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6900));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}const i=[];for(let e=0;e<256;++e){i.push((e+256).toString(16).substr(1))}function stringify(e,t=0){const r=(i[e[t+0]]+i[e[t+1]]+i[e[t+2]]+i[e[t+3]]+"-"+i[e[t+4]]+i[e[t+5]]+"-"+i[e[t+6]]+i[e[t+7]]+"-"+i[e[t+8]]+i[e[t+9]]+"-"+i[e[t+10]]+i[e[t+11]]+i[e[t+12]]+i[e[t+13]]+i[e[t+14]]+i[e[t+15]]).toLowerCase();if(!(0,n.default)(r)){throw TypeError("Stringified UUID is invalid")}return r}var a=stringify;t["default"]=a},8628:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(807));var i=_interopRequireDefault(r(8950));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}let a;let o;let s=0;let u=0;function v1(e,t,r){let l=t&&r||0;const c=t||new Array(16);e=e||{};let d=e.node||a;let h=e.clockseq!==undefined?e.clockseq:o;if(d==null||h==null){const t=e.random||(e.rng||n.default)();if(d==null){d=a=[t[0]|1,t[1],t[2],t[3],t[4],t[5]]}if(h==null){h=o=(t[6]<<8|t[7])&16383}}let p=e.msecs!==undefined?e.msecs:Date.now();let g=e.nsecs!==undefined?e.nsecs:u+1;const m=p-s+(g-u)/1e4;if(m<0&&e.clockseq===undefined){h=h+1&16383}if((m<0||p>s)&&e.nsecs===undefined){g=0}if(g>=1e4){throw new Error("uuid.v1(): Can't create more than 10M uuids/sec")}s=p;u=g;o=h;p+=122192928e5;const b=((p&268435455)*1e4+g)%4294967296;c[l++]=b>>>24&255;c[l++]=b>>>16&255;c[l++]=b>>>8&255;c[l++]=b&255;const v=p/4294967296*1e4&268435455;c[l++]=v>>>8&255;c[l++]=v&255;c[l++]=v>>>24&15|16;c[l++]=v>>>16&255;c[l++]=h>>>8|128;c[l++]=h&255;for(let e=0;e<6;++e){c[l+e]=d[e]}return t||(0,i.default)(c)}var l=v1;t["default"]=l},6409:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(5998));var i=_interopRequireDefault(r(4569));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}const a=(0,n.default)("v3",48,i.default);var o=a;t["default"]=o},5998:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=_default;t.URL=t.DNS=void 0;var n=_interopRequireDefault(r(8950));var i=_interopRequireDefault(r(4848));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function stringToBytes(e){e=unescape(encodeURIComponent(e));const t=[];for(let r=0;r{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(807));var i=_interopRequireDefault(r(8950));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function v4(e,t,r){e=e||{};const 
a=e.random||(e.rng||n.default)();a[6]=a[6]&15|64;a[8]=a[8]&63|128;if(t){r=r||0;for(let e=0;e<16;++e){t[r+e]=a[e]}return t}return(0,i.default)(a)}var a=v4;t["default"]=a},9120:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(5998));var i=_interopRequireDefault(r(5274));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}const a=(0,n.default)("v5",80,i.default);var o=a;t["default"]=o},6900:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(814));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function validate(e){return typeof e==="string"&&n.default.test(e)}var i=validate;t["default"]=i},1595:(e,t,r)=>{Object.defineProperty(t,"__esModule",{value:true});t["default"]=void 0;var n=_interopRequireDefault(r(6900));function _interopRequireDefault(e){return e&&e.__esModule?e:{default:e}}function version(e){if(!(0,n.default)(e)){throw TypeError("Invalid UUID")}return parseInt(e.substr(14,1),16)}var i=version;t["default"]=i},2940:e=>{e.exports=wrappy;function wrappy(e,t){if(e&&t)return wrappy(e)(t);if(typeof e!=="function")throw new TypeError("need wrapper function");Object.keys(e).forEach((function(t){wrapper[t]=e[t]}));return wrapper;function wrapper(){var t=new Array(arguments.length);for(var r=0;r{t.exports=e(import.meta.url)("assert")},4300:t=>{t.exports=e(import.meta.url)("buffer")},2057:t=>{t.exports=e(import.meta.url)("constants")},6113:t=>{t.exports=e(import.meta.url)("crypto")},2361:t=>{t.exports=e(import.meta.url)("events")},7147:t=>{t.exports=e(import.meta.url)("fs")},3685:t=>{t.exports=e(import.meta.url)("http")},5687:t=>{t.exports=e(import.meta.url)("https")},1808:t=>{t.exports=e(import.meta.url)("net")},7718:t=>{t.exports=e(import.meta.url)("node:child_process")},7561:t=>{t.exports=e(import.meta.url)("node:fs")},7742:t=>{t.exports=e(import.meta.url)("node:process")},4492:t=>{t.exports=e(import.meta.url)("node:stream")},5628:t=>{t.exports=e(import.meta.url)("node:zlib")},2037:t=>{t.exports=e(import.meta.url)("os")},1017:t=>{t.exports=e(import.meta.url)("path")},2781:t=>{t.exports=e(import.meta.url)("stream")},1576:t=>{t.exports=e(import.meta.url)("string_decoder")},4404:t=>{t.exports=e(import.meta.url)("tls")},3837:t=>{t.exports=e(import.meta.url)("util")},6955:(e,t,r)=>{r.a(e,(async e=>{var t=r(7718);var n=r(7742);var i=r(4492);var a=r(7561);var o=r(5628);var s=r(2283);var u=r(2186);const l=u.getInput("container",{required:true});const c=JSON.parse(u.getInput("error-log-paths",{required:true}));const d=u.getInput("log-tarball-prefix",{required:true});const h=u.getInput("tests-label",{required:true});const p=u.getInput("test-timeout",{required:true});const g=n.env.GITHUB_REPOSITORY.split("/")[1];try{if(t.spawnSync("docker",["run","--name","base","-v",`${n.cwd()}/build.tar.zst:/build.tar.zst`,"--workdir",`/__w/${g}/${g}`,l,"sh","-c","zstdcat /build.tar.zst | tar x"],{stdio:"inherit"}).status)throw new Error("Failed to create base container");if(t.spawnSync("docker",["commit","base","baseimage"],{stdio:"inherit"}).status)throw new Error("Failed to create base image");if(t.spawnSync("docker",["rm","base"],{stdio:"inherit"}).status)throw new Error("Failed to remove base container");const e=t.spawnSync("docker",["run","--rm","baseimage","bash","-e","-o","pipefail","-c",`cd build; ctest -L '${h}' --show-only=json-v1`]);if(e.status)throw new Error("Failed to discover tests with label");const r=JSON.parse(e.stdout).tests;let 
m=[];r.forEach((e=>{m.push(new Promise((r=>{t.spawn("docker",["run","--security-opt","seccomp=unconfined","-e","GITHUB_ACTIONS=True","--name",e.name,"--init","baseimage","bash","-c",`cd build; ctest --output-on-failure -R '^${e.name}$' --timeout ${p}`],{stdio:"inherit"}).on("close",(e=>r(e)))})))}));const b=await Promise.all(m);for(let e=0;e{if(!e.name.startsWith(`__w/${g}/${g}/build`)){t.on("end",(()=>r()));t.resume();return}e.name=e.name.substring(`__w/${g}/${g}/`.length);if(e.name!=="build/"&&c.filter((t=>e.name.startsWith(t))).length===0){t.on("end",(()=>r()));t.resume();return}t.pipe(l.entry(e,r))})).on("finish",(()=>{l.finalize()}));t.spawn("docker",["export",r[e].name]).stdout.pipe(n);i.promises.pipeline(l,o.createGzip(),a.createWriteStream(`${d}-${r[e].name}-logs.tar.gz`))}}catch(e){u.setFailed(`Uncaught exception ${e.message}`)}e()}),1)}};var r={};function __nccwpck_require__(e){var n=r[e];if(n!==undefined){return n.exports}var i=r[e]={exports:{}};var a=true;try{t[e].call(i.exports,i,i.exports,__nccwpck_require__);a=false}finally{if(a)delete r[e]}return i.exports}(()=>{var e=typeof Symbol==="function"?Symbol("webpack then"):"__webpack_then__";var t=typeof Symbol==="function"?Symbol("webpack exports"):"__webpack_exports__";var completeQueue=e=>{if(e){e.forEach((e=>e.r--));e.forEach((e=>e.r--?e.r++:e()))}};var completeFunction=e=>!--e.r&&e();var queueFunction=(e,t)=>e?e.push(t):completeFunction(t);var wrapDeps=r=>r.map((r=>{if(r!==null&&typeof r==="object"){if(r[e])return r;if(r.then){var n=[];r.then((e=>{i[t]=e;completeQueue(n);n=0}));var i={};i[e]=(e,t)=>(queueFunction(n,e),r["catch"](t));return i}}var a={};a[e]=e=>completeFunction(e);a[t]=r;return a}));__nccwpck_require__.a=(r,n,i)=>{var a=i&&[];var o=r.exports;var s;var u;var l;var c=true;var d=false;var whenAll=(t,r,n)=>{if(d)return;d=true;r.r+=t.length;t.map(((t,i)=>t[e](r,n)));d=false};var h=new Promise(((e,t)=>{l=t;u=()=>(e(o),completeQueue(a),a=0)}));h[t]=o;h[e]=(e,t)=>{if(c){return completeFunction(e)}if(s)whenAll(s,e,t);queueFunction(a,e);h["catch"](t)};r.exports=h;n((e=>{if(!e)return u();s=wrapDeps(e);var r,n;var i=new Promise(((e,i)=>{r=()=>e(n=s.map((e=>e[t])));r.r=0;whenAll(s,r,i)}));return r.r?i:n})).then(u,l);c=false}})();if(typeof __nccwpck_require__!=="undefined")__nccwpck_require__.ab=new URL(".",import.meta.url).pathname.slice(import.meta.url.match(/^file:\/\/\/\w:/)?1:0,-1)+"/";var n=__nccwpck_require__(6955);n=await n; \ No newline at end of file diff --git a/.github/actions/parallel-ctest-containers/main.mjs b/.github/actions/parallel-ctest-containers/main.mjs index e88014bb51..b517fa1b0d 100644 --- a/.github/actions/parallel-ctest-containers/main.mjs +++ b/.github/actions/parallel-ctest-containers/main.mjs @@ -12,8 +12,10 @@ const log_tarball_prefix = core.getInput('log-tarball-prefix', {required: true}) const tests_label = core.getInput('tests-label', {required: true}); const test_timeout = core.getInput('test-timeout', {required: true}); +const repo_name = process.env.GITHUB_REPOSITORY.split('/')[1]; + try { - if(child_process.spawnSync("docker", ["run", "--name", "base", "-v", `${process.cwd()}/build.tar.zst:/build.tar.zst`, "--workdir", "/__w/leap/leap", container, "sh", "-c", "zstdcat /build.tar.zst | tar x"], {stdio:"inherit"}).status) + if(child_process.spawnSync("docker", ["run", "--name", "base", "-v", `${process.cwd()}/build.tar.zst:/build.tar.zst`, "--workdir", `/__w/${repo_name}/${repo_name}`, container, "sh", "-c", "zstdcat /build.tar.zst | tar x"], {stdio:"inherit"}).status) throw new 
Error("Failed to create base container"); if(child_process.spawnSync("docker", ["commit", "base", "baseimage"], {stdio:"inherit"}).status) throw new Error("Failed to create base image"); @@ -45,13 +47,13 @@ try { let packer = tar.pack(); extractor.on('entry', (header, stream, next) => { - if(!header.name.startsWith(`__w/leap/leap/build`)) { + if(!header.name.startsWith(`__w/${repo_name}/${repo_name}/build`)) { stream.on('end', () => next()); stream.resume(); return; } - header.name = header.name.substring(`__w/leap/leap/`.length); + header.name = header.name.substring(`__w/${repo_name}/${repo_name}/`.length); if(header.name !== "build/" && error_log_paths.filter(p => header.name.startsWith(p)).length === 0) { stream.on('end', () => next()); stream.resume(); diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e3f60238f7..1bed791bfb 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -7,6 +7,20 @@ on: - "release/*" pull_request: workflow_dispatch: + inputs: + override-cdt: + description: 'Override cdt target' + type: string + override-cdt-prerelease: + type: choice + description: Override cdt prelease + options: + - default + - true + - false + override-eos-system-contracts: + description: 'Override eos-system-contracts ref' + type: string permissions: packages: read @@ -17,87 +31,59 @@ defaults: shell: bash jobs: - d: - name: Discover Platforms - runs-on: ubuntu-latest - outputs: - missing-platforms: ${{steps.discover.outputs.missing-platforms}} - p: ${{steps.discover.outputs.platforms}} - steps: - - name: Discover Platforms - id: discover - uses: AntelopeIO/discover-platforms-action@v1 - with: - platform-file: .cicd/platforms.json - password: ${{secrets.GITHUB_TOKEN}} - package-name: builders - build-platforms: - name: Build Platforms - needs: d - if: needs.d.outputs.missing-platforms != '[]' - strategy: - fail-fast: false - matrix: - platform: ${{fromJSON(needs.d.outputs.missing-platforms)}} - runs-on: ["self-hosted", "enf-x86-beefy"] + build-base: + name: Run Build Workflow + uses: ./.github/workflows/build_base.yaml permissions: packages: write contents: read + + v: + name: Discover Versions + runs-on: ubuntu-latest + outputs: + cdt-target: ${{steps.versions.outputs.cdt-target}} + cdt-prerelease: ${{steps.versions.outputs.cdt-prerelease}} + eos-system-contracts-ref: ${{steps.versions.outputs.eos-system-contracts-ref}} steps: - - name: Login to Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{github.repository_owner}} - password: ${{secrets.GITHUB_TOKEN}} - - name: Build and push - uses: docker/build-push-action@v3 - with: - push: true - tags: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - file: ${{fromJSON(needs.d.outputs.p)[matrix.platform].dockerfile}} + - name: Setup cdt and eos-system-contracts versions + id: versions + env: + GH_TOKEN: ${{secrets.GITHUB_TOKEN}} + run: | + DEFAULTS_JSON=$(curl -sSfL $(gh api https://api.github.com/repos/${{github.repository}}/contents/.cicd/defaults.json?ref=${{github.sha}} --jq .download_url)) + echo cdt-target=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.target') >> $GITHUB_OUTPUT + echo cdt-prerelease=$(echo "$DEFAULTS_JSON" | jq -r '.cdt.prerelease') >> $GITHUB_OUTPUT + echo eos-system-contracts-ref=$(echo "$DEFAULTS_JSON" | jq -r '.eossystemcontracts.ref') >> $GITHUB_OUTPUT - Build: - needs: [d, build-platforms] - if: always() && needs.d.result == 'success' && (needs.build-platforms.result == 'success' || needs.build-platforms.result == 
'skipped') + if [[ "${{inputs.override-cdt}}" != "" ]]; then + echo cdt-target=${{inputs.override-cdt}} >> $GITHUB_OUTPUT + fi + if [[ "${{inputs.override-cdt-prerelease}}" == +(true|false) ]]; then + echo cdt-prerelease=${{inputs.override-cdt-prerelease}} >> $GITHUB_OUTPUT + fi + if [[ "${{inputs.override-eos-system-contracts}}" != "" ]]; then + echo eos-system-contracts-ref=${{inputs.override-eos-system-contracts}} >> $GITHUB_OUTPUT + fi + + dev-package: + name: Build leap-dev package + needs: [build-base] + if: always() && needs.build-base.result == 'success' strategy: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] - runs-on: ["self-hosted", "enf-x86-beefy"] - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} - steps: - - uses: actions/checkout@v3 - with: - submodules: recursive - - name: Build - id: build - run: | - # https://github.com/actions/runner/issues/2033 - chown -R $(id -u):$(id -g) $PWD - cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -GNinja - cmake --build build - tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst - - name: Upload builddir - uses: AntelopeIO/upload-artifact-large-chunks-action@v1 - with: - name: ${{matrix.platform}}-build - path: build.tar.zst - - dev-package: - name: Build leap-dev package - needs: [d, Build] - if: always() && needs.Build.result == 'success' runs-on: ubuntu-latest - container: ${{fromJSON(needs.d.outputs.p)['ubuntu20'].image}} + container: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} steps: - uses: actions/checkout@v3 with: - submodules: true + submodules: recursive - name: Download builddir uses: actions/download-artifact@v3 with: - name: ubuntu20-build + name: ${{matrix.platform}}-build - name: Build dev package run: | zstdcat build.tar.zst | tar x @@ -105,27 +91,28 @@ jobs: cpack - name: Install dev package run: | - apt update && apt upgrade -y - apt install -y ./build/leap_*.deb ./build/leap-dev*.deb + apt-get update && apt-get upgrade -y + apt-get install -y ./build/leap_*.deb ./build/leap-dev*.deb - name: Test using TestHarness run: | python3 -c "from TestHarness import Cluster" - name: Upload dev package uses: actions/upload-artifact@v3 with: - name: leap-dev-ubuntu20-amd64 + name: leap-dev-${{matrix.platform}}-amd64 path: build/leap-dev*.deb + tests: name: Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' + needs: [build-base] + if: always() && needs.build-base.result == 'success' strategy: fail-fast: false matrix: platform: [ubuntu20, ubuntu22] runs-on: ["self-hosted", "enf-x86-hightier"] container: - image: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + image: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} options: --security-opt seccomp=unconfined steps: - uses: actions/checkout@v3 @@ -143,8 +130,8 @@ jobs: np-tests: name: NP Tests - needs: [d, Build] - if: always() && needs.Build.result == 'success' + needs: [build-base] + if: always() && needs.build-base.result == 'success' strategy: fail-fast: false matrix: @@ -159,7 +146,7 @@ jobs: - name: Run tests in parallel containers uses: ./.github/actions/parallel-ctest-containers with: - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + container: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} error-log-paths: '["build/etc", "build/var", "build/leap-ignition-wd", "build/TestLogs"]' log-tarball-prefix: ${{matrix.platform}} tests-label: nonparallelizable_tests @@ -173,8 +160,8 @@ jobs: lr-tests: name: LR Tests - 
needs: [d, Build] - if: always() && needs.Build.result == 'success' + needs: [build-base] + if: always() && needs.build-base.result == 'success' strategy: fail-fast: false matrix: @@ -189,7 +176,7 @@ jobs: - name: Run tests in parallel containers uses: ./.github/actions/parallel-ctest-containers with: - container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + container: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} error-log-paths: '["build/etc", "build/var", "build/leap-ignition-wd", "build/TestLogs"]' log-tarball-prefix: ${{matrix.platform}} tests-label: long_running_tests @@ -201,11 +188,104 @@ jobs: name: ${{matrix.platform}}-lr-logs path: '*-logs.tar.gz' + libtester-tests: + name: libtester tests + needs: [build-base, v, dev-package] + if: always() && needs.v.result == 'success' && needs.dev-package.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + test: [build-tree, make-dev-install, deb-install] + runs-on: ["self-hosted", "enf-x86-midtier"] + container: ${{ matrix.test != 'deb-install' && fromJSON(needs.build-base.outputs.p)[matrix.platform].image || matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} + env: + DEBIAN_FRONTEND: noninteractive + TZ: Etc/UTC + steps: + - name: Update Package Index & Upgrade Packages + run: | + apt-get update + apt-get upgrade -y + + # LEAP + - if: ${{ matrix.test != 'deb-install' }} + name: Clone leap + uses: actions/checkout@v3 + with: + submodules: recursive + - if: ${{ matrix.test != 'deb-install' }} + name: Download leap builddir + uses: actions/download-artifact@v3 + with: + name: ${{matrix.platform}}-build + - if: ${{ matrix.test != 'deb-install' }} + name: Extract leap build + run: | + zstdcat build.tar.zst | tar x + - if: ${{ matrix.test == 'build-tree' }} + name: Set leap_DIR env var + run: | + echo "leap_DIR=$PWD/build/lib/cmake/leap" >> "$GITHUB_ENV" + - if: ${{ matrix.test == 'make-dev-install' }} + name: leap dev-install + run: | + cmake --install build + cmake --install build --component dev + - if: ${{ matrix.test == 'make-dev-install' }} + name: Delete leap artifacts + run: | + rm -r * + - if: ${{ matrix.test == 'deb-install' }} + name: Download leap-dev + uses: actions/download-artifact@v3 + with: + name: leap-dev-${{matrix.platform}}-amd64 + - if: ${{ matrix.test == 'deb-install' }} + name: Install leap-dev Package + run: | + apt-get install -y ./*.deb + rm ./*.deb + + # CDT + - name: Download cdt + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: cdt + file: 'cdt_.*amd64.deb' + target: '${{needs.v.outputs.cdt-target}}' + prereleases: ${{fromJSON(needs.v.outputs.cdt-prerelease)}} + artifact-name: cdt_ubuntu_package_amd64 + token: ${{github.token}} + - name: Install cdt Packages + run: | + apt-get install -y ./*.deb + rm ./*.deb + + # Reference Contracts + - name: checkout eos-system-contracts + uses: actions/checkout@v3 + with: + repository: eosnetworkfoundation/eos-system-contracts + path: eos-system-contracts + ref: '${{needs.v.outputs.eos-system-contracts-ref}}' + - if: ${{ matrix.test == 'deb-install' }} + name: Install eos-system-contracts deps + run: | + apt-get -y install cmake build-essential + - name: Build & Test eos-system-contracts + run: | + cmake -S eos-system-contracts -B eos-system-contracts/build -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTS=On -DSYSTEM_ENABLE_LEAP_VERSION_CHECK=Off -DSYSTEM_ENABLE_CDT_VERSION_CHECK=Off + cmake --build eos-system-contracts/build -- -j $(nproc) + cd 
eos-system-contracts/build/tests + ctest --output-on-failure -j $(nproc) + all-passing: name: All Required Tests Passed - needs: [dev-package, tests, np-tests] + needs: [dev-package, tests, np-tests, libtester-tests] if: always() runs-on: ubuntu-latest steps: - - if: needs.dev-package.result != 'success' || needs.tests.result != 'success' || needs.np-tests.result != 'success' + - if: needs.dev-package.result != 'success' || needs.tests.result != 'success' || needs.np-tests.result != 'success' || needs.libtester-tests.result != 'success' run: false diff --git a/.github/workflows/build_base.yaml b/.github/workflows/build_base.yaml new file mode 100644 index 0000000000..b1771fbfbf --- /dev/null +++ b/.github/workflows/build_base.yaml @@ -0,0 +1,87 @@ +name: "Build leap" + +on: + workflow_dispatch: + workflow_call: + outputs: + p: + description: "Discovered Build Platforms" + value: ${{ jobs.d.outputs.p }} + +permissions: + packages: read + contents: read + +defaults: + run: + shell: bash + +jobs: + d: + name: Discover Platforms + runs-on: ubuntu-latest + outputs: + missing-platforms: ${{steps.discover.outputs.missing-platforms}} + p: ${{steps.discover.outputs.platforms}} + steps: + - name: Discover Platforms + id: discover + uses: AntelopeIO/discover-platforms-action@v1 + with: + platform-file: .cicd/platforms.json + password: ${{secrets.GITHUB_TOKEN}} + package-name: builders + + build-platforms: + name: Build Platforms + needs: d + if: needs.d.outputs.missing-platforms != '[]' + strategy: + fail-fast: false + matrix: + platform: ${{fromJSON(needs.d.outputs.missing-platforms)}} + runs-on: ["self-hosted", "enf-x86-beefy"] + permissions: + packages: write + contents: read + steps: + - name: Login to Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{github.repository_owner}} + password: ${{secrets.GITHUB_TOKEN}} + - name: Build and push + uses: docker/build-push-action@v3 + with: + push: true + tags: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + file: ${{fromJSON(needs.d.outputs.p)[matrix.platform].dockerfile}} + + Build: + name: Build leap + needs: [d, build-platforms] + if: always() && needs.d.result == 'success' && (needs.build-platforms.result == 'success' || needs.build-platforms.result == 'skipped') + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + runs-on: ["self-hosted", "enf-x86-beefy"] + container: ${{fromJSON(needs.d.outputs.p)[matrix.platform].image}} + steps: + - uses: actions/checkout@v3 + with: + submodules: recursive + - name: Build + id: build + run: | + # https://github.com/actions/runner/issues/2033 + chown -R $(id -u):$(id -g) $PWD + cmake -B build -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DENABLE_LEAP_DEV_DEB=On -GNinja + cmake --build build + tar -pc --exclude "*.o" build | zstd --long -T0 -9 > build.tar.zst + - name: Upload builddir + uses: AntelopeIO/upload-artifact-large-chunks-action@v1 + with: + name: ${{matrix.platform}}-build + path: build.tar.zst \ No newline at end of file diff --git a/.github/workflows/performance_harness_run.yaml b/.github/workflows/performance_harness_run.yaml new file mode 100644 index 0000000000..90602d8345 --- /dev/null +++ b/.github/workflows/performance_harness_run.yaml @@ -0,0 +1,21 @@ +name: "Performance Harness Run" + +on: + workflow_dispatch: + +permissions: + packages: read + contents: read + +defaults: + run: + shell: bash + +jobs: + tmp: + name: Stub + runs-on: ubuntu-latest + steps: + - name: Workflow Stub + run: | + echo "Workflow Stub" 
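The `v` (Discover Versions) job above resolves the cdt and eos-system-contracts versions in two layers: the committed `.cicd/defaults.json` supplies the baseline, and any non-empty `workflow_dispatch` input overrides it, with the prerelease flag honored only when it is explicitly `true` or `false`. As a standalone illustration of that precedence, here is a minimal bash sketch; it reads the defaults file from the working tree instead of the GitHub contents API, and the `OVERRIDE_*` environment variables are stand-ins for the workflow inputs, so treat it as an approximation of the job rather than the job itself:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Baseline versions come from the checked-in defaults file
# (same shape as .cicd/defaults.json shown earlier in this patch).
DEFAULTS_JSON=$(cat .cicd/defaults.json)
CDT_TARGET=$(jq -r '.cdt.target' <<< "$DEFAULTS_JSON")
CDT_PRERELEASE=$(jq -r '.cdt.prerelease' <<< "$DEFAULTS_JSON")
CONTRACTS_REF=$(jq -r '.eossystemcontracts.ref' <<< "$DEFAULTS_JSON")

# Non-empty overrides win. OVERRIDE_* are hypothetical stand-ins for the
# workflow_dispatch inputs (override-cdt, override-cdt-prerelease,
# override-eos-system-contracts).
if [[ -n "${OVERRIDE_CDT:-}" ]]; then
    CDT_TARGET=$OVERRIDE_CDT
fi
# The prerelease choice defaults to 'default', which leaves the baseline alone.
case "${OVERRIDE_CDT_PRERELEASE:-default}" in
    true|false) CDT_PRERELEASE=$OVERRIDE_CDT_PRERELEASE ;;
esac
if [[ -n "${OVERRIDE_EOS_SYSTEM_CONTRACTS:-}" ]]; then
    CONTRACTS_REF=$OVERRIDE_EOS_SYSTEM_CONTRACTS
fi

# The real job appends these lines to $GITHUB_OUTPUT; here they go to stdout.
echo "cdt-target=$CDT_TARGET"
echo "cdt-prerelease=$CDT_PRERELEASE"
echo "eos-system-contracts-ref=$CONTRACTS_REF"
```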
diff --git a/.github/workflows/ph_backward_compatibility.yaml b/.github/workflows/ph_backward_compatibility.yaml new file mode 100644 index 0000000000..e166c92eff --- /dev/null +++ b/.github/workflows/ph_backward_compatibility.yaml @@ -0,0 +1,70 @@ +name: "Performance Harness Backwards Compatibility" + +on: + workflow_dispatch: + +permissions: + packages: read + contents: read + +defaults: + run: + shell: bash + +jobs: + build-base: + name: Run Build Workflow + uses: ./.github/workflows/build_base.yaml + permissions: + packages: write + contents: read + + tests: + name: Tests + needs: [build-base] + if: always() && needs.build-base.result == 'success' + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + release: [3.1, 3.2, 4.0] + runs-on: ["self-hosted", "enf-x86-lowtier"] + container: + image: ${{fromJSON(needs.build-base.outputs.p)[matrix.platform].image}} + options: --security-opt seccomp=unconfined + steps: + - uses: actions/checkout@v3 + - name: Download builddir + uses: actions/download-artifact@v3 + with: + name: ${{matrix.platform}}-build + - name: Extract Build Directory + run: | + zstdcat build.tar.zst | tar x + - name: Download Prev Leap Version + uses: AntelopeIO/asset-artifact-download-action@v2 + with: + owner: AntelopeIO + repo: leap + file: '(leap).*${{matrix.platform}}.04.*(x86_64|amd64).deb' + target: '${{matrix.release}}' + token: ${{github.token}} + - name: Install leap & replace binaries for PH use + run: | + apt-get update + apt-get install -y ./leap*.deb + rm build/bin/nodeos + rm build/bin/cleos + cp /usr/bin/nodeos build/bin + cp /usr/bin/cleos build/bin + ./build/bin/nodeos --version + - if: ${{ matrix.release == '3.1' || matrix.release == '3.2' }} + name: Run Performance Tests (<v4.0) + run: | + cd build + ctest --output-on-failure -R performance_test --timeout 480 diff --git a/.github/workflows/pinned_build.yaml b/.github/workflows/pinned_build.yaml new file mode 100644 index 0000000000..54b87ee93e --- /dev/null +++ b/.github/workflows/pinned_build.yaml @@ -0,0 +1,51 @@ +name: "Pinned Build" + +on: + workflow_dispatch: + +permissions: + packages: read + contents: read + +defaults: + run: + shell: bash + +jobs: + Build: + name: Build + strategy: + fail-fast: false + matrix: + platform: [ubuntu20, ubuntu22] + runs-on: ["self-hosted", "enf-x86-beefy-long"] + container: ${{ matrix.platform == 'ubuntu20' && 'ubuntu:focal' || 'ubuntu:jammy' }} + steps: + - name: Update and Install git + run: | + apt-get update + apt-get install -y git + git --version + - name: Clone leap + uses: actions/checkout@v3 + with: + submodules: recursive + - name: Install dependencies + run: | + # https://github.com/actions/runner/issues/2033 + chown -R $(id -u):$(id -g) $PWD + ./scripts/install_deps.sh + - name: Build Pinned Build + env: + LEAP_PINNED_INSTALL_PREFIX: /usr + run: | + ./scripts/pinned_build.sh deps build "$(nproc)" + - name: Upload package + uses: actions/upload-artifact@v3 + with: + name: leap-${{matrix.platform}}-pinned-amd64 + path: build/leap_*.deb + - name: Run Parallel Tests + run: | + cd build + ctest --output-on-failure -j $(nproc) -LE "(nonparallelizable_tests|long_running_tests)" --timeout 420 diff --git a/.gitignore b/.gitignore index d4636fd221..012739af57 100644 --- a/.gitignore +++ b/.gitignore @@ -75,6 +75,8 @@ witness_node_data_dir *.pyc *.pyo +*.gdb_history + Testing/* build.tar.gz [Bb]uild*/* diff --git a/.gitmodules b/.gitmodules index ab01b3d5c0..022c13dfb4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -31,3 +31,6 @@ [submodule 
"libraries/cli11/cli11"] path = libraries/cli11/cli11 url = https://github.com/AntelopeIO/CLI11.git +[submodule "libraries/boost"] + path = libraries/boost + url = https://github.com/boostorg/boost.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 049183b252..479cd1ea81 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -101,12 +101,6 @@ else() set(no_whole_archive_flag "--no-whole-archive") endif() -set(Boost_USE_MULTITHREADED ON) -set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -# Most boost deps get implictly picked up via fc, as just about everything links to fc. In addition we pick up -# the pthread dependency through fc. -find_package(Boost 1.71 REQUIRED COMPONENTS program_options unit_test_framework system) - if( APPLE AND UNIX ) # Apple Specific Options Here message( STATUS "Configuring Leap on macOS" ) @@ -174,9 +168,10 @@ endif() message( STATUS "Using '${EOSIO_ROOT_KEY}' as public key for 'eosio' account" ) -find_package( Gperftools QUIET ) -if( GPERFTOOLS_FOUND ) - message( STATUS "Found gperftools; compiling Leap with TCMalloc") +option(ENABLE_TCMALLOC "use tcmalloc (requires gperftools)" OFF) +if( ENABLE_TCMALLOC ) + find_package( Gperftools REQUIRED ) + message( STATUS "Compiling Leap with TCMalloc") #if doing this by the book, simply link_libraries( ${GPERFTOOLS_TCMALLOC} ) here. That will #give the performance benefits of tcmalloc but since it won't be linked last #the heap profiler & checker may not be accurate. This here is rather undocumented behavior @@ -247,7 +242,6 @@ install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/tests/TestHarness DESTINATION ${CM PATTERN "*.json" PATTERN "__pycache__" EXCLUDE PATTERN "CMakeFiles" EXCLUDE) -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/tests/launcher.py DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_testing/tests COMPONENT dev EXCLUDE_FROM_ALL) if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.21) # Cmake versions < 3.21 did not support installing symbolic links to a directory via install(FILES ...) 
@@ -278,6 +272,18 @@ configure_file(${CMAKE_SOURCE_DIR}/libraries/cli11/bash-completion/completions/c install(FILES libraries/cli11/bash-completion/completions/leap-util DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) install(FILES libraries/cli11/bash-completion/completions/cleos DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/bash-completion/completions COMPONENT base) +# Add the boost submodule we used to build to our install package, so headers can be found for libtester +install(DIRECTORY "${CMAKE_SOURCE_DIR}/libraries/boost/" + DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/leap_boost + COMPONENT dev EXCLUDE_FROM_ALL + PATTERN ".git/*" EXCLUDE + PATTERN "example/*" EXCLUDE + PATTERN "bench/*" EXCLUDE + PATTERN "doc/*" EXCLUDE + PATTERN "libs/*/test" EXCLUDE + PATTERN "tools/*/test" EXCLUDE + ) + add_custom_target(dev-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_BINARY_DIR}" @@ -287,5 +293,7 @@ add_custom_target(dev-install include(doxygen) +option(ENABLE_LEAP_DEV_DEB "Enable building the leap-dev .deb package" OFF) + include(package.cmake) include(CPack) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index a12004f73f..8b1135bd40 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -14,12 +14,6 @@ if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) set(LLVM_DIR @LLVM_DIR@) endif() -find_package( Gperftools QUIET ) -if( GPERFTOOLS_FOUND ) - message( STATUS "Found gperftools; compiling tests with TCMalloc") - list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) -endif() - if(NOT "@LLVM_FOUND@" STREQUAL "") find_package(LLVM @LLVM_VERSION@ EXACT REQUIRED CONFIG) llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native DebugInfoDWARF orcjit) @@ -41,14 +35,10 @@ else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) + +add_subdirectory( @CMAKE_INSTALL_FULL_DATAROOTDIR@/leap_boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@ NO_DEFAULT_PATH) @@ -94,12 +84,18 @@ macro(add_eosio_test_executable test_name) ${libbn256} @GMP_LIBRARY@ - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} + Boost::date_time + Boost::filesystem + Boost::system + Boost::chrono + Boost::multi_index + Boost::multiprecision + Boost::interprocess + Boost::asio + Boost::signals2 + Boost::iostreams "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY} + Boost::unit_test_framework ${LLVM_LIBS} @@ -115,7 +111,6 @@ macro(add_eosio_test_executable test_name) endif() target_include_directories( ${test_name} PUBLIC - ${Boost_INCLUDE_DIRS} @OPENSSL_INCLUDE_DIR@ @CMAKE_INSTALL_PREFIX@ @CMAKE_INSTALL_FULL_INCLUDEDIR@ diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index aa67d25595..6beb37467b 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -12,12 +12,6 @@ if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) set(LLVM_DIR @LLVM_DIR@) endif() -find_package( 
Gperftools QUIET ) -if( GPERFTOOLS_FOUND ) - message( STATUS "Found gperftools; compiling tests with TCMalloc") - list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc ) -endif() - if(NOT "@LLVM_FOUND@" STREQUAL "") find_package(LLVM @LLVM_VERSION@ EXACT REQUIRED CONFIG) llvm_map_components_to_libnames(LLVM_LIBS support core passes mcjit native DebugInfoDWARF orcjit) @@ -38,14 +32,10 @@ else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall") endif ( APPLE ) +set( Boost_USE_MULTITHREADED ON ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) -find_package(Boost @Boost_MAJOR_VERSION@.@Boost_MINOR_VERSION@ EXACT REQUIRED COMPONENTS - date_time - filesystem - system - chrono - iostreams - unit_test_framework) + +add_subdirectory( @CMAKE_SOURCE_DIR@/libraries/boost ${PROJECT_BINARY_DIR}/libraries/boost EXCLUDE_FROM_ALL) find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing NO_DEFAULT_PATH) find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain NO_DEFAULT_PATH) @@ -91,12 +81,18 @@ macro(add_eosio_test_executable test_name) ${libbn256} @GMP_LIBRARY@ - ${Boost_FILESYSTEM_LIBRARY} - ${Boost_SYSTEM_LIBRARY} - ${Boost_CHRONO_LIBRARY} - ${Boost_IOSTREAMS_LIBRARY} + Boost::date_time + Boost::filesystem + Boost::system + Boost::chrono + Boost::multi_index + Boost::multiprecision + Boost::interprocess + Boost::asio + Boost::signals2 + Boost::iostreams "-lz" # Needed by Boost iostreams - ${Boost_DATE_TIME_LIBRARY} + Boost::unit_test_framework ${LLVM_LIBS} @@ -112,7 +108,6 @@ macro(add_eosio_test_executable test_name) endif() target_include_directories( ${test_name} PUBLIC - ${Boost_INCLUDE_DIRS} @OPENSSL_INCLUDE_DIR@ @CMAKE_SOURCE_DIR@/libraries/chain/include @CMAKE_BINARY_DIR@/libraries/chain/include diff --git a/README.md b/README.md index 71582a1ce4..1ffaa053e1 100644 --- a/README.md +++ b/README.md @@ -132,12 +132,13 @@ sudo apt-get install -y \ build-essential \ cmake \ git \ - libboost-all-dev \ libcurl4-openssl-dev \ libgmp-dev \ libssl-dev \ llvm-11-dev \ - python3-numpy + python3-numpy \ + file \ + zlib1g-dev ``` To build, make sure you are in the root of the `leap` repo, then run the following command: ```bash diff --git a/docs/01_nodeos/03_plugins/chain_plugin/index.md b/docs/01_nodeos/03_plugins/chain_plugin/index.md index c6fa5dd9dc..0b932e78ac 100644 --- a/docs/01_nodeos/03_plugins/chain_plugin/index.md +++ b/docs/01_nodeos/03_plugins/chain_plugin/index.md @@ -131,8 +131,7 @@ Config Options for eosio::chain_plugin: received via the P2P network are not relayed and transactions cannot be pushed via the chain API. - In "speculative" mode: (DEPRECATED: - head mode recommended) database + In "speculative" mode: database contains state changes by transactions in the blockchain up to the head block as well as some transactions not yet @@ -179,7 +178,16 @@ Config Options for eosio::chain_plugin: code cache --eos-vm-oc-compile-threads arg (=1) Number of threads to use for EOS VM OC tier-up - --eos-vm-oc-enable Enable EOS VM OC tier-up runtime + --eos-vm-oc-enable arg (=auto) Enable EOS VM OC tier-up runtime + ('auto', 'all', 'none'). + 'auto' - EOS VM OC tier-up is enabled + for eosio.* accounts, read-only trxs, + and applying blocks. + 'all' - EOS VM OC tier-up is enabled + for all contract execution. + 'none' - EOS VM OC tier-up is + completely disabled. + --enable-account-queries arg (=0) enable queries to find accounts by various metadata. 
--max-nonprivileged-inline-action-size arg (=4096) @@ -192,13 +200,17 @@ Config Options for eosio::chain_plugin: feature. Setting above 0 enables this feature. --transaction-retry-interval-sec arg (=20) - How often, in seconds, to resend an - incoming transaction to network if not + How often, in seconds, to resend an + incoming transaction to network if not seen in a block. + Needs to be at least twice as large as + p2p-dedup-cache-expire-time-sec. --transaction-retry-max-expiration-sec arg (=120) - Maximum allowed transaction expiration - for retry transactions, will retry + Maximum allowed transaction expiration + for retry transactions, will retry transactions up to this value. + Should be larger than + transaction-retry-interval-sec. --transaction-finality-status-max-storage-size-gb arg Maximum size (in GiB) allowed to be allocated for the Transaction Finality diff --git a/docs/01_nodeos/03_plugins/producer_plugin/index.md b/docs/01_nodeos/03_plugins/producer_plugin/index.md index 23475c278b..2008de7d6f 100644 --- a/docs/01_nodeos/03_plugins/producer_plugin/index.md +++ b/docs/01_nodeos/03_plugins/producer_plugin/index.md @@ -122,8 +122,6 @@ Config Options for eosio::producer_plugin: transaction queue. Exceeding this value will subjectively drop transaction with resource exhaustion. - --disable-subjective-billing arg (=1) Disable subjective CPU billing for - API/P2P transactions --disable-subjective-account-billing arg Account which is excluded from subjective CPU billing diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 4b041dd047..e7ad9b144e 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -5,6 +5,10 @@ set(SOFTFLOAT_INSTALL_COMPONENT "dev") set(EOSVM_INSTALL_COMPONENT "dev") set(BN256_INSTALL_COMPONENT "dev") +set( Boost_USE_MULTITHREADED ON ) +set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) +add_subdirectory( boost EXCLUDE_FROM_ALL ) + add_subdirectory( libfc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) diff --git a/libraries/appbase b/libraries/appbase index 9c57bbbee4..b75b31e14f 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 9c57bbbee43dbc7f46a558760086b6e8bf8f2940 +Subproject commit b75b31e14f966fa3de6246e120dcba36c6ce5264 diff --git a/libraries/boost b/libraries/boost new file mode 160000 index 0000000000..b6928ae5c9 --- /dev/null +++ b/libraries/boost @@ -0,0 +1 @@ +Subproject commit b6928ae5c92e21a04bbe17a558e6e066dbe632f6 diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 6a41319865..242132824c 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -105,6 +105,7 @@ add_library( eosio_chain wast_to_wasm.cpp wasm_interface.cpp + wasm_interface_collection.cpp wasm_eosio_validation.cpp wasm_eosio_injection.cpp wasm_config.cpp @@ -127,6 +128,7 @@ add_library( eosio_chain protocol_feature_manager.cpp producer_schedule.cpp genesis_intrinsics.cpp + symbol.cpp whitelisted_intrinsics.cpp thread_utils.cpp platform_timer_accuracy.cpp @@ -134,8 +136,17 @@ add_library( eosio_chain ${HEADERS} ) +## Boost::accumulators depends on Boost::numeric_ublas, which is still missing cmake support (see +## https://github.com/boostorg/cmake/issues/39). 
Until this is fixed, manually add Boost::numeric_ublas +## as an interface library +## ---------------------------------------------------------------------------------------------------- +add_library(boost_numeric_ublas INTERFACE) +add_library(Boost::numeric_ublas ALIAS boost_numeric_ublas) + target_link_libraries( eosio_chain PUBLIC bn256 fc chainbase eosio_rapidjson Logging IR WAST WASM softfloat builtins ${CHAIN_EOSVM_LIBRARIES} ${LLVM_LIBS} ${CHAIN_RT_LINKAGE} + Boost::signals2 Boost::hana Boost::property_tree Boost::multi_index Boost::asio Boost::lockfree + Boost::assign Boost::accumulators ) target_include_directories( eosio_chain PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 58e56bcf02..0e6232b8af 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -4,6 +4,7 @@ #include #include #include +#include namespace eosio { namespace chain { @@ -23,11 +24,11 @@ namespace eosio { namespace chain { template inline fc::variant variant_from_stream(fc::datastream& stream, const abi_serializer::yield_function_t& yield) { - fc::yield_function_t y = [&yield](){ yield(0); }; // create yield function matching fc::variant requirements, 0 for recursive depth T temp; fc::raw::unpack( stream, temp ); - y(); - return fc::variant( temp, y ); + yield(0); + // create yield function matching fc::variant requirements, 0 for recursive depth + return fc::variant( temp, [yield](){ yield(0); } ); } template @@ -129,7 +130,7 @@ namespace eosio { namespace chain { } void abi_serializer::set_abi(abi_def abi, const yield_function_t& yield) { - impl::abi_traverse_context ctx(yield); + impl::abi_traverse_context ctx(yield, fc::microseconds{}); EOS_ASSERT(starts_with(abi.version, "eosio::abi/1."), unsupported_abi_version_exception, "ABI has an unsupported version"); @@ -235,7 +236,7 @@ namespace eosio { namespace chain { } bool abi_serializer::is_type(const std::string_view& type, const yield_function_t& yield)const { - impl::abi_traverse_context ctx(yield); + impl::abi_traverse_context ctx(yield, fc::microseconds{}); return _is_type(type, ctx); } @@ -464,23 +465,27 @@ namespace eosio { namespace chain { } fc::variant abi_serializer::binary_to_variant( const std::string_view& type, const bytes& binary, const yield_function_t& yield, bool short_path )const { - impl::binary_to_variant_context ctx(*this, yield, type); + impl::binary_to_variant_context ctx(*this, yield, fc::microseconds{}, type); ctx.short_path = short_path; return _binary_to_variant(type, binary, ctx); } - fc::variant abi_serializer::binary_to_variant( const std::string_view& type, const bytes& binary, const fc::microseconds& max_serialization_time, bool short_path )const { - return binary_to_variant( type, binary, create_yield_function(max_serialization_time), short_path ); + fc::variant abi_serializer::binary_to_variant( const std::string_view& type, const bytes& binary, const fc::microseconds& max_action_data_serialization_time, bool short_path )const { + impl::binary_to_variant_context ctx(*this, create_depth_yield_function(), max_action_data_serialization_time, type); + ctx.short_path = short_path; + return _binary_to_variant(type, binary, ctx); } fc::variant abi_serializer::binary_to_variant( const std::string_view& type, fc::datastream& binary, const yield_function_t& yield, bool short_path )const { - impl::binary_to_variant_context ctx(*this, yield, type); + 
impl::binary_to_variant_context ctx(*this, yield, fc::microseconds{}, type); ctx.short_path = short_path; return _binary_to_variant(type, binary, ctx); } - fc::variant abi_serializer::binary_to_variant( const std::string_view& type, fc::datastream& binary, const fc::microseconds& max_serialization_time, bool short_path )const { - return binary_to_variant( type, binary, create_yield_function(max_serialization_time), short_path ); + fc::variant abi_serializer::binary_to_variant( const std::string_view& type, fc::datastream& binary, const fc::microseconds& max_action_data_serialization_time, bool short_path )const { + impl::binary_to_variant_context ctx(*this, create_depth_yield_function(), max_action_data_serialization_time, type); + ctx.short_path = short_path; + return _binary_to_variant(type, binary, ctx); } void abi_serializer::_variant_to_binary( const std::string_view& type, const fc::variant& var, fc::datastream& ds, impl::variant_to_binary_context& ctx )const @@ -543,14 +548,15 @@ namespace eosio { namespace chain { bool disallow_additional_fields = false; for( uint32_t i = 0; i < st.fields.size(); ++i ) { const auto& field = st.fields[i]; - if( vo.contains( string(field.name).c_str() ) ) { + bool present = vo.contains(string(field.name).c_str()); + if( present || is_optional(field.type) ) { if( disallow_additional_fields ) EOS_THROW( pack_exception, "Unexpected field '${f}' found in input object while processing struct '${p}'", ("f", ctx.maybe_shorten(field.name))("p", ctx.get_path_string()) ); { auto h1 = ctx.push_to_path( impl::field_path_item{ .parent_struct_itr = s_itr, .field_ordinal = i } ); auto h2 = ctx.disallow_extensions_unless( &field == &st.fields.back() ); - _variant_to_binary(_remove_bin_extension(field.type), vo[field.name], ds, ctx); + _variant_to_binary(_remove_bin_extension(field.type), present ? 
vo[field.name] : fc::variant(nullptr), ds, ctx); } } else if( ends_with(field.type, "$") && ctx.extensions_allowed() ) { disallow_additional_fields = true; @@ -603,23 +609,27 @@ namespace eosio { namespace chain { } FC_CAPTURE_AND_RETHROW() } bytes abi_serializer::variant_to_binary( const std::string_view& type, const fc::variant& var, const yield_function_t& yield, bool short_path )const { - impl::variant_to_binary_context ctx(*this, yield, type); + impl::variant_to_binary_context ctx(*this, yield, fc::microseconds{}, type); ctx.short_path = short_path; return _variant_to_binary(type, var, ctx); } - bytes abi_serializer::variant_to_binary( const std::string_view& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path ) const { - return variant_to_binary( type, var, create_yield_function(max_serialization_time), short_path ); + bytes abi_serializer::variant_to_binary( const std::string_view& type, const fc::variant& var, const fc::microseconds& max_action_data_serialization_time, bool short_path ) const { + impl::variant_to_binary_context ctx(*this, create_depth_yield_function(), max_action_data_serialization_time, type); + ctx.short_path = short_path; + return _variant_to_binary(type, var, ctx); } void abi_serializer::variant_to_binary( const std::string_view& type, const fc::variant& var, fc::datastream& ds, const yield_function_t& yield, bool short_path )const { - impl::variant_to_binary_context ctx(*this, yield, type); + impl::variant_to_binary_context ctx(*this, yield, fc::microseconds{}, type); ctx.short_path = short_path; _variant_to_binary(type, var, ds, ctx); } - void abi_serializer::variant_to_binary( const std::string_view& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time, bool short_path ) const { - variant_to_binary( type, var, create_yield_function(max_serialization_time), short_path ); + void abi_serializer::variant_to_binary( const std::string_view& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_action_data_serialization_time, bool short_path ) const { + impl::variant_to_binary_context ctx(*this, create_depth_yield_function(), max_action_data_serialization_time, type); + ctx.short_path = short_path; + _variant_to_binary(type, var, ds, ctx); } type_name abi_serializer::get_action_type(name action)const { @@ -650,9 +660,7 @@ namespace eosio { namespace chain { namespace impl { fc::scoped_exit> abi_traverse_context::enter_scope() { - std::function callback = [old_recursion_depth=recursion_depth, this](){ - recursion_depth = old_recursion_depth; - }; + std::function callback = [this](){ --recursion_depth; }; ++recursion_depth; yield( recursion_depth ); diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 10aa7e9066..6dc0f9a1b6 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include #include @@ -1094,4 +1094,19 @@ action_name apply_context::get_sender() const { return action_name(); } +// Context | OC? 
+//------------------------------------------------------------------------------- +// Building block | baseline, OC for eosio.* +// Applying block | OC unless a producer, OC for eosio.* including producers +// Speculative API trx | baseline, OC for eosio.* +// Speculative P2P trx | baseline, OC for eosio.* +// Compute trx | baseline, OC for eosio.* +// Read only trx | OC +bool apply_context::should_use_eos_vm_oc()const { + return receiver.prefix() == config::system_account_name // "eosio"_n, all cases use OC + || (is_applying_block() && !control.is_producer_node()) // validating/applying block + || trx_context.is_read_only(); +} + + } } /// eosio::chain diff --git a/libraries/chain/asset.cpp b/libraries/chain/asset.cpp index f59e36690b..4c55dc01e9 100644 --- a/libraries/chain/asset.cpp +++ b/libraries/chain/asset.cpp @@ -1,5 +1,7 @@ +#include #include #include +#include #include #include @@ -22,11 +24,11 @@ int64_t asset::precision()const { string asset::to_string()const { string sign = amount < 0 ? "-" : ""; int64_t abs_amount = std::abs(amount); - string result = fc::to_string( static_cast(abs_amount) / precision()); + string result = std::to_string( static_cast(abs_amount) / precision()); if( decimals() ) { auto fract = static_cast(abs_amount) % precision(); - result += "." + fc::to_string(precision() + fract).erase(0,1); + result += "." + std::to_string(precision() + fract).erase(0,1); } return sign + result + " " + symbol_name(); } @@ -34,12 +36,12 @@ string asset::to_string()const { asset asset::from_string(const string& from) { try { - string s = fc::trim(from); + string s = boost::algorithm::trim_copy(from); // Find space in order to split amount and symbol auto space_pos = s.find(' '); EOS_ASSERT((space_pos != string::npos), asset_type_exception, "Asset's amount and symbol should be separated with space"); - auto symbol_str = fc::trim(s.substr(space_pos + 1)); + auto symbol_str = boost::algorithm::trim_copy(s.substr(space_pos + 1)); auto amount_str = s.substr(0, space_pos); // Ensure that if decimal point is used (.), decimal fraction is specified diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 87744676ca..082aeb02f6 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -462,8 +462,11 @@ namespace eosio { namespace chain { inline static uint32_t default_initial_version = block_log::max_supported_version; std::mutex mtx; - signed_block_ptr head; - block_id_type head_id; + struct signed_block_with_id { + signed_block_ptr ptr; + block_id_type id; + }; + std::optional head; virtual ~block_log_impl() = default; @@ -482,34 +485,30 @@ namespace eosio { namespace chain { virtual signed_block_ptr read_head() = 0; void update_head(const signed_block_ptr& b, const std::optional& id = {}) { - head = b; - if (id) { - head_id = *id; - } else { - if (head) { - head_id = b->calculate_id(); - } else { - head_id = {}; - } - } + if (b) + head = { b, id ? *id : b->calculate_id() }; + else + head = {}; } }; // block_log_impl /// Would remove pre-existing block log and index, never write blocks into disk. struct empty_block_log final : block_log_impl { + uint32_t first_block_number = std::numeric_limits::max(); + explicit empty_block_log(const std::filesystem::path& log_dir) { std::filesystem::remove(log_dir / "blocks.log"); std::filesystem::remove(log_dir / "blocks.index"); } - uint32_t first_block_num() final { return head ? head->block_num() : 1; } + uint32_t first_block_num() final { return head ? 
head->ptr->block_num() : first_block_number; } void append(const signed_block_ptr& b, const block_id_type& id, const std::vector& packed_block) final { update_head(b, id); } uint64_t get_block_pos(uint32_t block_num) final { return block_log::npos; } void reset(const genesis_state& gs, const signed_block_ptr& first_block) final { update_head(first_block); } - void reset(const chain_id_type& chain_id, uint32_t first_block_num) final {} + void reset(const chain_id_type& chain_id, uint32_t first_block_num) final { first_block_number = first_block_num; } void flush() final {} signed_block_ptr read_block_by_num(uint32_t block_num) final { return {}; }; @@ -589,7 +588,7 @@ namespace eosio { namespace chain { } uint64_t get_block_pos(uint32_t block_num) final { - if (!(head && block_num <= block_header::num_from_id(head_id) && + if (!(head && block_num <= block_header::num_from_id(head->id) && block_num >= working_block_file_first_block_num())) return block_log::npos; index_file.seek(sizeof(uint64_t) * (block_num - index_first_block_num())); @@ -705,7 +704,7 @@ namespace eosio { namespace chain { uint32_t num_blocks; this->block_file.seek_end(-sizeof(uint32_t)); fc::raw::unpack(this->block_file, num_blocks); - return this->head->block_num() - num_blocks + 1; + return this->head->ptr->block_num() - num_blocks + 1; } void reset(uint32_t first_bnum, std::variant&& chain_context, uint32_t version) { @@ -738,7 +737,6 @@ namespace eosio { namespace chain { this->reset(first_block_num, chain_id, block_log::max_supported_version); this->head.reset(); - head_id = {}; } void flush() final { @@ -802,7 +800,7 @@ namespace eosio { namespace chain { size_t copy_from_pos = get_block_pos(first_block_num); block_file.seek_end(-sizeof(uint32_t)); size_t copy_sz = block_file.tellp() - copy_from_pos; - const uint32_t num_blocks_in_log = chain::block_header::num_from_id(head_id) - first_block_num + 1; + const uint32_t num_blocks_in_log = chain::block_header::num_from_id(head->id) - first_block_num + 1; const size_t offset_bytes = copy_from_pos - copy_to_pos; const size_t offset_blocks = first_block_num - index_first_block_num; @@ -990,7 +988,7 @@ namespace eosio { namespace chain { block_file.close(); index_file.close(); - catalog.add(preamble.first_block_num, this->head->block_num(), block_file.get_file_path().parent_path(), + catalog.add(preamble.first_block_num, this->head->ptr->block_num(), block_file.get_file_path().parent_path(), "blocks"); using std::swap; @@ -1005,7 +1003,7 @@ namespace eosio { namespace chain { preamble.ver = block_log::max_supported_version; preamble.chain_context = preamble.chain_id(); - preamble.first_block_num = this->head->block_num() + 1; + preamble.first_block_num = this->head->ptr->block_num() + 1; preamble.write_to(block_file); } @@ -1016,7 +1014,7 @@ namespace eosio { namespace chain { } void post_append(uint64_t pos) final { - if (head->block_num() % stride == 0) { + if (head->ptr->block_num() % stride == 0) { split_log(); } } @@ -1119,7 +1117,7 @@ namespace eosio { namespace chain { if ((pos & prune_config.prune_threshold) != (end & prune_config.prune_threshold)) num_blocks_in_log = prune(fc::log_level::debug); else - num_blocks_in_log = chain::block_header::num_from_id(head_id) - first_block_number + 1; + num_blocks_in_log = chain::block_header::num_from_id(head->id) - first_block_number + 1; fc::raw::pack(block_file, num_blocks_in_log); } @@ -1140,7 +1138,7 @@ namespace eosio { namespace chain { uint32_t prune(const fc::log_level& loglevel) { if (!head) return 0; - const 
uint32_t head_num = chain::block_header::num_from_id(head_id); + const uint32_t head_num = chain::block_header::num_from_id(head->id); if (head_num - first_block_number < prune_config.prune_blocks) return head_num - first_block_number + 1; @@ -1250,12 +1248,12 @@ namespace eosio { namespace chain { signed_block_ptr block_log::head() const { std::lock_guard g(my->mtx); - return my->head; + return my->head ? my->head->ptr : signed_block_ptr{}; } - block_id_type block_log::head_id() const { + std::optional block_log::head_id() const { std::lock_guard g(my->mtx); - return my->head_id; + return my->head ? my->head->id : std::optional{}; } uint32_t block_log::first_block_num() const { @@ -1371,16 +1369,61 @@ namespace eosio { namespace chain { } // static - std::optional block_log::extract_genesis_state(const std::filesystem::path& block_dir) { - std::filesystem::path p(block_dir / "blocks.log"); - for_each_file_in_dir_matches(block_dir, R"(blocks-1-\d+\.log)", - [&p](std::filesystem::path log_path) { p = std::move(log_path); }); - return block_log_data(p).get_genesis_state(); + std::optional block_log::extract_chain_context(const std::filesystem::path& block_dir, + const std::filesystem::path& retained_dir) { + std::filesystem::path first_block_file; + if (!retained_dir.empty() && std::filesystem::exists(retained_dir)) { + for_each_file_in_dir_matches(retained_dir, R"(blocks-1-\d+\.log)", + [&](std::filesystem::path log_path) { + first_block_file = std::move(log_path); + }); + } + + if (first_block_file.empty() && std::filesystem::exists(block_dir / "blocks.log")) { + first_block_file = block_dir / "blocks.log"; + } + + if (!first_block_file.empty()) { + return block_log_data(first_block_file).get_preamble().chain_context; + } + + if (!retained_dir.empty() && std::filesystem::exists(retained_dir)) { + const std::regex my_filter(R"(blocks-\d+-\d+\.log)"); + std::smatch what; + std::filesystem::directory_iterator end_itr; // Default ctor yields past-the-end + for (std::filesystem::directory_iterator p(retained_dir); p != end_itr; ++p) { + // Skip if not a file + if (!std::filesystem::is_regular_file(p->status())) + continue; + // skip if it does not match the pattern + std::string file = p->path().filename().string(); + if (!std::regex_match(file, what, my_filter)) + continue; + return block_log_data(p->path()).chain_id(); + } + } + return {}; + } + + // static + std::optional block_log::extract_genesis_state(const std::filesystem::path& block_dir, + const std::filesystem::path& retained_dir) { + auto context = extract_chain_context(block_dir, retained_dir); + if (!context || std::holds_alternative(*context)) + return {}; + return std::get(*context); } // static - chain_id_type block_log::extract_chain_id(const std::filesystem::path& data_dir) { - return block_log_data(data_dir / "blocks.log").chain_id(); + std::optional block_log::extract_chain_id(const std::filesystem::path& block_dir, + const std::filesystem::path& retained_dir) { + auto context = extract_chain_context(block_dir, retained_dir); + if (!context) + return {}; + return std::visit(overloaded{ + [](const chain_id_type& id){ return id; }, + [](const genesis_state& gs){ return gs.compute_chain_id(); } + } , *context); } // static @@ -1538,7 +1581,7 @@ namespace eosio { namespace chain { ilog("blocks.log and blocks.index agree on number of blocks"); if (interval == 0) { - interval = std::max((log_bundle.log_index.num_blocks() + 7u) >> 3, 1u); + interval = std::max((log_bundle.log_index.num_blocks() + 7) >> 3, 1U); } uint32_t 
expected_block_num = log_bundle.log_data.first_block_num(); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 9a01f45cdc..c3fa424eaf 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -239,6 +240,7 @@ struct controller_impl { controller::config conf; const chain_id_type chain_id; // read by thread_pool threads, value will not be changed bool replaying = false; + bool is_producer_node = false; // true if node is configured as a block producer db_read_mode read_mode = db_read_mode::HEAD; bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. auth checks) cannot be skipped std::optional subjective_cpu_leeway; @@ -249,14 +251,11 @@ struct controller_impl { deep_mind_handler* deep_mind_logger = nullptr; bool okay_to_print_integrity_hash_on_stop = false; - std::thread::id main_thread_id; thread_local static platform_timer timer; // a copy for main thread and each read-only thread #if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) thread_local static vm::wasm_allocator wasm_alloc; // a copy for main thread and each read-only thread #endif - wasm_interface wasmif; // used by main thread and all threads for EOSVMOC - std::mutex threaded_wasmifs_mtx; - std::unordered_map> threaded_wasmifs; // one for each read-only thread, used by eos-vm and eos-vm-jit + wasm_interface_collection wasm_if_collect; app_window_type app_window = app_window_type::write; typedef pair handler_key; @@ -315,8 +314,7 @@ struct controller_impl { chain_id( chain_id ), read_mode( cfg.read_mode ), thread_pool(), - main_thread_id( std::this_thread::get_id() ), - wasmif( conf.wasm_runtime, conf.eosvmoc_tierup, db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty() ) + wasm_if_collect( conf.wasm_runtime, conf.eosvmoc_tierup, db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty() ) { fork_db.open( [this]( block_timestamp_type timestamp, const flat_set& cur_features, @@ -342,12 +340,7 @@ struct controller_impl { set_activation_handler(); self.irreversible_block.connect([this](const block_state_ptr& bsp) { - // producer_plugin has already asserted irreversible_block signal is - // called in write window - wasmif.current_lib(bsp->block_num); - for (auto& w: threaded_wasmifs) { - w.second->current_lib(bsp->block_num); - } + wasm_if_collect.current_lib(bsp->block_num); }); @@ -402,11 +395,13 @@ struct controller_impl { } } - void dmlog_applied_transaction(const transaction_trace_ptr& t) { + void dmlog_applied_transaction(const transaction_trace_ptr& t, const signed_transaction* trx = nullptr) { // dmlog_applied_transaction is called by push_scheduled_transaction // where transient transactions are not possible, and by push_transaction // only when the transaction is not transient if (auto dm_logger = get_deep_mind_logger(false)) { + if (trx && is_onblock(*t)) + dm_logger->on_onblock(*trx); dm_logger->on_applied_transaction(self.head_block_num() + 1, t); } } @@ -414,10 +409,10 @@ struct controller_impl { void log_irreversible() { EOS_ASSERT( fork_db.root(), fork_database_exception, "fork database not properly initialized" ); - const block_id_type log_head_id = blog.head_id(); - const bool valid_log_head = !log_head_id.empty(); + const std::optional log_head_id = blog.head_id(); + const bool valid_log_head = !!log_head_id; - const auto lib_num = valid_log_head ? 
block_header::num_from_id(log_head_id) : (blog.first_block_num() - 1); + const auto lib_num = valid_log_head ? block_header::num_from_id(*log_head_id) : (blog.first_block_num() - 1); auto root_id = fork_db.root()->id; @@ -425,7 +420,8 @@ struct controller_impl { EOS_ASSERT( root_id == log_head_id, fork_database_exception, "fork database root does not match block log head" ); } else { EOS_ASSERT( fork_db.root()->block_num == lib_num, fork_database_exception, - "empty block log expects the first appended block to build off a block that is not the fork database root. root block number: ${block_num}, lib: ${lib_num}", ("block_num", fork_db.root()->block_num) ("lib_num", lib_num) ); + "The first block ${lib_num} when starting with an empty block log should be the block after fork database root ${bn}.", + ("lib_num", lib_num)("bn", fork_db.root()->block_num) ); } const auto fork_head = fork_db_head(); @@ -447,8 +443,6 @@ struct controller_impl { if( read_mode == db_read_mode::IRREVERSIBLE ) { controller::block_report br; apply_block( br, *bitr, controller::block_status::complete, trx_meta_cache_lookup{} ); - head = (*bitr); - fork_db.mark_valid( head ); } emit( self.irreversible_block, *bitr ); @@ -560,7 +554,10 @@ struct controller_impl { ilog( "no irreversible blocks need to be replayed" ); } - if( !except_ptr && !check_shutdown() && fork_db.head() ) { + if (snapshot_head_block != 0 && !blog_head) { + // loading from snapshot without a block log so fork_db can't be considered valid + fork_db.reset( *head ); + } else if( !except_ptr && !check_shutdown() && fork_db.head() ) { auto head_block_num = head->block_num; auto branch = fork_db.fetch_branch( fork_db.head()->id ); int rev = 0; @@ -591,19 +588,21 @@ struct controller_impl { void startup(std::function shutdown, std::function check_shutdown, const snapshot_reader_ptr& snapshot) { EOS_ASSERT( snapshot, snapshot_exception, "No snapshot reader provided" ); this->shutdown = shutdown; - ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); try { snapshot->validate(); if( auto blog_head = blog.head() ) { + ilog( "Starting initialization from snapshot and block log ${b}-${e}, this may take a significant amount of time", + ("b", blog.first_block_num())("e", blog_head->block_num()) ); read_from_snapshot( snapshot, blog.first_block_num(), blog_head->block_num() ); } else { + ilog( "Starting initialization from snapshot and no block log, this may take a significant amount of time" ); read_from_snapshot( snapshot, 0, std::numeric_limits::max() ); - const uint32_t lib_num = head->block_num; - EOS_ASSERT( lib_num > 0, snapshot_exception, + EOS_ASSERT( head->block_num > 0, snapshot_exception, "Snapshot indicates controller head at block number 0, but that is not allowed. " "Snapshot is invalid." 
); - blog.reset( chain_id, lib_num + 1 ); + blog.reset( chain_id, head->block_num + 1 ); } + ilog( "Snapshot loaded, lib: ${lib}", ("lib", head->block_num) ); init(check_shutdown); ilog( "Finished initialization from snapshot" ); @@ -1176,7 +1175,7 @@ struct controller_impl { transaction_checktime_timer trx_timer(timer); const packed_transaction trx( std::move( etrx ) ); - transaction_context trx_context( self, trx, std::move(trx_timer), start ); + transaction_context trx_context( self, trx, trx.id(), std::move(trx_timer), start ); if (auto dm_logger = get_deep_mind_logger(trx_context.is_transient())) { dm_logger->on_onerror(etrx); @@ -1342,7 +1341,7 @@ struct controller_impl { uint32_t cpu_time_to_bill_us = billed_cpu_time_us; transaction_checktime_timer trx_timer( timer ); - transaction_context trx_context( self, *trx->packed_trx(), std::move(trx_timer) ); + transaction_context trx_context( self, *trx->packed_trx(), gtrx.trx_id, std::move(trx_timer) ); trx_context.leeway = fc::microseconds(0); // avoid stealing cpu resource trx_context.block_deadline = block_deadline; trx_context.max_transaction_time_subjective = max_transaction_time; @@ -1556,7 +1555,7 @@ struct controller_impl { const signed_transaction& trn = trx->packed_trx()->get_signed_transaction(); transaction_checktime_timer trx_timer(timer); - transaction_context trx_context(self, *trx->packed_trx(), std::move(trx_timer), start, trx->get_trx_type()); + transaction_context trx_context(self, *trx->packed_trx(), trx->id(), std::move(trx_timer), start, trx->get_trx_type()); if ((bool)subjective_cpu_leeway && self.is_speculative_block()) { trx_context.leeway = *subjective_cpu_leeway; } @@ -1627,7 +1626,7 @@ struct controller_impl { emit(self.accepted_transaction, trx); } - dmlog_applied_transaction(trace); + dmlog_applied_transaction(trace, &trn); emit(self.applied_transaction, std::tie(trace, trx->packed_trx())); } } @@ -1930,7 +1929,7 @@ struct controller_impl { /** * @post regardless of the success of commit block there is no active pending block */ - void commit_block( bool add_to_fork_db ) { + void commit_block( controller::block_status s ) { auto reset_pending_on_exit = fc::make_scoped_exit([this]{ pending.reset(); }); @@ -1939,24 +1938,26 @@ struct controller_impl { EOS_ASSERT( std::holds_alternative(pending->_block_stage), block_validate_exception, "cannot call commit_block until pending block is completed" ); - auto bsp = std::get(pending->_block_stage)._block_state; + const auto& bsp = std::get(pending->_block_stage)._block_state; - if( add_to_fork_db ) { + if( s == controller::block_status::incomplete ) { fork_db.add( bsp ); fork_db.mark_valid( bsp ); emit( self.accepted_block_header, bsp ); - head = fork_db.head(); - EOS_ASSERT( bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); + EOS_ASSERT( bsp == fork_db.head(), fork_database_exception, "committed block did not become the new head in fork database"); + } else if (s != controller::block_status::irreversible) { + fork_db.mark_valid( bsp ); } + head = bsp; // at block level, no transaction specific logging is possible - if (auto dm_logger = get_deep_mind_logger(false)) { + if (auto* dm_logger = get_deep_mind_logger(false)) { dm_logger->on_accepted_block(bsp); } emit( self.accepted_block, bsp ); - if( add_to_fork_db ) { + if( s == controller::block_status::incomplete ) { log_irreversible(); } } catch (...) 
{ @@ -2156,7 +2157,7 @@ struct controller_impl { pending->_block_stage = completed_block{ bsp }; br = pending->_block_report; // copy before commit block destroys pending - commit_block(false); + commit_block(s); br.total_time = fc::time_point::now() - start; return; } catch ( const std::bad_alloc& ) { @@ -2308,7 +2309,6 @@ struct controller_impl { controller::block_report br; if( s == controller::block_status::irreversible ) { apply_block( br, bsp, s, trx_meta_cache_lookup{} ); - head = bsp; // On replay, log_irreversible is not called and so no irreversible_block signal is emitted. // So emit it explicitly here. @@ -2334,8 +2334,6 @@ struct controller_impl { if( new_head->header.previous == head->id ) { try { apply_block( br, new_head, s, trx_lookup ); - fork_db.mark_valid( new_head ); - head = new_head; } catch ( const std::exception& e ) { fork_db.remove( new_head->id ); throw; @@ -2368,8 +2366,6 @@ struct controller_impl { br = controller::block_report{}; apply_block( br, *ritr, (*ritr)->is_valid() ? controller::block_status::validated : controller::block_status::complete, trx_lookup ); - fork_db.mark_valid( *ritr ); - head = *ritr; } catch ( const std::bad_alloc& ) { throw; } catch ( const boost::interprocess::bad_alloc& ) { @@ -2400,7 +2396,6 @@ struct controller_impl { for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { br = controller::block_report{}; apply_block( br, *ritr, controller::block_status::validated /* we previously validated these blocks*/, trx_lookup ); - head = *ritr; } std::rethrow_exception(except); } // end if exception @@ -2665,11 +2660,6 @@ struct controller_impl { trx.set_reference_block( self.head_block_id() ); } - // onblock transaction cannot be transient - if (auto dm_logger = get_deep_mind_logger(false)) { - dm_logger->on_onblock(trx); - } - return trx; } @@ -2682,31 +2672,6 @@ struct controller_impl { return (blog.first_block_num() != 0) ? 
blog.first_block_num() : fork_db.root()->block_num; } -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - bool is_eos_vm_oc_enabled() const { - return ( conf.eosvmoc_tierup || conf.wasm_runtime == wasm_interface::vm_type::eos_vm_oc ); - } -#endif - - // only called from non-main threads (read-only trx execution threads) - // when producer_plugin starts them - void init_thread_local_data() { - EOS_ASSERT( !is_on_main_thread(), misc_exception, "init_thread_local_data called on the main thread"); -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if ( is_eos_vm_oc_enabled() ) - // EOSVMOC needs further initialization of its thread local data - wasmif.init_thread_local_data(); - else -#endif - { - std::lock_guard g(threaded_wasmifs_mtx); - // Non-EOSVMOC needs a wasmif per thread - threaded_wasmifs[std::this_thread::get_id()] = std::make_unique( conf.wasm_runtime, conf.eosvmoc_tierup, db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty()); - } - } - - bool is_on_main_thread() { return main_thread_id == std::this_thread::get_id(); }; - void set_to_write_window() { app_window = app_window_type::write; } @@ -2717,25 +2682,22 @@ struct controller_impl { return app_window == app_window_type::write; } - wasm_interface& get_wasm_interface() { - if ( is_on_main_thread() #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - || is_eos_vm_oc_enabled() + bool is_eos_vm_oc_enabled() const { + return wasm_if_collect.is_eos_vm_oc_enabled(); + } #endif - ) - return wasmif; - else - return *threaded_wasmifs[std::this_thread::get_id()]; + + void init_thread_local_data() { + wasm_if_collect.init_thread_local_data(db, conf.state_dir, conf.eosvmoc_config, !conf.profile_accounts.empty()); + } + + wasm_interface_collection& get_wasm_interface() { + return wasm_if_collect; } void code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num) { - // The caller of this function apply_eosio_setcode has already asserted that - // the transaction is not a read-only trx, which implies we are - // in write window. 
Safe to call threaded_wasmifs's code_block_num_last_used - wasmif.code_block_num_last_used(code_hash, vm_type, vm_version, block_num); - for (auto& w: threaded_wasmifs) { - w.second->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); - } + wasm_if_collect.code_block_num_last_used(code_hash, vm_type, vm_version, block_num); } block_state_ptr fork_db_head() const; @@ -2999,7 +2961,7 @@ block_state_ptr controller::finalize_block( block_report& br, const signer_callb void controller::commit_block() { validate_db_available_size(); - my->commit_block(true); + my->commit_block(block_status::incomplete); } deque controller::abort_block() { @@ -3432,7 +3394,7 @@ const apply_handler* controller::find_apply_handler( account_name receiver, acco } return nullptr; } -wasm_interface& controller::get_wasm_interface() { +wasm_interface_collection& controller::get_wasm_interface() { return my->get_wasm_interface(); } @@ -3612,7 +3574,6 @@ vm::wasm_allocator& controller::get_wasm_allocator() { return my->wasm_alloc; } #endif - #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED bool controller::is_eos_vm_oc_enabled() const { return my->is_eos_vm_oc_enabled(); @@ -3720,6 +3681,14 @@ void controller::replace_account_keys( name account, name permission, const publ rlm.verify_account_ram_usage(account); } +void controller::set_producer_node(bool is_producer_node) { + my->is_producer_node = is_producer_node; +} + +bool controller::is_producer_node()const { + return my->is_producer_node; +} + void controller::set_db_read_only_mode() { mutable_db().set_read_only_mode(); } diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index eef8795d29..3cad62bcf5 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -6,6 +6,7 @@ #include #include #include +#include namespace eosio::chain { @@ -23,6 +24,7 @@ namespace impl { struct abi_traverse_context_with_path; struct binary_to_variant_context; struct variant_to_binary_context; + struct action_data_to_variant_context; } /** @@ -48,7 +50,6 @@ struct abi_serializer { bool is_szarray(const std::string_view& type)const; bool is_optional(const std::string_view& type)const; bool is_type( const std::string_view& type, const yield_function_t& yield )const; - [[deprecated("use the overload with yield_function_t[=create_yield_function(max_serialization_time)]")]] bool is_type(const std::string_view& type, const fc::microseconds& max_serialization_time)const; bool is_builtin_type(const std::string_view& type)const; bool is_integer(const std::string_view& type) const; @@ -67,33 +68,29 @@ struct abi_serializer { std::optional get_error_message( uint64_t error_code )const; fc::variant binary_to_variant( const std::string_view& type, const bytes& binary, const yield_function_t& yield, bool short_path = false )const; - [[deprecated("use the overload with yield_function_t[=create_yield_function(max_serialization_time)]")]] - fc::variant binary_to_variant( const std::string_view& type, const bytes& binary, const fc::microseconds& max_serialization_time, bool short_path = false )const; + fc::variant binary_to_variant( const std::string_view& type, const bytes& binary, const fc::microseconds& max_action_data_serialization_time, bool short_path = false )const; fc::variant binary_to_variant( const std::string_view& type, fc::datastream& binary, const yield_function_t& yield, bool short_path = false )const; - [[deprecated("use the overload with 
yield_function_t[=create_yield_function(max_serialization_time)]")]] - fc::variant binary_to_variant( const std::string_view& type, fc::datastream& binary, const fc::microseconds& max_serialization_time, bool short_path = false )const; + fc::variant binary_to_variant( const std::string_view& type, fc::datastream& binary, const fc::microseconds& max_action_data_serialization_time, bool short_path = false )const; - [[deprecated("use the overload with yield_function_t[=create_yield_function(max_serialization_time)]")]] - bytes variant_to_binary( const std::string_view& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path = false )const; + bytes variant_to_binary( const std::string_view& type, const fc::variant& var, const fc::microseconds& max_action_data_serialization_time, bool short_path = false )const; bytes variant_to_binary( const std::string_view& type, const fc::variant& var, const yield_function_t& yield, bool short_path = false )const; - [[deprecated("use the overload with yield_function_t[=create_yield_function(max_serialization_time)]")]] - void variant_to_binary( const std::string_view& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_serialization_time, bool short_path = false )const; + void variant_to_binary( const std::string_view& type, const fc::variant& var, fc::datastream& ds, const fc::microseconds& max_action_data_serialization_time, bool short_path = false )const; void variant_to_binary( const std::string_view& type, const fc::variant& var, fc::datastream& ds, const yield_function_t& yield, bool short_path = false )const; template static void to_variant( const T& o, fc::variant& vo, const Resolver& resolver, const yield_function_t& yield ); template - [[deprecated("use the overload with yield_function_t[=create_yield_function(max_serialization_time)]")]] - static void to_variant( const T& o, fc::variant& vo, const Resolver& resolver, const fc::microseconds& max_serialization_time ); + static void to_variant( const T& o, fc::variant& vo, const Resolver& resolver, const fc::microseconds& max_action_data_serialization_time ); template static void to_log_variant( const T& o, fc::variant& vo, const Resolver& resolver, const yield_function_t& yield ); + template + static void to_log_variant( const T& o, fc::variant& vo, const Resolver& resolver, const fc::microseconds& max_action_data_serialization_time ); template static void from_variant( const fc::variant& v, T& o, const Resolver& resolver, const yield_function_t& yield ); template - [[deprecated("use the overload with yield_function_t[=create_yield_function(max_serialization_time)]")]] - static void from_variant( const fc::variant& v, T& o, const Resolver& resolver, const fc::microseconds& max_serialization_time ); + static void from_variant( const fc::variant& v, T& o, const Resolver& resolver, const fc::microseconds& max_action_data_serialization_time ); template static bool is_empty_abi(const Vec& abi_vec) @@ -120,14 +117,10 @@ struct abi_serializer { static constexpr size_t max_recursion_depth = 32; // arbitrary depth to prevent infinite recursion // create standard yield function that checks for max_serialization_time and max_recursion_depth. - // now() deadline caputered at time of this call + // restricts serialization time from creation of yield function until serialization is complete. 
+ // now() deadline captured at time of this call static yield_function_t create_yield_function(const fc::microseconds& max_serialization_time) { - fc::time_point deadline = fc::time_point::now(); - if( max_serialization_time > fc::microseconds::maximum() - deadline.time_since_epoch() ) { - deadline = fc::time_point::maximum(); - } else { - deadline += max_serialization_time; - } + fc::time_point deadline = fc::time_point::now().safe_add(max_serialization_time); return [max_serialization_time, deadline](size_t recursion_depth) { EOS_ASSERT( recursion_depth < max_recursion_depth, abi_recursion_depth_exception, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); @@ -137,6 +130,13 @@ struct abi_serializer { }; } + static yield_function_t create_depth_yield_function() { + return [](size_t recursion_depth) { + EOS_ASSERT( recursion_depth < max_recursion_depth, abi_recursion_depth_exception, + "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) ); + }; + } + private: map> typedefs; @@ -172,8 +172,9 @@ struct abi_serializer { namespace impl { const static size_t hex_log_max_size = 64; struct abi_traverse_context { - explicit abi_traverse_context( abi_serializer::yield_function_t yield ) + abi_traverse_context( abi_serializer::yield_function_t yield, fc::microseconds max_action_data_serialization ) : yield(std::move( yield )) + , max_action_serialization_time(max_action_data_serialization) { } @@ -187,7 +188,9 @@ namespace impl { protected: abi_serializer::yield_function_t yield; - size_t recursion_depth = 0; + // if set then restricts each individual action data serialization + fc::microseconds max_action_serialization_time; + size_t recursion_depth = 1; bool log = false; }; @@ -226,8 +229,8 @@ namespace impl { using path_item = std::variant; struct abi_traverse_context_with_path : public abi_traverse_context { - abi_traverse_context_with_path( const abi_serializer& abis, abi_serializer::yield_function_t yield, const std::string_view& type ) - : abi_traverse_context( std::move( yield ) ), abis(abis) + abi_traverse_context_with_path( const abi_serializer& abis, abi_serializer::yield_function_t yield, fc::microseconds max_action_data_serialization_time, const std::string_view& type ) + : abi_traverse_context( std::move( yield ), max_action_data_serialization_time ), abis(abis) { set_path_root(type); } @@ -263,6 +266,22 @@ namespace impl { using abi_traverse_context_with_path::abi_traverse_context_with_path; }; + struct action_data_to_variant_context : public binary_to_variant_context { + action_data_to_variant_context( const abi_serializer& abis, const abi_traverse_context& ctx, const std::string_view& type ) + : binary_to_variant_context(abis, ctx, type) + { + short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place + if (max_action_serialization_time.count() > 0) { + fc::time_point deadline = fc::time_point::now().safe_add(max_action_serialization_time); + yield = [deadline, y=yield, max=max_action_serialization_time](size_t depth) { + y(depth); // call provided yield that might include an overall time limit or not + EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, + "serialization action data time limit ${t}us exceeded", ("t", max) ); + }; + } + } + }; + struct variant_to_binary_context : public abi_traverse_context_with_path { using abi_traverse_context_with_path::abi_traverse_context_with_path; @@ -491,11 +510,10 @@ namespace impl { auto type 
= abi.get_action_type(act.name); if (!type.empty()) { try { - binary_to_variant_context _ctx(abi, ctx, type); - _ctx.short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place + action_data_to_variant_context _ctx(abi, ctx, type); mvo( "data", abi._binary_to_variant( type, act.data, _ctx )); } catch(...) { - // any failure to serialize data, then leave as not serailzed + // any failure to serialize data, then leave as not serialized set_hex_data(mvo, "data", act.data); } } else { @@ -552,8 +570,7 @@ namespace impl { const abi_serializer& abi = *abi_optional; auto type = abi.get_action_result_type(act.name); if (!type.empty()) { - binary_to_variant_context _ctx(abi, ctx, type); - _ctx.short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place + action_data_to_variant_context _ctx(abi, ctx, type); mvo( "return_value_data", abi._binary_to_variant( type, act_trace.return_value, _ctx )); } } @@ -650,7 +667,7 @@ namespace impl { for (auto feature : new_protocol_features) { mutable_variant_object feature_mvo; add(feature_mvo, "feature_digest", feature, resolver, ctx); - pf_array.push_back(feature_mvo); + pf_array.push_back(std::move(feature_mvo)); } mvo("new_protocol_features", pf_array); } @@ -719,7 +736,7 @@ namespace impl { * and can be degraded to the normal ::from_variant(...) processing */ template = 1> - static void extract( const fc::variant& v, M& o, Resolver, abi_traverse_context& ctx ) + static void extract( const fc::variant& v, M& o, const Resolver&, abi_traverse_context& ctx ) { auto h = ctx.enter_scope(); from_variant(v, o); @@ -808,13 +825,14 @@ namespace impl { from_variant(data, act.data); valid_empty_data = act.data.empty(); } else if ( data.is_object() ) { - auto abi = resolver(act.account); - if (abi) { - auto type = abi->get_action_type(act.name); + auto abi_optional = resolver(act.account); + if (abi_optional) { + const abi_serializer& abi = *abi_optional; + auto type = abi.get_action_type(act.name); if (!type.empty()) { - variant_to_binary_context _ctx(*abi, ctx, type); + variant_to_binary_context _ctx(abi, ctx, type); _ctx.short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place - act.data = std::move( abi->_variant_to_binary( type, data, _ctx )); + act.data = abi._variant_to_binary( type, data, _ctx ); valid_empty_data = act.data.empty(); } } @@ -941,42 +959,55 @@ namespace impl { template void abi_serializer::to_variant( const T& o, fc::variant& vo, const Resolver& resolver, const yield_function_t& yield ) try { mutable_variant_object mvo; - impl::abi_traverse_context ctx( yield ); + impl::abi_traverse_context ctx( yield, fc::microseconds{} ); impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); vo = std::move(mvo["_"]); } FC_RETHROW_EXCEPTIONS(error, "Failed to serialize: ${type}", ("type", boost::core::demangle( typeid(o).name() ) )) template -void abi_serializer::to_variant( const T& o, fc::variant& vo, const Resolver& resolver, const fc::microseconds& max_serialization_time ) { - to_variant( o, vo, resolver, create_yield_function(max_serialization_time) ); -} +void abi_serializer::to_variant( const T& o, fc::variant& vo, const Resolver& resolver, const fc::microseconds& max_action_data_serialization_time ) try { + mutable_variant_object mvo; + impl::abi_traverse_context ctx( create_depth_yield_function(), max_action_data_serialization_time ); + impl::abi_to_variant::add(mvo, "_", o, 
resolver, ctx); + vo = std::move(mvo["_"]); +} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize: ${type}", ("type", boost::core::demangle( typeid(o).name() ) )) template void abi_serializer::to_log_variant( const T& o, fc::variant& vo, const Resolver& resolver, const yield_function_t& yield ) try { mutable_variant_object mvo; - impl::abi_traverse_context ctx( yield ); + impl::abi_traverse_context ctx( yield, fc::microseconds{} ); ctx.logging(); impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); vo = std::move(mvo["_"]); } FC_RETHROW_EXCEPTIONS(error, "Failed to serialize: ${type}", ("type", boost::core::demangle( typeid(o).name() ) )) +template +void abi_serializer::to_log_variant( const T& o, fc::variant& vo, const Resolver& resolver, const fc::microseconds& max_action_data_serialization_time ) try { + mutable_variant_object mvo; + impl::abi_traverse_context ctx( create_depth_yield_function(), max_action_data_serialization_time ); + ctx.logging(); + impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); + vo = std::move(mvo["_"]); +} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize: ${type}", ("type", boost::core::demangle( typeid(o).name() ) )) template void abi_serializer::from_variant( const fc::variant& v, T& o, const Resolver& resolver, const yield_function_t& yield ) try { - impl::abi_traverse_context ctx( yield ); + impl::abi_traverse_context ctx( yield, fc::microseconds{} ); impl::abi_from_variant::extract(v, o, resolver, ctx); } FC_RETHROW_EXCEPTIONS(error, "Failed to deserialize variant", ("variant",v)) template -void abi_serializer::from_variant( const fc::variant& v, T& o, const Resolver& resolver, const fc::microseconds& max_serialization_time ) { - from_variant( v, o, resolver, create_yield_function(max_serialization_time) ); -} +void abi_serializer::from_variant( const fc::variant& v, T& o, const Resolver& resolver, const fc::microseconds& max_action_data_serialization_time ) try { + impl::abi_traverse_context ctx( create_depth_yield_function(), max_action_data_serialization_time ); + impl::abi_from_variant::extract(v, o, resolver, ctx); +} FC_RETHROW_EXCEPTIONS(error, "Failed to deserialize variant", ("variant",v)) using abi_serializer_cache_t = std::unordered_map>; +using resolver_fn_t = std::function(const account_name& name)>; class abi_resolver { public: - abi_resolver(abi_serializer_cache_t&& abi_serializers) : + explicit abi_resolver(abi_serializer_cache_t&& abi_serializers) : abi_serializers(std::move(abi_serializers)) {} @@ -993,7 +1024,7 @@ class abi_resolver { class abi_serializer_cache_builder { public: - abi_serializer_cache_builder(std::function(const account_name& name)> resolver) : + explicit abi_serializer_cache_builder(resolver_fn_t resolver) : resolver_(std::move(resolver)) { } @@ -1037,8 +1068,47 @@ class abi_serializer_cache_builder { } } - std::function(const account_name& name)> resolver_; + resolver_fn_t resolver_; abi_serializer_cache_t abi_serializers; }; +/* + * This is equivalent to a resolver, except that every time the abi_serializer for an account + * is retrieved, it is stored in an unordered_map, so we won't waste time retrieving it again. + * This is handy when parsing packed_transactions received in a fc::variant.
+ */ +class caching_resolver { +public: + explicit caching_resolver(resolver_fn_t resolver) : + resolver_(std::move(resolver)) + { + } + + // make it non-copyable (we should only move it for performance reasons) + caching_resolver(const caching_resolver&) = delete; + caching_resolver& operator=(const caching_resolver&) = delete; + + std::optional> operator()(const account_name& account) const { + auto it = abi_serializers.find(account); + if (it != abi_serializers.end()) { + if (it->second) + return *it->second; + return {}; + } + auto serializer = resolver_(account); + auto& dest = abi_serializers[account]; // add entry regardless + if (serializer) { + // we got a serializer, so move it into the cache + dest = abi_serializer_cache_t::mapped_type{std::move(*serializer)}; + return *dest; // and return a reference to it + } + return {}; + }; + +private: + const resolver_fn_t resolver_; + mutable abi_serializer_cache_t abi_serializers; +}; + + } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 78a4fa0e0a..090531bfcb 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -598,6 +598,9 @@ class apply_context { action_name get_sender() const; + bool is_applying_block() const { return trx_context.explicit_billed_cpu_time; } + bool should_use_eos_vm_oc()const; + /// Fields: public: diff --git a/libraries/chain/include/eosio/chain/block_log.hpp b/libraries/chain/include/eosio/chain/block_log.hpp index f700aa1b07..10a2a598ad 100644 --- a/libraries/chain/include/eosio/chain/block_log.hpp +++ b/libraries/chain/include/eosio/chain/block_log.hpp @@ -67,7 +67,7 @@ namespace eosio { namespace chain { signed_block_ptr read_head()const; //use blocklog signed_block_ptr head()const; - block_id_type head_id()const; + std::optional head_id()const; uint32_t first_block_num() const; @@ -82,9 +82,17 @@ namespace eosio { namespace chain { static std::filesystem::path repair_log( const std::filesystem::path& data_dir, uint32_t truncate_at_block = 0, const char* reversible_block_dir_name="" ); - static std::optional extract_genesis_state( const std::filesystem::path& data_dir ); + using chain_context = std::variant; + static std::optional extract_chain_context(const std::filesystem::path& data_dir, + const std::filesystem::path& retained_dir); - static chain_id_type extract_chain_id( const std::filesystem::path& data_dir ); + static std::optional + extract_genesis_state(const std::filesystem::path& data_dir, + const std::filesystem::path& retained_dir = std::filesystem::path{}); + + static std::optional + extract_chain_id(const std::filesystem::path& data_dir, + const std::filesystem::path& retained_dir = std::filesystem::path{}); static void construct_index(const std::filesystem::path& block_file_name, const std::filesystem::path& index_file_name); diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index c10aff3e53..68da901025 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -40,6 +40,7 @@ namespace eosio { namespace chain { class account_object; class deep_mind_handler; class subjective_billing; + class wasm_interface_collection; using resource_limits::resource_limits_manager; using apply_handler = std::function; using forked_branch_callback = std::function; @@ -90,7 +91,7 @@ namespace eosio { namespace chain
{ wasm_interface::vm_type wasm_runtime = chain::config::default_wasm_runtime; eosvmoc::config eosvmoc_config; - bool eosvmoc_tierup = false; + wasm_interface::vm_oc_enable eosvmoc_tierup = wasm_interface::vm_oc_enable::oc_auto; db_read_mode read_mode = db_read_mode::HEAD; validation_mode block_validation_mode = validation_mode::FULL; @@ -321,6 +322,8 @@ namespace eosio { namespace chain { #if defined(EOSIO_EOS_VM_RUNTIME_ENABLED) || defined(EOSIO_EOS_VM_JIT_RUNTIME_ENABLED) vm::wasm_allocator& get_wasm_allocator(); +#endif +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED bool is_eos_vm_oc_enabled() const; #endif @@ -346,27 +349,7 @@ namespace eosio { namespace chain { */ const apply_handler* find_apply_handler( account_name contract, scope_name scope, action_name act )const; - wasm_interface& get_wasm_interface(); - - - std::optional get_abi_serializer( account_name n, const abi_serializer::yield_function_t& yield )const { - if( n.good() ) { - try { - const auto& a = get_account( n ); - if( abi_def abi; abi_serializer::to_abi( a.abi, abi )) - return abi_serializer( std::move(abi), yield ); - } FC_CAPTURE_AND_LOG((n)) - } - return std::optional(); - } - - template - fc::variant to_variant_with_abi( const T& obj, const abi_serializer::yield_function_t& yield )const { - fc::variant pretty_output; - abi_serializer::to_variant( obj, pretty_output, - [&]( account_name n ){ return get_abi_serializer( n, yield ); }, yield ); - return pretty_output; - } + wasm_interface_collection& get_wasm_interface(); static chain_id_type extract_chain_id(snapshot_reader& snapshot); @@ -375,6 +358,9 @@ namespace eosio { namespace chain { void replace_producer_keys( const public_key_type& key ); void replace_account_keys( name account, name permission, const public_key_type& key ); + void set_producer_node(bool is_producer_node); + bool is_producer_node()const; + void set_db_read_only_mode(); void unset_db_read_only_mode(); void init_thread_local_data(); diff --git a/libraries/chain/include/eosio/chain/log_catalog.hpp b/libraries/chain/include/eosio/chain/log_catalog.hpp index f4a6265342..70a60574f7 100644 --- a/libraries/chain/include/eosio/chain/log_catalog.hpp +++ b/libraries/chain/include/eosio/chain/log_catalog.hpp @@ -1,17 +1,17 @@ #pragma once -#include -#include #include #include +#include #include +#include namespace eosio { namespace chain { template -void for_each_file_in_dir_matches(const std::filesystem::path& dir, std::string pattern, Lambda&& lambda) { - const std::regex my_filter(pattern); +void for_each_file_in_dir_matches(const std::filesystem::path& dir, std::string_view pattern, Lambda&& lambda) { + const std::regex my_filter(pattern.begin(), pattern.size()); std::smatch what; std::filesystem::directory_iterator end_itr; // Default ctor yields past-the-end for (std::filesystem::directory_iterator p(dir); p != end_itr; ++p) { @@ -36,10 +36,10 @@ struct log_catalog { using block_num_t = uint32_t; struct mapped_type { - block_num_t last_block_num; + block_num_t last_block_num = 0; std::filesystem::path filename_base; }; - using collection_t = boost::container::flat_map; + using collection_t = std::map; using size_type = typename collection_t::size_type; static constexpr size_type npos = std::numeric_limits::max(); @@ -84,9 +84,10 @@ struct log_catalog { archive_dir = make_absolute_dir(log_dir, archive_path); } - for_each_file_in_dir_matches(retained_dir, std::string(name) + suffix_pattern, [this](std::filesystem::path path) { + std::string pattern = std::string(name) + suffix_pattern; + 
for_each_file_in_dir_matches(retained_dir, pattern, [this](std::filesystem::path path) { auto log_path = path; - auto index_path = path.replace_extension("index"); + const auto& index_path = path.replace_extension("index"); auto path_without_extension = log_path.parent_path() / log_path.stem().string(); LogData log(log_path); @@ -94,8 +95,10 @@ struct log_catalog { verifier.verify(log, log_path); // check if index file matches the log file - if (!index_matches_data(index_path, log)) - log.construct_index(index_path); + if (!index_matches_data(index_path, log)) { + ilog("Recreating index for: ${i}", ("i", index_path.string())); + log.construct_index( index_path ); + } auto existing_itr = collection.find(log.first_block_num()); if (existing_itr != collection.end()) { @@ -112,7 +115,7 @@ struct log_catalog { } } - collection.insert_or_assign(log.first_block_num(), mapped_type{log.last_block_num(), path_without_extension}); + collection.insert_or_assign(log.first_block_num(), mapped_type{log.last_block_num(), std::move(path_without_extension)}); }); } @@ -120,24 +123,19 @@ struct log_catalog { if (!std::filesystem::exists(index_path)) return false; - auto num_blocks_in_index = std::filesystem::file_size(index_path) / sizeof(uint64_t); - if (num_blocks_in_index != log.num_blocks()) + LogIndex log_i; + log_i.open(index_path); + + if (log_i.num_blocks() != log.num_blocks()) return false; - // make sure the last 8 bytes of index and log matches - fc::cfile index_file; - index_file.set_file_path(index_path); - index_file.open("r"); - index_file.seek_end(-sizeof(uint64_t)); - uint64_t pos; - index_file.read(reinterpret_cast(&pos), sizeof(pos)); - return pos == log.last_block_position(); + return log_i.back() == log.last_block_position(); } std::optional get_block_position(uint32_t block_num) { try { if (active_index != npos) { - auto active_item = collection.nth(active_index); + auto active_item = std::next(collection.begin(), active_index); if (active_item->first <= block_num && block_num <= active_item->second.last_block_num) { return log_index.nth_block_position(block_num - log_data.first_block_num()); } @@ -151,7 +149,7 @@ struct log_catalog { auto name = it->second.filename_base; log_data.open(name.replace_extension("log")); log_index.open(name.replace_extension("index")); - active_index = collection.index_of(it); + active_index = std::distance(collection.begin(), it); return log_index.nth_block_position(block_num - log_data.first_block_num()); } return {}; @@ -204,7 +202,7 @@ struct log_catalog { /// Add a new entry into the catalog. /// /// Notice that \c start_block_num must be monotonically increasing between the invocations of this function - /// so that the new entry would be inserted at the end of the flat_map; otherwise, \c active_index would be + /// so that the new entry would be inserted at the 'end' of the map; otherwise, \c active_index would be /// invalidated and the mapping between the log data and their block range would be wrong. This function is only used /// during the splitting of block log. Any other use of this function must ensure that the monotonically /// increasing block num guarantee is met.
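For context, the retained-log scan driven by for_each_file_in_dir_matches above reduces to a regex-filtered directory walk. A minimal, self-contained sketch of that pattern follows; it uses only the standard library, and the helper name and sample file names are illustrative, not the patch's API:

#include <filesystem>
#include <iostream>
#include <regex>
#include <string>
#include <string_view>

// Hand every regular file in `dir` whose name matches `pattern` to `lambda`,
// mirroring the shape of for_each_file_in_dir_matches above.
template <typename Lambda>
void for_each_matching_file(const std::filesystem::path& dir, std::string_view pattern, Lambda&& lambda) {
   const std::regex filter(pattern.begin(), pattern.end()); // iterator-pair ctor, no copy of the pattern
   for (std::filesystem::directory_iterator it(dir), end; it != end; ++it) {
      if (!std::filesystem::is_regular_file(it->status()))
         continue; // skip directories, symlinks, sockets, ...
      const std::string file = it->path().filename().string();
      if (std::regex_match(file, filter))
         lambda(it->path()); // hand matching paths to the caller
   }
}

int main() {
   // e.g. enumerate retained block-log bundles named like "blocks-1-100.log"
   for_each_matching_file(".", R"(blocks-\d+-\d+\.log)",
                          [](const std::filesystem::path& p) { std::cout << p << '\n'; });
}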
@@ -216,23 +214,24 @@ struct log_catalog { std::filesystem::path new_path = retained_dir / buf; rename_bundle(dir / name, new_path); size_type items_to_erase = 0; - collection.emplace(start_block_num, mapped_type{end_block_num, new_path}); + collection.emplace(start_block_num, mapped_type{end_block_num, std::move(new_path)}); if (collection.size() >= max_retained_files) { items_to_erase = max_retained_files > 0 ? collection.size() - max_retained_files : collection.size(); + auto last = std::next( collection.begin(), items_to_erase); - for (auto it = collection.begin(); it < collection.begin() + items_to_erase; ++it) { + for (auto it = collection.begin(); it != last; ++it) { auto orig_name = it->second.filename_base; if (archive_dir.empty()) { // delete the old files when no backup dir is specified std::filesystem::remove(orig_name.replace_extension("log")); std::filesystem::remove(orig_name.replace_extension("index")); } else { - // move the the archive dir + // move the archive dir rename_bundle(orig_name, archive_dir / orig_name.filename()); } } - collection.erase(collection.begin(), collection.begin() + items_to_erase); + collection.erase(collection.begin(), last); active_index = active_index == npos || active_index < items_to_erase ? npos : active_index - items_to_erase; @@ -258,7 +257,7 @@ struct log_catalog { active_index = npos; auto it = collection.upper_bound(block_num); - if (it == collection.begin() || block_num > (it - 1)->second.last_block_num) { + if (it == collection.begin() || block_num > std::prev(it)->second.last_block_num) { std::for_each(it, collection.end(), remove_files); collection.erase(it, collection.end()); return 0; @@ -267,7 +266,7 @@ struct log_catalog { auto name = truncate_it->second.filename_base; std::filesystem::rename(name.replace_extension("log"), new_name.replace_extension("log")); std::filesystem::rename(name.replace_extension("index"), new_name.replace_extension("index")); - std::for_each(truncate_it + 1, collection.end(), remove_files); + std::for_each(std::next(truncate_it), collection.end(), remove_files); auto result = truncate_it->first; collection.erase(truncate_it, collection.end()); return result; diff --git a/libraries/chain/include/eosio/chain/log_index.hpp b/libraries/chain/include/eosio/chain/log_index.hpp index 477134d535..82ca91a0d1 100644 --- a/libraries/chain/include/eosio/chain/log_index.hpp +++ b/libraries/chain/include/eosio/chain/log_index.hpp @@ -31,7 +31,7 @@ class log_index { bool is_open() const { return file_.is_open(); } uint64_t back() { return nth_block_position(num_blocks()-1); } - unsigned num_blocks() const { return num_blocks_; } + uint32_t num_blocks() const { return num_blocks_; } uint64_t nth_block_position(uint32_t n) { file_.seek(n*sizeof(uint64_t)); uint64_t r; diff --git a/libraries/chain/include/eosio/chain/name.hpp b/libraries/chain/include/eosio/chain/name.hpp index 20bdc51549..c1a5b423f0 100644 --- a/libraries/chain/include/eosio/chain/name.hpp +++ b/libraries/chain/include/eosio/chain/name.hpp @@ -75,6 +75,41 @@ namespace eosio::chain { friend constexpr bool operator != ( const name& a, uint64_t b ) { return a.value != b; } constexpr explicit operator bool()const { return value != 0; } + + /** + * Returns the prefix. 
+ * for example: + * "eosio.any" -> "eosio" + * "eosio" -> "eosio" + */ + constexpr name prefix() const { + uint64_t result = value; + bool not_dot_character_seen = false; + uint64_t mask = 0xFull; + + // Get characters one-by-one in name in order from right to left + for (int32_t offset = 0; offset <= 59;) { + auto c = (value >> offset) & mask; + + if (!c) { // if this character is a dot + if (not_dot_character_seen) { // we found the rightmost dot character + result = (value >> offset) << offset; + break; + } + } else { + not_dot_character_seen = true; + } + + if (offset == 0) { + offset += 4; + mask = 0x1Full; + } else { + offset += 5; + } + } + + return name{ result }; + } }; // Each char of the string is encoded into 5-bit chunk and left-shifted diff --git a/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp b/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp index 2aebf62c9b..fd9475d438 100644 --- a/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp +++ b/libraries/chain/include/eosio/chain/snapshot_scheduler.hpp @@ -20,6 +20,8 @@ #include #include +#include + namespace eosio::chain { namespace bmi = boost::multi_index; @@ -38,10 +40,19 @@ class snapshot_scheduler { struct snapshot_request_information { uint32_t block_spacing = 0; uint32_t start_block_num = 0; - uint32_t end_block_num = 0; + uint32_t end_block_num = std::numeric_limits::max(); std::string snapshot_description = ""; }; + // this struct is used to hold request params in an api call + // it differentiates between 0 and empty values + struct snapshot_request_params { + std::optional block_spacing; + std::optional start_block_num; + std::optional end_block_num; + std::optional snapshot_description; + }; + struct snapshot_request_id_information { uint32_t snapshot_request_id = 0; }; @@ -205,6 +216,7 @@ class snapshot_scheduler { FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_information, (head_block_id) (head_block_num) (head_block_time) (version) (snapshot_name)) FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_request_information, (block_spacing) (start_block_num) (end_block_num) (snapshot_description)) +FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_request_params, (block_spacing) (start_block_num) (end_block_num) (snapshot_description)) FC_REFLECT(eosio::chain::snapshot_scheduler::snapshot_request_id_information, (snapshot_request_id)) FC_REFLECT(eosio::chain::snapshot_scheduler::get_snapshot_requests_result, (snapshot_requests)) FC_REFLECT_DERIVED(eosio::chain::snapshot_scheduler::snapshot_schedule_information, (eosio::chain::snapshot_scheduler::snapshot_request_id_information)(eosio::chain::snapshot_scheduler::snapshot_request_information), (pending_snapshots)) diff --git a/libraries/chain/include/eosio/chain/symbol.hpp b/libraries/chain/include/eosio/chain/symbol.hpp index 071505d83d..a8b8eb8918 100644 --- a/libraries/chain/include/eosio/chain/symbol.hpp +++ b/libraries/chain/include/eosio/chain/symbol.hpp @@ -1,12 +1,12 @@ #pragma once #include +#include #include #include #include #include -namespace eosio { - namespace chain { +namespace eosio::chain { /** class symbol represents a token and contains precision and name.
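The next hunk replaces the inline body of symbol::from_string with an out-of-line declaration. For readers skimming the removed lines, here is a rough standalone sketch of the "<precision>,<NAME>" parse it performs, simplified under stated assumptions: plain exceptions stand in for EOS_ASSERT, and the symbol name itself is not validated:

#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>

// Simplified stand-in for the "<precision>,<NAME>" parse, e.g. "4,EOS".
std::pair<uint8_t, std::string> parse_symbol(const std::string& s) {
   const auto comma = s.find(',');
   if (s.empty() || comma == std::string::npos)
      throw std::invalid_argument("expected \"<precision>,<NAME>\"");
   const int p = std::stoi(s.substr(0, comma)); // precision precedes the comma
   if (p < 0 || p > 18)                         // max_precision is 18
      throw std::out_of_range("precision should be <= 18");
   return { static_cast<uint8_t>(p), s.substr(comma + 1) };
}

int main() {
   auto [prec, name] = parse_symbol("4,EOS");
   std::cout << int(prec) << ' ' << name << '\n'; // prints: 4 EOS
}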
@@ -65,20 +65,7 @@ namespace eosio { explicit symbol(uint64_t v = CORE_SYMBOL): m_value(v) { EOS_ASSERT(valid(), symbol_type_exception, "invalid symbol: ${name}", ("name",name())); } - static symbol from_string(const string& from) - { - try { - string s = fc::trim(from); - EOS_ASSERT(!s.empty(), symbol_type_exception, "creating symbol from empty string"); - auto comma_pos = s.find(','); - EOS_ASSERT(comma_pos != string::npos, symbol_type_exception, "missing comma in symbol"); - auto prec_part = s.substr(0, comma_pos); - uint8_t p = fc::to_int64(prec_part); - string name_part = s.substr(comma_pos + 1); - EOS_ASSERT( p <= max_precision, symbol_type_exception, "precision ${p} should be <= 18", ("p", p)); - return symbol(string_to_symbol(p, name_part.c_str())); - } FC_CAPTURE_LOG_AND_RETHROW((from)) - } + static symbol from_string(const string& from); uint64_t value() const { return m_value; } bool valid() const { @@ -184,8 +171,7 @@ namespace eosio { { return std::tie(lhs.sym, lhs.contract) > std::tie(rhs.sym, rhs.contract); } - } // namespace chain -} // namespace eosio +} // namespace eosio::chain namespace fc { inline void to_variant(const eosio::chain::symbol& var, fc::variant& vo) { vo = var.to_string(); } diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp index 9dfd988d0c..d3e9e8a261 100644 --- a/libraries/chain/include/eosio/chain/thread_utils.hpp +++ b/libraries/chain/include/eosio/chain/thread_utils.hpp @@ -37,9 +37,13 @@ namespace eosio { namespace chain { /// Spawn threads, can be re-started after stop(). /// Assumes start()/stop() called from the same thread or externally protected. + /// Blocks until all threads are created and completed their init function, or an exception is thrown + /// during thread startup or an init function. Exceptions thrown during these stages are rethrown from start() + /// but some threads might still have been started. Calling stop() after such a failure is safe. /// @param num_threads is number of threads spawned /// @param on_except is the function to call if io_context throws an exception, is called from thread pool thread. - /// if an empty function then logs and rethrows exception on thread which will terminate. + /// if an empty function then logs and rethrows exception on thread which will terminate. Not called + /// for exceptions during the init function (such exceptions are rethrown from start()) /// @param init is an optional function to call at startup to initialize any data. /// @throw assert_exception if already started and not stopped. void start( size_t num_threads, on_except_t on_except, init_t init = {} ) { @@ -47,9 +51,25 @@ namespace eosio { namespace chain { _ioc_work.emplace( boost::asio::make_work_guard( _ioc ) ); _ioc.restart(); _thread_pool.reserve( num_threads ); - for( size_t i = 0; i < num_threads; ++i ) { - _thread_pool.emplace_back( std::thread( &named_thread_pool::run_thread, this, i, on_except, init ) ); + + std::promise start_complete; + std::atomic threads_remaining = num_threads; + std::exception_ptr pending_exception; + std::mutex pending_exception_mutex; + + try { + for( size_t i = 0; i < num_threads; ++i ) { + _thread_pool.emplace_back( std::thread( &named_thread_pool::run_thread, this, i, on_except, init, std::ref(start_complete), + std::ref(threads_remaining), std::ref(pending_exception), std::ref(pending_exception_mutex) ) ); + } } + catch( ... ) { + /// only an exception from std::thread's ctor should end up here. 
shut down all threads to ensure no + /// potential access to the promise, atomics, etc. above is performed after throwing out of start() + stop(); + throw; + } + start_complete.get_future().get(); } /// destroy work guard, stop io_context, join thread_pool @@ -63,16 +83,42 @@ } private: - void run_thread( size_t i, const on_except_t& on_except, const init_t& init ) { - std::string tn = boost::core::demangle(typeid(this).name()); - auto offset = tn.rfind("::"); - if (offset != std::string::npos) - tn.erase(0, offset+2); - tn = tn.substr(0, tn.find('>')) + "-" + std::to_string( i ); + void run_thread( size_t i, const on_except_t& on_except, const init_t& init, std::promise& start_complete, + std::atomic& threads_remaining, std::exception_ptr& pending_exception, std::mutex& pending_exception_mutex ) { + + std::string tn; + + auto decrement_remaining = [&]() { + if( !--threads_remaining ) { + if( pending_exception ) + start_complete.set_exception( pending_exception ); + else + start_complete.set_value(); + } + }; + + try { + try { + tn = boost::core::demangle(typeid(this).name()); + auto offset = tn.rfind("::"); + if (offset != std::string::npos) + tn.erase(0, offset+2); + tn = tn.substr(0, tn.find('>')) + "-" + std::to_string( i ); + fc::set_thread_name( tn ); + if ( init ) + init(); + } FC_LOG_AND_RETHROW() + } + catch( ... ) { + std::lock_guard l( pending_exception_mutex ); + pending_exception = std::current_exception(); + decrement_remaining(); + return; + } + + decrement_remaining(); + try { - fc::set_os_thread_name( tn ); - if ( init ) - init(); _ioc.run(); } catch( const fc::exception& e ) { if( on_except ) { diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index fb2abc65b7..430defce27 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -37,6 +37,7 @@ namespace eosio { namespace chain { transaction_context( controller& c, const packed_transaction& t, + const transaction_id_type& trx_id, // trx_id may differ from t.id() before replace_deferred transaction_checktime_timer&& timer, fc::time_point start = fc::time_point::now(), transaction_metadata::trx_type type = transaction_metadata::trx_type::input); @@ -127,6 +128,7 @@ namespace eosio { namespace chain { controller& control; const packed_transaction& packed_trx; + const transaction_id_type& id; std::optional undo_session; transaction_trace_ptr trace; fc::time_point start; @@ -184,7 +186,6 @@ namespace eosio { namespace chain { speculative_executed_adjusted_max_transaction_time // prev_billed_cpu_time_us > 0 }; tx_cpu_usage_exceeded_reason tx_cpu_usage_reason = tx_cpu_usage_exceeded_reason::account_cpu_limit; - fc::microseconds tx_cpu_usage_amount; }; } } diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 5481cea3e1..99466395d7 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -370,7 +370,7 @@ namespace eosio::chain { } template - static inline auto has_field( F flags, E field ) + static constexpr auto has_field( F flags, E field ) -> std::enable_if_t< std::is_integral::value && std::is_unsigned::value && std::is_enum::value && std::is_same< F, std::underlying_type_t >::value, bool> { @@ -378,7 +378,7 @@ namespace eosio::chain { } template - static inline auto set_field( F flags, E field, bool value = true ) + static
constexpr auto set_field( F flags, E field, bool value = true ) -> std::enable_if_t< std::is_integral<F>::value && std::is_unsigned<F>::value && std::is_enum<E>::value && std::is_same< F, std::underlying_type_t<E> >::value, F > { diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 8d832a17ff..7e67d28151 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -40,7 +40,13 @@ namespace eosio { namespace chain { } } - wasm_interface(vm_type vm, bool eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile); + enum class vm_oc_enable { + oc_auto, + oc_all, + oc_none + }; + + wasm_interface(vm_type vm, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile); ~wasm_interface(); // initialize exec per thread @@ -55,28 +61,32 @@ namespace eosio { namespace chain { //indicate that a particular code probably won't be used after given block_num void code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num); - //indicate the current LIB. evicts old cache entries - void current_lib(const uint32_t lib); + //indicate the current LIB. evicts old cache entries, each evicted entry is provided to callback + void current_lib(const uint32_t lib, const std::function<void(const digest_type&, uint8_t)>& callback); //Calls apply or error on a given code void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context); //Returns true if the code is cached bool is_code_cached(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) const; - - // If substitute_apply is set, then apply calls it before doing anything else. If substitute_apply returns true, - // then apply returns immediately. - std::function<bool(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, apply_context& context)> substitute_apply; private: unique_ptr<struct wasm_interface_impl> my; - vm_type vm; }; } } // eosio::chain namespace eosio{ namespace chain { std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime); + inline std::ostream& operator<<(std::ostream& os, wasm_interface::vm_oc_enable t) { + if (t == wasm_interface::vm_oc_enable::oc_auto) { + os << "auto"; + } else if (t == wasm_interface::vm_oc_enable::oc_all) { + os << "all"; + } else if (t == wasm_interface::vm_oc_enable::oc_none) { + os << "none"; + } + return os; + } }} FC_REFLECT_ENUM( eosio::chain::wasm_interface::vm_type, (eos_vm)(eos_vm_jit)(eos_vm_oc) ) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp new file mode 100644 index 0000000000..4ee4ac7388 --- /dev/null +++ b/libraries/chain/include/eosio/chain/wasm_interface_collection.hpp @@ -0,0 +1,65 @@ +#pragma once +#include <eosio/chain/wasm_interface.hpp> +#include <mutex> +#include <thread> +#include <unordered_map> + +namespace eosio::chain { + + /** + * @class wasm_interface_collection manages the active wasm_interface to use for execution.
+ */ + class wasm_interface_collection { + public: + inline static bool test_disable_tierup = false; // set by unittests to test tierup failing + + wasm_interface_collection(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, + const chainbase::database& d, const std::filesystem::path& data_dir, + const eosvmoc::config& eosvmoc_config, bool profile); + + ~wasm_interface_collection(); + + void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context); + + // used for tests, only valid on main thread + bool is_code_cached(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) { + EOS_ASSERT(is_on_main_thread(), wasm_execution_error, "is_code_cached called off the main thread"); + return wasmif.is_code_cached(code_hash, vm_type, vm_version); + } + + // update current lib of all wasm interfaces + void current_lib(const uint32_t lib); + + // only called from non-main threads (read-only trx execution threads) when producer_plugin starts them + void init_thread_local_data(const chainbase::database& d, const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, bool profile); + +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + bool is_eos_vm_oc_enabled() const { + return ((eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) || wasm_runtime == wasm_interface::vm_type::eos_vm_oc); + } +#endif + + void code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num); + + // If substitute_apply is set, then apply calls it before doing anything else. If substitute_apply returns true, + // then apply returns immediately. Provided function must be multi-thread safe. + std::function<bool(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, apply_context& context)> substitute_apply; + + private: + bool is_on_main_thread() { return main_thread_id == std::this_thread::get_id(); }; + + private: + const std::thread::id main_thread_id; + const wasm_interface::vm_type wasm_runtime; + const wasm_interface::vm_oc_enable eosvmoc_tierup; + + wasm_interface wasmif; // used by main thread + std::mutex threaded_wasmifs_mtx; + std::unordered_map<std::thread::id, std::unique_ptr<wasm_interface>> threaded_wasmifs; // one for each read-only thread, used by eos-vm and eos-vm-jit + +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + std::unique_ptr<struct eosvmoc_tier> eosvmoc; // used by all threads +#endif + }; + +} // eosio::chain diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 825861ac58..e7ea5776e6 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -41,31 +41,13 @@ namespace eosio { namespace chain { uint8_t vm_version = 0; }; struct by_hash; - struct by_first_block_num; struct by_last_block_num; -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - struct eosvmoc_tier { - eosvmoc_tier(const std::filesystem::path& d, const eosvmoc::config& c, const chainbase::database& db) - : cc(d, c, db) { - // construct exec for the main thread - init_thread_local_data(); - } - - // Support multi-threaded execution. - void init_thread_local_data() { - exec = std::make_unique<eosvmoc::executor>(cc); - } - - eosvmoc::code_cache_async cc; - - // Each thread requires its own exec and mem.
Defined in wasm_interface.cpp - thread_local static std::unique_ptr<eosvmoc::executor> exec; - thread_local static eosvmoc::memory mem; - }; -#endif - - wasm_interface_impl(wasm_interface::vm_type vm, bool eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) : db(d), wasm_runtime_time(vm) { + wasm_interface_impl(wasm_interface::vm_type vm, const chainbase::database& d, + const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) + : db(d) + , wasm_runtime_time(vm) + { #ifdef EOSIO_EOS_VM_RUNTIME_ENABLED if(vm == wasm_interface::vm_type::eos_vm) runtime_interface = std::make_unique<webassembly::eos_vm_runtime::eos_vm_runtime<eosio::vm::interpreter>>(); @@ -84,22 +66,9 @@ namespace eosio { namespace chain { #endif if(!runtime_interface) EOS_THROW(wasm_exception, "${r} wasm runtime not supported on this platform and/or configuration", ("r", vm)); - -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(eosvmoc_tierup) { - EOS_ASSERT(vm != wasm_interface::vm_type::eos_vm_oc, wasm_exception, "You can't use EOS VM OC as the base runtime when tier up is activated"); - eosvmoc.emplace(data_dir, eosvmoc_config, d); - } -#endif } - ~wasm_interface_impl() { - if(is_shutting_down) - for(wasm_cache_index::iterator it = wasm_instantiation_cache.begin(); it != wasm_instantiation_cache.end(); ++it) - wasm_instantiation_cache.modify(it, [](wasm_cache_entry& e) { - e.module.release()->fast_shutdown(); - }); - } + ~wasm_interface_impl() = default; bool is_code_cached(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version) const { wasm_cache_index::iterator it = wasm_instantiation_cache.find( boost::make_tuple(code_hash, vm_type, vm_version) ); @@ -114,14 +83,16 @@ namespace eosio { namespace chain { }); } - void current_lib(uint32_t lib) { + // reports each code_hash and vm_version that will be erased to callback + void current_lib(uint32_t lib, const std::function<void(const digest_type&, uint8_t)>& callback) { //anything last used before or on the LIB can be evicted const auto first_it = wasm_instantiation_cache.get<by_last_block_num>().begin(); const auto last_it = wasm_instantiation_cache.get<by_last_block_num>().upper_bound(lib); -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(eosvmoc) for(auto it = first_it; it != last_it; it++) - eosvmoc->cc.free_code(it->code_hash, it->vm_version); -#endif + if (callback) { + for(auto it = first_it; it != last_it; it++) { + callback(it->code_hash, it->vm_version); + } + } wasm_instantiation_cache.get<by_last_block_num>().erase(first_it, last_it); } @@ -158,7 +129,6 @@ namespace eosio { namespace chain { return it->module; } - bool is_shutting_down = false; std::unique_ptr<wasm_runtime_interface> runtime_interface; typedef boost::multi_index_container< @@ -178,10 +148,6 @@ namespace eosio { namespace chain { const chainbase::database& db; const wasm_interface::vm_type wasm_runtime_time; - -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - std::optional<eosvmoc_tier> eosvmoc; -#endif }; } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp index fe7ff49788..d753a9dcaa 100644 --- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp @@ -15,7 +15,6 @@ #include -#include namespace std { template<> struct hash<eosio::chain::eosvmoc::code_tuple> { @@ -39,7 +38,7 @@ struct config; class code_cache_base { public: - code_cache_base(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); + code_cache_base(const std::filesystem::path&
data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); ~code_cache_base(); const int& fd() const { return _cache_fd; } @@ -78,9 +77,20 @@ class code_cache_base { local::datagram_protocol::socket _compile_monitor_write_socket{_ctx}; local::datagram_protocol::socket _compile_monitor_read_socket{_ctx}; - //these are really only useful to the async code cache, but keep them here so - //free_code can be shared - std::unordered_set<code_tuple> _queued_compiles; + //these are really only useful to the async code cache, but keep them here so free_code can be shared + using queued_compilies_t = boost::multi_index_container< + code_tuple, + indexed_by< + sequenced<>, + hashed_unique<tag<by_hash>, + composite_key< code_tuple, + member<code_tuple, digest_type, &code_tuple::code_id>, + member<code_tuple, uint8_t, &code_tuple::vm_version> + > + > + > + >; + queued_compilies_t _queued_compiles; std::unordered_map<code_tuple, bool> _outstanding_compiles_and_poison; size_t _free_bytes_eviction_threshold; @@ -95,13 +105,13 @@ class code_cache_base { class code_cache_async : public code_cache_base { public: - code_cache_async(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); + code_cache_async(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db); ~code_cache_async(); //If code is in cache: returns pointer & bumps to front of MRU list //If code is not in cache, and not blacklisted, and not currently compiling: return nullptr and kick off compile //otherwise: return nullptr - const code_descriptor* const get_descriptor_for_code(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure); + const code_descriptor* const get_descriptor_for_code(bool high_priority, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure); private: std::thread _monitor_reply_thread; diff --git a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp index 09df1e023a..5cb0cd79ad 100644 --- a/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/runtime_interface.hpp @@ -13,7 +13,6 @@ class apply_context; class wasm_instantiated_module_interface { public: virtual void apply(apply_context& context) = 0; - virtual void fast_shutdown() {} virtual ~wasm_instantiated_module_interface(); }; diff --git a/libraries/chain/platform_timer_asio_fallback.cpp b/libraries/chain/platform_timer_asio_fallback.cpp index a372dee8e2..3c861284fc 100644 --- a/libraries/chain/platform_timer_asio_fallback.cpp +++ b/libraries/chain/platform_timer_asio_fallback.cpp @@ -30,7 +30,7 @@ platform_timer::platform_timer() { std::promise<void> p; auto f = p.get_future(); checktime_thread = std::thread([&p]() { - fc::set_os_thread_name("checktime"); + fc::set_thread_name("checktime"); checktime_ios = std::make_unique<boost::asio::io_service>(); boost::asio::io_service::work work(*checktime_ios); p.set_value(); diff --git a/libraries/chain/platform_timer_kqueue.cpp b/libraries/chain/platform_timer_kqueue.cpp index ed7033c33e..3cb341a031 100644 --- a/libraries/chain/platform_timer_kqueue.cpp +++ b/libraries/chain/platform_timer_kqueue.cpp @@ -51,7 +51,7 @@ platform_timer::platform_timer() { FC_ASSERT(kevent64(kqueue_fd, &quit_event, 1, NULL, 0, KEVENT_FLAG_IMMEDIATE, NULL) == 0, "failed to create quit event"); kevent_thread = std::thread([]() { - fc::set_os_thread_name("checktime"); + fc::set_thread_name("checktime"); while(true) { struct kevent64_s
anEvent; int c = kevent64(kqueue_fd, NULL, 0, &anEvent, 1, 0, NULL); diff --git a/libraries/chain/snapshot_scheduler.cpp b/libraries/chain/snapshot_scheduler.cpp index ee7a356fe0..38191222ac 100644 --- a/libraries/chain/snapshot_scheduler.cpp +++ b/libraries/chain/snapshot_scheduler.cpp @@ -8,7 +8,6 @@ namespace eosio::chain { // snapshot_scheduler_listener void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chain) { - bool serialize_needed = false; bool snapshot_executed = false; auto execute_snapshot_with_log = [this, height, &snapshot_executed, &chain](const auto& req) { @@ -25,28 +24,18 @@ void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chai std::vector<uint32_t> unschedule_snapshot_request_ids; for(const auto& req: _snapshot_requests.get<0>()) { // -1 since it's called from start block - bool recurring_snapshot = req.block_spacing && (height >= req.start_block_num + 1) && (!((height - req.start_block_num - 1) % req.block_spacing)); - bool onetime_snapshot = (!req.block_spacing) && (height == req.start_block_num + 1); - - // assume "asap" for snapshot with missed/zero start, it can have spacing - if(!req.start_block_num) { - // update start_block_num with current height only if this is recurring - // if non recurring, will be executed and unscheduled - if(req.block_spacing && height) { - auto& snapshot_by_id = _snapshot_requests.get<by_snapshot_id>(); - auto it = snapshot_by_id.find(req.snapshot_request_id); - _snapshot_requests.modify(it, [&height](auto& p) { p.start_block_num = height - 1; }); - serialize_needed = true; - } - execute_snapshot_with_log(req); - } else if(recurring_snapshot || onetime_snapshot) { + bool recurring_snapshot = req.block_spacing && (height >= req.start_block_num + 1) && (!((height - req.start_block_num - 1) % req.block_spacing)); + bool onetime_snapshot = (!req.block_spacing) && (height == req.start_block_num + 1); + + bool marked_for_deletion = ((!req.block_spacing) && (height >= req.start_block_num + 1)) || // if one time snapshot executed or scheduled for the past, it should be gone + (height > 0 && ((height-1) >= req.end_block_num)); // any snapshot can expire by end block num (end_block_num can be max value) + + if(recurring_snapshot || onetime_snapshot) { execute_snapshot_with_log(req); } // cleanup - remove expired (or invalid) request - if((!req.start_block_num && !req.block_spacing) || - (!req.block_spacing && height >= (req.start_block_num + 1)) || - (req.end_block_num > 0 && height >= (req.end_block_num + 1))) { + if(marked_for_deletion) { unschedule_snapshot_request_ids.push_back(req.snapshot_request_id); } } @@ -54,9 +43,6 @@ void snapshot_scheduler::on_start_block(uint32_t height, chain::controller& chai for(const auto& i: unschedule_snapshot_request_ids) { unschedule_snapshot(i); } - - // store db to filesystem - if(serialize_needed) x_serialize(); } void snapshot_scheduler::on_irreversible_block(const signed_block_ptr& lib, const chain::controller& chain) { @@ -80,15 +66,8 @@ snapshot_scheduler::snapshot_schedule_result snapshot_scheduler::schedule_snapsh auto& snapshot_by_value = _snapshot_requests.get<by_snapshot_value>(); auto existing = snapshot_by_value.find(std::make_tuple(sri.block_spacing, sri.start_block_num, sri.end_block_num)); EOS_ASSERT(existing == snapshot_by_value.end(), chain::duplicate_snapshot_request, "Duplicate snapshot request"); - - if(sri.end_block_num > 0) { - // if "end" is specified, it should be greater than start - EOS_ASSERT(sri.start_block_num <= sri.end_block_num, chain::invalid_snapshot_request, "End
block number should be greater or equal to start block number"); - // if also block_spacing specified, check it - if(sri.block_spacing > 0) { - EOS_ASSERT(sri.start_block_num + sri.block_spacing <= sri.end_block_num, chain::invalid_snapshot_request, "Block spacing exceeds defined by start and end range"); - } - } + EOS_ASSERT(sri.start_block_num <= sri.end_block_num, chain::invalid_snapshot_request, "End block number should be greater or equal to start block number"); + EOS_ASSERT(sri.start_block_num + sri.block_spacing <= sri.end_block_num, chain::invalid_snapshot_request, "Block spacing exceeds defined by start and end range"); _snapshot_requests.emplace(snapshot_schedule_information{{_snapshot_id++}, {sri.block_spacing, sri.start_block_num, sri.end_block_num, sri.snapshot_description}, {}}); x_serialize(); diff --git a/libraries/chain/symbol.cpp b/libraries/chain/symbol.cpp new file mode 100644 index 0000000000..048f887d1d --- /dev/null +++ b/libraries/chain/symbol.cpp @@ -0,0 +1,21 @@ +#include <eosio/chain/symbol.hpp> +#include <boost/algorithm/string.hpp> + +namespace eosio::chain { + + symbol symbol::from_string(const string& from) +{ + try { + string s = boost::algorithm::trim_copy(from); + EOS_ASSERT(!s.empty(), symbol_type_exception, "creating symbol from empty string"); + auto comma_pos = s.find(','); + EOS_ASSERT(comma_pos != string::npos, symbol_type_exception, "missing comma in symbol"); + auto prec_part = s.substr(0, comma_pos); + uint8_t p = fc::to_int64(prec_part); + string name_part = s.substr(comma_pos + 1); + EOS_ASSERT( p <= max_precision, symbol_type_exception, "precision ${p} should be <= 18", ("p", p)); + return symbol(string_to_symbol(p, name_part.c_str())); + } FC_CAPTURE_LOG_AND_RETHROW((from)); +} + +} // namespace eosio::chain diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 28e0fb7c7d..3ee699c496 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -46,11 +46,13 @@ namespace eosio { namespace chain { transaction_context::transaction_context( controller& c, const packed_transaction& t, + const transaction_id_type& trx_id, transaction_checktime_timer&& tmr, fc::time_point s, transaction_metadata::trx_type type) :control(c) ,packed_trx(t) + ,id(trx_id) ,undo_session() ,trace(std::make_shared<transaction_trace>()) ,start(s) @@ -62,7 +64,7 @@ namespace eosio { namespace chain { if (!c.skip_db_sessions() && !is_read_only()) { undo_session.emplace(c.mutable_db().start_undo_session(true)); } - trace->id = packed_trx.id(); + trace->id = id; trace->block_num = c.head_block_num() + 1; trace->block_time = c.pending_block_time(); trace->producer_block_id = c.pending_producer_block_id(); @@ -295,7 +297,7 @@ namespace eosio { namespace chain { init( initial_net_usage ); if ( !is_read_only() ) { - record_transaction( packed_trx.id(), trx.expiration ); + record_transaction( id, trx.expiration ); } } @@ -469,7 +471,7 @@ namespace eosio { namespace chain { "not enough time left in block to complete executing transaction ${billing_timer}us", ("now", now)("deadline", _deadline)("start", start)("billing_timer", now - pseudo_start) ); } else if( deadline_exception_code == tx_cpu_usage_exceeded::code_value ) { - std::string assert_msg = "transaction was executing for too long ${billing_timer}us"; + std::string assert_msg = "transaction ${id} was executing for too long ${billing_timer}us"; if (subjective_cpu_bill_us > 0) { assert_msg += " with a subjective cpu of (${subjective} us)"; } @@ -477,10 +479,10 @@ namespace eosio { namespace chain { assert_msg +=
get_tx_cpu_usage_exceeded_reason_msg(limit); if (cpu_limit_due_to_greylist) { assert_msg = "greylisted " + assert_msg; - EOS_THROW( greylist_cpu_usage_exceeded, assert_msg, + EOS_THROW( greylist_cpu_usage_exceeded, assert_msg, ("id", packed_trx.id()) ("billing_timer", now - pseudo_start)("subjective", subjective_cpu_bill_us)("limit", limit) ); } else { - EOS_THROW( tx_cpu_usage_exceeded, assert_msg, + EOS_THROW( tx_cpu_usage_exceeded, assert_msg, ("id", packed_trx.id()) ("billing_timer", now - pseudo_start)("subjective", subjective_cpu_bill_us)("limit", limit) ); } } else if( deadline_exception_code == leeway_deadline_exception::code_value ) { @@ -753,7 +755,7 @@ namespace eosio { namespace chain { uint32_t trx_size = 0; const auto& cgto = control.mutable_db().create<generated_transaction_object>( [&]( auto& gto ) { - gto.trx_id = packed_trx.id(); + gto.trx_id = id; gto.payer = first_auth; gto.sender = account_name(); /// delayed transactions have no sender gto.sender_id = transaction_id_to_sender_id( gto.trx_id ); diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index c66a514eec..abe014d946 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -32,16 +32,14 @@ namespace eosio { namespace chain { - wasm_interface::wasm_interface(vm_type vm, bool eosvmoc_tierup, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) - : my( new wasm_interface_impl(vm, eosvmoc_tierup, d, data_dir, eosvmoc_config, profile) ), vm( vm ) {} + wasm_interface::wasm_interface(vm_type vm, const chainbase::database& d, const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, bool profile) + : my( new wasm_interface_impl(vm, d, data_dir, eosvmoc_config, profile) ) {} wasm_interface::~wasm_interface() {} #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED void wasm_interface::init_thread_local_data() { - if (my->eosvmoc) - my->eosvmoc->init_thread_local_data(); - else if (vm == wasm_interface::vm_type::eos_vm_oc && my->runtime_interface) + if (my->wasm_runtime_time == wasm_interface::vm_type::eos_vm_oc && my->runtime_interface) my->runtime_interface->init_thread_local_data(); } #endif @@ -72,51 +70,17 @@ namespace eosio { namespace chain { //there are a couple opportunities for improvement here-- //Easy: Cache the Module created here so it can be reused for instantiation //Hard: Kick off instantiation in a separate thread at this location - } - - void wasm_interface::indicate_shutting_down() { - my->is_shutting_down = true; } void wasm_interface::code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num) { my->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); } - void wasm_interface::current_lib(const uint32_t lib) { - my->current_lib(lib); + void wasm_interface::current_lib(const uint32_t lib, const std::function<void(const digest_type&, uint8_t)>& callback) { + my->current_lib(lib, callback); } void wasm_interface::apply( const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context ) { - if(substitute_apply && substitute_apply(code_hash, vm_type, vm_version, context)) - return; -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - if(my->eosvmoc) { - const chain::eosvmoc::code_descriptor* cd = nullptr; - chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; - try { - cd = my->eosvmoc->cc.get_descriptor_for_code(code_hash, vm_version,
context.control.is_write_window(), failure); - } - catch(...) { - //swallow errors here, if EOS VM OC has gone into the weeds we shouldn't bail: continue to try and run baseline - //In the future, consider moving bits of EOS VM that can fire exceptions and such out of this call path - static bool once_is_enough; - if(!once_is_enough) - elog("EOS VM OC has encountered an unexpected failure"); - once_is_enough = true; - } - if(cd) { - my->eosvmoc->exec->execute(*cd, my->eosvmoc->mem, context); - return; - } - else if (context.trx_context.is_read_only()) { - if (failure == chain::eosvmoc::code_cache_base::get_cd_failure::temporary) { - EOS_ASSERT(false, ro_trx_vm_oc_compile_temporary_failure, "get_descriptor_for_code failed with temporary failure"); - } else { - EOS_ASSERT(false, ro_trx_vm_oc_compile_permanent_failure, "get_descriptor_for_code failed with permanent failure"); - } - } - } -#endif my->get_instantiated_module(code_hash, vm_type, vm_version, context.trx_context)->apply(context); } @@ -124,13 +88,8 @@ namespace eosio { namespace chain { return my->is_code_cached(code_hash, vm_type, vm_version); } - wasm_instantiated_module_interface::~wasm_instantiated_module_interface() {} - wasm_runtime_interface::~wasm_runtime_interface() {} - -#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED - thread_local std::unique_ptr<eosvmoc::executor> wasm_interface_impl::eosvmoc_tier::exec {}; - thread_local eosvmoc::memory wasm_interface_impl::eosvmoc_tier::mem{ wasm_constraints::maximum_linear_memory/wasm_constraints::wasm_page_size }; -#endif + wasm_instantiated_module_interface::~wasm_instantiated_module_interface() = default; + wasm_runtime_interface::~wasm_runtime_interface() = default; std::istream& operator>>(std::istream& in, wasm_interface::vm_type& runtime) { std::string s; diff --git a/libraries/chain/wasm_interface_collection.cpp b/libraries/chain/wasm_interface_collection.cpp new file mode 100644 index 0000000000..eace6f6517 --- /dev/null +++ b/libraries/chain/wasm_interface_collection.cpp @@ -0,0 +1,131 @@ +#include <eosio/chain/wasm_interface_collection.hpp> +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED +#include <eosio/chain/webassembly/eos-vm-oc.hpp> +#else +#define _REGISTER_EOSVMOC_INTRINSIC(CLS, MOD, METHOD, WASM_SIG, NAME, SIG) +#endif + +namespace eosio::chain { + +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED +struct eosvmoc_tier { + eosvmoc_tier(const std::filesystem::path& d, const eosvmoc::config& c, const chainbase::database& db) + : cc(d, c, db) { + // construct exec for the main thread + init_thread_local_data(); + } + + // Support multi-threaded execution. + void init_thread_local_data() { + exec = std::make_unique<eosvmoc::executor>(cc); + } + + eosvmoc::code_cache_async cc; + + // Each thread requires its own exec and mem.
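The `thread_local static` members declared next are the crux of the tier-up threading model: the code cache `cc` is shared by all threads, while every thread lazily gets its own executor and memory. A minimal, self-contained sketch of that pattern, using a hypothetical `executor` type in place of the real `eosvmoc::executor`:

```cpp
#include <memory>
#include <thread>

struct executor { /* per-thread JIT execution state (illustrative) */ };

struct tier {
    // Shared state would live here (one per tier object); each thread
    // creates its own executor against it.
    void init_thread_local_data() { exec = std::make_unique<executor>(); }

    // static thread_local members: shared by all tier objects,
    // but a distinct instance per thread.
    thread_local static std::unique_ptr<executor> exec;
};

thread_local std::unique_ptr<executor> tier::exec{};

int main() {
    tier t;
    t.init_thread_local_data();        // main thread's executor
    std::thread([&] {
        t.init_thread_local_data();    // a second, independent executor
    }).join();
}
```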
+ thread_local static std::unique_ptr<eosvmoc::executor> exec; + thread_local static eosvmoc::memory mem; +}; + +thread_local std::unique_ptr<eosvmoc::executor> eosvmoc_tier::exec{}; +thread_local eosvmoc::memory eosvmoc_tier::mem{wasm_constraints::maximum_linear_memory / wasm_constraints::wasm_page_size}; +#endif + +wasm_interface_collection::wasm_interface_collection(wasm_interface::vm_type vm, wasm_interface::vm_oc_enable eosvmoc_tierup, + const chainbase::database& d, const std::filesystem::path& data_dir, + const eosvmoc::config& eosvmoc_config, bool profile) + : main_thread_id(std::this_thread::get_id()) + , wasm_runtime(vm) + , eosvmoc_tierup(eosvmoc_tierup) + , wasmif(vm, d, data_dir, eosvmoc_config, profile) { +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if (eosvmoc_tierup != wasm_interface::vm_oc_enable::oc_none) { + EOS_ASSERT(vm != wasm_interface::vm_type::eos_vm_oc, wasm_exception, "You can't use EOS VM OC as the base runtime when tier up is activated"); + eosvmoc = std::make_unique<eosvmoc_tier>(data_dir, eosvmoc_config, d); + } +#endif +} + +wasm_interface_collection::~wasm_interface_collection() = default; + +void wasm_interface_collection::apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context) { + if (substitute_apply && substitute_apply(code_hash, vm_type, vm_version, context)) + return; +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if (eosvmoc && (eosvmoc_tierup == wasm_interface::vm_oc_enable::oc_all || context.should_use_eos_vm_oc())) { + const chain::eosvmoc::code_descriptor* cd = nullptr; + chain::eosvmoc::code_cache_base::get_cd_failure failure = chain::eosvmoc::code_cache_base::get_cd_failure::temporary; + try { + const bool high_priority = context.get_receiver().prefix() == chain::config::system_account_name; + cd = eosvmoc->cc.get_descriptor_for_code(high_priority, code_hash, vm_version, context.control.is_write_window(), failure); + if (test_disable_tierup) + cd = nullptr; + } catch (...)
{ + // swallow errors here, if EOS VM OC has gone into the weeds we shouldn't bail: continue to try and run baseline + // In the future, consider moving bits of EOS VM that can fire exceptions and such out of this call path + static bool once_is_enough; + if (!once_is_enough) + elog("EOS VM OC has encountered an unexpected failure"); + once_is_enough = true; + } + if (cd) { + if (!context.is_applying_block()) // read_only_trx_test.py looks for this log statement + tlog("${a} speculatively executing ${h} with eos vm oc", ("a", context.get_receiver())("h", code_hash)); + eosvmoc->exec->execute(*cd, eosvmoc->mem, context); + return; + } + } +#endif + if (is_on_main_thread()) { + wasmif.apply(code_hash, vm_type, vm_version, context); + return; + } + threaded_wasmifs[std::this_thread::get_id()]->apply(code_hash, vm_type, vm_version, context); +} + +// update current lib of all wasm interfaces +void wasm_interface_collection::current_lib(const uint32_t lib) { + // producer_plugin has already asserted irreversible_block signal is called in write window + std::function<void(const digest_type&, uint8_t)> cb{}; +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if (eosvmoc) { + cb = [&](const digest_type& code_hash, uint8_t vm_version) { + eosvmoc->cc.free_code(code_hash, vm_version); + }; + } +#endif + wasmif.current_lib(lib, cb); + for (auto& w : threaded_wasmifs) { + w.second->current_lib(lib, cb); + } +} + +// only called from non-main threads (read-only trx execution threads) when producer_plugin starts them +void wasm_interface_collection::init_thread_local_data(const chainbase::database& d, const std::filesystem::path& data_dir, + const eosvmoc::config& eosvmoc_config, bool profile) { + EOS_ASSERT(!is_on_main_thread(), misc_exception, "init_thread_local_data called on the main thread"); +#ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED + if (is_eos_vm_oc_enabled()) { + // EOSVMOC needs further initialization of its thread local data + if (eosvmoc) + eosvmoc->init_thread_local_data(); + wasmif.init_thread_local_data(); + } +#endif + + std::lock_guard g(threaded_wasmifs_mtx); + // Non-EOSVMOC needs a wasmif per thread + threaded_wasmifs[std::this_thread::get_id()] = std::make_unique<wasm_interface>(wasm_runtime, d, data_dir, eosvmoc_config, profile); +} + +void wasm_interface_collection::code_block_num_last_used(const digest_type& code_hash, uint8_t vm_type, uint8_t vm_version, uint32_t block_num) { + // The caller of this function apply_eosio_setcode has already asserted that + // the transaction is not a read-only trx, which implies we are + // in write window.
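The `current_lib` flow above inverts the old design: instead of the wasm cache knowing about the OC code cache, the collection supplies an eviction callback and each `wasm_interface` reports every (code_hash, vm_version) pair it erases. A simplified, self-contained sketch of that wiring, with `std::string` standing in for `digest_type` and a `std::multimap` standing in for the multi-index cache:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <utility>

using eviction_cb = std::function<void(const std::string& /*code_hash*/, uint8_t /*vm_version*/)>;

struct cache {
    // key: block number the entry was last used at
    std::multimap<uint32_t, std::pair<std::string, uint8_t>> by_last_used_block;

    void current_lib(uint32_t lib, const eviction_cb& cb) {
        // evict everything last used at or before the LIB
        auto last = by_last_used_block.upper_bound(lib);
        for (auto it = by_last_used_block.begin(); it != last; ++it)
            if (cb) cb(it->second.first, it->second.second);
        by_last_used_block.erase(by_last_used_block.begin(), last);
    }
};

int main() {
    cache c;
    c.by_last_used_block.insert({10, {"hashA", 0}});
    c.by_last_used_block.insert({42, {"hashB", 0}});
    c.current_lib(20, [](const std::string& h, uint8_t v) {
        std::cout << "freeing OC code for " << h << " v" << int(v) << '\n'; // only hashA prints
    });
}
```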
Safe to call threaded_wasmifs's code_block_num_last_used + wasmif.code_block_num_last_used(code_hash, vm_type, vm_version, block_num); + for (auto& w : threaded_wasmifs) { + w.second->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); + } +} + +} // namespace eosio::chain diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index 60cac3dc19..37eca74914 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -38,7 +38,7 @@ static constexpr size_t descriptor_ptr_from_file_start = header_offset + offseto static_assert(sizeof(code_cache_header) <= header_size, "code_cache_header too big"); -code_cache_async::code_cache_async(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : +code_cache_async::code_cache_async(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : code_cache_base(data_dir, eosvmoc_config, db), _result_queue(eosvmoc_config.threads * 2), _threads(eosvmoc_config.threads) @@ -48,7 +48,7 @@ code_cache_async::code_cache_async(const std::filesystem::path data_dir, const e wait_on_compile_monitor_message(); _monitor_reply_thread = std::thread([this]() { - fc::set_os_thread_name("oc-monitor"); + fc::set_thread_name("oc-monitor"); _ctx.run(); }); } @@ -106,7 +106,7 @@ std::tuple code_cache_async::consume_compile_thread_queue() { } -const code_descriptor* const code_cache_async::get_descriptor_for_code(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) { +const code_descriptor* const code_cache_async::get_descriptor_for_code(bool high_priority, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) { //if there are any outstanding compiles, process the result queue now //When app is in write window, all tasks are running sequentially and read-only threads //are not running. Safe to update cache entries. 
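The next hunk reworks `_queued_compiles` from an `unordered_set` into the multi_index container declared earlier: a `sequenced<>` index preserves FIFO compile order (with `push_front` as the priority escape hatch), while the hashed index gives O(1) duplicate checks. A toy version of the same shape, with a plain string standing in for the (code_id, vm_version) composite key:

```cpp
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/identity.hpp>
#include <iostream>
#include <string>

namespace bmi = boost::multi_index;

using queue_t = bmi::multi_index_container<
    std::string,
    bmi::indexed_by<
        bmi::sequenced<>,                               // index 0: compile order
        bmi::hashed_unique<bmi::identity<std::string>>  // index 1: fast membership test
    >>;

int main() {
    queue_t q;
    q.push_back("normal-1");
    q.push_back("normal-2");
    q.push_front("system-contract");                    // high priority jumps the queue

    if (q.get<1>().find("normal-2") != q.get<1>().end())
        std::cout << "normal-2 is already queued\n";

    for (const auto& e : q)                             // system-contract prints first
        std::cout << e << '\n';
}
```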
@@ -156,13 +156,16 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(const dig it->second = false; return nullptr; } - if(_queued_compiles.find(ct) != _queued_compiles.end()) { + if(auto it = _queued_compiles.get<by_hash>().find(boost::make_tuple(std::ref(code_id), vm_version)); it != _queued_compiles.get<by_hash>().end()) { failure = get_cd_failure::temporary; // Compile might not be done yet return nullptr; } if(_outstanding_compiles_and_poison.size() >= _threads) { - _queued_compiles.emplace(ct); + if (high_priority) + _queued_compiles.push_front(ct); + else + _queued_compiles.push_back(ct); failure = get_cd_failure::temporary; // Compile might not be done yet return nullptr; } @@ -221,7 +224,7 @@ const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(const return &*_cache_index.push_front(std::move(std::get<code_descriptor>(result.result))).first; } -code_cache_base::code_cache_base(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : +code_cache_base::code_cache_base(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) : _db(db), _cache_file_path(data_dir/"code_cache.bin") { @@ -377,7 +380,8 @@ void code_cache_base::free_code(const digest_type& code_id, const uint8_t& vm_ve } //if it's in the queued list, erase it - _queued_compiles.erase({code_id, vm_version}); + if(auto i = _queued_compiles.get<by_hash>().find(boost::make_tuple(std::ref(code_id), vm_version)); i != _queued_compiles.get<by_hash>().end()) + _queued_compiles.get<by_hash>().erase(i); //however, if it's currently being compiled there is no way to cancel the compile, //so instead set a poison boolean that indicates not to insert the code into the cache diff --git a/libraries/chain/webassembly/runtimes/eos-vm.cpp b/libraries/chain/webassembly/runtimes/eos-vm.cpp index 522d39e13d..b6292a4361 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm.cpp @@ -201,10 +201,6 @@ class eos_vm_profiling_module : public wasm_instantiated_module_interface { } } - void fast_shutdown() override { - _prof.clear(); - } - profile_data* start(apply_context& context) { name account = context.get_receiver(); if(!context.control.is_profiling(account)) return nullptr; @@ -242,7 +238,7 @@ std::unique_ptr<wasm_instantiated_module_interface> eos_vm_runtime::instan wasm_code_ptr code((uint8_t*)code_bytes, code_size); apply_options options = { .max_pages = 65536, .max_call_depth = 0 }; - std::unique_ptr<backend_t> bkend = std::make_unique<backend_t>(code, code_size, nullptr, options); + std::unique_ptr<backend_t> bkend = std::make_unique<backend_t>(code, code_size, nullptr, options, false); // uses two-pass parsing eos_vm_host_functions_t::resolve(bkend->get_module()); return std::make_unique<eos_vm_instantiated_module<Impl>>(this, std::move(bkend)); } catch(eosio::vm::exception& e) { @@ -264,7 +260,7 @@ std::unique_ptr<wasm_instantiated_module_interface> eos_vm_profile_runtime::inst wasm_code_ptr code((uint8_t*)code_bytes, code_size); apply_options options = { .max_pages = 65536, .max_call_depth = 0 }; - std::unique_ptr<backend_t> bkend = std::make_unique<backend_t>(code, code_size, nullptr, options); + std::unique_ptr<backend_t> bkend = std::make_unique<backend_t>(code, code_size, nullptr, options, false); // uses two-pass parsing eos_vm_host_functions_t::resolve(bkend->get_module()); return std::make_unique<eos_vm_profiling_module>(std::move(bkend), code_bytes, code_size); } catch(eosio::vm::exception& e) { diff --git a/libraries/chainbase b/libraries/chainbase index 0cc3c62aa6..bffb7ebde6 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit
0cc3c62aa641ea89e4f89e61eb2662fd4da92684 +Subproject commit bffb7ebde635be15d406d74d6fef46f4c744d441 diff --git a/libraries/eos-vm b/libraries/eos-vm index 329db27d88..1e9345f96a 160000 --- a/libraries/eos-vm +++ b/libraries/eos-vm @@ -1 +1 @@ -Subproject commit 329db27d888dce32c96b4f209cdea45f1d07e5e7 +Subproject commit 1e9345f96a4dcefa3a16ff51b58e2e7df739eeff diff --git a/libraries/libfc/CMakeLists.txt b/libraries/libfc/CMakeLists.txt index ac86842034..742501ca9f 100644 --- a/libraries/libfc/CMakeLists.txt +++ b/libraries/libfc/CMakeLists.txt @@ -65,19 +65,6 @@ file( GLOB_RECURSE fc_headers ${CMAKE_CURRENT_SOURCE_DIR} *.hpp *.h ) add_library(fc ${fc_sources} ${fc_headers}) -function(detect_thread_name) - include(CheckSymbolExists) - list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) - list(APPEND CMAKE_REQUIRED_LIBRARIES "-pthread") - check_symbol_exists(pthread_setname_np pthread.h HAVE_PTHREAD_SETNAME_NP) - if(HAVE_PTHREAD_SETNAME_NP) - set_source_files_properties(src/log/logger_config.cpp PROPERTIES COMPILE_DEFINITIONS FC_USE_PTHREAD_NAME_NP) - endif() -endfunction() -if(CMAKE_SYSTEM_NAME MATCHES "Linux") - detect_thread_name() -endif() - # Yuck: newer CMake files from boost iostreams will effectively target_link_libraries(Boost::iostreams z;bz2;lzma;zstd) # without first "finding" those libraries. This resolves to simple -lz -lbz2 etc: it'll look for those libraries in the linker's # library search path. This is most problematic on macOS where something like libzstd isn't in the standard search path. Historically @@ -91,12 +78,6 @@ if(APPLE) add_library(zstd INTERFACE) endif() -find_package(Boost 1.66 REQUIRED COMPONENTS - date_time - chrono - unit_test_framework - iostreams) - find_path(GMP_INCLUDE_DIR NAMES gmp.h) find_library(GMP_LIBRARY gmp) if(NOT GMP_LIBRARY MATCHES ${CMAKE_SHARED_LIBRARY_SUFFIX}) @@ -130,7 +111,8 @@ if(APPLE) find_library(security_framework Security) find_library(corefoundation_framework CoreFoundation) endif() -target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Threads::Threads +target_link_libraries( fc PUBLIC Boost::date_time Boost::chrono Boost::iostreams Boost::interprocess Boost::multi_index Boost::dll + Boost::multiprecision Boost::beast Boost::asio Boost::thread Boost::unit_test_framework Threads::Threads OpenSSL::Crypto ZLIB::ZLIB ${PLATFORM_SPECIFIC_LIBS} ${CMAKE_DL_LIBS} secp256k1 ${security_framework} ${corefoundation_framework}) # Critically, this ensures that OpenSSL 1.1 & 3.0 both have a variant of BN_zero() with void return value. 
But it also allows access diff --git a/libraries/libfc/include/fc/array.hpp b/libraries/libfc/include/fc/array.hpp index 8106bfd098..08315d2c6b 100644 --- a/libraries/libfc/include/fc/array.hpp +++ b/libraries/libfc/include/fc/array.hpp @@ -124,7 +124,7 @@ namespace fc { { static const char* name() { - static std::string _name = std::string("fc::array<")+std::string(fc::get_typename<T>::name())+","+ fc::to_string(N) + ">"; + static std::string _name = std::string("fc::array<")+std::string(fc::get_typename<T>::name())+","+ std::to_string(N) + ">"; return _name.c_str(); } }; diff --git a/libraries/libfc/include/fc/io/json_relaxed.hpp b/libraries/libfc/include/fc/io/json_relaxed.hpp index 3983e783dc..9362a7a442 100644 --- a/libraries/libfc/include/fc/io/json_relaxed.hpp +++ b/libraries/libfc/include/fc/io/json_relaxed.hpp @@ -606,7 +606,7 @@ namespace fc { namespace json_relaxed in.get(); return obj; } - FC_THROW_EXCEPTION( parse_error_exception, "Expected '}' after ${variant}", ("variant", obj ) ); + FC_THROW_EXCEPTION( parse_error_exception, "Expected '}' after ${variant}", ("variant", std::move(obj) ) ); } catch( const fc::eof_exception& e ) { diff --git a/libraries/libfc/include/fc/log/logger_config.hpp b/libraries/libfc/include/fc/log/logger_config.hpp index 5a4eeed2f8..2474a5eb0b 100644 --- a/libraries/libfc/include/fc/log/logger_config.hpp +++ b/libraries/libfc/include/fc/log/logger_config.hpp @@ -73,7 +73,6 @@ namespace fc { void configure_logging( const std::filesystem::path& log_config ); bool configure_logging( const logging_config& l ); - void set_os_thread_name( const std::string& name ); void set_thread_name( const std::string& name ); const std::string& get_thread_name(); } diff --git a/libraries/libfc/include/fc/mutex.hpp b/libraries/libfc/include/fc/mutex.hpp new file mode 100644 index 0000000000..518a9ccd8b --- /dev/null +++ b/libraries/libfc/include/fc/mutex.hpp @@ -0,0 +1,210 @@ +#pragma once + +// Enable thread safety attributes only with clang. +// The attributes can be safely erased when compiling with other compilers. +#if defined(__clang__) && (!defined(SWIG)) +#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) +#else +#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op +#endif + +#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) + +#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) + +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) + +#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) + +#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) + +#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) + +#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) + +#define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) + +#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) + +#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) + +#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) + +#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) + +#define RELEASE_GENERIC(...) THREAD_ANNOTATION_ATTRIBUTE__(release_generic_capability(__VA_ARGS__)) + +#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) + +#define TRY_ACQUIRE_SHARED(...)
THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) + +#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + +#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) + +#define ASSERT_SHARED_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) + +#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) + +#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) + +#include <mutex> +#include <shared_mutex> + +namespace fc { + +// Defines an annotated interface for mutexes. +// These methods can be implemented to use any internal mutex implementation. +class CAPABILITY("mutex") mutex { +private: + std::mutex mutex_; + +public: + // Acquire/lock this mutex exclusively. Only one thread can have exclusive + // access at any one time. Write operations to guarded data require an + // exclusive lock. + void lock() ACQUIRE() { mutex_.lock(); } + + // Release/unlock an exclusive mutex. + void unlock() RELEASE() { mutex_.unlock(); } + + // Try to acquire the mutex. Returns true on success, and false on failure. + bool try_lock() TRY_ACQUIRE(true) { return mutex_.try_lock(); } +}; + +// Defines an annotated interface for mutexes. +// These methods can be implemented to use any internal mutex implementation. +class CAPABILITY("shared_mutex") shared_mutex { +private: + std::shared_mutex mutex_; + +public: + // Acquire/lock this mutex exclusively. Only one thread can have exclusive + // access at any one time. Write operations to guarded data require an + // exclusive lock. + void lock() ACQUIRE() { mutex_.lock(); } + + // Acquire/lock this mutex for read operations, which require only a shared + // lock. This assumes a multiple-reader, single writer semantics. Multiple + // threads may acquire the mutex simultaneously as readers, but a writer + // must wait for all of them to release the mutex before it can acquire it + // exclusively. + void lock_shared() ACQUIRE_SHARED() { mutex_.lock_shared(); } + + // Release/unlock an exclusive mutex. + void unlock() RELEASE() { mutex_.unlock(); } + + // Release/unlock a shared mutex. + void unlock_shared() RELEASE_SHARED() { mutex_.unlock_shared(); } + + // Try to acquire the mutex. Returns true on success, and false on failure. + bool try_lock() TRY_ACQUIRE(true) { return mutex_.try_lock(); } + + // Try to acquire the mutex for read operations. + bool try_lock_shared() TRY_ACQUIRE_SHARED(true) { return mutex_.try_lock_shared(); } + + // Assert that this mutex is currently held by the calling thread. + // void AssertHeld() ASSERT_CAPABILITY(this); + + // Assert that is mutex is currently held for read operations. + // void AssertReaderHeld() ASSERT_SHARED_CAPABILITY(this); + + // For negative capabilities. + // const Mutex& operator!() const { return *this; } +}; + +// Tag types for selecting a constructor. +struct adopt_lock_t {} inline constexpr adopt_lock = {}; +struct defer_lock_t {} inline constexpr defer_lock = {}; +struct shared_lock_t {} inline constexpr shared_lock = {}; + +// LockGuard is an RAII class that acquires a mutex in its constructor, and +// releases it in its destructor. +template <typename M> +class SCOPED_CAPABILITY lock_guard { +private: + M& mut; + +public: + // Acquire mu, implicitly acquire *this and associate it with mu. + lock_guard(M& mu) ACQUIRE(mu) + : mut(mu) { + mu.lock(); + } + + // Assume mu is held, implicitly acquire *this and associate it with mu.
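These wrappers exist so clang's `-Wthread-safety` analysis can statically check lock discipline. A hedged sketch of how the annotations are meant to be consumed; the class and member names here are illustrative, not from the diff:

```cpp
// Compile with: clang++ -std=c++17 -Wthread-safety ...
#include <fc/mutex.hpp>

class counters {
    fc::mutex counter_mutex;
    int counter GUARDED_BY(counter_mutex) = 0;  // reads/writes require the lock

public:
    void increment() {
        fc::lock_guard g(counter_mutex);  // acquires; releases at scope exit
        ++counter;                        // OK: analysis sees the lock is held
    }

    // void broken() { ++counter; }      // would warn: writing 'counter'
                                         // requires holding 'counter_mutex'
};
```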
+ lock_guard(M& mu, adopt_lock_t) REQUIRES(mu) + : mut(mu) {} + + ~lock_guard() RELEASE() { mut.unlock(); } +}; + +// unique_lock is an RAII class that acquires a mutex in its constructor, and +// releases it in its destructor. +template <typename M> +class SCOPED_CAPABILITY unique_lock { +private: + using mutex_type = M; + + M* mut; + bool locked; + +public: + unique_lock() noexcept + : mut(nullptr) + , locked(false) {} + + // Acquire mu, implicitly acquire *this and associate it with mu. + explicit unique_lock(M& mu) ACQUIRE(mu) + : mut(&mu) + , locked(true) { + mut->lock(); + } + + // Assume mu is held, implicitly acquire *this and associate it with mu. + unique_lock(M& mu, adopt_lock_t) REQUIRES(mu) + : mut(&mu) + , locked(true) {} + + // Assume mu is not held, implicitly acquire *this and associate it with mu. + unique_lock(M& mu, defer_lock_t) EXCLUDES(mu) + : mut(&mu) + , locked(false) {} + + // Release *this and all associated mutexes, if they are still held. + // There is no warning if the scope was already unlocked before. + ~unique_lock() RELEASE() { + if (locked) + mut->unlock(); + } + + // Acquire all associated mutexes exclusively. + void lock() ACQUIRE() { + mut->lock(); + locked = true; + } + + // Try to acquire all associated mutexes exclusively. + bool try_lock() TRY_ACQUIRE(true) { return locked = mut->try_lock(); } + + // Release all associated mutexes. Warn on double unlock. + void unlock() RELEASE() { + mut->unlock(); + locked = false; + } + + mutex_type* release() noexcept RETURN_CAPABILITY(this) { + mutex_type* res = mut; + mut = nullptr; + locked = false; + return res; + } + + mutex_type* mutex() const noexcept { return mut; } + + bool owns_lock() const noexcept { return locked; } + + explicit operator bool() const noexcept { return locked; } +}; + +} // namespace fc diff --git a/libraries/libfc/include/fc/network/listener.hpp b/libraries/libfc/include/fc/network/listener.hpp new file mode 100644 index 0000000000..50aec37cf1 --- /dev/null +++ b/libraries/libfc/include/fc/network/listener.hpp @@ -0,0 +1,265 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +namespace fc { + +inline std::string to_string(const boost::asio::ip::tcp::endpoint& endpoint) { + const auto& ip_addr = endpoint.address(); + std::string ip_addr_string = ip_addr.to_string(); + if (ip_addr.is_v6()) { + ip_addr_string = "[" + ip_addr_string + "]"; + } + return ip_addr_string + ":" + std::to_string(endpoint.port()); +} + +inline std::pair<std::string, std::string> split_host_port(std::string_view endpoint) { + std::string::size_type colon_pos = endpoint.rfind(':'); + if (colon_pos != std::string::npos) { + auto port = endpoint.substr(colon_pos + 1); + auto hostname = + (endpoint[0] == '[' && colon_pos >= 2) ? endpoint.substr(1, colon_pos - 2) : endpoint.substr(0, colon_pos); + return { std::string(hostname), std::string(port) }; + } else { + return { std::string(endpoint), {} }; + } } + +template <typename Protocol> +struct listener_base; + +template <> +struct listener_base<boost::asio::ip::tcp> { + listener_base(const std::string&) {} +}; + +template <> +struct listener_base<boost::asio::local::stream_protocol> { + std::filesystem::path path_; + listener_base(const std::string& local_address) : path_(std::filesystem::absolute(local_address)) {} + ~listener_base() { + std::error_code ec; + std::filesystem::remove(path_, ec); + } +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// +/// +/// @brief fc::listener is a template class to simplify the code for accepting new socket connections.
+/// It can be used for both TCP and Unix socket connections. +/// +/// @note Users should use fc::create_listener() instead; this class is the implementation +/// detail for fc::create_listener(). +/// +///////////////////////////////////////////////////////////////////////////////////////////// +template <typename Protocol, typename CreateSession> +struct listener : listener_base<Protocol>, std::enable_shared_from_this<listener<Protocol, CreateSession>> { + private: + typename Protocol::acceptor acceptor_; + boost::asio::deadline_timer accept_error_timer_; + boost::posix_time::time_duration accept_timeout_; + logger& logger_; + std::string extra_listening_log_info_; + CreateSession create_session_; + + public: + using endpoint_type = typename Protocol::endpoint; + listener(boost::asio::io_context& executor, logger& logger, boost::posix_time::time_duration accept_timeout, + const std::string& local_address, const endpoint_type& endpoint, + const std::string& extra_listening_log_info, const CreateSession& create_session) + : listener_base<Protocol>(local_address), acceptor_(executor, endpoint), accept_error_timer_(executor), + accept_timeout_(accept_timeout), logger_(logger), extra_listening_log_info_(extra_listening_log_info), + create_session_(create_session) {} + + const auto& acceptor() const { return acceptor_; } + + void do_accept() { + acceptor_.async_accept([self = this->shared_from_this()](boost::system::error_code ec, auto&& peer_socket) { + self->on_accept(ec, std::forward<decltype(peer_socket)>(peer_socket)); + }); + } + + template <typename Socket> + void on_accept(boost::system::error_code ec, Socket&& socket) { + if (!ec) { + create_session_(std::forward<Socket>(socket)); + do_accept(); + } else if (ec == boost::system::errc::too_many_files_open) { + // retry accept() after timeout to avoid cpu loop on accept + fc_elog(logger_, "open file limit reached: not accepting new connections for next ${timeout}ms", + ("timeout", accept_timeout_.total_milliseconds())); + accept_error_timer_.expires_from_now(accept_timeout_); + accept_error_timer_.async_wait([self = this->shared_from_this()](boost::system::error_code ec) { + if (!ec) + self->do_accept(); + }); + } else if (int code = ec.value(); code == ENETDOWN || code == EPROTO || code == ENOPROTOOPT || + code == EHOSTDOWN || code == EHOSTUNREACH || code == EOPNOTSUPP || + code == ENETUNREACH +#ifdef ENONET + || code == ENONET +#endif +#ifdef __APPLE__ + //guard against failure of asio's internal SO_NOSIGPIPE call after accept() + || code == EINVAL +#endif + ) { + // according to https://man7.org/linux/man-pages/man2/accept.2.html, a reliable application should + // retry when these error codes are returned + fc_wlog(logger_, "closing connection, accept error: ${m}", ("m", ec.message())); + do_accept(); + } else { + fc_elog(logger_, "Unrecoverable accept error, stop listening: ${m}", ("m", ec.message())); + } + } + + void log_listening(const endpoint_type& endpoint, const std::string& local_address) { + std::string info; + if constexpr (std::is_same_v<Protocol, boost::asio::ip::tcp>) { + info = fc::to_string(endpoint) + " resolved from " + local_address; + } else { + info = "Unix socket " + local_address; + } + info += extra_listening_log_info_; + fc_ilog(logger_, "start listening on ${info}", ("info", info)); + } +}; + +/// @brief create a stream-oriented socket listener which listens on the specified \c address and calls \c +/// create_session whenever a socket is accepted. +/// +/// @details +/// This function is used for listening on a TCP or Unix socket address and creating the corresponding session when the +/// socket is accepted.
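Before the documentation continues, a hedged usage sketch of `fc::create_listener` built from the interface above; the address, timeout, log suffix, and no-op session body are illustrative, and the default-logger lookup is an assumption about fc's logger API:

```cpp
#include <fc/network/listener.hpp>
#include <boost/asio.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>

int main() {
    boost::asio::io_context ioc;
    fc::logger logger = fc::logger::get(); // assumed: fetches the default fc logger

    fc::create_listener<boost::asio::ip::tcp>(
        ioc, logger,
        boost::posix_time::milliseconds(100),   // accept retry timeout
        "127.0.0.1:8888",                       // host:port to listen on
        " (example listener)",                  // extra text for the "listening" log line
        [](boost::asio::ip::tcp::socket&& sock) {
            // a real server would create a session object that owns the socket
        });

    ioc.run(); // serves accepts until the context is stopped
}
```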
+/// +/// For TCP socket, the address format can be <hostname>:port or <ip_address>:port where the `:port` part is mandatory. +/// If only the port is specified, all network interfaces are listened on. The function can listen on multiple IP addresses +/// if the specified hostname is resolved to multiple IP addresses; in other words, it can create more than one +/// fc::listener object. If port is not specified or none of the resolved addresses can be listened on, a std::system_error +/// with std::errc::bad_address error code will be thrown. +/// +/// For Unix socket, this function will temporarily change the current working directory to the parent of the specified \c +/// address (i.e. socket file path), listen on the filename component of the path, and then restore the working +/// directory before return. This is a workaround for the socket file path length limitation, which is around 100 characters. +/// +/// The lifetime of the created listener objects is controlled by \c executor; the created objects will be destroyed +/// when \c executor.stop() is called. +/// +/// @note +/// This function is not thread safe for Unix socket because it will temporarily change working directory without any +/// lock. Any code which depends on the current working directory (such as opening files with relative paths) in other +/// threads should be protected. +/// +/// @tparam Protocol either \c boost::asio::ip::tcp or \c boost::asio::local::stream_protocol +/// @throws std::system_error or boost::system::system_error +template <typename Protocol, typename CreateSession> +void create_listener(boost::asio::io_context& executor, logger& logger, boost::posix_time::time_duration accept_timeout, + const std::string& address, const std::string& extra_listening_log_info, + const CreateSession& create_session) { + using tcp = boost::asio::ip::tcp; + if constexpr (std::is_same_v<Protocol, tcp>) { + auto [host, port] = split_host_port(address); + if (port.empty()) { + fc_elog(logger, "port is not specified for address ${addr}", ("addr", address)); + throw std::system_error(std::make_error_code(std::errc::bad_address)); + } + + boost::system::error_code ec; + tcp::resolver resolver(executor); + auto endpoints = resolver.resolve(host, port, tcp::resolver::passive, ec); + if (ec) { + fc_elog(logger, "failed to resolve address: ${msg}", ("msg", ec.message())); + throw std::system_error(ec); + } + + int listened = 0; + std::optional<tcp::endpoint> unspecified_ipv4_addr; + bool has_unspecified_ipv6_only = false; + + auto create_listener = [&](const auto& endpoint) { + const auto& ip_addr = endpoint.address(); + try { + auto listener = std::make_shared<fc::listener<tcp, CreateSession>>( + executor, logger, accept_timeout, address, endpoint, extra_listening_log_info, create_session); + listener->log_listening(endpoint, address); + listener->do_accept(); + ++listened; + has_unspecified_ipv6_only = ip_addr.is_unspecified() && ip_addr.is_v6(); + if (has_unspecified_ipv6_only) { + boost::asio::ip::v6_only option; + listener->acceptor().get_option(option); + has_unspecified_ipv6_only &= option.value(); + } + + } catch (boost::system::system_error& ex) { + fc_wlog(logger, "unable to listen on ${ip_addr}:${port} resolved from ${address}: ${msg}", + ("ip_addr", ip_addr.to_string())("port", endpoint.port())("address", address)("msg", ex.what())); + } + }; + + for (const auto& ep : endpoints) { + const auto& endpoint = ep.endpoint(); + const auto& ip_addr = endpoint.address(); + if (ip_addr.is_unspecified() && ip_addr.is_v4() && endpoints.size() > 1) { + // it is an error to bind a socket to the same port for both ipv6 and ipv4 INADDR_ANY address when + // the system has
diff --git a/libraries/libfc/include/fc/reflect/reflect.hpp b/libraries/libfc/include/fc/reflect/reflect.hpp
index 1c72517557..910bcdd383 100644
--- a/libraries/libfc/include/fc/reflect/reflect.hpp
+++ b/libraries/libfc/include/fc/reflect/reflect.hpp
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <string>
 #include
@@ -200,7 +201,7 @@ template<> struct reflector<ENUM> { \
       switch( elem ) { \
         BOOST_PP_SEQ_FOR_EACH( FC_REFLECT_ENUM_TO_STRING, ENUM, FIELDS ) \
         default: \
-          fc::throw_bad_enum_cast( fc::to_string(int64_t(elem)).c_str(), BOOST_PP_STRINGIZE(ENUM) ); \
+          fc::throw_bad_enum_cast( std::to_string(int64_t(elem)).c_str(), BOOST_PP_STRINGIZE(ENUM) ); \
       }\
       return nullptr; \
    } \
@@ -211,7 +212,7 @@ template<> struct reflector<ENUM> { \
       switch( elem ) { \
         BOOST_PP_SEQ_FOR_EACH( FC_REFLECT_ENUM_TO_FC_STRING, ENUM, FIELDS ) \
       } \
-      return fc::to_string(int64_t(elem)); \
+      return std::to_string(int64_t(elem)); \
    } \
    static std::string to_fc_string(int64_t i) { \
      return to_fc_string(ENUM(i)); \
    } \
diff --git a/libraries/libfc/include/fc/string.hpp b/libraries/libfc/include/fc/string.hpp
index 1e86fc4ffb..1d0792ba72 100644
--- a/libraries/libfc/include/fc/string.hpp
+++ b/libraries/libfc/include/fc/string.hpp
@@ -8,19 +8,9 @@ namespace fc
   int64_t  to_int64( const std::string& );
   uint64_t to_uint64( const std::string& );
   double   to_double( const std::string& );
-  std::string to_string( double );
-  std::string to_string( uint64_t );
-  std::string to_string( int64_t );
-  std::string to_string( uint16_t );
-  inline std::string to_string( int32_t v )  { return to_string( int64_t(v) ); }
-  inline std::string to_string( uint32_t v ){ return to_string( uint64_t(v) ); }
-#ifdef __APPLE__
-  inline std::string to_string( size_t s)   { return to_string(uint64_t(s)); }
-#endif

   class variant_object;
   std::string format_string( const std::string&, const variant_object&, bool minimize = false );
-  std::string trim( const std::string& );

   /**
    * Convert '\t', '\r', '\n', '\\' and '"' to "\t\r\n\\\"" if escape_ctrl == on
diff --git a/libraries/libfc/include/fc/time.hpp b/libraries/libfc/include/fc/time.hpp
index d2d0dfc80a..96b7156781 100644
--- a/libraries/libfc/include/fc/time.hpp
+++ b/libraries/libfc/include/fc/time.hpp
@@ -1,6 +1,7 @@
 #pragma once
 #include
 #include
+#include <limits>

 #ifdef _MSC_VER
 #pragma warning (push)
@@ -11,7 +12,8 @@ namespace fc {
   class microseconds {
     public:
       constexpr explicit microseconds( int64_t c = 0) :_count(c){}
-      static constexpr microseconds maximum() { return microseconds(0x7fffffffffffffffll); }
+      static constexpr microseconds maximum() { return microseconds(std::numeric_limits<int64_t>::max()); }
+      static constexpr microseconds minimum() { return microseconds(std::numeric_limits<int64_t>::min()); }
       friend constexpr microseconds operator + (const microseconds& l, const microseconds& r ) { return microseconds(l._count+r._count); }
       friend constexpr microseconds operator - (const microseconds& l, const microseconds& r ) { return microseconds(l._count-r._count); }
@@ -49,6 +51,18 @@
         std::string to_iso_string()const;
         static time_point from_iso_string( const std::string& s );

+        // protect against overflow/underflow
+        constexpr time_point& safe_add( const microseconds& m ) {
+           if (m.count() > 0 && elapsed > microseconds::maximum() - m) {
+              elapsed = microseconds::maximum();
+           } else if (m.count() < 0 && elapsed < microseconds::minimum() - m) {
+              elapsed = microseconds::minimum();
+           } else {
+              elapsed += m;
+           }
+           return *this;
+        }
+
         constexpr const microseconds& time_since_epoch()const { return elapsed; }
         constexpr uint32_t sec_since_epoch()const { return elapsed.count() / 1000000; }
         constexpr bool operator > ( const time_point& t )const { return elapsed._count > t.elapsed._count; }
@@ -81,7 +95,7 @@
        constexpr explicit time_point_sec( const time_point& t )
        :utc_seconds( t.time_since_epoch().count() / 1000000ll ){}

-       static constexpr time_point_sec maximum() { return time_point_sec(0xffffffff); }
+       static constexpr time_point_sec maximum() { return time_point_sec(std::numeric_limits<uint32_t>::max()); }
        static constexpr time_point_sec min() { return time_point_sec(0); }

        constexpr time_point to_time_point()const { return time_point( fc::seconds( utc_seconds) ); }
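As a quick illustration of the new saturating arithmetic (a sketch against the header as modified above, not code from the PR): safe_add() clamps at microseconds::maximum()/minimum() instead of wrapping on signed overflow.

    #include <fc/time.hpp>
    #include <iostream>

    int main() {
       // Near the upper bound: adding past maximum() clamps instead of overflowing.
       fc::time_point hi{fc::microseconds::maximum() - fc::microseconds(5)};
       hi.safe_add(fc::microseconds(100));
       std::cout << (hi.time_since_epoch() == fc::microseconds::maximum()) << '\n'; // 1

       // Near the lower bound: subtracting past minimum() clamps as well.
       fc::time_point lo{fc::microseconds::minimum() + fc::microseconds(5)};
       lo.safe_add(fc::microseconds(-100));
       std::cout << (lo.time_since_epoch() == fc::microseconds::minimum()) << '\n'; // 1
    }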
diff --git a/libraries/libfc/include/fc/variant_object.hpp b/libraries/libfc/include/fc/variant_object.hpp
index fe0dcd4e4e..bd03279d2f 100644
--- a/libraries/libfc/include/fc/variant_object.hpp
+++ b/libraries/libfc/include/fc/variant_object.hpp
@@ -206,13 +206,20 @@ namespace fc
          ///@}

+         explicit mutable_variant_object( variant v )
+         :_key_value( new std::vector<entry>() )
+         {
+            *this = v.get_object();
+         }
+
          template<typename T,
-                  typename = std::enable_if_t<!std::is_base_of<mutable_variant_object, std::decay_t<T>>::value>>
+                  typename = std::enable_if_t<!std::is_base_of<mutable_variant_object, std::decay_t<T>>::value &&
+                                              !std::is_base_of<variant_object, std::decay_t<T>>::value &&
+                                              !std::is_base_of<variant, std::decay_t<T>>::value>>
         explicit mutable_variant_object( T&& v )
         :_key_value( new std::vector<entry>() )
         {
-            *this = variant(fc::forward<T>(v)).get_object();
+            *this = std::move(variant(fc::forward<T>(v)).get_object());
         }

         mutable_variant_object();
@@ -228,11 +235,22 @@
         mutable_variant_object( mutable_variant_object&& );
         mutable_variant_object( const mutable_variant_object& );
-        mutable_variant_object( const variant_object& );
+        explicit mutable_variant_object( const variant_object& );
+        /**
+         * Use with care as the internal shared state of variant_object is moved.
+         * Asserts on exclusive ownership of the variant_object shared state. Not thread safe.
+         */
+        explicit mutable_variant_object( variant_object&& );

         mutable_variant_object& operator=( mutable_variant_object&& );
         mutable_variant_object& operator=( const mutable_variant_object& );
         mutable_variant_object& operator=( const variant_object& );
+        /**
+         * Use with care as the internal shared state of variant_object is moved.
+         * Asserts on exclusive ownership of the variant_object shared state. Not thread safe.
+         */
+        mutable_variant_object& operator=( variant_object&& );
+
      private:
        std::unique_ptr< std::vector< entry > > _key_value;
        friend class variant_object;
diff --git a/libraries/libfc/libraries/bn256 b/libraries/libfc/libraries/bn256
index 63c6c9919c..da781dbd15 160000
--- a/libraries/libfc/libraries/bn256
+++ b/libraries/libfc/libraries/bn256
@@ -1 +1 @@
-Subproject commit 63c6c9919c98a76c23209a321a7d006c4f44ce53
+Subproject commit da781dbd15c23b53339240f21458c7ae2ad061c4
diff --git a/libraries/libfc/src/exception.cpp b/libraries/libfc/src/exception.cpp
index b715f54c4a..6231f83291 100644
--- a/libraries/libfc/src/exception.cpp
+++ b/libraries/libfc/src/exception.cpp
@@ -133,7 +133,7 @@ namespace fc
    }

    void from_variant( const variant& v, exception& ll ) {
-      auto obj = v.get_object();
+      const auto& obj = v.get_object();
       if( obj.contains( "stack" ) )
          ll.my->_elog = obj["stack"].as<log_messages>();
       if( obj.contains( "code" ) )
diff --git a/libraries/libfc/src/io/json.cpp b/libraries/libfc/src/io/json.cpp
index e7ed058995..b731e9ffbd 100644
--- a/libraries/libfc/src/io/json.cpp
+++ b/libraries/libfc/src/io/json.cpp
@@ -206,7 +206,7 @@ namespace fc
             in.get();
             return obj;
          }
-         FC_THROW_EXCEPTION( parse_error_exception, "Expected '}' after ${variant}", ("variant", obj ) );
+         FC_THROW_EXCEPTION( parse_error_exception, "Expected '}' after ${variant}", ("variant", std::move(obj) ) );
      }
      catch( const fc::eof_exception& e )
      {
diff --git a/libraries/libfc/src/log/console_appender.cpp b/libraries/libfc/src/log/console_appender.cpp
index a0d3312448..8cdb3fd48d 100644
--- a/libraries/libfc/src/log/console_appender.cpp
+++ b/libraries/libfc/src/log/console_appender.cpp
@@ -101,7 +101,7 @@ namespace fc {
      const log_context context = m.get_context();
      std::string file_line = context.get_file().substr( 0, 22 );
      file_line += ':';
-     file_line += fixed_size( 6, fc::to_string( context.get_line_number() ) );
+     file_line += fixed_size( 6, std::to_string( context.get_line_number() ) );

      std::string line;
      line.reserve( 256 );
diff --git a/libraries/libfc/src/log/gelf_appender.cpp b/libraries/libfc/src/log/gelf_appender.cpp
index 3b6692bef0..20b8e7b63d 100644
--- a/libraries/libfc/src/log/gelf_appender.cpp
+++ b/libraries/libfc/src/log/gelf_appender.cpp
@@ -137,7 +137,7 @@ namespace fc
      my->thread = std::thread([this] {
        try {
-         fc::set_os_thread_name("gelf");
+         fc::set_thread_name("gelf");
          my->io_context.run();
        } catch (std::exception& ex) {
          fprintf(stderr, "GELF logger caught exception at %s:%d : %s\n", __FILE__, __LINE__, ex.what());
@@ -169,7 +169,7 @@ namespace fc
      gelf_message["_timestamp_ns"] = time_ns;

      static uint64_t gelf_log_counter;
-     gelf_message["_log_id"] = fc::to_string(++gelf_log_counter);
+     gelf_message["_log_id"] = std::to_string(++gelf_log_counter);

      switch (context.get_log_level())
      {
diff --git a/libraries/libfc/src/log/log_message.cpp b/libraries/libfc/src/log/log_message.cpp
index a43bb4afb2..491713b325 100644
--- a/libraries/libfc/src/log/log_message.cpp
+++ b/libraries/libfc/src/log/log_message.cpp
@@ -57,7 +57,7 @@ namespace fc
    log_context::log_context( const variant& v )
    :my( std::make_shared<detail::log_context_impl>() )
    {
-      auto obj = v.get_object();
+      const auto& obj = v.get_object();
       my->level       = obj["level"].as<log_level>();
       my->file        = obj["file"].as_string();
       my->line        = obj["line"].as_uint64();
@@ -73,7 +73,7 @@

    std::string log_context::to_string()const
    {
-      return my->thread_name + "  " + my->file + ":" + fc::to_string(my->line) + " " + my->method;
+      return my->thread_name + "  " + my->file + ":" + std::to_string(my->line) + " " + my->method;
    }
diff --git a/libraries/libfc/src/log/logger_config.cpp b/libraries/libfc/src/log/logger_config.cpp
index e15897d71d..5b81090d75 100644
--- a/libraries/libfc/src/log/logger_config.cpp
+++ b/libraries/libfc/src/log/logger_config.cpp
@@ -10,6 +10,9 @@
 #include
 #include

+#define BOOST_DLL_USE_STD_FS
+#include <boost/dll/runtime_symbol_info.hpp>
+
 namespace fc {

    log_config& log_config::get() {
@@ -133,26 +136,22 @@
    }

    static thread_local std::string thread_name;
-   void set_os_thread_name( const std::string& name ) {
-#ifdef FC_USE_PTHREAD_NAME_NP
-      pthread_setname_np( pthread_self(), name.c_str() );
-#endif
-   }
+   void set_thread_name( const std::string& name ) {
       thread_name = name;
+#if defined(__linux__) || defined(__FreeBSD__)
+      pthread_setname_np( pthread_self(), name.c_str() );
+#elif defined(__APPLE__)
+      pthread_setname_np( name.c_str() );
+#endif
    }
    const std::string& get_thread_name() {
-      if( thread_name.empty() ) {
-#ifdef FC_USE_PTHREAD_NAME_NP
-         char thr_name[64];
-         int rc = pthread_getname_np( pthread_self(), thr_name, 64 );
-         if( rc == 0 ) {
-            thread_name = thr_name;
+      if(thread_name.empty()) {
+         try {
+            thread_name = boost::dll::program_location().filename().generic_string();
+         } catch (...) {
+            thread_name = "unknown";
          }
-      } else {
-         static int thread_count = 0;
-         thread_name = std::string( "thread-" ) + fc::to_string( thread_count++ );
-#endif
      }
      return thread_name;
    }
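A short sketch of the reworked thread-naming behavior (this assumes the declarations live in fc/log/logger_config.hpp): set_thread_name() now also applies the OS-level name, and get_thread_name() falls back to the executable's filename instead of a generated "thread-N" string.

    #include <fc/log/logger_config.hpp> // assumed header for set_thread_name/get_thread_name
    #include <iostream>
    #include <thread>

    int main() {
       // This thread never called set_thread_name(): the new fallback returns the
       // executable's filename (via boost::dll::program_location), or "unknown".
       std::cout << fc::get_thread_name() << '\n';

       std::thread t([] {
          fc::set_thread_name("net-0"); // also sets the kernel-visible name via pthread_setname_np
          std::cout << fc::get_thread_name() << '\n'; // prints "net-0"
       });
       t.join();
    }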
diff --git a/libraries/libfc/src/string.cpp b/libraries/libfc/src/string.cpp
index cb0a79fde6..9220cc0372 100644
--- a/libraries/libfc/src/string.cpp
+++ b/libraries/libfc/src/string.cpp
@@ -3,7 +3,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
@@ -60,33 +59,6 @@ namespace fc {
     } FC_RETHROW_EXCEPTIONS( warn, "${i} => double", ("i",i) )
   }
-
-  std::string to_string(double d)
-  {
-    // +2 is required to ensure that the double is rounded correctly when read back in.
-    // http://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html
-    std::stringstream ss;
-    ss << std::setprecision(std::numeric_limits<double>::digits10 + 2) << std::fixed << d;
-    return ss.str();
-  }
-
-  std::string to_string( uint64_t d)
-  {
-    return boost::lexical_cast<std::string>(d);
-  }
-
-  std::string to_string( int64_t d)
-  {
-    return boost::lexical_cast<std::string>(d);
-  }
-  std::string to_string( uint16_t d)
-  {
-    return boost::lexical_cast<std::string>(d);
-  }
-  std::string trim( const std::string& s )
-  {
-    return boost::algorithm::trim_copy(s);
-  }
-
   std::pair escape_str( std::string& str, escape_control_chars escape_ctrl,
                         std::size_t max_len, std::string_view add_truncate_str )
   {
diff --git a/libraries/libfc/src/time.cpp b/libraries/libfc/src/time.cpp
index cb9fc426e4..d1f0ccf5d2 100644
--- a/libraries/libfc/src/time.cpp
+++ b/libraries/libfc/src/time.cpp
@@ -48,7 +48,7 @@ namespace fc {
      if (count >= 0) {
        uint64_t secs = (uint64_t)count / 1000000ULL;
        uint64_t msec = ((uint64_t)count % 1000000ULL) / 1000ULL;
-       std::string padded_ms = fc::to_string((uint64_t)(msec + 1000ULL)).substr(1);
+       std::string padded_ms = std::to_string((uint64_t)(msec + 1000ULL)).substr(1);
        const auto ptime = boost::posix_time::from_time_t(time_t(secs));
        return boost::posix_time::to_iso_extended_string(ptime) + "." + padded_ms;
      } else {
diff --git a/libraries/libfc/src/variant.cpp b/libraries/libfc/src/variant.cpp
index 04359dacc2..da1e648da0 100644
--- a/libraries/libfc/src/variant.cpp
+++ b/libraries/libfc/src/variant.cpp
@@ -466,6 +466,14 @@ bool variant::as_bool()const
    }
 }

+static std::string s_fc_to_string(double d)
+{
+   // +2 is required to ensure that the double is rounded correctly when read back in.
+   // http://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html
+   std::stringstream ss;
+   ss << std::setprecision(std::numeric_limits<double>::digits10 + 2) << std::fixed << d;
+   return ss.str();
+}
+
 std::string variant::as_string()const
 {
    switch( get_type() )
@@ -473,11 +481,11 @@ std::string variant::as_string()const
       case string_type:
          return **reinterpret_cast<const std::string* const*>(this);
       case double_type:
-         return to_string(*reinterpret_cast<const double*>(this));
+         return s_fc_to_string(*reinterpret_cast<const double*>(this));
       case int64_type:
-         return to_string(*reinterpret_cast<const int64_t*>(this));
+         return std::to_string(*reinterpret_cast<const int64_t*>(this));
       case uint64_type:
-         return to_string(*reinterpret_cast<const uint64_t*>(this));
+         return std::to_string(*reinterpret_cast<const uint64_t*>(this));
       case bool_type:
          return *reinterpret_cast<const bool*>(this) ? "true" : "false";
       case blob_type:
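The digits10 + 2 precision used by s_fc_to_string() above is what lets a double survive a write/read round trip; a small self-contained check (not PR code):

    #include <iomanip>
    #include <iostream>
    #include <limits>
    #include <sstream>

    int main() {
       double d = 0.1 + 0.2; // 0.30000000000000004..., not exactly representable as "0.3"
       std::stringstream ss;
       // Same formatting as s_fc_to_string(): digits10 + 2 digits, fixed notation.
       ss << std::setprecision(std::numeric_limits<double>::digits10 + 2) << std::fixed << d;
       double back = 0;
       std::stringstream(ss.str()) >> back;
       std::cout << ss.str() << ' ' << (back == d) << '\n'; // the extra digits make this print 1
    }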
"true" : "false"; case blob_type: diff --git a/libraries/libfc/src/variant_object.cpp b/libraries/libfc/src/variant_object.cpp index fbcca7fe1a..61954b595f 100644 --- a/libraries/libfc/src/variant_object.cpp +++ b/libraries/libfc/src/variant_object.cpp @@ -286,6 +286,16 @@ namespace fc { } + mutable_variant_object::mutable_variant_object( variant_object&& obj ) + : _key_value( new std::vector() ) + { + assert(obj._key_value.use_count() == 1); // should only be used if data not shared + if (obj._key_value.use_count() == 1) + *_key_value = std::move(*obj._key_value); + else + *_key_value = *obj._key_value; + } + mutable_variant_object::mutable_variant_object( const mutable_variant_object& obj ) : _key_value( new std::vector(*obj._key_value) ) { @@ -302,6 +312,16 @@ namespace fc return *this; } + mutable_variant_object& mutable_variant_object::operator=( variant_object&& obj ) + { + assert(obj._key_value.use_count() == 1); // should only be used if data not shared + if (obj._key_value.use_count() == 1) + *_key_value = std::move(*obj._key_value); + else + *_key_value = *obj._key_value; + return *this; + } + mutable_variant_object& mutable_variant_object::operator=( mutable_variant_object&& obj ) { if (this != &obj) diff --git a/libraries/state_history/include/eosio/state_history/log.hpp b/libraries/state_history/include/eosio/state_history/log.hpp index abbf3f03a7..567294c2a2 100644 --- a/libraries/state_history/include/eosio/state_history/log.hpp +++ b/libraries/state_history/include/eosio/state_history/log.hpp @@ -88,7 +88,7 @@ namespace state_history { std::filesystem::path retained_dir = "retained"; std::filesystem::path archive_dir = "archive"; uint32_t stride = 1000000; - uint32_t max_retained_files = 10; + uint32_t max_retained_files = UINT32_MAX; }; } // namespace state_history @@ -128,7 +128,7 @@ struct locked_decompress_stream { namespace detail { -std::vector zlib_decompress(fc::cfile& file, uint64_t compressed_size) { +inline std::vector zlib_decompress(fc::cfile& file, uint64_t compressed_size) { if (compressed_size) { std::vector compressed(compressed_size); file.read(compressed.data(), compressed_size); @@ -137,7 +137,7 @@ std::vector zlib_decompress(fc::cfile& file, uint64_t compressed_size) { return {}; } -std::vector zlib_decompress(fc::datastream& strm, uint64_t compressed_size) { +inline std::vector zlib_decompress(fc::datastream& strm, uint64_t compressed_size) { if (compressed_size) { return state_history::zlib_decompress({strm.pos(), compressed_size}); } @@ -282,7 +282,7 @@ class counter { class state_history_log { private: const char* const name = ""; - state_history_log_config config; + state_history_log_config _config; // provide exclusive access to all data of this object since accessed from the main thread and the ship thread mutable std::mutex _mx; @@ -304,7 +304,7 @@ class state_history_log { state_history_log(const char* name, const std::filesystem::path& log_dir, state_history_log_config conf = {}) : name(name) - , config(std::move(conf)) { + , _config(std::move(conf)) { log.set_file_path(log_dir/(std::string(name) + ".log")); index.set_file_path(log_dir/(std::string(name) + ".index")); @@ -326,7 +326,7 @@ class state_history_log { _begin_block = _end_block = catalog.last_block_num() +1; } } - }, config); + }, _config); //check for conversions to/from pruned log, as long as log contains something if(_begin_block != _end_block) { @@ -334,7 +334,7 @@ class state_history_log { log.seek(0); read_header(first_header); - auto prune_config = std::get_if(&config); + 
diff --git a/libraries/state_history/include/eosio/state_history/log.hpp b/libraries/state_history/include/eosio/state_history/log.hpp
index abbf3f03a7..567294c2a2 100644
--- a/libraries/state_history/include/eosio/state_history/log.hpp
+++ b/libraries/state_history/include/eosio/state_history/log.hpp
@@ -88,7 +88,7 @@ namespace state_history {
    std::filesystem::path retained_dir       = "retained";
    std::filesystem::path archive_dir        = "archive";
    uint32_t              stride             = 1000000;
-   uint32_t              max_retained_files = 10;
+   uint32_t              max_retained_files = UINT32_MAX;
 };
 } // namespace state_history
@@ -128,7 +128,7 @@ struct locked_decompress_stream {

 namespace detail {

-std::vector<char> zlib_decompress(fc::cfile& file, uint64_t compressed_size) {
+inline std::vector<char> zlib_decompress(fc::cfile& file, uint64_t compressed_size) {
    if (compressed_size) {
       std::vector<char> compressed(compressed_size);
       file.read(compressed.data(), compressed_size);
    }
    return {};
 }

-std::vector<char> zlib_decompress(fc::datastream<const char*>& strm, uint64_t compressed_size) {
+inline std::vector<char> zlib_decompress(fc::datastream<const char*>& strm, uint64_t compressed_size) {
    if (compressed_size) {
       return state_history::zlib_decompress({strm.pos(), compressed_size});
    }
    return {};
 }
@@ -282,7 +282,7 @@ class counter {
 class state_history_log {
  private:
    const char* const       name = "";
-   state_history_log_config config;
+   state_history_log_config _config;

    // provide exclusive access to all data of this object since accessed from the main thread and the ship thread
    mutable std::mutex _mx;
@@ -304,7 +304,7 @@
    state_history_log(const char* name, const std::filesystem::path& log_dir, state_history_log_config conf = {})
       : name(name)
-      , config(std::move(conf)) {
+      , _config(std::move(conf)) {
      log.set_file_path(log_dir/(std::string(name) + ".log"));
      index.set_file_path(log_dir/(std::string(name) + ".index"));
@@ -326,7 +326,7 @@
                _begin_block = _end_block = catalog.last_block_num() +1;
             }
          }
-      }, config);
+      }, _config);

      //check for conversions to/from pruned log, as long as log contains something
      if(_begin_block != _end_block) {
@@ -334,7 +334,7 @@
         log.seek(0);
         read_header(first_header);

-        auto prune_config = std::get_if<state_history::prune_config>(&config);
+        auto prune_config = std::get_if<state_history::prune_config>(&_config);
         if((is_ship_log_pruned(first_header.magic) == false) && prune_config) {
            //need to convert non-pruned to pruned; first prune any ranges we can (might be none)
@@ -361,7 +361,7 @@
      if(_begin_block == _end_block)
         return;

-     auto prune_config = std::get_if<state_history::prune_config>(&config);
+     auto prune_config = std::get_if<state_history::prune_config>(&_config);
      if(!prune_config || !prune_config->vacuum_on_close)
         return;
@@ -371,6 +371,10 @@
      vacuum();
   }

+  const state_history_log_config& config() const {
+     return _config;
+  }
+
   // begin end
   std::pair<uint32_t, uint32_t> block_range() const {
      std::lock_guard g(_mx);
@@ -456,7 +460,7 @@
      return get_block_id_i(block_num);
   }

-#ifdef BOOST_TEST_MODULE
+#ifdef BOOST_TEST
   fc::cfile& get_log_file() { return log;}
#endif
@@ -499,7 +503,7 @@
         }
      }

-     auto prune_config = std::get_if<state_history::prune_config>(&config);
+     auto prune_config = std::get_if<state_history::prune_config>(&_config);
      if (block_num < _end_block) {
         // This is typically because of a fork, and we need to truncate the log back to the beginning of the fork.
         static uint32_t start_block_num = block_num;
@@ -552,7 +556,7 @@
      log.flush();
      index.flush();

-     auto partition_config = std::get_if<state_history::partition_config>(&config);
+     auto partition_config = std::get_if<state_history::partition_config>(&_config);
      if (partition_config && block_num % partition_config->stride == 0) {
         split_log();
      }
@@ -608,7 +612,7 @@
   }

   void prune(const fc::log_level& loglevel) {
-     auto prune_config = std::get_if<state_history::prune_config>(&config);
+     auto prune_config = std::get_if<state_history::prune_config>(&_config);
      if(!prune_config)
         return;
diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp
index c07a1bda71..fef992a2f5 100644
--- a/libraries/testing/include/eosio/testing/tester.hpp
+++ b/libraries/testing/include/eosio/testing/tester.hpp
@@ -404,6 +404,9 @@ namespace eosio { namespace testing {
         cfg.contracts_console = true;
         cfg.eosvmoc_config.cache_size = 1024*1024*8;

+        // don't use auto tier up for tests, since the point is to test different VMs
+        cfg.eosvmoc_tierup = chain::wasm_interface::vm_oc_enable::oc_none;
+
         for(int i = 0; i < boost::unit_test::framework::master_test_suite().argc; ++i) {
            if(boost::unit_test::framework::master_test_suite().argv[i] == std::string("--eos-vm"))
               cfg.wasm_runtime = chain::wasm_interface::vm_type::eos_vm;
diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp
index b35e33b640..756516f173 100644
--- a/libraries/testing/tester.cpp
+++ b/libraries/testing/tester.cpp
@@ -1,6 +1,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -301,11 +302,14 @@ namespace eosio { namespace testing {
      if( !expected_chain_id ) {
         expected_chain_id = controller::extract_chain_id_from_db( cfg.state_dir );
         if( !expected_chain_id ) {
-           if( std::filesystem::is_regular_file( cfg.blocks_dir / "blocks.log" ) ) {
-              expected_chain_id = block_log::extract_chain_id( cfg.blocks_dir );
-           } else {
-              expected_chain_id = genesis_state().compute_chain_id();
+           std::filesystem::path retained_dir;
+           auto partitioned_config = std::get_if<eosio::chain::partitioned_blocklog_config>(&cfg.blog);
+           if (partitioned_config) {
+              retained_dir = partitioned_config->retained_dir;
+              if (retained_dir.is_relative())
+                 retained_dir = cfg.blocks_dir/retained_dir;
            }
+           expected_chain_id = block_log::extract_chain_id( cfg.blocks_dir, retained_dir );
         }
      }
diff --git a/package.cmake b/package.cmake
index ff3aebbd4b..dd1c1b8e57 100644
--- a/package.cmake
+++ b/package.cmake
@@ -46,13 +46,17 @@ set(CPACK_PACKAGE_HOMEPAGE_URL
"https://github.com/AntelopeIO/leap") set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) set(CPACK_DEBIAN_BASE_PACKAGE_SECTION "utils") +if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.22) + set(CPACK_DEBIAN_COMPRESSION_TYPE "zstd") +endif() set(CPACK_DEBIAN_PACKAGE_CONFLICTS "eosio, mandel") set(CPACK_RPM_PACKAGE_CONFLICTS "eosio, mandel") -#only consider "base" and "dev" components for per-component packages -get_cmake_property(CPACK_COMPONENTS_ALL COMPONENTS) -list(REMOVE_ITEM CPACK_COMPONENTS_ALL "Unspecified") +set(CPACK_COMPONENTS_ALL "base") +if(ENABLE_LEAP_DEV_DEB) + list(APPEND CPACK_COMPONENTS_ALL "dev") +endif() #enable per component packages for .deb; ensure main package is just "leap", not "leap-base", and make the dev package have "leap-dev" at the front not the back set(CPACK_DEB_COMPONENT_INSTALL ON) @@ -61,7 +65,7 @@ set(CPACK_DEBIAN_BASE_FILE_NAME "${CPACK_DEBIAN_FILE_NAME}.deb") string(REGEX REPLACE "^(${CMAKE_PROJECT_NAME})" "\\1-dev" CPACK_DEBIAN_DEV_FILE_NAME "${CPACK_DEBIAN_BASE_FILE_NAME}") #deb package tooling will be unable to detect deps for the dev package. llvm is tricky since we don't know what package could have been used; try to figure it out -set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libboost-all-dev, libssl-dev, libgmp-dev, python3-numpy") +set(CPACK_DEBIAN_DEV_PACKAGE_DEPENDS "libssl-dev, libgmp-dev, python3-distutils, python3-numpy, zlib1g-dev") find_program(DPKG_QUERY "dpkg-query") if(DPKG_QUERY AND OS_RELEASE MATCHES "\n?ID=\"?ubuntu" AND LLVM_CMAKE_DIR) execute_process(COMMAND "${DPKG_QUERY}" -S "${LLVM_CMAKE_DIR}" COMMAND cut -d: -f1 RESULT_VARIABLE LLVM_PKG_FIND_RESULT OUTPUT_VARIABLE LLVM_PKG_FIND_OUTPUT) diff --git a/plugins/chain_api_plugin/chain.swagger.yaml b/plugins/chain_api_plugin/chain.swagger.yaml index 37abad30f6..08d831fad0 100644 --- a/plugins/chain_api_plugin/chain.swagger.yaml +++ b/plugins/chain_api_plugin/chain.swagger.yaml @@ -345,29 +345,18 @@ paths: schema: title: "GetProducersResponse" type: object - additionalProperties: false - minProperties: 3 - required: - - active - - pending - - proposed properties: - active: - type: array - nullable: true - items: - $ref: "https://docs.eosnetwork.com/openapi/v2.0/ProducerSchedule.yaml" - pending: - type: array - nullable: true - items: - $ref: "https://docs.eosnetwork.com/openapi/v2.0/ProducerSchedule.yaml" - proposed: + rows: type: array nullable: true items: - $ref: "https://docs.eosnetwork.com/openapi/v2.0/ProducerSchedule.yaml" - + $ref: "https://docs.eosnetwork.com/openapi/v2.0/Producer.yaml" + total_producer_vote_weight: + type: string + description: The sum of all producer votes. + more: + type: string + description: If not all producers were returned with the first request, more contains the lower bound to use for the next request. /get_raw_code_and_abi: post: @@ -793,7 +782,7 @@ paths: /compute_transaction: post: - description: Executes specified transaction and creates a transaction trace, including resource usage, and then reverts all state changes but not contribute to the subjective billing for the account. If the transaction has signatures, they are processed, but any failures are ignored. Transactions which fail always include the transaction failure trace. Warning, users with exposed nodes who have enabled the compute_transaction endpoint should implement some sort of throttling to protect from Denial of Service attacks. 
+      description: Executes a specified transaction and creates a transaction trace, including resource usage, and then reverts all state changes, but it does not contribute to the subjective billing for the account. If the transaction has signatures, they are processed, but any failures are ignored. Transactions which fail always include the transaction failure trace. Warning: users with exposed nodes who have enabled the compute_transaction endpoint should implement some throttling to protect from Denial of Service attacks.
       operationId: compute_transaction
       requestBody:
         content:
           application/json:
             schema:
               type: object
               properties:
@@ -815,7 +804,106 @@
                 packed_trx:
                   type: string
                   description: Transaction object, JSON to hex
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                description: Returns Nothing
+
+  /get_code_hash:
+    post:
+      description: Retrieves the code hash for a smart contract deployed on the blockchain. Once you have the code hash of a contract, you can compare it with a known or expected value to ensure that the contract code has not been modified or tampered with.
+      operationId: get_code_hash
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                account_name:
+                  description: The name of the account for which you want to retrieve the code hash. It represents the account that owns the smart contract code.
+                  type: string
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  account_name:
+                    description: The name of the account where the smart contract was deployed.
+                    type: string
+                  code_hash:
+                    type: string
+                    description: A string that represents the hash value of the specified account's smart contract code.
+
+  /get_transaction_id:
+    post:
+      description: Retrieves the transaction ID (also known as the transaction hash) of a specified transaction on the blockchain.
+      operationId: get_transaction_id
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              description: The transaction in JSON format for which the ID should be retrieved.
+              $ref: "https://docs.eosnetwork.com/openapi/v2.0/Transaction.yaml"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: string
+                description: The transaction ID.
+
+  /get_producer_schedule:
+    post:
+      description: Retrieves the current producer schedule from the blockchain, which includes the list of active producers and their respective rotation schedule.
+      operationId: get_producer_schedule
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  active:
+                    description: A JSON object that encapsulates the list of active producers schedule and its version.
+                    $ref: "https://docs.eosnetwork.com/openapi/v2.0/ProducerSchedule.yaml"
+                  pending:
+                    description: A JSON object that encapsulates the list of pending producers schedule and its version.
+                    $ref: "https://docs.eosnetwork.com/openapi/v2.0/ProducerSchedule.yaml"
+                  proposed:
+                    description: A JSON object that encapsulates the list of proposed producers schedule and its version.
+                    $ref: "https://docs.eosnetwork.com/openapi/v2.0/ProducerSchedule.yaml"
+
+  /send_read_only_transaction:
+    post:
+      description: Sends a read-only transaction in JSON format to the blockchain. This transaction is not intended for inclusion in the blockchain. When a user sends a transaction that modifies the blockchain state, the connected node will fail the transaction.
+      operationId: send_read_only_transaction
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                transaction:
+                  type: object
+                  properties:
+                    compression:
+                      type: boolean
+                      description: Compression used, usually false
+                    packed_context_free_data:
+                      type: string
+                      description: JSON to hex
+                    packed_trx:
+                      type: string
+                      description: Transaction object, JSON to hex
       responses:
         "200":
           description: OK
           content:
             application/json:
               schema:
                 description: Returns Nothing
+
+  /push_block:
+    post:
+      description: Sends a block to the blockchain.
+      operationId: push_block
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: "https://docs.eosnetwork.com/openapi/v2.0/Block.yaml"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                description: Returns Nothing
\ No newline at end of file
diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp
index ab9816d1c5..e862199ae0 100644
--- a/plugins/chain_api_plugin/chain_api_plugin.cpp
+++ b/plugins/chain_api_plugin/chain_api_plugin.cpp
@@ -1,6 +1,7 @@
 #include
 #include
 #include
+#include
 #include

 namespace eosio {
@@ -11,15 +12,15 @@
 using namespace eosio;

 class chain_api_plugin_impl {
 public:
-   chain_api_plugin_impl(controller& db)
+   explicit chain_api_plugin_impl(controller& db)
      : db(db) {}

   controller& db;
 };

-chain_api_plugin::chain_api_plugin(){}
-chain_api_plugin::~chain_api_plugin(){}
+chain_api_plugin::chain_api_plugin() = default;
+chain_api_plugin::~chain_api_plugin() = default;

 void chain_api_plugin::set_program_options(options_description&, options_description&) {}
 void chain_api_plugin::plugin_initialize(const variables_map&) {}

@@ -41,27 +42,76 @@ parse_params
+template<>
+chain_apis::read_only::get_transaction_id_params
+parse_params<chain_apis::read_only::get_transaction_id_params, http_params_types::params_required>(const std::string& body) {
+   if (body.empty()) {
+      EOS_THROW(chain::invalid_http_request, "A Request body is required");
+   }
+
+   try {
+      fc::variant trx_var = fc::json::from_string( body );
+      if( trx_var.is_object() ) {
+         fc::variant_object& vo = trx_var.get_object();
+         if( vo.contains("actions") && vo["actions"].is_array() ) {
+            fc::mutable_variant_object mvo{vo};
+            fc::variants& action_variants = mvo["actions"].get_array();
+            for( auto& action_v : action_variants ) {
+               if( action_v.is_object() ) {
+                  fc::variant_object& action_vo = action_v.get_object();
+                  if( action_vo.contains( "data" ) && action_vo.contains( "hex_data" ) ) {
+                     fc::mutable_variant_object maction_vo{action_vo};
+                     maction_vo["data"] = maction_vo["hex_data"];
+                     action_vo = maction_vo;
+                     vo = mvo;
+                  } else if( action_vo.contains( "data" ) ) {
+                     if( !action_vo["data"].is_string() ) {
+                        EOS_THROW(chain::invalid_http_request, "Request supports only un-exploded 'data' (hex form)");
+                     }
+                  }
+               }
+               else {
+                  EOS_THROW(chain::invalid_http_request, "Transaction contains invalid or empty action");
+               }
+            }
+         }
+         else {
+            EOS_THROW(chain::invalid_http_request, "Transaction actions are missing or invalid");
+         }
+      }
+      else {
+         EOS_THROW(chain::invalid_http_request, "Transaction object is missing or invalid");
+      }
+      auto trx = trx_var.as<transaction>();
+      if( trx.id() == transaction().id() ) {
+         EOS_THROW(chain::invalid_http_request, "Invalid transaction object");
+      }
+      return trx;
+   } EOS_RETHROW_EXCEPTIONS(chain::invalid_http_request, "Invalid transaction");
+}
+
+#define CALL_WITH_400(api_name, category, api_handle, api_namespace, call_name, http_response_code, params_type) \
 {std::string("/v1/" #api_name "/" #call_name), \
+   api_category::category,\
    [api_handle](string&&, string&& body, url_response_callback&& cb) mutable { \
       auto deadline = api_handle.start(); \
       try { \
          auto params = parse_params<api_namespace::call_name ## _params, params_type>(body);\
-         FC_CHECK_DEADLINE(deadline);\
          fc::variant result( api_handle.call_name( std::move(params), deadline ) ); \
-         cb(http_response_code, deadline, std::move(result)); \
+         cb(http_response_code, std::move(result)); \
       } catch (...) { \
          http_plugin::handle_exception(#api_name, #call_name, body, cb); \
       } \
    }}

-#define CHAIN_RO_CALL(call_name, http_response_code, params_type) CALL_WITH_400(chain, ro_api, chain_apis::read_only, call_name, http_response_code, params_type)
-#define CHAIN_RW_CALL(call_name, http_response_code, params_type) CALL_WITH_400(chain, rw_api, chain_apis::read_write, call_name, http_response_code, params_type)
-#define CHAIN_RO_CALL_POST(call_name, call_result, http_response_code, params_type) CALL_WITH_400_POST(chain, ro_api, chain_apis::read_only, call_name, call_result, http_response_code, params_type)
-#define CHAIN_RO_CALL_ASYNC(call_name, call_result, http_response_code, params_type) CALL_ASYNC_WITH_400(chain, ro_api, chain_apis::read_only, call_name, call_result, http_response_code, params_type)
-#define CHAIN_RW_CALL_ASYNC(call_name, call_result, http_response_code, params_type) CALL_ASYNC_WITH_400(chain, rw_api, chain_apis::read_write, call_name, call_result, http_response_code, params_type)
+#define CHAIN_RO_CALL(call_name, http_response_code, params_type) CALL_WITH_400(chain, chain_ro, ro_api, chain_apis::read_only, call_name, http_response_code, params_type)
+#define CHAIN_RW_CALL(call_name, http_response_code, params_type) CALL_WITH_400(chain, chain_rw, rw_api, chain_apis::read_write, call_name, http_response_code, params_type)
+#define CHAIN_RO_CALL_POST(call_name, call_result, http_response_code, params_type) CALL_WITH_400_POST(chain, chain_ro, ro_api, chain_apis::read_only, call_name, call_result, http_response_code, params_type)
+#define CHAIN_RO_CALL_ASYNC(call_name, call_result, http_response_code, params_type) CALL_ASYNC_WITH_400(chain, chain_ro, ro_api, chain_apis::read_only, call_name, call_result, http_response_code, params_type)
+#define CHAIN_RW_CALL_ASYNC(call_name, call_result, http_response_code, params_type) CALL_ASYNC_WITH_400(chain, chain_rw, rw_api, chain_apis::read_write, call_name, call_result, http_response_code, params_type)

-#define CHAIN_RO_CALL_WITH_400(call_name, http_response_code, params_type) CALL_WITH_400(chain, ro_api, chain_apis::read_only, call_name, http_response_code, params_type)
+#define CHAIN_RO_CALL_WITH_400(call_name, http_response_code, params_type) CALL_WITH_400(chain, chain_ro, ro_api, chain_apis::read_only, call_name, http_response_code, params_type)

 void chain_api_plugin::plugin_startup() {
    ilog( "starting chain_api_plugin" );
@@ -76,7 +126,8 @@ void chain_api_plugin::plugin_startup() {
    ro_api.set_shorten_abi_errors( !http_plugin::verbose_errors() );

    _http_plugin.add_api( {
-      CHAIN_RO_CALL(get_info, 200, http_params_types::no_params)}, appbase::exec_queue::read_only, appbase::priority::medium_high);
+      CALL_WITH_400(chain, node, ro_api, chain_apis::read_only, get_info, 200, http_params_types::no_params)
+   }, appbase::exec_queue::read_only, appbase::priority::medium_high);
    _http_plugin.add_api({
       CHAIN_RO_CALL(get_activated_protocol_features, 200, http_params_types::possible_no_params),
       CHAIN_RO_CALL_POST(get_block, fc::variant, 200, http_params_types::params_required), // _POST because get_block() returns a lambda to be executed on the http thread pool
@@ -133,4 +184,4 @@ void chain_api_plugin::plugin_startup() {

 void chain_api_plugin::plugin_shutdown() {}

-}
+}
\ No newline at end of file
diff --git a/plugins/chain_plugin/CMakeLists.txt b/plugins/chain_plugin/CMakeLists.txt
index 0648d20fb4..ae21541990 100644
--- a/plugins/chain_plugin/CMakeLists.txt
+++ b/plugins/chain_plugin/CMakeLists.txt
@@ -11,7 +11,7 @@ if(EOSIO_ENABLE_DEVELOPER_OPTIONS)
   target_compile_definitions(chain_plugin PUBLIC EOSIO_DEVELOPER)
 endif()

-target_link_libraries( chain_plugin eosio_chain custom_appbase appbase resource_monitor_plugin )
+target_link_libraries( chain_plugin eosio_chain custom_appbase appbase resource_monitor_plugin Boost::bimap )

 target_include_directories( chain_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include" "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/appbase/include" "${CMAKE_CURRENT_SOURCE_DIR}/../resource_monitor_plugin/include")

-add_subdirectory( test )
\ No newline at end of file
+add_subdirectory( test )
diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index 716d266876..6073a8637d 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -114,6 +114,32 @@ void validate(boost::any& v,
   }
 }

+void validate(boost::any& v,
+              const std::vector<std::string>& values,
+              wasm_interface::vm_oc_enable* /* target_type */,
+              int)
+{
+  using namespace boost::program_options;
+
+  // Make sure no previous assignment to 'v' was made.
+  validators::check_first_occurrence(v);
+
+  // Extract the first string from 'values'. If there is more than
+  // one string, it's an error, and an exception will be thrown.
+  std::string s = validators::get_single_string(values);
+  boost::algorithm::to_lower(s);
+
+  if (s == "auto") {
+    v = boost::any(wasm_interface::vm_oc_enable::oc_auto);
+  } else if (s == "all" || s == "true" || s == "on" || s == "yes" || s == "1") {
+    v = boost::any(wasm_interface::vm_oc_enable::oc_all);
+  } else if (s == "none" || s == "false" || s == "off" || s == "no" || s == "0") {
+    v = boost::any(wasm_interface::vm_oc_enable::oc_none);
+  } else {
+    throw validation_error(validation_error::invalid_option_value);
+  }
+}
+
 } // namespace chain
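The option parsing above relies on boost::program_options finding validate() by argument-dependent lookup. A self-contained sketch of the same pattern with a hypothetical enum (demo::tierup stands in for wasm_interface::vm_oc_enable):

    #include <boost/algorithm/string.hpp>
    #include <boost/program_options.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    namespace demo {
    enum class tierup { oc_auto, oc_all, oc_none }; // hypothetical stand-in

    // Found via ADL when program_options parses an option of type demo::tierup.
    void validate(boost::any& v, const std::vector<std::string>& values, tierup*, int) {
       namespace po = boost::program_options;
       po::validators::check_first_occurrence(v);                 // reject a repeated option
       std::string s = po::validators::get_single_string(values); // exactly one token allowed
       boost::algorithm::to_lower(s);
       if (s == "auto")      v = tierup::oc_auto;
       else if (s == "all")  v = tierup::oc_all;
       else if (s == "none") v = tierup::oc_none;
       else                  throw po::validation_error(po::validation_error::invalid_option_value);
    }
    } // namespace demo

    int main(int argc, char** argv) {
       namespace po = boost::program_options;
       po::options_description desc{"options"};
       desc.add_options()
          ("tier-up", po::value<demo::tierup>()->default_value(demo::tierup::oc_auto, "auto"),
           "'auto', 'all' or 'none'");
       po::variables_map vm;
       po::store(po::parse_command_line(argc, argv, desc), vm);
       po::notify(vm);
       std::cout << (vm["tier-up"].as<demo::tierup>() == demo::tierup::oc_none ? "tier-up off\n" : "tier-up on\n");
    }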
 using namespace eosio;

@@ -149,7 +175,6 @@ class chain_plugin_impl {
    std::optional<controller::config> chain_config;
    std::optional<controller>         chain;
    std::optional<genesis_state>      genesis;
-   //txn_msg_rate_limits               rate_limits;
    std::optional<wasm_interface::vm_type> wasm_runtime;
    fc::microseconds                  abi_serializer_max_time_us;
    std::optional<std::filesystem::path> snapshot_path;
@@ -186,6 +211,16 @@
    std::optional<chain_apis::account_query_db>    _account_query_db;
    std::optional<chain_apis::trx_retry_db>        _trx_retry_db;
    chain_apis::trx_finality_status_processing_ptr _trx_finality_status_processing;
+
+   static void handle_guard_exception(const chain::guard_exception& e);
+   void do_hard_replay(const variables_map& options);
+   void enable_accept_transactions();
+   void plugin_initialize(const variables_map& options);
+   void plugin_startup();
+   void plugin_shutdown();
+
+private:
+   static void log_guard_exception(const chain::guard_exception& e);
 };

 chain_plugin::chain_plugin()
@@ -194,6 +229,7 @@ chain_plugin::chain_plugin()
    app().register_config_type<eosio::chain::db_read_mode>();
    app().register_config_type<eosio::chain::validation_mode>();
    app().register_config_type<chainbase::pinnable_mapped_file::map_mode>();
+   app().register_config_type<eosio::chain::wasm_interface::vm_oc_enable>();
 }

 chain_plugin::~chain_plugin() = default;

@@ -218,7 +254,7 @@
 #ifdef EOSIO_EOS_VM_OC_DEVELOPER
    wasm_runtime_opt += delim + "\"eos-vm-oc\"";
-   wasm_runtime_desc += "\"eos-vm-oc\" : Unsupported. Instead, use one of the other runtimes along with the option enable-eos-vm-oc.\n";
+   wasm_runtime_desc += "\"eos-vm-oc\" : Unsupported. Instead, use one of the other runtimes along with the option eos-vm-oc-enable.\n";
 #endif
    wasm_runtime_opt += ")\n" + wasm_runtime_desc;
@@ -291,7 +327,7 @@
      "In \"head\" mode: database contains state changes up to the head block; transactions received by the node are relayed if valid.\n"
      "In \"irreversible\" mode: database contains state changes up to the last irreversible block; "
      "transactions received via the P2P network are not relayed and transactions cannot be pushed via the chain API.\n"
-     "In \"speculative\" mode: (DEPRECATED: head mode recommended) database contains state changes by transactions in the blockchain "
+     "In \"speculative\" mode: database contains state changes by transactions in the blockchain "
      "up to the head block as well as some transactions not yet included in the blockchain; transactions received by the node are relayed if valid.\n"
    )
  ( "api-accept-transactions", bpo::value<bool>()->default_value(true), "Allow API transactions to be evaluated and relayed if valid.")
@@ -325,16 +361,22 @@
          EOS_ASSERT(false, plugin_exception, "");
       }
    }), "Number of threads to use for EOS VM OC tier-up")
- ("eos-vm-oc-enable", bpo::bool_switch(), "Enable EOS VM OC tier-up runtime")
+ ("eos-vm-oc-enable", bpo::value<chain::wasm_interface::vm_oc_enable>()->default_value(chain::wasm_interface::vm_oc_enable::oc_auto),
+  "Enable EOS VM OC tier-up runtime ('auto', 'all', 'none').\n"
+  "'auto' - EOS VM OC tier-up is enabled for eosio.* accounts and read-only trxs, except on producers applying blocks.\n"
+  "'all' - EOS VM OC tier-up is enabled for all contract execution.\n"
+  "'none' - EOS VM OC tier-up is completely disabled.\n")
 #endif
 ("enable-account-queries", bpo::value<bool>()->default_value(false), "enable queries to find accounts by various metadata.")
 ("max-nonprivileged-inline-action-size", bpo::value<uint32_t>()->default_value(config::default_max_nonprivileged_inline_action_size), "maximum allowed size (in bytes) of an inline action for a nonprivileged account")
 ("transaction-retry-max-storage-size-gb", bpo::value<uint64_t>(),
  "Maximum size (in GiB) allowed to be allocated for the Transaction Retry feature. Setting above 0 enables this feature.")
 ("transaction-retry-interval-sec", bpo::value<uint32_t>()->default_value(20),
- "How often, in seconds, to resend an incoming transaction to network if not seen in a block.")
+ "How often, in seconds, to resend an incoming transaction to network if not seen in a block.\n"
+ "Needs to be at least twice as large as p2p-dedup-cache-expire-time-sec.")
 ("transaction-retry-max-expiration-sec", bpo::value<uint32_t>()->default_value(120),
- "Maximum allowed transaction expiration for retry transactions, will retry transactions up to this value.")
+ "Maximum allowed transaction expiration for retry transactions, will retry transactions up to this value.\n"
+ "Should be larger than transaction-retry-interval-sec.")
 ("transaction-finality-status-max-storage-size-gb", bpo::value<uint64_t>(),
  "Maximum size (in GiB) allowed to be allocated for the Transaction Finality Status feature. 
Setting above 0 enables this feature.")
 ("transaction-finality-status-success-duration-sec", bpo::value<uint64_t>()->default_value(config::default_max_transaction_finality_status_success_duration_sec),
@@ -348,16 +390,6 @@
  "If set to 0, no blocks are written to the block log; block log file is removed after startup.");

-// TODO: rate limiting
-   /*("per-authorized-account-transaction-msg-rate-limit-time-frame-sec", bpo::value<uint32_t>()->default_value(default_per_auth_account_time_frame_seconds),
-     "The time frame, in seconds, that the per-authorized-account-transaction-msg-rate-limit is imposed over.")
-    ("per-authorized-account-transaction-msg-rate-limit", bpo::value<uint32_t>()->default_value(default_per_auth_account),
-     "Limits the maximum rate of transaction messages that an account is allowed each per-authorized-account-transaction-msg-rate-limit-time-frame-sec.")
-    ("per-code-account-transaction-msg-rate-limit-time-frame-sec", bpo::value<uint32_t>()->default_value(default_per_code_account_time_frame_seconds),
-     "The time frame, in seconds, that the per-code-account-transaction-msg-rate-limit is imposed over.")
-    ("per-code-account-transaction-msg-rate-limit", bpo::value<uint32_t>()->default_value(default_per_code_account),
-     "Limits the maximum rate of transaction messages that an account's code is allowed each per-code-account-transaction-msg-rate-limit-time-frame-sec.")*/
-
 cli.add_options()
    ("genesis-json", bpo::value<std::filesystem::path>(), "File to read Genesis State from")
    ("genesis-timestamp", bpo::value<std::string>(), "override the initial timestamp in the Genesis State file")
@@ -460,15 +492,14 @@ namespace {
 }

 void
-chain_plugin::do_hard_replay(const variables_map& options) {
+chain_plugin_impl::do_hard_replay(const variables_map& options) {
    ilog( "Hard replay requested: deleting state database" );
-   clear_directory_contents( my->chain_config->state_dir );
-   auto backup_dir = block_log::repair_log( my->blocks_dir, options.at( "truncate-at-block" ).as<uint32_t>(), config::reversible_blocks_dir_name);
+   clear_directory_contents( chain_config->state_dir );
+   auto backup_dir = block_log::repair_log( blocks_dir, options.at( "truncate-at-block" ).as<uint32_t>(), config::reversible_blocks_dir_name);
 }

-void chain_plugin::plugin_initialize(const variables_map& options) {
+void chain_plugin_impl::plugin_initialize(const variables_map& options) {
    try {
-      handle_sighup(); // Sets loggers
       ilog("initializing chain plugin");

      try {
@@ -479,7 +510,7 @@
         throw;
      }

-      my->chain_config = controller::config();
+      chain_config = controller::config();

      if( options.at( "print-build-info" ).as<bool>() || options.count( "extract-build-info") ) {
         if( options.at( "print-build-info" ).as<bool>() ) {
@@ -503,17 +534,17 @@
         EOS_THROW( node_management_success, "reported build environment information" );
      }

-      LOAD_VALUE_SET( options, "sender-bypass-whiteblacklist", my->chain_config->sender_bypass_whiteblacklist );
-      LOAD_VALUE_SET( options, "actor-whitelist", my->chain_config->actor_whitelist );
-      LOAD_VALUE_SET( options, "actor-blacklist", my->chain_config->actor_blacklist );
-      LOAD_VALUE_SET( options, "contract-whitelist", my->chain_config->contract_whitelist );
-      LOAD_VALUE_SET( options, "contract-blacklist", my->chain_config->contract_blacklist );
+      LOAD_VALUE_SET( options, "sender-bypass-whiteblacklist", chain_config->sender_bypass_whiteblacklist );
+      LOAD_VALUE_SET( options, "actor-whitelist",
chain_config->actor_whitelist ); + LOAD_VALUE_SET( options, "actor-blacklist", chain_config->actor_blacklist ); + LOAD_VALUE_SET( options, "contract-whitelist", chain_config->contract_whitelist ); + LOAD_VALUE_SET( options, "contract-blacklist", chain_config->contract_blacklist ); - LOAD_VALUE_SET( options, "trusted-producer", my->chain_config->trusted_producers ); + LOAD_VALUE_SET( options, "trusted-producer", chain_config->trusted_producers ); if( options.count( "action-blacklist" )) { const std::vector& acts = options["action-blacklist"].as>(); - auto& list = my->chain_config->action_blacklist; + auto& list = chain_config->action_blacklist; for( const auto& a : acts ) { auto pos = a.find( "::" ); EOS_ASSERT( pos != std::string::npos, plugin_config_exception, "Invalid entry in action-blacklist: '${a}'", ("a", a)); @@ -525,7 +556,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if( options.count( "key-blacklist" )) { const std::vector& keys = options["key-blacklist"].as>(); - auto& list = my->chain_config->key_blacklist; + auto& list = chain_config->key_blacklist; for( const auto& key_str : keys ) { list.emplace( key_str ); } @@ -534,17 +565,17 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if( options.count( "blocks-dir" )) { auto bld = options.at( "blocks-dir" ).as(); if( bld.is_relative()) - my->blocks_dir = app().data_dir() / bld; + blocks_dir = app().data_dir() / bld; else - my->blocks_dir = bld; + blocks_dir = bld; } if( options.count( "state-dir" )) { auto sd = options.at( "state-dir" ).as(); if( sd.is_relative()) - my->state_dir = app().data_dir() / sd; + state_dir = app().data_dir() / sd; else - my->state_dir = sd; + state_dir = sd; } protocol_feature_set pfs; @@ -561,126 +592,87 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if( options.count("checkpoint") ) { auto cps = options.at("checkpoint").as>(); - my->loaded_checkpoints.reserve(cps.size()); + loaded_checkpoints.reserve(cps.size()); for( const auto& cp : cps ) { auto item = fc::json::from_string(cp).as>(); - auto itr = my->loaded_checkpoints.find(item.first); - if( itr != my->loaded_checkpoints.end() ) { + auto itr = loaded_checkpoints.find(item.first); + if( itr != loaded_checkpoints.end() ) { EOS_ASSERT( itr->second == item.second, plugin_config_exception, "redefining existing checkpoint at block number ${num}: original: ${orig} new: ${new}", ("num", item.first)("orig", itr->second)("new", item.second) ); } else { - my->loaded_checkpoints[item.first] = item.second; + loaded_checkpoints[item.first] = item.second; } } } if( options.count( "wasm-runtime" )) - my->wasm_runtime = options.at( "wasm-runtime" ).as(); + wasm_runtime = options.at( "wasm-runtime" ).as(); - LOAD_VALUE_SET( options, "profile-account", my->chain_config->profile_accounts ); + LOAD_VALUE_SET( options, "profile-account", chain_config->profile_accounts ); - my->abi_serializer_max_time_us = fc::microseconds(options.at("abi-serializer-max-time-ms").as() * 1000); + abi_serializer_max_time_us = fc::microseconds(options.at("abi-serializer-max-time-ms").as() * 1000); - my->chain_config->blocks_dir = my->blocks_dir; - my->chain_config->state_dir = my->state_dir; - my->chain_config->read_only = my->readonly; + chain_config->blocks_dir = blocks_dir; + chain_config->state_dir = state_dir; + chain_config->read_only = readonly; if (auto resmon_plugin = app().find_plugin()) { - resmon_plugin->monitor_directory(my->chain_config->blocks_dir); - 
resmon_plugin->monitor_directory(my->chain_config->state_dir); + resmon_plugin->monitor_directory(chain_config->blocks_dir); + resmon_plugin->monitor_directory(chain_config->state_dir); } if( options.count( "chain-state-db-size-mb" )) - my->chain_config->state_size = options.at( "chain-state-db-size-mb" ).as() * 1024 * 1024; + chain_config->state_size = options.at( "chain-state-db-size-mb" ).as() * 1024 * 1024; if( options.count( "chain-state-db-guard-size-mb" )) - my->chain_config->state_guard_size = options.at( "chain-state-db-guard-size-mb" ).as() * 1024 * 1024; + chain_config->state_guard_size = options.at( "chain-state-db-guard-size-mb" ).as() * 1024 * 1024; if( options.count( "max-nonprivileged-inline-action-size" )) - my->chain_config->max_nonprivileged_inline_action_size = options.at( "max-nonprivileged-inline-action-size" ).as(); + chain_config->max_nonprivileged_inline_action_size = options.at( "max-nonprivileged-inline-action-size" ).as(); if( options.count( "transaction-finality-status-max-storage-size-gb" )) { const uint64_t max_storage_size = options.at( "transaction-finality-status-max-storage-size-gb" ).as() * 1024 * 1024 * 1024; if (max_storage_size > 0) { const fc::microseconds success_duration = fc::seconds(options.at( "transaction-finality-status-success-duration-sec" ).as()); const fc::microseconds failure_duration = fc::seconds(options.at( "transaction-finality-status-failure-duration-sec" ).as()); - my->_trx_finality_status_processing.reset( + _trx_finality_status_processing.reset( new chain_apis::trx_finality_status_processing(max_storage_size, success_duration, failure_duration)); } } if( options.count( "chain-threads" )) { - my->chain_config->thread_pool_size = options.at( "chain-threads" ).as(); - EOS_ASSERT( my->chain_config->thread_pool_size > 0, plugin_config_exception, - "chain-threads ${num} must be greater than 0", ("num", my->chain_config->thread_pool_size) ); + chain_config->thread_pool_size = options.at( "chain-threads" ).as(); + EOS_ASSERT( chain_config->thread_pool_size > 0, plugin_config_exception, + "chain-threads ${num} must be greater than 0", ("num", chain_config->thread_pool_size) ); } - my->chain_config->sig_cpu_bill_pct = options.at("signature-cpu-billable-pct").as(); - EOS_ASSERT( my->chain_config->sig_cpu_bill_pct >= 0 && my->chain_config->sig_cpu_bill_pct <= 100, plugin_config_exception, - "signature-cpu-billable-pct must be 0 - 100, ${pct}", ("pct", my->chain_config->sig_cpu_bill_pct) ); - my->chain_config->sig_cpu_bill_pct *= config::percent_1; + chain_config->sig_cpu_bill_pct = options.at("signature-cpu-billable-pct").as(); + EOS_ASSERT( chain_config->sig_cpu_bill_pct >= 0 && chain_config->sig_cpu_bill_pct <= 100, plugin_config_exception, + "signature-cpu-billable-pct must be 0 - 100, ${pct}", ("pct", chain_config->sig_cpu_bill_pct) ); + chain_config->sig_cpu_bill_pct *= config::percent_1; - if( my->wasm_runtime ) - my->chain_config->wasm_runtime = *my->wasm_runtime; + if( wasm_runtime ) + chain_config->wasm_runtime = *wasm_runtime; - my->chain_config->force_all_checks = options.at( "force-all-checks" ).as(); - my->chain_config->disable_replay_opts = options.at( "disable-replay-opts" ).as(); - my->chain_config->contracts_console = options.at( "contracts-console" ).as(); - my->chain_config->allow_ram_billing_in_notify = options.at( "disable-ram-billing-notify-checks" ).as(); + chain_config->force_all_checks = options.at( "force-all-checks" ).as(); + chain_config->disable_replay_opts = options.at( "disable-replay-opts" ).as(); + 
chain_config->contracts_console = options.at( "contracts-console" ).as(); + chain_config->allow_ram_billing_in_notify = options.at( "disable-ram-billing-notify-checks" ).as(); #ifdef EOSIO_DEVELOPER - my->chain_config->disable_all_subjective_mitigations = options.at( "disable-all-subjective-mitigations" ).as(); + chain_config->disable_all_subjective_mitigations = options.at( "disable-all-subjective-mitigations" ).as(); #endif - my->chain_config->maximum_variable_signature_length = options.at( "maximum-variable-signature-length" ).as(); + chain_config->maximum_variable_signature_length = options.at( "maximum-variable-signature-length" ).as(); if( options.count( "terminate-at-block" )) - my->chain_config->terminate_at_block = options.at( "terminate-at-block" ).as(); - - if( options.count( "extract-genesis-json" ) || options.at( "print-genesis-json" ).as()) { - std::optional gs; - - if( std::filesystem::exists( my->blocks_dir / "blocks.log" )) { - gs = block_log::extract_genesis_state( my->blocks_dir ); - EOS_ASSERT( gs, - plugin_config_exception, - "Block log at '${path}' does not contain a genesis state, it only has the chain-id.", - ("path", (my->blocks_dir / "blocks.log")) - ); - } else { - wlog( "No blocks.log found at '${p}'. Using default genesis state.", - ("p", (my->blocks_dir / "blocks.log"))); - gs.emplace(); - } - - if( options.at( "print-genesis-json" ).as()) { - ilog( "Genesis JSON:\n${genesis}", ("genesis", json::to_pretty_string( *gs ))); - } - - if( options.count( "extract-genesis-json" )) { - auto p = options.at( "extract-genesis-json" ).as(); - - if( p.is_relative()) { - p = std::filesystem::current_path() / p; - } - - EOS_ASSERT( fc::json::save_to_file( *gs, p, true ), - misc_exception, - "Error occurred while writing genesis JSON to '${path}'", - ("path", p) - ); - - ilog( "Saved genesis JSON to '${path}'", ("path", p) ); - } - - EOS_THROW( extract_genesis_state_exception, "extracted genesis state from blocks.log" ); - } + chain_config->terminate_at_block = options.at( "terminate-at-block" ).as(); // move fork_db to new location - upgrade_from_reversible_to_fork_db( my.get() ); + upgrade_from_reversible_to_fork_db( this ); bool has_partitioned_block_log_options = options.count("blocks-retained-dir") || options.count("blocks-archive-dir") || options.count("blocks-log-stride") || options.count("max-retained-block-files"); @@ -689,57 +681,97 @@ void chain_plugin::plugin_initialize(const variables_map& options) { EOS_ASSERT(!has_partitioned_block_log_options || !has_retain_blocks_option, plugin_config_exception, "block-log-retain-blocks cannot be specified together with blocks-retained-dir, blocks-archive-dir or blocks-log-stride or max-retained-block-files."); - + std::filesystem::path retained_dir; if (has_partitioned_block_log_options) { - my->chain_config->blog = eosio::chain::partitioned_blocklog_config{ - .retained_dir = options.count("blocks-retained-dir") ? options.at("blocks-retained-dir").as() - : std::filesystem::path(""), + retained_dir = options.count("blocks-retained-dir") ? options.at("blocks-retained-dir").as() + : std::filesystem::path(""); + if (retained_dir.is_relative()) + retained_dir = std::filesystem::path{blocks_dir}/retained_dir; + + chain_config->blog = eosio::chain::partitioned_blocklog_config{ + .retained_dir = retained_dir, .archive_dir = options.count("blocks-archive-dir") ? options.at("blocks-archive-dir").as() - : std::filesystem::path("archive"), + : std::filesystem::path("archive"), .stride = options.count("blocks-log-stride") ? 
options.at("blocks-log-stride").as() : UINT32_MAX, .max_retained_files = options.count("max-retained-block-files") - ? options.at("max-retained-block-files").as() - : UINT32_MAX, + ? options.at("max-retained-block-files").as() + : UINT32_MAX, }; } else if(has_retain_blocks_option) { uint32_t block_log_retain_blocks = options.at("block-log-retain-blocks").as(); if (block_log_retain_blocks == 0) - my->chain_config->blog = eosio::chain::empty_blocklog_config{}; + chain_config->blog = eosio::chain::empty_blocklog_config{}; else { EOS_ASSERT(cfile::supports_hole_punching(), plugin_config_exception, "block-log-retain-blocks cannot be greater than 0 because the file system does not support hole " "punching"); - my->chain_config->blog = eosio::chain::prune_blocklog_config{ .prune_blocks = block_log_retain_blocks }; + chain_config->blog = eosio::chain::prune_blocklog_config{ .prune_blocks = block_log_retain_blocks }; } } + + + if( options.count( "extract-genesis-json" ) || options.at( "print-genesis-json" ).as()) { + std::optional gs; + + gs = block_log::extract_genesis_state( blocks_dir, retained_dir ); + EOS_ASSERT( gs, + plugin_config_exception, + "Block log at '${path}' does not contain a genesis state, it only has the chain-id.", + ("path", (blocks_dir / "blocks.log").generic_string()) + ); + + + if( options.at( "print-genesis-json" ).as()) { + ilog( "Genesis JSON:\n${genesis}", ("genesis", json::to_pretty_string( *gs ))); + } + + if( options.count( "extract-genesis-json" )) { + auto p = options.at( "extract-genesis-json" ).as(); + + if( p.is_relative()) { + p = std::filesystem::current_path() / p; + } + + EOS_ASSERT( fc::json::save_to_file( *gs, p, true ), + misc_exception, + "Error occurred while writing genesis JSON to '${path}'", + ("path", p.generic_string()) + ); + + ilog( "Saved genesis JSON to '${path}'", ("path", p.generic_string()) ); + } + + EOS_THROW( extract_genesis_state_exception, "extracted genesis state from blocks.log" ); + } + if( options.at( "delete-all-blocks" ).as()) { ilog( "Deleting state database and blocks" ); if( options.at( "truncate-at-block" ).as() > 0 ) wlog( "The --truncate-at-block option does not make sense when deleting all blocks." ); - clear_directory_contents( my->chain_config->state_dir ); - clear_directory_contents( my->blocks_dir ); + clear_directory_contents( chain_config->state_dir ); + clear_directory_contents( blocks_dir ); } else if( options.at( "hard-replay-blockchain" ).as()) { do_hard_replay(options); } else if( options.at( "replay-blockchain" ).as()) { ilog( "Replay requested: deleting state database" ); if( options.at( "truncate-at-block" ).as() > 0 ) wlog( "The --truncate-at-block option does not work for a regular replay of the blockchain." ); - clear_chainbase_files( my->chain_config->state_dir ); + clear_chainbase_files( chain_config->state_dir ); } else if( options.at( "truncate-at-block" ).as() > 0 ) { wlog( "The --truncate-at-block option can only be used with --hard-replay-blockchain." 
      }

      std::optional<chain_id_type> chain_id;
      if (options.count( "snapshot" )) {
-        my->snapshot_path = options.at( "snapshot" ).as<std::filesystem::path>();
-        EOS_ASSERT( std::filesystem::exists(*my->snapshot_path), plugin_config_exception,
-                    "Cannot load snapshot, ${name} does not exist", ("name", my->snapshot_path->generic_string()) );
+        snapshot_path = options.at( "snapshot" ).as<std::filesystem::path>();
+        EOS_ASSERT( std::filesystem::exists(*snapshot_path), plugin_config_exception,
+                    "Cannot load snapshot, ${name} does not exist", ("name", snapshot_path->generic_string()) );

         // recover genesis information from the snapshot
         // used for validation code below
-        auto infile = std::ifstream(my->snapshot_path->generic_string(), (std::ios::in | std::ios::binary));
+        auto infile = std::ifstream(snapshot_path->generic_string(), (std::ios::in | std::ios::binary));
         istream_snapshot_reader reader(infile);
         reader.validate();
         chain_id = controller::extract_chain_id(reader);
@@ -752,46 +784,40 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
                     plugin_config_exception,
                     "--snapshot is incompatible with --genesis-json as the snapshot contains genesis information");

-        auto shared_mem_path = my->chain_config->state_dir / "shared_memory.bin";
+        auto shared_mem_path = chain_config->state_dir / "shared_memory.bin";
         EOS_ASSERT( !std::filesystem::is_regular_file(shared_mem_path),
                     plugin_config_exception,
                     "Snapshot can only be used to initialize an empty database." );

-        if( std::filesystem::is_regular_file( my->blocks_dir / "blocks.log" )) {
-           auto block_log_genesis = block_log::extract_genesis_state(my->blocks_dir);
-           if( block_log_genesis ) {
-              const auto& block_log_chain_id = block_log_genesis->compute_chain_id();
-              EOS_ASSERT( *chain_id == block_log_chain_id,
-                          plugin_config_exception,
-                          "snapshot chain ID (${snapshot_chain_id}) does not match the chain ID from the genesis state in the block log (${block_log_chain_id})",
-                          ("snapshot_chain_id", *chain_id)
-                          ("block_log_chain_id", block_log_chain_id)
-              );
-           } else {
-              const auto& block_log_chain_id = block_log::extract_chain_id(my->blocks_dir);
-              EOS_ASSERT( *chain_id == block_log_chain_id,
+        auto block_log_chain_id = block_log::extract_chain_id(blocks_dir, retained_dir);
+
+        if (block_log_chain_id) {
+           EOS_ASSERT( *chain_id == *block_log_chain_id,
                        plugin_config_exception,
                        "snapshot chain ID (${snapshot_chain_id}) does not match the chain ID (${block_log_chain_id}) in the block log",
                        ("snapshot_chain_id", *chain_id)
-                       ("block_log_chain_id", block_log_chain_id)
+                       ("block_log_chain_id", *block_log_chain_id)
            );
-           }
         }
      } else {
-        chain_id = controller::extract_chain_id_from_db( my->chain_config->state_dir );
+        chain_id = controller::extract_chain_id_from_db( chain_config->state_dir );

+        auto chain_context = block_log::extract_chain_context( blocks_dir, retained_dir );
         std::optional<genesis_state> block_log_genesis;
-        std::optional<chain_id_type> block_log_chain_id;
-
-        if( std::filesystem::is_regular_file( my->blocks_dir / "blocks.log" ) ) {
-           block_log_genesis = block_log::extract_genesis_state( my->blocks_dir );
-           if( block_log_genesis ) {
-              block_log_chain_id = block_log_genesis->compute_chain_id();
-           } else {
-              block_log_chain_id = block_log::extract_chain_id( my->blocks_dir );
-           }
+        std::optional<chain_id_type> block_log_chain_id;
+
+        if (chain_context) {
+           std::visit(overloaded {
+              [&](const genesis_state& gs) {
+                 block_log_genesis = gs;
+                 block_log_chain_id = gs.compute_chain_id();
+              },
+              [&](const chain_id_type& id) {
+                 block_log_chain_id = id;
+              }
+           }, *chain_context);
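block_log::extract_chain_context evidently returns an optional variant holding either a full genesis_state or only a chain_id_type, and the code dispatches on it with std::visit over an overloaded lambda set. The overloaded helper is the standard C++17 two-liner; a self-contained sketch with stand-in types:

   #include <iostream>
   #include <string>
   #include <variant>

   // the usual C++17 overload-set helper, as used in the hunk above
   template <class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
   template <class... Ts> overloaded(Ts...) -> overloaded<Ts...>;   // deduction guide (implicit in C++20)

   struct genesis  { std::string chain_id() const { return "id-from-genesis"; } };
   using chain_id = std::string;

   int main() {
      std::variant<genesis, chain_id> ctx = chain_id{"id-only"};
      std::string id;
      std::visit(overloaded {
         [&](const genesis& g)  { id = g.chain_id(); },   // full genesis: derive the id
         [&](const chain_id& c) { id = c; }               // pruned log: the id is all we have
      }, ctx);
      std::cout << id << "\n";   // prints "id-only"
   }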
           if( chain_id ) {
              EOS_ASSERT( *block_log_chain_id == *chain_id,
                          block_log_exception,
@@ -802,7 +828,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
              );
           } else if (block_log_genesis) {
              ilog( "Starting fresh blockchain state using genesis state extracted from blocks.log." );
-             my->genesis = block_log_genesis;
+             genesis = block_log_genesis;
              // Delay setting chain_id until later so that the code handling genesis-json below can know
              // that chain_id still only represents a chain ID extracted from the state (assuming it exists).
           }
@@ -859,7 +885,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
                 chain_id = provided_genesis_chain_id;

                 ilog( "Starting fresh blockchain state using provided genesis state." );
-                my->genesis = std::move(provided_genesis);
+                genesis = std::move(provided_genesis);
              }
           }
        } else {
@@ -869,9 +895,9 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
        }

        if( !chain_id ) {
-          if( my->genesis ) {
+          if( genesis ) {
              // Uninitialized state database and genesis state extracted from block log
-             chain_id = my->genesis->compute_chain_id();
+             chain_id = genesis->compute_chain_id();
           } else {
              // Uninitialized state database and no genesis state provided
@@ -882,48 +908,47 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
              );

              ilog( "Starting fresh blockchain state using default genesis state." );
-             my->genesis.emplace();
-             chain_id = my->genesis->compute_chain_id();
+             genesis.emplace();
+             chain_id = genesis->compute_chain_id();
           }
        }
     }

     if ( options.count("read-mode") ) {
-       my->chain_config->read_mode = options.at("read-mode").as<db_read_mode>();
+       chain_config->read_mode = options.at("read-mode").as<db_read_mode>();
     }
-    my->api_accept_transactions = options.at( "api-accept-transactions" ).as<bool>();
+    api_accept_transactions = options.at( "api-accept-transactions" ).as<bool>();

-    if( my->chain_config->read_mode == db_read_mode::IRREVERSIBLE ) {
-       if( my->api_accept_transactions ) {
-          my->api_accept_transactions = false;
+    if( chain_config->read_mode == db_read_mode::IRREVERSIBLE ) {
+       if( api_accept_transactions ) {
+          api_accept_transactions = false;
           wlog( "api-accept-transactions set to false due to read-mode: irreversible" );
        }
     }
-    if( my->api_accept_transactions ) {
+    if( api_accept_transactions ) {
        enable_accept_transactions();
     }

     if ( options.count("validation-mode") ) {
-       my->chain_config->block_validation_mode = options.at("validation-mode").as<validation_mode>();
+       chain_config->block_validation_mode = options.at("validation-mode").as<validation_mode>();
     }

-    my->chain_config->db_map_mode = options.at("database-map-mode").as<pinnable_mapped_file::map_mode>();
+    chain_config->db_map_mode = options.at("database-map-mode").as<pinnable_mapped_file::map_mode>();

 #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED
     if( options.count("eos-vm-oc-cache-size-mb") )
-       my->chain_config->eosvmoc_config.cache_size = options.at( "eos-vm-oc-cache-size-mb" ).as<uint64_t>() * 1024u * 1024u;
+       chain_config->eosvmoc_config.cache_size = options.at( "eos-vm-oc-cache-size-mb" ).as<uint64_t>() * 1024u * 1024u;
     if( options.count("eos-vm-oc-compile-threads") )
-       my->chain_config->eosvmoc_config.threads = options.at("eos-vm-oc-compile-threads").as<uint64_t>();
-    if( options["eos-vm-oc-enable"].as<bool>() )
-       my->chain_config->eosvmoc_tierup = true;
+       chain_config->eosvmoc_config.threads = options.at("eos-vm-oc-compile-threads").as<uint64_t>();
+    chain_config->eosvmoc_tierup = options["eos-vm-oc-enable"].as<bool>();
 #endif

-    my->account_queries_enabled = options.at("enable-account-queries").as<bool>();
+    account_queries_enabled = options.at("enable-account-queries").as<bool>();

-    my->chain_config->integrity_hash_on_start = options.at("integrity-hash-on-start").as<bool>();
-
my->chain_config->integrity_hash_on_stop = options.at("integrity-hash-on-stop").as(); + chain_config->integrity_hash_on_start = options.at("integrity-hash-on-start").as(); + chain_config->integrity_hash_on_stop = options.at("integrity-hash-on-stop").as(); - my->chain.emplace( *my->chain_config, std::move(pfs), *chain_id ); + chain.emplace( *chain_config, std::move(pfs), *chain_id ); if( options.count( "transaction-retry-max-storage-size-gb" )) { EOS_ASSERT( !options.count( "producer-name"), plugin_config_exception, @@ -939,9 +964,9 @@ void chain_plugin::plugin_initialize(const variables_map& options) { EOS_ASSERT( trx_retry_max_expire > trx_retry_interval, plugin_config_exception, "transaction-retry-max-expiration-sec ${m} should be configured larger than transaction-retry-interval-sec ${i}", ("m", trx_retry_max_expire)("i", trx_retry_interval) ); - my->_trx_retry_db.emplace( *my->chain, max_storage_size, + _trx_retry_db.emplace( *chain, max_storage_size, fc::seconds(trx_retry_interval), fc::seconds(trx_retry_max_expire), - my->abi_serializer_max_time_us ); + abi_serializer_max_time_us ); } } @@ -978,33 +1003,33 @@ void chain_plugin::plugin_initialize(const variables_map& options) { EOS_ASSERT( options.at("p2p-accept-transactions").as() == false, plugin_config_exception, "p2p-accept-transactions must be set to false in order to enable deep-mind logging."); - my->chain->enable_deep_mind( &_deep_mind_log ); + chain->enable_deep_mind( &_deep_mind_log ); } // set up method providers - my->get_block_by_number_provider = app().get_method().register_provider( + get_block_by_number_provider = app().get_method().register_provider( [this]( uint32_t block_num ) -> signed_block_ptr { - return my->chain->fetch_block_by_number( block_num ); + return chain->fetch_block_by_number( block_num ); } ); - my->get_block_by_id_provider = app().get_method().register_provider( + get_block_by_id_provider = app().get_method().register_provider( [this]( block_id_type id ) -> signed_block_ptr { - return my->chain->fetch_block_by_id( id ); + return chain->fetch_block_by_id( id ); } ); - my->get_head_block_id_provider = app().get_method().register_provider( [this]() { - return my->chain->head_block_id(); + get_head_block_id_provider = app().get_method().register_provider( [this]() { + return chain->head_block_id(); } ); - my->get_last_irreversible_block_number_provider = app().get_method().register_provider( + get_last_irreversible_block_number_provider = app().get_method().register_provider( [this]() { - return my->chain->last_irreversible_block_num(); + return chain->last_irreversible_block_num(); } ); // relay signals to channels - my->pre_accepted_block_connection = my->chain->pre_accepted_block.connect([this](const signed_block_ptr& blk) { - auto itr = my->loaded_checkpoints.find( blk->block_num() ); - if( itr != my->loaded_checkpoints.end() ) { + pre_accepted_block_connection = chain->pre_accepted_block.connect([this](const signed_block_ptr& blk) { + auto itr = loaded_checkpoints.find( blk->block_num() ); + if( itr != loaded_checkpoints.end() ) { auto id = blk->calculate_id(); EOS_ASSERT( itr->second == id, checkpoint_exception, "Checkpoint does not match for block number ${num}: expected: ${expected} actual: ${actual}", @@ -1012,140 +1037,151 @@ void chain_plugin::plugin_initialize(const variables_map& options) { ); } - my->pre_accepted_block_channel.publish(priority::medium, blk); + pre_accepted_block_channel.publish(priority::medium, blk); }); - my->accepted_block_header_connection = 
my->chain->accepted_block_header.connect( + accepted_block_header_connection = chain->accepted_block_header.connect( [this]( const block_state_ptr& blk ) { - my->accepted_block_header_channel.publish( priority::medium, blk ); + accepted_block_header_channel.publish( priority::medium, blk ); } ); - my->accepted_block_connection = my->chain->accepted_block.connect( [this]( const block_state_ptr& blk ) { - if (my->_account_query_db) { - my->_account_query_db->commit_block(blk); + accepted_block_connection = chain->accepted_block.connect( [this]( const block_state_ptr& blk ) { + if (_account_query_db) { + _account_query_db->commit_block(blk); } - if (my->_trx_retry_db) { - my->_trx_retry_db->on_accepted_block(blk); + if (_trx_retry_db) { + _trx_retry_db->on_accepted_block(blk); } - if (my->_trx_finality_status_processing) { - my->_trx_finality_status_processing->signal_accepted_block(blk); + if (_trx_finality_status_processing) { + _trx_finality_status_processing->signal_accepted_block(blk); } - my->accepted_block_channel.publish( priority::high, blk ); + accepted_block_channel.publish( priority::high, blk ); } ); - my->irreversible_block_connection = my->chain->irreversible_block.connect( [this]( const block_state_ptr& blk ) { - if (my->_trx_retry_db) { - my->_trx_retry_db->on_irreversible_block(blk); + irreversible_block_connection = chain->irreversible_block.connect( [this]( const block_state_ptr& blk ) { + if (_trx_retry_db) { + _trx_retry_db->on_irreversible_block(blk); } - if (my->_trx_finality_status_processing) { - my->_trx_finality_status_processing->signal_irreversible_block(blk); + if (_trx_finality_status_processing) { + _trx_finality_status_processing->signal_irreversible_block(blk); } - my->irreversible_block_channel.publish( priority::low, blk ); + irreversible_block_channel.publish( priority::low, blk ); } ); - my->accepted_transaction_connection = my->chain->accepted_transaction.connect( + accepted_transaction_connection = chain->accepted_transaction.connect( [this]( const transaction_metadata_ptr& meta ) { - my->accepted_transaction_channel.publish( priority::low, meta ); + accepted_transaction_channel.publish( priority::low, meta ); } ); - my->applied_transaction_connection = my->chain->applied_transaction.connect( + applied_transaction_connection = chain->applied_transaction.connect( [this]( std::tuple t ) { - if (my->_account_query_db) { - my->_account_query_db->cache_transaction_trace(std::get<0>(t)); + if (_account_query_db) { + _account_query_db->cache_transaction_trace(std::get<0>(t)); } - if (my->_trx_retry_db) { - my->_trx_retry_db->on_applied_transaction(std::get<0>(t), std::get<1>(t)); + if (_trx_retry_db) { + _trx_retry_db->on_applied_transaction(std::get<0>(t), std::get<1>(t)); } - if (my->_trx_finality_status_processing) { - my->_trx_finality_status_processing->signal_applied_transaction(std::get<0>(t), std::get<1>(t)); + if (_trx_finality_status_processing) { + _trx_finality_status_processing->signal_applied_transaction(std::get<0>(t), std::get<1>(t)); } - my->applied_transaction_channel.publish( priority::low, std::get<0>(t) ); + applied_transaction_channel.publish( priority::low, std::get<0>(t) ); } ); - if (my->_trx_finality_status_processing || my->_trx_retry_db) { - my->block_start_connection = my->chain->block_start.connect( + if (_trx_finality_status_processing || _trx_retry_db) { + block_start_connection = chain->block_start.connect( [this]( uint32_t block_num ) { - if (my->_trx_retry_db) { - my->_trx_retry_db->on_block_start(block_num); + if 
(_trx_retry_db) { + _trx_retry_db->on_block_start(block_num); } - if (my->_trx_finality_status_processing) { - my->_trx_finality_status_processing->signal_block_start( block_num ); + if (_trx_finality_status_processing) { + _trx_finality_status_processing->signal_block_start( block_num ); } } ); } - my->chain->add_indices(); + chain->add_indices(); } FC_LOG_AND_RETHROW() } -void chain_plugin::plugin_startup() +void chain_plugin::plugin_initialize(const variables_map& options) { + handle_sighup(); // Sets loggers + my->plugin_initialize(options); +} + +void chain_plugin_impl::plugin_startup() { try { - EOS_ASSERT( my->chain_config->read_mode != db_read_mode::IRREVERSIBLE || !accept_transactions(), plugin_config_exception, + EOS_ASSERT( chain_config->read_mode != db_read_mode::IRREVERSIBLE || !accept_transactions, plugin_config_exception, "read-mode = irreversible. transactions should not be enabled by enable_accept_transactions" ); try { auto shutdown = [](){ return app().quit(); }; auto check_shutdown = [](){ return app().is_quiting(); }; - if (my->snapshot_path) { - auto infile = std::ifstream(my->snapshot_path->generic_string(), (std::ios::in | std::ios::binary)); + if (snapshot_path) { + auto infile = std::ifstream(snapshot_path->generic_string(), (std::ios::in | std::ios::binary)); auto reader = std::make_shared(infile); - my->chain->startup(shutdown, check_shutdown, reader); + chain->startup(shutdown, check_shutdown, reader); infile.close(); - } else if( my->genesis ) { - my->chain->startup(shutdown, check_shutdown, *my->genesis); + } else if( genesis ) { + chain->startup(shutdown, check_shutdown, *genesis); } else { - my->chain->startup(shutdown, check_shutdown); + chain->startup(shutdown, check_shutdown); } } catch (const database_guard_exception& e) { log_guard_exception(e); // make sure to properly close the db - my->chain.reset(); + chain.reset(); throw; } - if(!my->readonly) { + if(!readonly) { ilog("starting chain in read/write mode"); } - if (my->genesis) { + if (genesis) { ilog("Blockchain started; head block is #${num}, genesis timestamp is ${ts}", - ("num", my->chain->head_block_num())("ts", my->genesis->initial_timestamp)); + ("num", chain->head_block_num())("ts", genesis->initial_timestamp)); } else { - ilog("Blockchain started; head block is #${num}", ("num", my->chain->head_block_num())); + ilog("Blockchain started; head block is #${num}", ("num", chain->head_block_num())); } - my->chain_config.reset(); + chain_config.reset(); - if (my->account_queries_enabled) { - my->account_queries_enabled = false; + if (account_queries_enabled) { + account_queries_enabled = false; try { - my->_account_query_db.emplace(*my->chain); - my->account_queries_enabled = true; + _account_query_db.emplace(*chain); + account_queries_enabled = true; } FC_LOG_AND_DROP(("Unable to enable account queries")); } } FC_CAPTURE_AND_RETHROW() } +void chain_plugin::plugin_startup() { + my->plugin_startup(); +} + +void chain_plugin_impl::plugin_shutdown() { + pre_accepted_block_connection.reset(); + accepted_block_header_connection.reset(); + accepted_block_connection.reset(); + irreversible_block_connection.reset(); + accepted_transaction_connection.reset(); + applied_transaction_connection.reset(); + block_start_connection.reset(); + chain.reset(); +} + void chain_plugin::plugin_shutdown() { - my->pre_accepted_block_connection.reset(); - my->accepted_block_header_connection.reset(); - my->accepted_block_connection.reset(); - my->irreversible_block_connection.reset(); - 
my->accepted_transaction_connection.reset(); - my->applied_transaction_connection.reset(); - my->block_start_connection.reset(); - if(app().is_quiting()) - my->chain->get_wasm_interface().indicate_shutting_down(); - my->chain.reset(); + my->plugin_shutdown(); } void chain_plugin::handle_sighup() { @@ -1206,12 +1242,16 @@ bool chain_plugin::accept_transactions() const { return my->accept_transactions; } +void chain_plugin_impl::enable_accept_transactions() { + accept_transactions = true; +} + void chain_plugin::enable_accept_transactions() { - my->accept_transactions = true; + my->enable_accept_transactions(); } -void chain_plugin::log_guard_exception(const chain::guard_exception&e ) { +void chain_plugin_impl::log_guard_exception(const chain::guard_exception&e ) { if (e.code() == chain::database_guard_exception::code_value) { elog("Database has reached an unsafe level of usage, shutting down to avoid corrupting the database. " "Please increase the value set for \"chain-state-db-size-mb\" and restart the process!"); @@ -1220,7 +1260,7 @@ void chain_plugin::log_guard_exception(const chain::guard_exception&e ) { dlog("Details: ${details}", ("details", e.to_detail_string())); } -void chain_plugin::handle_guard_exception(const chain::guard_exception& e) { +void chain_plugin_impl::handle_guard_exception(const chain::guard_exception& e) { log_guard_exception(e); elog("database chain::guard_exception, quitting..."); // log string searched for in: tests/nodeos_under_min_avail_ram.py @@ -1228,6 +1268,10 @@ void chain_plugin::handle_guard_exception(const chain::guard_exception& e) { app().quit(); } +void chain_plugin::handle_guard_exception(const chain::guard_exception& e) { + chain_plugin_impl::handle_guard_exception(e); +} + void chain_apis::api_base::handle_db_exhaustion() { elog("database memory exhausted: increase chain-state-db-size-mb"); //return 1 -- it's what programs/nodeos/main.cpp considers "BAD_ALLOC" @@ -1291,7 +1335,7 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params } read_only::get_transaction_status_results -read_only::get_transaction_status(const read_only::get_transaction_status_params& param, const fc::time_point& deadline) const { +read_only::get_transaction_status(const read_only::get_transaction_status_params& param, const fc::time_point&) const { EOS_ASSERT(trx_finality_status_proc, unsupported_feature, "Transaction Status Interface not enabled. To enable, configure nodeos with '--transaction-finality-status-max-storage-size-gb '."); trx_finality_status_processing::chain_state ch_state = trx_finality_status_proc->get_chain_state(); @@ -1337,9 +1381,6 @@ read_only::get_activated_protocol_features( const read_only::get_activated_proto if( upper_bound_value < lower_bound_value ) return result; - fc::microseconds params_time_limit = params.time_limit_ms ? 
fc::milliseconds(*params.time_limit_ms) : fc::milliseconds(10); - fc::time_point params_deadline = fc::time_point::now() + params_time_limit; - auto walk_range = [&]( auto itr, auto end_itr, auto&& convert_iterator ) { fc::mutable_variant_object mvo; mvo( "activation_ordinal", 0 ); @@ -1348,21 +1389,13 @@ read_only::get_activated_protocol_features( const read_only::get_activated_proto auto& activation_ordinal_value = mvo["activation_ordinal"]; auto& activation_block_num_value = mvo["activation_block_num"]; - auto cur_time = fc::time_point::now(); - for( unsigned int count = 0; - cur_time <= params_deadline && count < params.limit && itr != end_itr; - ++itr, cur_time = fc::time_point::now() ) - { - FC_CHECK_DEADLINE(deadline); + // activated protocol features are naturally limited and unlikely to ever reach max_return_items + for( ; itr != end_itr; ++itr ) { const auto& conv_itr = convert_iterator( itr ); activation_ordinal_value = conv_itr.activation_ordinal(); activation_block_num_value = conv_itr.activation_block_num(); result.activated_protocol_features.emplace_back( conv_itr->to_variant( false, &mvo ) ); - ++count; - } - if( itr != end_itr ) { - result.more = convert_iterator( itr ).activation_ordinal() ; } }; @@ -1607,8 +1640,7 @@ read_only::get_table_rows( const read_only::get_table_rows_params& p, const fc:: read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_only::get_table_by_scope_params& p, const fc::time_point& deadline )const { - fc::microseconds params_time_limit = p.time_limit_ms ? fc::milliseconds(*p.time_limit_ms) : fc::milliseconds(10); - fc::time_point params_deadline = fc::time_point::now() + params_time_limit; + fc::time_point params_deadline = p.time_limit_ms ? std::min(fc::time_point::now().safe_add(fc::milliseconds(*p.time_limit_ms)), deadline) : deadline; read_only::get_table_by_scope_result result; const auto& d = db.db(); @@ -1632,14 +1664,16 @@ read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_o return result; auto walk_table_range = [&]( auto itr, auto end_itr ) { - auto cur_time = fc::time_point::now(); - for( unsigned int count = 0; cur_time <= params_deadline && count < p.limit && itr != end_itr; ++itr, cur_time = fc::time_point::now() ) { - FC_CHECK_DEADLINE(deadline); + uint32_t limit = p.limit; + if (deadline != fc::time_point::maximum() && limit > max_return_items) + limit = max_return_items; + for( unsigned int count = 0; count < limit && itr != end_itr; ++itr, ++count ) { if( p.table && itr->table != p.table ) continue; result.rows.push_back( {itr->code, itr->scope, itr->table, itr->payer, itr->count} ); - ++count; + if (fc::time_point::now() >= params_deadline) + break; } if( itr != end_itr ) { result.more = itr->scope.to_string(); @@ -1657,7 +1691,7 @@ read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_o return result; } -vector read_only::get_currency_balance( const read_only::get_currency_balance_params& p, const fc::time_point& deadline )const { +vector read_only::get_currency_balance( const read_only::get_currency_balance_params& p, const fc::time_point& )const { const abi_def abi = eosio::chain_apis::get_abi( db, p.code ); (void)get_table_type( abi, name("accounts") ); @@ -1683,7 +1717,7 @@ vector read_only::get_currency_balance( const read_only::get_currency_bal return results; } -fc::variant read_only::get_currency_stats( const read_only::get_currency_stats_params& p, const fc::time_point& deadline )const { +fc::variant read_only::get_currency_stats( const 
read_only::get_currency_stats_params& p, const fc::time_point& )const { fc::mutable_variant_object results; const abi_def abi = eosio::chain_apis::get_abi( db, p.code ); @@ -1759,20 +1793,22 @@ read_only::get_producers( const read_only::get_producers_params& params, const f boost::make_tuple(secondary_table_id->id, lower.to_uint64_t()))); }(); - fc::microseconds params_time_limit = params.time_limit_ms ? fc::milliseconds(*params.time_limit_ms) : fc::milliseconds(10); - fc::time_point params_deadline = fc::time_point::now() + params_time_limit; + fc::time_point params_deadline = params.time_limit_ms ? std::min(fc::time_point::now().safe_add(fc::milliseconds(*params.time_limit_ms)), deadline) : deadline; + uint32_t limit = params.limit; + if (deadline != fc::time_point::maximum() && limit > max_return_items) + limit = max_return_items; - for( ; it != secondary_index_by_secondary.end() && it->t_id == secondary_table_id->id; ++it ) { - FC_CHECK_DEADLINE(deadline); - if (result.rows.size() >= params.limit || fc::time_point::now() > params_deadline) { - result.more = name{it->primary_key}.to_string(); - break; - } + for( unsigned int count = 0; count < limit && it != secondary_index_by_secondary.end() && it->t_id == secondary_table_id->id; ++it, ++count ) { copy_inline_row(*kv_index.find(boost::make_tuple(table_id->id, it->primary_key)), data); if (params.json) result.rows.emplace_back( abis.binary_to_variant( abis.get_table_type("producers"_n), data, abi_serializer::create_yield_function( abi_serializer_max_time ), shorten_abi_errors ) ); else - result.rows.emplace_back(fc::variant(data)); + result.rows.emplace_back(data); + if (fc::time_point::now() >= params_deadline) + break; + } + if( it != secondary_index_by_secondary.end() && it->t_id == secondary_table_id->id ) { + result.more = name{it->primary_key}.to_string(); } result.total_producer_vote_weight = get_global_row(d, abi, abis, abi_serializer_max_time, shorten_abi_errors)["total_producer_vote_weight"].as_double(); @@ -1802,7 +1838,7 @@ read_only::get_producers( const read_only::get_producers_params& params, const f return result; } -read_only::get_producer_schedule_result read_only::get_producer_schedule( const read_only::get_producer_schedule_params& p, const fc::time_point& deadline ) const { +read_only::get_producer_schedule_result read_only::get_producer_schedule( const read_only::get_producer_schedule_params& p, const fc::time_point& ) const { read_only::get_producer_schedule_result result; to_variant(db.active_producers(), result.active); if(!db.pending_producers().producers.empty()) @@ -1816,8 +1852,7 @@ read_only::get_producer_schedule_result read_only::get_producer_schedule( const read_only::get_scheduled_transactions_result read_only::get_scheduled_transactions( const read_only::get_scheduled_transactions_params& p, const fc::time_point& deadline ) const { - fc::microseconds params_time_limit = p.time_limit_ms ? fc::milliseconds(*p.time_limit_ms) : fc::milliseconds(10); - fc::time_point params_deadline = fc::time_point::now() + params_time_limit; + fc::time_point params_deadline = p.time_limit_ms ? 
std::min(fc::time_point::now().safe_add(fc::milliseconds(*p.time_limit_ms)), deadline) : deadline; const auto& d = db.db(); @@ -1849,11 +1884,12 @@ read_only::get_scheduled_transactions( const read_only::get_scheduled_transactio read_only::get_scheduled_transactions_result result; - auto resolver = make_resolver(db, abi_serializer::create_yield_function( abi_serializer_max_time )); + auto resolver = make_resolver(db, abi_serializer_max_time, throw_on_yield::no); uint32_t remaining = p.limit; - while (itr != idx_by_delay.end() && remaining > 0 && params_deadline > fc::time_point::now()) { - FC_CHECK_DEADLINE(deadline); + if (deadline != fc::time_point::maximum() && remaining > max_return_items) + remaining = max_return_items; + while (itr != idx_by_delay.end() && remaining > 0) { auto row = fc::mutable_variant_object() ("trx_id", itr->trx_id) ("sender", itr->sender) @@ -1871,7 +1907,7 @@ read_only::get_scheduled_transactions( const read_only::get_scheduled_transactio fc::datastream ds( itr->packed_trx.data(), itr->packed_trx.size() ); fc::raw::unpack(ds,trx); - abi_serializer::to_variant(trx, pretty_transaction, resolver, abi_serializer::create_yield_function( abi_serializer_max_time )); + abi_serializer::to_variant(trx, pretty_transaction, resolver, abi_serializer_max_time); row("transaction", pretty_transaction); } else { auto packed_transaction = bytes(itr->packed_trx.begin(), itr->packed_trx.end()); @@ -1881,6 +1917,8 @@ read_only::get_scheduled_transactions( const read_only::get_scheduled_transactio result.transactions.emplace_back(std::move(row)); ++itr; remaining--; + if (fc::time_point::now() >= params_deadline) + break; } if (itr != idx_by_delay.end()) { @@ -1890,7 +1928,7 @@ read_only::get_scheduled_transactions( const read_only::get_scheduled_transactio return result; } -chain::signed_block_ptr read_only::get_raw_block(const read_only::get_raw_block_params& params, const fc::time_point& deadline) const { +chain::signed_block_ptr read_only::get_raw_block(const read_only::get_raw_block_params& params, const fc::time_point&) const { signed_block_ptr block; std::optional block_num; @@ -1912,7 +1950,6 @@ chain::signed_block_ptr read_only::get_raw_block(const read_only::get_raw_block_ } EOS_ASSERT( block, unknown_block_exception, "Could not find block: ${block}", ("block", params.block_num_or_id)); - FC_CHECK_DEADLINE(deadline); return block; } @@ -1920,17 +1957,12 @@ chain::signed_block_ptr read_only::get_raw_block(const read_only::get_raw_block_ std::function()> read_only::get_block(const get_raw_block_params& params, const fc::time_point& deadline) const { chain::signed_block_ptr block = get_raw_block(params, deadline); - auto yield = abi_serializer::create_yield_function(deadline - fc::time_point::now()); - auto abi_cache = abi_serializer_cache_builder(make_resolver(db, std::move(yield))).add_serializers(block).get(); - FC_CHECK_DEADLINE(deadline); - using return_type = t_or_exception; return [this, - remaining_time = deadline - fc::time_point::now(), - resolver = abi_resolver(std::move(abi_cache)), - block = std::move(block)]() mutable -> return_type { + resolver = get_serializers_cache(db, block, abi_serializer_max_time), + block = std::move(block)]() mutable -> return_type { try { - return convert_block(block, resolver, remaining_time); + return convert_block(block, resolver); } CATCH_AND_RETURN(return_type); }; } @@ -1971,33 +2003,27 @@ read_only::get_block_header_result read_only::get_block_header(const read_only:: EOS_ASSERT( block, unknown_block_exception, "Could not find 
block header: ${block}", ("block", params.block_num_or_id)); return { block->calculate_id(), fc::variant{static_cast(*block)}, block->block_extensions}; } - - FC_CHECK_DEADLINE(deadline); - } abi_resolver read_only::get_block_serializers( const chain::signed_block_ptr& block, const fc::microseconds& max_time ) const { - auto yield = abi_serializer::create_yield_function(max_time); - return abi_resolver(abi_serializer_cache_builder(make_resolver(db, std::move(yield))).add_serializers(block).get()); + return get_serializers_cache(db, block, max_time); } -fc::variant read_only::convert_block( const chain::signed_block_ptr& block, - abi_resolver& resolver, - const fc::microseconds& max_time ) const { +fc::variant read_only::convert_block( const chain::signed_block_ptr& block, abi_resolver& resolver ) const { fc::variant pretty_output; - abi_serializer::to_variant( *block, pretty_output, resolver, abi_serializer::create_yield_function( max_time ) ); + abi_serializer::to_variant( *block, pretty_output, resolver, abi_serializer_max_time ); const auto block_id = block->calculate_id(); uint32_t ref_block_prefix = block_id._hash[1]; - return fc::mutable_variant_object( pretty_output.get_object() ) + return fc::mutable_variant_object( std::move(pretty_output.get_object()) ) ( "id", block_id ) ( "block_num", block->block_num() ) ( "ref_block_prefix", ref_block_prefix ); } -fc::variant read_only::get_block_info(const read_only::get_block_info_params& params, const fc::time_point& deadline) const { +fc::variant read_only::get_block_info(const read_only::get_block_info_params& params, const fc::time_point&) const { signed_block_ptr block; try { @@ -2026,7 +2052,7 @@ fc::variant read_only::get_block_info(const read_only::get_block_info_params& pa ("ref_block_prefix", ref_block_prefix); } -fc::variant read_only::get_block_header_state(const get_block_header_state_params& params, const fc::time_point& deadline) const { +fc::variant read_only::get_block_header_state(const get_block_header_state_params& params, const fc::time_point&) const { block_state_ptr b; std::optional block_num; std::exception_ptr e; @@ -2063,9 +2089,9 @@ void read_write::push_block(read_write::push_block_params&& params, next_functio void read_write::push_transaction(const read_write::push_transaction_params& params, next_function next) { try { auto pretty_input = std::make_shared(); - auto resolver = make_resolver(db, abi_serializer::create_yield_function( abi_serializer_max_time )); + auto resolver = caching_resolver(make_resolver(db, abi_serializer_max_time, throw_on_yield::yes)); try { - abi_serializer::from_variant(params, *pretty_input, std::move( resolver ), abi_serializer::create_yield_function( abi_serializer_max_time )); + abi_serializer::from_variant(params, *pretty_input, resolver, abi_serializer_max_time); } EOS_RETHROW_EXCEPTIONS(chain::packed_transaction_type_exception, "Invalid packed transaction") app().get_method()(pretty_input, true, transaction_metadata::trx_type::input, false, @@ -2078,7 +2104,8 @@ void read_write::push_transaction(const read_write::push_transaction_params& par try { fc::variant output; try { - output = db.to_variant_with_abi( *trx_trace_ptr, abi_serializer::create_yield_function( abi_serializer_max_time ) ); + auto resolver = get_serializers_cache(db, trx_trace_ptr, abi_serializer_max_time); + abi_serializer::to_variant(*trx_trace_ptr, output, resolver, abi_serializer_max_time); // Create map of (closest_unnotified_ancestor_action_ordinal, global_sequence) with action trace std::map< std::pair, 
fc::mutable_variant_object > act_traces_map; @@ -2122,10 +2149,10 @@ void read_write::push_transaction(const read_write::push_transaction_params& par return restructured_act_traces; }; - fc::mutable_variant_object output_mvo(output); + fc::mutable_variant_object output_mvo(std::move(output.get_object())); output_mvo["action_traces"] = convert_act_trace_to_tree_struct(0); - output = output_mvo; + output = std::move(output_mvo); } catch( chain::abi_exception& ) { output = *trx_trace_ptr; } @@ -2184,9 +2211,9 @@ template void api_base::send_transaction_gen(API &api, send_transaction_params_t params, next_function next) { try { auto ptrx = std::make_shared(); - auto resolver = make_resolver(api.db, abi_serializer::create_yield_function( api.abi_serializer_max_time )); + auto resolver = caching_resolver(make_resolver(api.db, api.abi_serializer_max_time, throw_on_yield::yes)); try { - abi_serializer::from_variant(params.transaction, *ptrx, resolver, abi_serializer::create_yield_function( api.abi_serializer_max_time )); + abi_serializer::from_variant(params.transaction, *ptrx, resolver, api.abi_serializer_max_time); } EOS_RETHROW_EXCEPTIONS(packed_transaction_type_exception, "Invalid packed transaction") bool retry = false; @@ -2196,7 +2223,7 @@ void api_base::send_transaction_gen(API &api, send_transaction_params_t params, retry = params.retry_trx; retry_num_blocks = params.retry_trx_num_blocks; - EOS_ASSERT( !retry || api.trx_retry.has_value(), unsupported_feature, "Transaction retry not enabled on node" ); + EOS_ASSERT( !retry || api.trx_retry.has_value(), unsupported_feature, "Transaction retry not enabled on node. transaction-retry-max-storage-size-gb is 0" ); EOS_ASSERT( !retry || (ptrx->expiration() <= api.trx_retry->get_max_expiration_time()), tx_exp_too_far_exception, "retry transaction expiration ${e} larger than allowed ${m}", ("e", ptrx->expiration())("m", api.trx_retry->get_max_expiration_time()) ); @@ -2231,15 +2258,14 @@ void api_base::send_transaction_gen(API &api, send_transaction_params_t params, } if (!retried) { // we are still on main thread here. 
The lambda passed to `next()` below will be executed on the http thread pool - auto yield = abi_serializer::create_yield_function(fc::microseconds::maximum()); - auto abi_cache = abi_serializer_cache_builder(api_base::make_resolver(api.db, std::move(yield))).add_serializers(trx_trace_ptr).get(); using return_type = t_or_exception; - next([&api, trx_trace_ptr, resolver = abi_resolver(std::move(abi_cache))]() mutable { + next([&api, + trx_trace_ptr, + resolver = get_serializers_cache(api.db, trx_trace_ptr, api.abi_serializer_max_time)]() mutable { try { fc::variant output; try { - abi_serializer::to_variant(*trx_trace_ptr, output, std::move(resolver), - abi_serializer::create_yield_function(api.abi_serializer_max_time)); + abi_serializer::to_variant(*trx_trace_ptr, output, resolver, api.abi_serializer_max_time); } catch( abi_exception& ) { output = *trx_trace_ptr; } @@ -2276,7 +2302,7 @@ void read_write::send_transaction2(read_write::send_transaction2_params params, return send_transaction_gen(*this, std::move(gen_params), std::move(next)); } -read_only::get_abi_results read_only::get_abi( const get_abi_params& params, const fc::time_point& deadline )const { +read_only::get_abi_results read_only::get_abi( const get_abi_params& params, const fc::time_point& )const { try { get_abi_results result; result.account_name = params.account_name; @@ -2291,7 +2317,7 @@ read_only::get_abi_results read_only::get_abi( const get_abi_params& params, con } EOS_RETHROW_EXCEPTIONS(chain::account_query_exception, "unable to retrieve account abi") } -read_only::get_code_results read_only::get_code( const get_code_params& params, const fc::time_point& deadline )const { +read_only::get_code_results read_only::get_code( const get_code_params& params, const fc::time_point& )const { try { get_code_results result; result.account_name = params.account_name; @@ -2315,7 +2341,7 @@ read_only::get_code_results read_only::get_code( const get_code_params& params, } EOS_RETHROW_EXCEPTIONS(chain::account_query_exception, "unable to retrieve account code") } -read_only::get_code_hash_results read_only::get_code_hash( const get_code_hash_params& params, const fc::time_point& deadline )const { +read_only::get_code_hash_results read_only::get_code_hash( const get_code_hash_params& params, const fc::time_point& )const { try { get_code_hash_results result; result.account_name = params.account_name; @@ -2329,7 +2355,7 @@ read_only::get_code_hash_results read_only::get_code_hash( const get_code_hash_p } EOS_RETHROW_EXCEPTIONS(chain::account_query_exception, "unable to retrieve account code hash") } -read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const get_raw_code_and_abi_params& params, const fc::time_point& deadline)const { +read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const get_raw_code_and_abi_params& params, const fc::time_point& )const { try { get_raw_code_and_abi_results result; result.account_name = params.account_name; @@ -2347,7 +2373,7 @@ read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const g } EOS_RETHROW_EXCEPTIONS(chain::account_query_exception, "unable to retrieve account code/abi") } -read_only::get_raw_abi_results read_only::get_raw_abi( const get_raw_abi_params& params, const fc::time_point& deadline )const { +read_only::get_raw_abi_results read_only::get_raw_abi( const get_raw_abi_params& params, const fc::time_point& )const { try { get_raw_abi_results result; result.account_name = params.account_name; @@ -2365,7 +2391,7 @@ 
read_only::get_raw_abi_results read_only::get_raw_abi( const get_raw_abi_params& } EOS_RETHROW_EXCEPTIONS(chain::account_query_exception, "unable to retrieve account abi") } -read_only::get_account_return_t read_only::get_account( const get_account_params& params, const fc::time_point& deadline ) const { +read_only::get_account_return_t read_only::get_account( const get_account_params& params, const fc::time_point& ) const { try { get_account_results result; result.account_name = params.account_name; @@ -2407,8 +2433,8 @@ read_only::get_account_return_t read_only::get_account( const get_account_params std::multimap result; while (iter != links.end() && iter->account == params.account_name ) { - auto action = iter->message_type.empty() ? std::optional() : std::optional(iter->message_type); - result.emplace(std::make_pair(iter->required_permission, linked_action{iter->code, std::move(action)})); + auto action_name = iter->message_type.empty() ? std::optional() : std::optional(iter->message_type); + result.emplace(iter->required_permission, linked_action{iter->code, action_name}); ++iter; } @@ -2528,11 +2554,11 @@ read_only::get_account_return_t read_only::get_account( const get_account_params } EOS_RETHROW_EXCEPTIONS(chain::account_query_exception, "unable to retrieve account info") } -read_only::get_required_keys_result read_only::get_required_keys( const get_required_keys_params& params, const fc::time_point& deadline )const { +read_only::get_required_keys_result read_only::get_required_keys( const get_required_keys_params& params, const fc::time_point& )const { transaction pretty_input; - auto resolver = make_resolver(db, abi_serializer::create_yield_function( abi_serializer_max_time )); + auto resolver = caching_resolver(make_resolver(db, abi_serializer_max_time, throw_on_yield::yes)); try { - abi_serializer::from_variant(params.transaction, pretty_input, resolver, abi_serializer::create_yield_function( abi_serializer_max_time )); + abi_serializer::from_variant(params.transaction, pretty_input, resolver, abi_serializer_max_time); } EOS_RETHROW_EXCEPTIONS(chain::transaction_type_exception, "Invalid transaction") auto required_keys_set = db.get_authorization_manager().get_required_keys( pretty_input, params.available_keys, fc::seconds( pretty_input.delay_sec )); @@ -2559,13 +2585,13 @@ void read_only::send_read_only_transaction(send_read_only_transaction_params par return send_transaction_gen(*this, std::move(gen_params), std::move(next)); } -read_only::get_transaction_id_result read_only::get_transaction_id( const read_only::get_transaction_id_params& params, const fc::time_point& deadline) const { +read_only::get_transaction_id_result read_only::get_transaction_id( const read_only::get_transaction_id_params& params, const fc::time_point& ) const { return params.id(); } account_query_db::get_accounts_by_authorizers_result -read_only::get_accounts_by_authorizers( const account_query_db::get_accounts_by_authorizers_params& args, const fc::time_point& deadline) const +read_only::get_accounts_by_authorizers( const account_query_db::get_accounts_by_authorizers_params& args, const fc::time_point& ) const { EOS_ASSERT(aqdb.has_value(), plugin_config_exception, "Account Queries being accessed when not enabled"); return aqdb->get_accounts_by_authorizers(args); @@ -2611,7 +2637,7 @@ chain::symbol read_only::extract_core_symbol()const { } read_only::get_consensus_parameters_results -read_only::get_consensus_parameters(const get_consensus_parameters_params&, const fc::time_point& deadline ) const { 
+read_only::get_consensus_parameters(const get_consensus_parameters_params&, const fc::time_point& ) const {
    get_consensus_parameters_results results;

    results.chain_config = db.get_global_properties().configuration;
@@ -2626,8 +2652,8 @@ fc::variant chain_plugin::get_log_trx_trace(const transaction_trace_ptr& trx_tra
    fc::variant pretty_output;
    try {
       abi_serializer::to_log_variant(trx_trace, pretty_output,
-                                     chain_apis::api_base::make_resolver(chain(), abi_serializer::create_yield_function(get_abi_serializer_max_time())),
-                                     abi_serializer::create_yield_function(get_abi_serializer_max_time()));
+                                     caching_resolver(make_resolver(chain(), get_abi_serializer_max_time(), throw_on_yield::no)),
+                                     get_abi_serializer_max_time());
    } catch (...) {
       pretty_output = trx_trace;
    }
@@ -2638,13 +2664,18 @@ fc::variant chain_plugin::get_log_trx(const transaction& trx) const {
    fc::variant pretty_output;
    try {
       abi_serializer::to_log_variant(trx, pretty_output,
-                                     chain_apis::api_base::make_resolver(chain(), abi_serializer::create_yield_function(get_abi_serializer_max_time())),
-                                     abi_serializer::create_yield_function(get_abi_serializer_max_time()));
+                                     caching_resolver(make_resolver(chain(), get_abi_serializer_max_time(), throw_on_yield::no)),
+                                     get_abi_serializer_max_time());
    } catch (...) {
       pretty_output = trx;
    }
    return pretty_output;
 }
+
+const controller::config& chain_plugin::chain_config() const {
+   EOS_ASSERT(my->chain_config.has_value(), plugin_exception, "chain_config not initialized");
+   return *my->chain_config;
+}
 } // namespace eosio

 FC_REFLECT( eosio::chain_apis::detail::ram_market_exchange_state_t, (ignore1)(ignore2)(ignore3)(core_symbol)(ignore4) )
diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
index 82fd082e48..6f2233a362 100644
--- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
+++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
@@ -21,6 +21,7 @@
 #include
 #include
+#include

 namespace fc { class variant; }
@@ -44,9 +45,35 @@ namespace eosio {
    using chain::action_name;
    using chain::abi_def;
    using chain::abi_serializer;
+   using chain::abi_serializer_cache_builder;
    using chain::abi_resolver;
    using chain::packed_transaction;

+   enum class throw_on_yield { no, yes };
+   inline auto make_resolver(const controller& control, fc::microseconds abi_serializer_max_time, throw_on_yield yield_throw ) {
+      return [&control, abi_serializer_max_time, yield_throw](const account_name& name) -> std::optional<abi_serializer> {
+         if (name.good()) {
+            const auto* accnt = control.db().template find<account_object, by_name>( name );
+            if( accnt != nullptr ) {
+               try {
+                  if( abi_def abi; abi_serializer::to_abi( accnt->abi, abi ) ) {
+                     return abi_serializer( std::move( abi ), abi_serializer::create_yield_function( abi_serializer_max_time ) );
+                  }
+               } catch( ... ) {
+                  if( yield_throw == throw_on_yield::yes )
+                     throw;
+               }
+            }
+         }
+         return {};
+      };
+   }
+
+   template <typename T>
+   inline abi_resolver get_serializers_cache(const controller& db, const T& obj, const fc::microseconds& max_time) {
+      return abi_resolver(abi_serializer_cache_builder(make_resolver(db, max_time, throw_on_yield::no)).add_serializers(obj).get());
+   }
+
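Where the old code built a fresh abi_serializer on every account lookup, get_serializers_cache walks the object once, resolves every ABI it references through abi_serializer_cache_builder, and returns an abi_resolver that answers later lookups from that prebuilt cache; the caching_resolver used at several call sites in this diff presumably memoizes per request in a similar way. The idea reduced to a generic memoizing wrapper (all names and types here are illustrative, not the plugin's API):

   #include <functional>
   #include <iostream>
   #include <map>
   #include <optional>
   #include <string>

   // wrap an expensive resolver so each key is computed at most once
   template <typename V>
   auto caching_resolver(std::function<std::optional<V>(const std::string&)> inner) {
      return [inner, cache = std::map<std::string, std::optional<V>>{}]
             (const std::string& key) mutable -> std::optional<V> {
         auto it = cache.find(key);
         if (it == cache.end())
            it = cache.emplace(key, inner(key)).first;   // miss: invoke and remember
         return it->second;
      };
   }

   int main() {
      int calls = 0;
      auto resolver = caching_resolver<int>([&](const std::string& k) -> std::optional<int> {
         ++calls;                                  // stands in for deserializing an ABI
         return static_cast<int>(k.size());
      });
      resolver("eosio"); resolver("eosio"); resolver("eosio.token");
      std::cout << calls << "\n";                  // 2: "eosio" resolved once, then cached
   }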
   namespace chain_apis {
      struct empty{};
@@ -92,21 +119,10 @@ class read_write;
 class api_base {
  public:
+   static constexpr uint32_t max_return_items = 1000;
    static void handle_db_exhaustion();
    static void handle_bad_alloc();

-   static auto make_resolver(const controller& control, abi_serializer::yield_function_t yield) {
-      return [&control, yield{std::move(yield)}](const account_name &name) -> std::optional<abi_serializer> {
-         const auto* accnt = control.db().template find<account_object, by_name>(name);
-         if (accnt != nullptr) {
-            if (abi_def abi; abi_serializer::to_abi(accnt->abi, abi)) {
-               return abi_serializer(std::move(abi), yield);
-            }
-         }
-         return {};
-      };
-   }
-
  protected:
    struct send_transaction_params_t {
       bool return_failure_trace = true;
@@ -147,7 +163,7 @@ class read_only : public api_base {
    // return deadline for call
    fc::time_point start() const {
       validate();
-      return fc::time_point::now() + http_max_response_time;
+      return fc::time_point::now().safe_add(http_max_response_time);
    }

    void set_shorten_abi_errors( bool f ) { shorten_abi_errors = f; }
@@ -207,10 +223,10 @@ class read_only : public api_base {
    struct get_activated_protocol_features_params {
       std::optional<uint32_t>  lower_bound;
       std::optional<uint32_t>  upper_bound;
-      uint32_t                 limit = 10;
+      uint32_t                 limit = std::numeric_limits<uint32_t>::max(); // ignored
       bool                     search_by_block_num = false;
       bool                     reverse = false;
-      std::optional<uint32_t>  time_limit_ms; // defaults to 10ms
+      std::optional<uint32_t>  time_limit_ms; // ignored
    };

    struct get_activated_protocol_features_results {
@@ -371,8 +387,7 @@ class read_only : public api_base {
    // call from any thread
    fc::variant convert_block( const chain::signed_block_ptr& block,
-                              abi_resolver& resolver,
-                              const fc::microseconds& max_time ) const;
+                              abi_resolver& resolver ) const;

    struct get_block_header_params {
       string block_num_or_id;
@@ -413,7 +428,7 @@ class read_only : public api_base {
       string encode_type{"dec"}; //dec, hex , default=dec
       std::optional<bool> reverse;
       std::optional<bool> show_payer; // show RAM payer
-      std::optional<uint32_t> time_limit_ms; // defaults to 10ms
+      std::optional<uint32_t> time_limit_ms; // defaults to http-max-response-time-ms
    };

    struct get_table_rows_result {
@@ -433,7 +448,7 @@ class read_only : public api_base {
       string upper_bound; // upper bound of scope, optional
       uint32_t limit = 10;
       std::optional<bool> reverse;
-      std::optional<uint32_t> time_limit_ms; // defaults to 10ms
+      std::optional<uint32_t> time_limit_ms; // defaults to http-max-response-time-ms
    };

    struct get_table_by_scope_result_row {
       name code;
@@ -475,7 +490,7 @@ class read_only : public api_base {
       bool json = false;
       string lower_bound;
       uint32_t limit = 50;
-      std::optional<uint32_t> time_limit_ms; // defaults to 10ms
+      std::optional<uint32_t> time_limit_ms; // defaults to http-max-response-time-ms
    };

    struct get_producers_result {
@@ -501,7 +516,7 @@ class read_only : public api_base {
       bool json = false;
       string lower_bound; /// timestamp OR transaction ID
       uint32_t limit = 50;
-      std::optional<uint32_t> time_limit_ms; // defaults to 10ms
+      std::optional<uint32_t> time_limit_ms; // defaults to http-max-response-time-ms
    };

    struct get_scheduled_transactions_result {
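The pattern that replaces FC_CHECK_DEADLINE throughout the table walkers below: compute one effective deadline up front (the caller's deadline, tightened by time_limit_ms when supplied), clamp the requested row count to max_return_items whenever a real deadline is in force, and test the clock only after emitting each item so every call makes at least some progress. A sketch of that control flow with placeholder types and limits:

   #include <chrono>
   #include <cstdint>
   #include <vector>

   using steady = std::chrono::steady_clock;

   std::vector<int> walk(const std::vector<int>& rows, uint32_t requested_limit,
                         steady::time_point deadline, uint32_t max_return_items) {
      std::vector<int> out;
      // clamp the page size whenever a real deadline is in force
      uint32_t limit = requested_limit;
      if (deadline != steady::time_point::max() && limit > max_return_items)
         limit = max_return_items;

      for (uint32_t count = 0; count < limit && count < rows.size(); ++count) {
         out.push_back(rows[count]);
         if (steady::now() >= deadline)   // checked after emitting, so progress is made
            break;
      }
      return out;
   }

   int main() {
      std::vector<int> rows{1, 2, 3, 4, 5};
      auto page = walk(rows, 100, steady::now() + std::chrono::milliseconds(10), 3);
      return page.size() == 3 ? 0 : 1;   // capped by max_return_items
   }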
@@ -563,8 +578,7 @@ class read_only : public api_base {
                                                const fc::time_point& deadline,
                                                ConvFn conv ) const {
-      fc::microseconds params_time_limit = p.time_limit_ms ? fc::milliseconds(*p.time_limit_ms) : fc::milliseconds(10);
-      fc::time_point params_deadline = fc::time_point::now() + params_time_limit;
+      fc::time_point params_deadline = p.time_limit_ms ? std::min(fc::time_point::now().safe_add(fc::milliseconds(*p.time_limit_ms)), deadline) : deadline;

       struct http_params_t {
          name table;
@@ -632,16 +646,17 @@ class read_only : public api_base {
       };

       auto walk_table_row_range = [&]( auto itr, auto end_itr ) {
-         auto cur_time = fc::time_point::now();
          vector<char> data;
-         for( unsigned int count = 0;
-              cur_time <= params_deadline && count < p.limit && itr != end_itr;
-              ++count, ++itr, cur_time = fc::time_point::now() ) {
-            FC_CHECK_DEADLINE(deadline);
+         uint32_t limit = p.limit;
+         if (deadline != fc::time_point::maximum() && limit > max_return_items)
+            limit = max_return_items;
+         for( unsigned int count = 0; count < limit && itr != end_itr; ++count, ++itr ) {
            const auto* itr2 = d.find<chain::key_value_object, chain::by_scope_primary>( boost::make_tuple(t_id->id, itr->primary_key) );
            if( itr2 == nullptr ) continue;
            copy_inline_row(*itr2, data);
            http_params.rows.emplace_back(std::move(data), itr->payer);
+           if (fc::time_point::now() >= params_deadline)
+              break;
         }
         if( itr != end_itr ) {
            http_params.more = true;
@@ -695,8 +710,7 @@ class read_only : public api_base {
                                    abi_def&& abi,
                                    const fc::time_point& deadline ) const {
-      fc::microseconds params_time_limit = p.time_limit_ms ? fc::milliseconds(*p.time_limit_ms) : fc::milliseconds(10);
-      fc::time_point params_deadline = fc::time_point::now() + params_time_limit;
+      fc::time_point params_deadline = p.time_limit_ms ? std::min(fc::time_point::now().safe_add(fc::milliseconds(*p.time_limit_ms)), deadline) : deadline;

       struct http_params_t {
          name table;
@@ -746,14 +760,15 @@ class read_only : public api_base {
       };

       auto walk_table_row_range = [&]( auto itr, auto end_itr ) {
-         auto cur_time = fc::time_point::now();
          vector<char> data;
-         for( unsigned int count = 0;
-              cur_time <= params_deadline && count < p.limit && itr != end_itr;
-              ++count, ++itr, cur_time = fc::time_point::now() ) {
-            FC_CHECK_DEADLINE(deadline);
+         uint32_t limit = p.limit;
+         if (deadline != fc::time_point::maximum() && limit > max_return_items)
+            limit = max_return_items;
+         for( unsigned int count = 0; count < limit && itr != end_itr; ++count, ++itr ) {
            copy_inline_row(*itr, data);
            http_params.rows.emplace_back(std::move(data), itr->payer);
+           if (fc::time_point::now() >= params_deadline)
+              break;
         }
         if( itr != end_itr ) {
            http_params.more = true;
@@ -832,7 +847,8 @@ class read_write : public api_base {
    // return deadline for call
    fc::time_point start() const {
       validate();
-      return fc::time_point::now() + http_max_response_time;
+      return http_max_response_time == fc::microseconds::maximum() ? fc::time_point::maximum()
+                                                                   : fc::time_point::now() + http_max_response_time;
    }

    using push_block_params = chain::signed_block;
@@ -925,7 +941,7 @@ class read_write : public api_base {
          // the following will convert the input to array of 2 uint128_t in little endian, i.e. 50f0fa8360ec998f4bb65b00c86282f5 fb54b91bfed2fe7fe39a92d999d002c5
          // which is the format used by secondary index
          chain::key256_t k;
-         uint8_t buffer[32];
+         uint8_t buffer[32] = {};
          boost::multiprecision::export_bits(v, buffer, 8, false);
          memcpy(&k[0], buffer + 16, 16);
          memcpy(&k[1], buffer, 16);
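The one-character change to buffer in the hunk above is a real fix: boost::multiprecision::export_bits writes only as many bytes as the value needs, so a small key left the tail of the uninitialized buffer as garbage that was then memcpy'd into the key256_t. A compact demonstration (the final memcpy assumes a little-endian host):

   #include <boost/multiprecision/cpp_int.hpp>
   #include <cstdint>
   #include <cstring>
   #include <iostream>

   int main() {
      boost::multiprecision::uint256_t v = 0x1234;   // small value: most of the 32 bytes go unused

      uint8_t buffer[32] = {};   // zero-init so bytes export_bits never touches are 0, not garbage
      boost::multiprecision::export_bits(v, buffer, 8, false);   // 8-bit chunks, least significant first

      uint64_t low = 0;
      std::memcpy(&low, buffer, sizeof(low));
      std::cout << std::hex << low << "\n";   // 1234
   }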
@@ -969,7 +985,6 @@ class chain_plugin : public plugin<chain_plugin> {
    void enable_accept_transactions();

    static void handle_guard_exception(const chain::guard_exception& e);
-   void do_hard_replay(const variables_map& options);

    bool account_queries_enabled() const;
    bool transaction_finality_status_enabled() const;
@@ -979,8 +994,8 @@ class chain_plugin : public plugin<chain_plugin> {
    // return variant of trx for logging, trace is modified to minimize log output
    fc::variant get_log_trx(const transaction& trx) const;

+   const controller::config& chain_config() const;
 private:
-   static void log_guard_exception(const chain::guard_exception& e);
    unique_ptr<chain_plugin_impl> my;
 };

diff --git a/plugins/chain_plugin/test/CMakeLists.txt b/plugins/chain_plugin/test/CMakeLists.txt
index aa91318d2e..81473d42e9 100644
--- a/plugins/chain_plugin/test/CMakeLists.txt
+++ b/plugins/chain_plugin/test/CMakeLists.txt
@@ -2,7 +2,8 @@ add_executable( test_chain_plugin
   test_account_query_db.cpp
   test_trx_retry_db.cpp
   test_trx_finality_status_processing.cpp
+  plugin_config_test.cpp
   main.cpp
 )
-target_link_libraries( test_chain_plugin chain_plugin eosio_testing)
+target_link_libraries( test_chain_plugin chain_plugin eosio_testing eosio_chain_wrap )

 add_test(NAME test_chain_plugin COMMAND plugins/chain_plugin/test/test_chain_plugin WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
\ No newline at end of file
diff --git a/plugins/chain_plugin/test/plugin_config_test.cpp b/plugins/chain_plugin/test/plugin_config_test.cpp
new file mode 100644
index 0000000000..e43b0bfbd3
--- /dev/null
+++ b/plugins/chain_plugin/test/plugin_config_test.cpp
@@ -0,0 +1,22 @@
+#include
+#include
+#include
+#include
+#include
+
+BOOST_AUTO_TEST_CASE(chain_plugin_default_tests) {
+   fc::temp_directory tmp;
+   appbase::scoped_app app;
+
+   auto tmp_path = tmp.path().string();
+   std::array args = {
+      "test_chain_plugin", "--blocks-log-stride", "10", "--data-dir", tmp_path.c_str(),
+   };
+
+   BOOST_CHECK(app->initialize(args.size(), const_cast<char**>(args.data())));
+   auto& plugin = app->get_plugin<eosio::chain_plugin>();
+
+   auto* config = std::get_if<eosio::chain::partitioned_blocklog_config>(&plugin.chain_config().blog);
+   BOOST_REQUIRE(config);
+   BOOST_CHECK_EQUAL(config->max_retained_files, UINT32_MAX);
+}
\ No newline at end of file
diff --git a/plugins/chain_plugin/test/test_trx_retry_db.cpp b/plugins/chain_plugin/test/test_trx_retry_db.cpp
index 8c7a3925c9..cfad3ed512 100644
--- a/plugins/chain_plugin/test/test_trx_retry_db.cpp
+++ b/plugins/chain_plugin/test/test_trx_retry_db.cpp
@@ -224,11 +224,15 @@ BOOST_AUTO_TEST_CASE(trx_retry_logic) {
    std::promise<chain_plugin*> plugin_promise;
    std::future<chain_plugin*> plugin_fut = plugin_promise.get_future();
    std::thread app_thread( [&]() {
-      std::vector<const char*> argv = {"test"};
-      app->initialize( argv.size(), (char**) &argv[0] );
-      app->startup();
-      plugin_promise.set_value(app->find_plugin<chain_plugin>());
-      app->exec();
+      try {
+         std::vector<const char*> argv = {"test"};
+         app->initialize(argv.size(), (char**)&argv[0]);
+         app->startup();
+         plugin_promise.set_value(app->find_plugin<chain_plugin>());
+         app->exec();
+         return;
+      } FC_LOG_AND_DROP()
+      BOOST_CHECK(!"app threw exception see logged error");
    } );
    (void)plugin_fut.get(); // wait for app to be started

diff --git a/plugins/chain_plugin/trx_retry_db.cpp b/plugins/chain_plugin/trx_retry_db.cpp
index 2fb1d91360..9ba86794d4 100644
---
a/plugins/chain_plugin/trx_retry_db.cpp +++ b/plugins/chain_plugin/trx_retry_db.cpp @@ -1,4 +1,5 @@ #include +#include #include #include @@ -145,7 +146,9 @@ struct trx_retry_db_impl { // Convert to variant with abi here and now because abi could change in very next transaction. // Alternatively, we could store off all the abis needed and do the conversion later, but as this is designed // to run on an API node, probably the best trade off to perform the abi serialization during block processing. - tt.trx_trace_v = control.to_variant_with_abi( *trace, abi_serializer::create_yield_function( abi_max_time ) ); + auto resolver = get_serializers_cache(control, trace, abi_max_time); + tt.trx_trace_v.clear(); + abi_serializer::to_variant(*trace, tt.trx_trace_v, resolver, abi_max_time); } catch( chain::abi_exception& ) { tt.trx_trace_v = *trace; } diff --git a/plugins/db_size_api_plugin/db_size_api_plugin.cpp b/plugins/db_size_api_plugin/db_size_api_plugin.cpp index bb26ddb097..03fc905efd 100644 --- a/plugins/db_size_api_plugin/db_size_api_plugin.cpp +++ b/plugins/db_size_api_plugin/db_size_api_plugin.cpp @@ -11,11 +11,12 @@ using namespace eosio; #define CALL_WITH_400(api_name, api_handle, call_name, INVOKE, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ + api_category::db_size, \ [api_handle](string&&, string&& body, url_response_callback&& cb) mutable { \ try { \ body = parse_params(body); \ INVOKE \ - cb(http_response_code, fc::time_point::maximum(), fc::variant(result)); \ + cb(http_response_code, fc::variant(result)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 96a43c4a1f..09bbc2bd5f 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -1,13 +1,15 @@ + #include #include -#include +#include #include #include #include +#include #include -#include +#include #include #include @@ -31,7 +33,6 @@ namespace eosio { static http_plugin_defaults current_http_plugin_defaults; static bool verbose_http_errors = false; - void http_plugin::set_defaults(const http_plugin_defaults& config) { current_http_plugin_defaults = config; } @@ -42,6 +43,74 @@ namespace eosio { using http_plugin_impl_ptr = std::shared_ptr; + api_category to_category(std::string_view name) { + if (name == "chain_ro") return api_category::chain_ro; + if (name == "chain_rw") return api_category::chain_rw; + if (name == "db_size") return api_category::db_size; + if (name == "net_ro") return api_category::net_ro; + if (name == "net_rw") return api_category::net_rw; + if (name == "producer_ro") return api_category::producer_ro; + if (name == "producer_rw") return api_category::producer_rw; + if (name == "snapshot") return api_category::snapshot; + if (name == "trace_api") return api_category::trace_api; + if (name == "prometheus") return api_category::prometheus; + if (name == "test_control") return api_category::test_control; + return api_category::unknown; + } + + const char* from_category(api_category category) { + if (category == api_category::chain_ro) return "chain_ro"; + if (category == api_category::chain_rw) return "chain_rw"; + if (category == api_category::db_size) return "db_size"; + if (category == api_category::net_ro) return "net_ro"; + if (category == api_category::net_rw) return "net_rw"; + if (category == api_category::producer_ro) return "producer_ro"; + if (category == api_category::producer_rw) return "producer_rw"; + if 
(category == api_category::snapshot) return "snapshot"; + if (category == api_category::trace_api) return "trace_api"; + if (category == api_category::prometheus) return "prometheus"; + if (category == api_category::test_control) return "test_control"; + if (category == api_category::node) return "node"; + // It's a programming error when the control flow reaches this point, + // please make sure all the category names are returned from the statements above. + assert(false && "No corresponding category name for the category value"); + return ""; + } + + std::string category_plugin_name(api_category category) { + if (category == api_category::db_size) + return "eosio::db_size_api_plugin"; + if (category == api_category::trace_api) + return "eosio::trace_api_plugin"; + if (category == api_category::prometheus) + return "eosio::prometheus_plugin"; + if (category == api_category::test_control) + return "eosio::test_control_plugin"; + if (api_category_set({api_category::chain_ro, api_category::chain_rw}).contains(category)) + return "eosio::chain_api_plugin"; + if (api_category_set({api_category::net_ro, api_category::net_rw}).contains(category)) + return "eosio::net_api_plugin"; + if (api_category_set({api_category::producer_ro, api_category::producer_rw, api_category::snapshot}) + .contains(category)) + return "eosio::producer_api_plugin"; + // It's a programming error when the control flow reaches this point, + // please make sure all the plugin names are returned from the statements above. + assert(false && "No corresponding plugin for the category value"); + return {}; + } + + std::string category_names(api_category_set set) { + if (set == api_category_set::all()) return "all"; + std::string result; + for (uint32_t i = 1; i <= static_cast(api_category::test_control); i<<=1) { + if (set.contains(api_category(i))) { + result += from_category(api_category(i)); + result += " "; + } + } + return result; + } + class http_plugin_impl : public std::enable_shared_from_this { public: http_plugin_impl() = default; @@ -52,14 +121,11 @@ namespace eosio { http_plugin_impl& operator=(const http_plugin_impl&) = delete; http_plugin_impl& operator=(http_plugin_impl&&) = delete; - std::optional listen_endpoint; - - std::filesystem::path unix_sock_path; + std::map categories_by_address; - shared_ptr > beast_server; - shared_ptr > beast_unix_server; + http_plugin_state plugin_state{logger()}; + std::atomic listening; - shared_ptr plugin_state = std::make_shared(logger()); /** * Make an internal_url_handler that will run the url_handler on the app() thread and then * @param content_type - json or plain txt * @return the constructed internal_url_handler */ - static detail::internal_url_handler make_app_thread_url_handler(const string& url, appbase::exec_queue to_queue, int priority, url_handler next, http_plugin_impl_ptr my, http_content_type content_type ) { + static detail::internal_url_handler make_app_thread_url_handler(api_entry&& entry, appbase::exec_queue to_queue, int priority, http_plugin_impl_ptr my, http_content_type content_type ) { detail::internal_url_handler handler; handler.content_type = content_type; + handler.category = entry.category; - auto next_ptr = std::make_shared(std::move(next)); + auto next_ptr = std::make_shared(std::move(entry.handler)); handler.fn = [my=std::move(my), priority, to_queue, next_ptr=std::move(next_ptr)] ( detail::abstract_conn_ptr conn, string&& r, string&& b, url_response_callback&& then ) { if (auto error_str = 
conn->verify_max_bytes_in_flight(b.size()); !error_str.empty()) { @@ -84,8 +151,8 @@ namespace eosio { return; } - url_response_callback wrapped_then = [then=std::move(then)](int code, const fc::time_point& deadline, std::optional resp) { - then(code, deadline, std::move(resp)); + url_response_callback wrapped_then = [then=std::move(then)](int code, std::optional resp) { + then(code, std::move(resp)); }; // post to the app thread taking shared ownership of next (via std::shared_ptr), @@ -111,10 +178,11 @@ namespace eosio { * @param next - the next handler for responses * @return the constructed internal_url_handler */ - static detail::internal_url_handler make_http_thread_url_handler(const string& url, url_handler next, http_content_type content_type) { + static detail::internal_url_handler make_http_thread_url_handler(api_entry&& entry, http_content_type content_type) { detail::internal_url_handler handler; handler.content_type = content_type; - handler.fn = [next=std::move(next)]( const detail::abstract_conn_ptr& conn, string&& r, string&& b, url_response_callback&& then ) mutable { + handler.category = entry.category; + handler.fn = [next=std::move(entry.handler)]( const detail::abstract_conn_ptr& conn, string&& r, string&& b, url_response_callback&& then ) mutable { try { next(std::move(r), std::move(b), std::move(then)); } catch( ... ) { @@ -123,22 +191,84 @@ namespace eosio { }; return handler; } + + bool is_unix_socket_address(const std::string& address) const { + using boost::algorithm::starts_with; + return starts_with(address, "/") || starts_with(address, "./") || starts_with(address, "../"); + } + + bool on_loopback_only(const std::string& address) { + if (is_unix_socket_address(address)) + return true; + auto [host, port] = fc::split_host_port(address); + boost::system::error_code ec; + tcp::resolver resolver(plugin_state.thread_pool.get_executor()); + auto endpoints = resolver.resolve(host, port, boost::asio::ip::tcp::resolver::passive, ec); + if (ec) { + fc_wlog(logger(), "Cannot resolve address ${addr}: ${msg}", ("addr", address)("msg", ec.message())); + return false; + } + return std::all_of(endpoints.begin(), endpoints.end(), [](const auto& ep) { + return ep.endpoint().address().is_loopback(); + }); + } + + template + void create_listener(const std::string& address, api_category_set categories) { + const boost::posix_time::milliseconds accept_timeout(500); + auto extra_listening_log_info = " for API categories: " + category_names(categories); + using socket_type = typename Protocol::socket; + auto create_session = [this, categories, address](socket_type&& socket) { + std::string remote_endpoint; + if constexpr (std::is_same_v) { + boost::system::error_code re_ec; + auto re = socket.remote_endpoint(re_ec); + remote_endpoint = re_ec ? 
"unknown" : fc::to_string(re); + } else { + remote_endpoint = address; + } + std::make_shared>( + std::move(socket), plugin_state, std::move(remote_endpoint), categories, address) + ->run_session(); + }; - void add_aliases_for_endpoint( const tcp::endpoint& ep, const string& host, const string& port ) { - auto resolved_port_str = std::to_string(ep.port()); - plugin_state->valid_hosts.emplace(host + ":" + port); - plugin_state->valid_hosts.emplace(host + ":" + resolved_port_str); + fc::create_listener(plugin_state.thread_pool.get_executor(), logger(), accept_timeout, address, + extra_listening_log_info, create_session); } - void create_beast_server(bool isUnix) { - if(isUnix) { - beast_unix_server = std::make_shared >(plugin_state); - fc_ilog( logger(), "created beast UNIX socket listener"); + void create_beast_server(const std::string& address, api_category_set categories) { + try { + if (is_unix_socket_address(address)) { + namespace fs = std::filesystem; + fs::path sock_path = address; + if (sock_path.is_relative()) + sock_path = fs::weakly_canonical(app().data_dir() / sock_path); + create_listener(sock_path.string(), categories); + } else { + create_listener(address, categories); + } + } catch (const fc::exception& e) { + fc_elog(logger(), "http service failed to start for ${addr}: ${e}", + ("addr", address)("e", e.to_detail_string())); + throw; + } catch (const std::exception& e) { + fc_elog(logger(), "http service failed to start for ${addr}: ${e}", ("addr", address)("e", e.what())); + throw; + } catch (...) { + fc_elog(logger(), "error thrown from http io service"); + throw; } - else { - beast_server = std::make_shared >(plugin_state); - fc_ilog( logger(), "created beast HTTP listener"); + } + + std::string addresses_for_category(api_category category) const { + std::string result; + for (const auto& [address, categories] : categories_by_address) { + if (categories.contains(category)) { + result += address; + result += " "; + } } + return result; } }; @@ -163,51 +293,82 @@ namespace eosio { else cfg.add_options() ("http-server-address", bpo::value(), - "The local IP and port to listen for incoming http connections; leave blank to disable."); + "The local IP and port to listen for incoming http connections; " + "setting to http-category-address to enable http-category-address option. leave blank to disable."); + + if (current_http_plugin_defaults.support_categories) { + cfg.add_options() + ("http-category-address", bpo::value>(), + "The local IP and port to listen for incoming http category connections." + " Syntax: category,address\n" + " Where the address can be :port, :port or unix socket path;\n" + " in addition, unix socket path must starts with '/', './' or '../'. 
When relative path\n" + " is used, it is relative to the data path.\n\n" + " Valid categories include chain_ro, chain_rw, db_size, net_ro, net_rw, producer_ro\n" + " producer_rw, snapshot, trace_api, prometheus, and test_control.\n\n" + " A single `hostname:port` specification can be used by multiple categories\n" + " However, two specifications having the same port with different hostname strings\n" + " are always considered as configuration error regardless of whether they can be resolved\n" + " into the same set of IP addresses.\n\n" + " Examples:\n" + " chain_ro,127.0.0.1:8080\n" + " chain_ro,127.0.0.1:8081\n" + " chain_rw,localhost:8081 # ERROR!, same port with different addresses\n" + " chain_rw,[::1]:8082\n" + " net_ro,localhost:8083\n" + " net_rw,server.domain.net:8084\n" + " producer_ro,/tmp/absolute_unix_path.sock\n" + " producer_rw,./relative_unix_path.sock\n" + " trace_api,:8086 # listen on all network interfaces\n\n" + " Notice that the behavior for `[::1]` is platform dependent. For system with IPv4 mapped IPv6 networking\n" + " is enabled, using `[::1]` will listen on both IPv4 and IPv6; other systems like FreeBSD, it will only\n" + " listen on IPv6. On the other hand, the specfications without hostnames like `:8086` will always listen on\n" + " both IPv4 and IPv6 on all platforms."); + } cfg.add_options() ("access-control-allow-origin", bpo::value()->notifier([this](const string& v) { - my->plugin_state->access_control_allow_origin = v; + my->plugin_state.access_control_allow_origin = v; fc_ilog( logger(), "configured http with Access-Control-Allow-Origin: ${o}", - ("o", my->plugin_state->access_control_allow_origin) ); + ("o", my->plugin_state.access_control_allow_origin) ); }), "Specify the Access-Control-Allow-Origin to be returned on each request") ("access-control-allow-headers", bpo::value()->notifier([this](const string& v) { - my->plugin_state->access_control_allow_headers = v; + my->plugin_state.access_control_allow_headers = v; fc_ilog( logger(), "configured http with Access-Control-Allow-Headers : ${o}", - ("o", my->plugin_state->access_control_allow_headers) ); + ("o", my->plugin_state.access_control_allow_headers) ); }), "Specify the Access-Control-Allow-Headers to be returned on each request") ("access-control-max-age", bpo::value()->notifier([this](const string& v) { - my->plugin_state->access_control_max_age = v; + my->plugin_state.access_control_max_age = v; fc_ilog( logger(), "configured http with Access-Control-Max-Age : ${o}", - ("o", my->plugin_state->access_control_max_age) ); + ("o", my->plugin_state.access_control_max_age) ); }), "Specify the Access-Control-Max-Age to be returned on each request.") ("access-control-allow-credentials", bpo::bool_switch()->notifier([this](bool v) { - my->plugin_state->access_control_allow_credentials = v; + my->plugin_state.access_control_allow_credentials = v; if( v ) fc_ilog( logger(), "configured http with Access-Control-Allow-Credentials: true" ); })->default_value(false), "Specify if Access-Control-Allow-Credentials: true should be returned on each request.") - ("max-body-size", bpo::value()->default_value(my->plugin_state->max_body_size), + ("max-body-size", bpo::value()->default_value(my->plugin_state.max_body_size), "The maximum body size in bytes allowed for incoming RPC requests") ("http-max-bytes-in-flight-mb", bpo::value()->default_value(500), "Maximum size in megabytes http_plugin should use for processing http requests. -1 for unlimited. 429 error response when exceeded." 
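
To make the category,address syntax documented above concrete, here is a minimal, self-contained C++ sketch of how one such specification can be split and accumulated into a per-address category mask. The names here (add_spec, the bare enum) are illustrative stand-ins, not this patch's code; the real parsing and validation happen in http_plugin::plugin_initialize further below, using to_category, fc::split_host_port and api_category_set.

#include <cassert>
#include <cstdint>
#include <map>
#include <string>
#include <string_view>

// Each category occupies one bit, so a uint32_t mask can record every
// category bound to a single listen address (compare api_category_set).
enum : uint32_t { chain_ro_bit = 1u << 0, chain_rw_bit = 1u << 1 /* ... */ };

std::map<std::string, uint32_t> categories_by_address; // address -> category mask

void add_spec(std::string_view spec, uint32_t category_bit) {
   auto comma = spec.find(','); // e.g. "chain_ro,127.0.0.1:8080"
   assert(comma != std::string_view::npos && comma > 0);
   // everything after the comma is the address; repeated addresses merge their bits
   categories_by_address[std::string(spec.substr(comma + 1))] |= category_bit;
}

Because the masks merge, two categories may share one address and be served by a single listener, while the port bookkeeping shown further below still rejects reusing one port under two different hostname strings.
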
) ("http-max-in-flight-requests", bpo::value()->default_value(-1), "Maximum number of requests http_plugin should use for processing http requests. 429 error response when exceeded." ) - ("http-max-response-time-ms", bpo::value()->default_value(30), - "Maximum time for processing a request, -1 for unlimited") + ("http-max-response-time-ms", bpo::value()->default_value(15), + "Maximum time on main thread for processing a request, -1 for unlimited") ("verbose-http-errors", bpo::bool_switch()->default_value(false), "Append the error log to HTTP responses") ("http-validate-host", boost::program_options::value()->default_value(true), "If set to false, then any incoming \"Host\" header is considered valid") ("http-alias", bpo::value>()->composing(), - "Additionaly acceptable values for the \"Host\" header of incoming HTTP requests, can be specified multiple times. Includes http/s_server_address by default.") - ("http-threads", bpo::value()->default_value( my->plugin_state->thread_pool_size ), + "Additionally acceptable values for the \"Host\" header of incoming HTTP requests, can be specified multiple times. Includes http/s_server_address by default.") + ("http-threads", bpo::value()->default_value( my->plugin_state.thread_pool_size ), "Number of worker threads in http thread pool") ("http-keep-alive", bpo::value()->default_value(true), "If set to false, do not keep HTTP connections alive, even if client requests.") @@ -217,65 +378,96 @@ namespace eosio { void http_plugin::plugin_initialize(const variables_map& options) { try { handle_sighup(); // setup logging - my->plugin_state->max_body_size = options.at( "max-body-size" ).as(); + my->plugin_state.max_body_size = options.at( "max-body-size" ).as(); verbose_http_errors = options.at( "verbose-http-errors" ).as(); - my->plugin_state->thread_pool_size = options.at( "http-threads" ).as(); - EOS_ASSERT( my->plugin_state->thread_pool_size > 0, chain::plugin_config_exception, - "http-threads ${num} must be greater than 0", ("num", my->plugin_state->thread_pool_size)); + my->plugin_state.thread_pool_size = options.at( "http-threads" ).as(); + EOS_ASSERT( my->plugin_state.thread_pool_size > 0, chain::plugin_config_exception, + "http-threads ${num} must be greater than 0", ("num", my->plugin_state.thread_pool_size)); auto max_bytes_mb = options.at( "http-max-bytes-in-flight-mb" ).as(); EOS_ASSERT( (max_bytes_mb >= -1 && max_bytes_mb < std::numeric_limits::max() / (1024 * 1024)), chain::plugin_config_exception, "http-max-bytes-in-flight-mb (${max_bytes_mb}) must be equal to or greater than -1 and less than ${max}", ("max_bytes_mb", max_bytes_mb) ("max", std::numeric_limits::max() / (1024 * 1024)) ); if ( max_bytes_mb == -1 ) { - my->plugin_state->max_bytes_in_flight = std::numeric_limits::max(); + my->plugin_state.max_bytes_in_flight = std::numeric_limits::max(); } else { - my->plugin_state->max_bytes_in_flight = max_bytes_mb * 1024 * 1024; + my->plugin_state.max_bytes_in_flight = max_bytes_mb * 1024 * 1024; } - my->plugin_state->max_requests_in_flight = options.at( "http-max-in-flight-requests" ).as(); + my->plugin_state.max_requests_in_flight = options.at( "http-max-in-flight-requests" ).as(); int64_t max_reponse_time_ms = options.at("http-max-response-time-ms").as(); EOS_ASSERT( max_reponse_time_ms == -1 || max_reponse_time_ms >= 0, chain::plugin_config_exception, "http-max-response-time-ms must be -1, or non-negative: ${m}", ("m", max_reponse_time_ms) ); - // set to one year for -1, unlimited, since this is added to fc::time_point::now() for a 
deadline - my->plugin_state->max_response_time = max_reponse_time_ms == -1 ? - fc::days(365) : fc::microseconds( max_reponse_time_ms * 1000 ); + my->plugin_state.max_response_time = max_reponse_time_ms == -1 ? + fc::microseconds::maximum() : fc::microseconds( max_reponse_time_ms * 1000 ); - my->plugin_state->validate_host = options.at("http-validate-host").as(); + my->plugin_state.validate_host = options.at("http-validate-host").as(); if( options.count( "http-alias" )) { const auto& aliases = options["http-alias"].as>(); - my->plugin_state->valid_hosts.insert(aliases.begin(), aliases.end()); + for (const auto& alias : aliases ) { + auto [host, port] = fc::split_host_port(alias); + my->plugin_state.valid_hosts.insert(host); + } } - my->plugin_state->keep_alive = options.at("http-keep-alive").as(); - - tcp::resolver resolver( app().get_io_service()); - if( options.count( "http-server-address" ) && options.at( "http-server-address" ).as().length()) { - string lipstr = options.at( "http-server-address" ).as(); - string host = lipstr.substr( 0, lipstr.find( ':' )); - string port = lipstr.substr( host.size() + 1, lipstr.size()); - try { - my->listen_endpoint = *resolver.resolve( tcp::v4(), host, port ); - fc_ilog(logger(), "configured http to listen on ${h}:${p}", ("h", host)( "p", port )); - } catch ( const boost::system::system_error& ec ) { - fc_elog(logger(), "failed to configure http to listen on ${h}:${p} (${m})", - ("h", host)( "p", port )( "m", ec.what())); - } + my->plugin_state.keep_alive = options.at("http-keep-alive").as(); - // add in resolved hosts and ports as well - if (my->listen_endpoint) { - my->add_aliases_for_endpoint(*my->listen_endpoint, host, port); + std::string http_server_address; + if (options.count("http-server-address")) { + http_server_address = options.at("http-server-address").as(); + if (http_server_address.size() && http_server_address != "http-category-address") { + my->categories_by_address[http_server_address].insert(api_category::node); } } - if( options.count( "unix-socket-path" ) && !options.at( "unix-socket-path" ).as().empty()) { - std::filesystem::path sock_path = options.at("unix-socket-path").as(); - if (sock_path.is_relative()) - sock_path = app().data_dir() / sock_path; - - my->unix_sock_path = sock_path; + if (options.count("unix-socket-path") && !options.at("unix-socket-path").as().empty()) { + std::string unix_sock_path = options.at("unix-socket-path").as(); + if (unix_sock_path.size()) { + if (unix_sock_path[0] != '/') unix_sock_path = "./" + unix_sock_path; + my->categories_by_address[unix_sock_path].insert(api_category::node); + } } - my->plugin_state->server_header = current_http_plugin_defaults.server_header; + if (options.count("http-category-address") != 0) { + auto plugins = options["plugin"].as>(); + auto has_plugin = [&plugins](const std::string& s) { + return std::find(plugins.begin(), plugins.end(), s) != plugins.end(); + }; + + EOS_ASSERT(http_server_address == "http-category-address" && options.count("unix-socket-path") == 0, + chain::plugin_config_exception, + "when http-category-address is specified, http-server-address must be set as " + "`http-category-address` and `unix-socket-path` must be left unspecified"); + + std::map hostnames; + auto addresses = options["http-category-address"].as>(); + for (const auto& spec : addresses) { + auto comma_pos = spec.find(','); + EOS_ASSERT(comma_pos > 0 && comma_pos != std::string_view::npos, chain::plugin_config_exception, + "http-category-address '${spec}' does not contain a required 
comma to separate the category and address", + ("spec", spec)); + auto category_name = spec.substr(0, comma_pos); + auto category = to_category(category_name); + + EOS_ASSERT(category != api_category::unknown, chain::plugin_config_exception, + "invalid category name `${name}` for http-category-address", ("name", std::string(category_name))); + + EOS_ASSERT(has_plugin(category_plugin_name(category)), chain::plugin_config_exception, + "--plugin=${plugin_name} is required for --http-category-address=${spec}", + ("plugin_name", category_plugin_name(category))("spec", spec)); + + auto address = spec.substr(comma_pos+1); + + auto [host, port] = fc::split_host_port(address); + if (port.size()) { + auto [itr, inserted] = hostnames.try_emplace(port, host); + EOS_ASSERT(inserted || host == itr->second, chain::plugin_config_exception, + "unable to listen to port ${port} for both ${host} and ${prev}", + ("port", port)("host", host)("prev", itr->second)); + } + my->categories_by_address[address].insert(category); + } + } + my->plugin_state.server_header = current_http_plugin_defaults.server_header; //watch out for the returns above when adding new code here @@ -285,71 +477,24 @@ namespace eosio { void http_plugin::plugin_startup() { app().executor().post(appbase::priority::high, [this] () { + // We post here so that blockchain replay happens before we start listening. try { - my->plugin_state->thread_pool.start( my->plugin_state->thread_pool_size, [](const fc::exception& e) { + my->plugin_state.thread_pool.start( my->plugin_state.thread_pool_size, [](const fc::exception& e) { fc_elog( logger(), "Exception in http thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); app().quit(); } ); - if(my->listen_endpoint) { - try { - my->create_beast_server(false); - - fc_ilog( logger(), "start listening for http requests (boost::beast)" ); - - my->beast_server->listen(*my->listen_endpoint); - my->beast_server->start_accept(); - } catch ( const fc::exception& e ){ - fc_elog( logger(), "http service failed to start: ${e}", ("e", e.to_detail_string()) ); - throw; - } catch ( const std::exception& e ){ - fc_elog( logger(), "http service failed to start: ${e}", ("e", e.what()) ); - throw; - } catch (...) { - fc_elog( logger(), "error thrown from http io service" ); - throw; - } - } - - if(!my->unix_sock_path.empty()) { - try { - my->create_beast_server(true); - - // The maximum length of the socket path is defined by sockaddr_un::sun_path. On Linux, - // according to unix(7), it is 108 bytes. On FreeBSD, according to unix(4), it is 104 bytes. - // Therefore, we create the unix socket with the relative path to its parent path to avoid the problem. - - auto cwd = std::filesystem::current_path(); - std::filesystem::current_path(my->unix_sock_path.parent_path()); - asio::local::stream_protocol::endpoint endpoint(my->unix_sock_path.filename().string()); - my->beast_unix_server->listen(endpoint); - std::filesystem::current_path(cwd); - - my->beast_unix_server->start_accept(); - } catch ( const fc::exception& e ){ - fc_elog( logger(), "unix socket service (${path}) failed to start: ${e}", ("e", e.to_detail_string())("path",my->unix_sock_path) ); - throw; - } catch ( const std::exception& e ){ - fc_elog( logger(), "unix socket service (${path}) failed to start: ${e}", ("e", e.what())("path",my->unix_sock_path) ); - throw; - } catch (...) 
{ - fc_elog( logger(), "error thrown from unix socket (${path}) io service", ("path",my->unix_sock_path) ); - throw; - } + for (const auto& [address, categories]: my->categories_by_address) { + my->create_beast_server(address, categories); } - add_api({{ - std::string("/v1/node/get_supported_apis"), - [&](string&&, string&& body, url_response_callback&& cb) { - try { - auto result = (*this).get_supported_apis(); - cb(200, fc::time_point::maximum(), fc::variant(result)); - } catch (...) { - handle_exception("node", "get_supported_apis", body.empty() ? "{}" : body, cb); - } - } - }}, appbase::exec_queue::read_only); - + my->listening.store(true); + } catch(fc::exception& e) { + fc_elog(logger(), "http_plugin startup fails for ${e}", ("e", e.to_detail_string())); + app().quit(); + } catch(std::exception& e) { + fc_elog(logger(), "http_plugin startup fails for ${e}", ("e", e.what())); + app().quit(); } catch (...) { fc_elog(logger(), "http_plugin startup fails, shutting down"); app().quit(); @@ -362,37 +507,41 @@ namespace eosio { } void http_plugin::plugin_shutdown() { - if(my->beast_server) - my->beast_server->stop_listening(); - if(my->beast_unix_server) - my->beast_unix_server->stop_listening(); - - my->plugin_state->thread_pool.stop(); - - my->beast_server.reset(); - my->beast_unix_server.reset(); + my->plugin_state.thread_pool.stop(); // release http_plugin_impl_ptr shared_ptrs captured in url handlers - my->plugin_state->url_handlers.clear(); + my->plugin_state.url_handlers.clear(); fc_ilog( logger(), "exit shutdown"); } - void http_plugin::add_handler(const string& url, const url_handler& handler, appbase::exec_queue q, int priority, http_content_type content_type) { - fc_ilog( logger(), "add api url: ${c}", ("c", url) ); - auto p = my->plugin_state->url_handlers.emplace(url, my->make_app_thread_url_handler(url, q, priority, handler, my, content_type)); - EOS_ASSERT( p.second, chain::plugin_config_exception, "http url ${u} is not unique", ("u", url) ); + void log_add_handler(http_plugin_impl* my, api_entry& entry) { + auto addrs = my->addresses_for_category(entry.category); + if (addrs.size()) + addrs = "on " + addrs; + else + addrs = "disabled for category address not configured"; + fc_ilog(logger(), "add ${category} api url: ${c} ${addrs}", + ("category", from_category(entry.category))("c", entry.path)("addrs", addrs)); + } + + void http_plugin::add_handler(api_entry&& entry, appbase::exec_queue q, int priority, http_content_type content_type) { + log_add_handler(my.get(), entry); + std::string path = entry.path; + auto p = my->plugin_state.url_handlers.emplace(path, my->make_app_thread_url_handler(std::move(entry), q, priority, my, content_type)); + EOS_ASSERT( p.second, chain::plugin_config_exception, "http url ${u} is not unique", ("u", path) ); } - void http_plugin::add_async_handler(const string& url, const url_handler& handler, http_content_type content_type) { - fc_ilog( logger(), "add api url: ${c}", ("c", url) ); - auto p = my->plugin_state->url_handlers.emplace(url, my->make_http_thread_url_handler(url, handler, content_type)); - EOS_ASSERT( p.second, chain::plugin_config_exception, "http url ${u} is not unique", ("u", url) ); + void http_plugin::add_async_handler(api_entry&& entry, http_content_type content_type) { + log_add_handler(my.get(), entry); + std::string path = entry.path; + auto p = my->plugin_state.url_handlers.emplace(path, my->make_http_thread_url_handler(std::move(entry), content_type)); + EOS_ASSERT( p.second, chain::plugin_config_exception, "http url ${u} 
is not unique", ("u", path) ); } void http_plugin::post_http_thread_pool(std::function f) { if( f ) - boost::asio::post( my->plugin_state->thread_pool.get_executor(), f ); + boost::asio::post( my->plugin_state.thread_pool.get_executor(), f ); } void http_plugin::handle_exception( const char* api_name, const char* call_name, const string& body, const url_response_callback& cb) { @@ -401,48 +550,48 @@ namespace eosio { throw; } catch (chain::unknown_block_exception& e) { error_results results{400, "Unknown Block", error_results::error_info(e, verbose_http_errors)}; - cb( 400, fc::time_point::maximum(), fc::variant( results )); + cb( 400, fc::variant( results )); fc_dlog( logger(), "Unknown block while processing ${api}.${call}: ${e}", ("api", api_name)("call", call_name)("e", e.to_detail_string()) ); } catch (chain::invalid_http_request& e) { error_results results{400, "Invalid Request", error_results::error_info(e, verbose_http_errors)}; - cb( 400, fc::time_point::maximum(), fc::variant( results )); + cb( 400, fc::variant( results )); fc_dlog( logger(), "Invalid http request while processing ${api}.${call}: ${e}", ("api", api_name)("call", call_name)("e", e.to_detail_string()) ); } catch (chain::account_query_exception& e) { error_results results{400, "Account lookup", error_results::error_info(e, verbose_http_errors)}; - cb( 400, fc::time_point::maximum(), fc::variant( results )); + cb( 400, fc::variant( results )); fc_dlog( logger(), "Account query exception while processing ${api}.${call}: ${e}", ("api", api_name)("call", call_name)("e", e.to_detail_string()) ); } catch (chain::unsatisfied_authorization& e) { error_results results{401, "UnAuthorized", error_results::error_info(e, verbose_http_errors)}; - cb( 401, fc::time_point::maximum(), fc::variant( results )); + cb( 401, fc::variant( results )); fc_dlog( logger(), "Auth error while processing ${api}.${call}: ${e}", ("api", api_name)("call", call_name)("e", e.to_detail_string()) ); } catch (chain::tx_duplicate& e) { error_results results{409, "Conflict", error_results::error_info(e, verbose_http_errors)}; - cb( 409, fc::time_point::maximum(), fc::variant( results )); + cb( 409, fc::variant( results )); fc_dlog( logger(), "Duplicate trx while processing ${api}.${call}: ${e}", ("api", api_name)("call", call_name)("e", e.to_detail_string()) ); } catch (fc::eof_exception& e) { error_results results{422, "Unprocessable Entity", error_results::error_info(e, verbose_http_errors)}; - cb( 422, fc::time_point::maximum(), fc::variant( results )); + cb( 422, fc::variant( results )); fc_elog( logger(), "Unable to parse arguments to ${api}.${call}", ("api", api_name)( "call", call_name ) ); fc_dlog( logger(), "Bad arguments: ${args}", ("args", body) ); } catch (fc::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(e, verbose_http_errors)}; - cb( 500, fc::time_point::maximum(), fc::variant( results )); + cb( 500, fc::variant( results )); fc_dlog( logger(), "Exception while processing ${api}.${call}: ${e}", ("api", api_name)( "call", call_name )("e", e.to_detail_string()) ); } catch (std::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, e.what())), verbose_http_errors)}; - cb( 500, fc::time_point::maximum(), fc::variant( results )); + cb( 500, fc::variant( results )); fc_dlog( logger(), "STD Exception encountered while processing ${api}.${call}: ${e}", ("api", api_name)("call", call_name)("e", e.what()) ); } catch (...) 
{ error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, "Unknown Exception" )), verbose_http_errors)}; - cb( 500, fc::time_point::maximum(), fc::variant( results )); + cb( 500, fc::variant( results )); fc_elog( logger(), "Unknown Exception encountered while processing ${api}.${call}", ("api", api_name)( "call", call_name ) ); } @@ -451,39 +600,31 @@ namespace eosio { } } - bool http_plugin::is_on_loopback() const { - return (!my->listen_endpoint || my->listen_endpoint->address().is_loopback()); - } - - bool http_plugin::is_secure() const { - return (!my->listen_endpoint || my->listen_endpoint->address().is_loopback()); + bool http_plugin::is_on_loopback(api_category category) const { + return std::all_of(my->categories_by_address.begin(), my->categories_by_address.end(), + [&category, this](const auto& entry) { + const auto& [address, categories] = entry; + return !categories.contains(category) || my->on_loopback_only(address); + }); } bool http_plugin::verbose_errors() { return verbose_http_errors; } - http_plugin::get_supported_apis_result http_plugin::get_supported_apis()const { - get_supported_apis_result result; - - for (const auto& handler : my->plugin_state->url_handlers) { - if (handler.first != "/v1/node/get_supported_apis") - result.apis.emplace_back(handler.first); - } - - return result; - } - fc::microseconds http_plugin::get_max_response_time()const { - return my->plugin_state->max_response_time; + return my->plugin_state.max_response_time; } size_t http_plugin::get_max_body_size()const { - return my->plugin_state->max_body_size; + return my->plugin_state.max_body_size; } void http_plugin::register_update_metrics(std::function&& fun) { - my->plugin_state->update_metrics = std::move(fun); + my->plugin_state.update_metrics = std::move(fun); } + std::atomic& http_plugin::listening() { + return my->listening; + } } diff --git a/plugins/http_plugin/include/eosio/http_plugin/api_category.hpp b/plugins/http_plugin/include/eosio/http_plugin/api_category.hpp new file mode 100644 index 0000000000..031e2277d4 --- /dev/null +++ b/plugins/http_plugin/include/eosio/http_plugin/api_category.hpp @@ -0,0 +1,47 @@ +#pragma once +#include +#include +namespace eosio { + +enum class api_category : uint32_t { + unknown = 0, + chain_ro = 1 << 0, + chain_rw = 1 << 1, + db_size = 1 << 2, + net_ro = 1 << 3, + net_rw = 1 << 4, + producer_ro = 1 << 5, + producer_rw = 1 << 6, + snapshot = 1 << 7, + trace_api = 1 << 8, + prometheus = 1 << 9, + test_control = 1 << 10, + node = UINT32_MAX +}; + +class api_category_set { + uint32_t data = {}; +public: + constexpr api_category_set() = default; + constexpr explicit api_category_set(api_category c) : data(static_cast(c)){} + constexpr api_category_set(std::initializer_list l) { + for (auto c: l) + insert(c); + } + constexpr bool contains(api_category category) const { + return eosio::chain::has_field(data, category); + } + constexpr void insert(api_category category) { + data = eosio::chain::set_field(data, category, true); + } + + constexpr static api_category_set all() { + return api_category_set(api_category::node); + } + + constexpr bool operator == (const api_category_set& other) const { + return data == other.data; + } +}; + +} \ No newline at end of file diff --git a/plugins/http_plugin/include/eosio/http_plugin/beast_http_listener.hpp b/plugins/http_plugin/include/eosio/http_plugin/beast_http_listener.hpp deleted file mode 100644 index 1b0daba652..0000000000 --- 
a/plugins/http_plugin/include/eosio/http_plugin/beast_http_listener.hpp +++ /dev/null @@ -1,138 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -namespace eosio { -// since beast_http_listener handles both TCP and UNIX endpoints we need a template here -// to get the path if makes sense, so that we can call ::unlink() before opening socket -// in beast_http_listener::listen() by tdefault return blank string -template -std::string get_endpoint_path(const T& endpt) { return {}; } - -std::string get_endpoint_path(const stream_protocol::endpoint& endpt) { return endpt.path(); } - -// Accepts incoming connections and launches the sessions -// session_type should be a subclass of beast_http_session -// protocol type must have sub types acceptor and endpoint, e.g. boost::asio::ip::tcp; -// socket type must be the socket e.g, boost::asio::ip::tcp::socket -template -class beast_http_listener : public std::enable_shared_from_this> { -private: - bool is_listening_ = false; - - std::shared_ptr plugin_state_; - - typename protocol_type::acceptor acceptor_; - socket_type socket_; - - boost::asio::deadline_timer accept_error_timer_; - -public: - beast_http_listener() = default; - beast_http_listener(const beast_http_listener&) = delete; - beast_http_listener(beast_http_listener&&) = delete; - - beast_http_listener& operator=(const beast_http_listener&) = delete; - beast_http_listener& operator=(beast_http_listener&&) = delete; - - beast_http_listener(std::shared_ptr plugin_state) : is_listening_(false), plugin_state_(std::move(plugin_state)), acceptor_(plugin_state_->thread_pool.get_executor()), socket_(plugin_state_->thread_pool.get_executor()), accept_error_timer_(plugin_state_->thread_pool.get_executor()) {} - - virtual ~beast_http_listener() { - try { - stop_listening(); - } catch(...) 
{} - }; - - void listen(typename protocol_type::endpoint endpoint) { - if(is_listening_) return; - - // for unix sockets we should delete the old socket - if(std::is_same::value) { - ::unlink(get_endpoint_path(endpoint).c_str()); - } - - beast::error_code ec; - // Open the acceptor - acceptor_.open(endpoint.protocol(), ec); - if(ec) { - fail(ec, "open", plugin_state_->logger, "closing port"); - return; - } - - // Allow address reuse - acceptor_.set_option(asio::socket_base::reuse_address(true), ec); - if(ec) { - fail(ec, "set_option", plugin_state_->logger, "closing port"); - return; - } - - // Bind to the server address - acceptor_.bind(endpoint, ec); - if(ec) { - fail(ec, "bind", plugin_state_->logger, "closing port"); - return; - } - - // Start listening for connections - auto max_connections = asio::socket_base::max_listen_connections; - fc_ilog(plugin_state_->logger, "acceptor_.listen()"); - acceptor_.listen(max_connections, ec); - if(ec) { - fail(ec, "listen", plugin_state_->logger, "closing port"); - return; - } - is_listening_ = true; - } - - // Start accepting incoming connections - void start_accept() { - if(!is_listening_) return; - do_accept(); - } - - bool is_listening() { - return is_listening_; - } - - void stop_listening() { - if(is_listening_) { - plugin_state_->thread_pool.stop(); - is_listening_ = false; - } - } - -private: - void do_accept() { - auto self = this->shared_from_this(); - acceptor_.async_accept(socket_, [self](beast::error_code ec) { - if(ec == boost::system::errc::too_many_files_open) { - // retry accept() after timeout to avoid cpu loop on accept - fail(ec, "accept", self->plugin_state_->logger, "too many files open - waiting 500ms"); - self->accept_error_timer_.expires_from_now(boost::posix_time::milliseconds(500)); - self->accept_error_timer_.async_wait([self = self->shared_from_this()](beast::error_code ec) { - if (!ec) - self->do_accept(); - }); - } else { - if (ec) { - fail(ec, "accept", self->plugin_state_->logger, "closing connection"); - } else { - // Create the session object and run it - std::string remote_endpoint = boost::lexical_cast(self->socket_.remote_endpoint()); - std::make_shared( - std::move(self->socket_), - self->plugin_state_, - std::move(remote_endpoint)) - ->run_session(); - } - - // Accept another connection - self->do_accept(); - } - }); - } -};// end class beast_http_Listener -}// namespace eosio diff --git a/plugins/http_plugin/include/eosio/http_plugin/beast_http_session.hpp b/plugins/http_plugin/include/eosio/http_plugin/beast_http_session.hpp index 4c2384ca14..2ac3ebb5a4 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/beast_http_session.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/beast_http_session.hpp @@ -1,7 +1,10 @@ #pragma once #include +#include + #include +#include #include #include @@ -14,21 +17,6 @@ namespace eosio { using std::chrono::steady_clock; -typedef asio::basic_stream_socket tcp_socket_t; - -using boost::asio::local::stream_protocol; - -#if BOOST_VERSION < 107300 -using local_stream = beast::basic_stream< - stream_protocol, - asio::executor, - beast::unlimited_rate_policy>; -#else -using local_stream = beast::basic_stream< - stream_protocol, - asio::any_io_executor, - beast::unlimited_rate_policy>; -#endif //------------------------------------------------------------------------------ // fail() @@ -40,20 +28,14 @@ void fail(beast::error_code ec, char const* what, fc::logger& logger, char const } -template -bool allow_host(const http::request& req, T& session, - const std::shared_ptr& 
plugin_state) { - auto is_conn_secure = session.is_secure(); +bool allow_host(const std::string& host_str, tcp::socket& socket, + const http_plugin_state& plugin_state) { - auto& socket = session.socket(); auto& lowest_layer = beast::get_lowest_layer(socket); auto local_endpoint = lowest_layer.local_endpoint(); - auto local_socket_host_port = local_endpoint.address().to_string() + ":" + std::to_string(local_endpoint.port()); - const std::string host_str(req["host"]); - if(host_str.empty() || !host_is_valid(*plugin_state, + if(host_str.empty() || !host_is_valid(plugin_state, host_str, - local_socket_host_port, - is_conn_secure)) { + local_endpoint.address())) { return false; } @@ -84,9 +66,11 @@ std::string to_log_string(const T& req, size_t max_size = 1024) { // use the Curiously Recurring Template Pattern so that // the same code works with both regular TCP sockets and UNIX sockets -template -class beast_http_session : public detail::abstract_conn { -protected: +template +class beast_http_session : public detail::abstract_conn, + public std::enable_shared_from_this> { + Socket socket_; + api_category_set categories_; beast::flat_buffer buffer_; // time points for timeout measurement and perf metrics @@ -99,8 +83,9 @@ class beast_http_session : public detail::abstract_conn { // HTTP response object std::optional> res_; - std::shared_ptr plugin_state_; + http_plugin_state& plugin_state_; std::string remote_endpoint_; + std::string local_address_; // whether response should be sent back to client when an exception occurs bool is_send_exception_response_ = true; @@ -127,11 +112,12 @@ class beast_http_session : public detail::abstract_conn { res_->version(req.version()); res_->set(http::field::content_type, "application/json"); res_->keep_alive(req.keep_alive()); - if(plugin_state_->server_header.size()) - res_->set(http::field::server, plugin_state_->server_header); + if(plugin_state_.server_header.size()) + res_->set(http::field::server, plugin_state_.server_header); // Request path must be absolute and not contain "..". 
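
As an aside on the allow_host() signature change above: since http-alias entries now have their ports stripped with fc::split_host_port at configuration time, validating a request reduces to comparing the host portion of the Host header against a set of bare hostnames. A rough, self-contained illustration follows; host_allowed and its inline port-stripping are assumptions made for the sketch, not the plugin's exact host_is_valid logic.

#include <set>
#include <string>
#include <string_view>

bool host_allowed(std::string_view host_header, const std::set<std::string>& valid_hosts) {
   // Strip a trailing ":port" unless the last colon sits inside a bracketed
   // IPv6 literal: "example.net:8888" -> "example.net", but "[::1]" stays whole.
   auto colon    = host_header.rfind(':');
   auto rbracket = host_header.rfind(']');
   bool has_port = colon != std::string_view::npos &&
                   (rbracket == std::string_view::npos || rbracket < colon);
   std::string_view host = has_port ? host_header.substr(0, colon) : host_header;
   return valid_hosts.count(std::string(host)) != 0;
}
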
if(req.target().empty() || req.target()[0] != '/' || req.target().find("..") != beast::string_view::npos) { + fc_dlog( plugin_state_.get_logger(), "Return bad_request: ${target}", ("target", std::string(req.target())) ); error_results results{static_cast(http::status::bad_request), "Illegal request-target"}; send_response( fc::json::to_string( results, fc::time_point::maximum() ), static_cast(http::status::bad_request) ); @@ -139,23 +125,24 @@ } try { - if(!derived().allow_host(req)) { + if(!allow_host(req)) { + fc_dlog( plugin_state_.get_logger(), "bad host: ${HOST}", ("HOST", std::string(req["host"]))); error_results results{static_cast(http::status::bad_request), "Disallowed HTTP HOST header in the request"}; send_response( fc::json::to_string( results, fc::time_point::maximum() ), static_cast(http::status::bad_request) ); return; } - if(!plugin_state_->access_control_allow_origin.empty()) { - res_->set("Access-Control-Allow-Origin", plugin_state_->access_control_allow_origin); + if(!plugin_state_.access_control_allow_origin.empty()) { + res_->set("Access-Control-Allow-Origin", plugin_state_.access_control_allow_origin); } - if(!plugin_state_->access_control_allow_headers.empty()) { - res_->set("Access-Control-Allow-Headers", plugin_state_->access_control_allow_headers); + if(!plugin_state_.access_control_allow_headers.empty()) { + res_->set("Access-Control-Allow-Headers", plugin_state_.access_control_allow_headers); } - if(!plugin_state_->access_control_max_age.empty()) { - res_->set("Access-Control-Max-Age", plugin_state_->access_control_max_age); + if(!plugin_state_.access_control_max_age.empty()) { + res_->set("Access-Control-Max-Age", plugin_state_.access_control_max_age); } - if(plugin_state_->access_control_allow_credentials) { + if(plugin_state_.access_control_allow_credentials) { res_->set("Access-Control-Allow-Credentials", "true"); } @@ -165,28 +152,35 @@ return; } - fc_dlog( plugin_state_->logger, "Request: ${ep} ${r}", + fc_dlog( plugin_state_.get_logger(), "Request: ${ep} ${r}", ("ep", remote_endpoint_)("r", to_log_string(req)) ); std::string resource = std::string(req.target()); // look for the URL handler to handle this resource - auto handler_itr = plugin_state_->url_handlers.find(resource); - if(handler_itr != plugin_state_->url_handlers.end()) { - if(plugin_state_->logger.is_enabled(fc::log_level::all)) - plugin_state_->logger.log(FC_LOG_MESSAGE(all, "resource: ${ep}", ("ep", resource))); + auto handler_itr = plugin_state_.url_handlers.find(resource); + if(handler_itr != plugin_state_.url_handlers.end() && categories_.contains(handler_itr->second.category)) { + if(plugin_state_.get_logger().is_enabled(fc::log_level::all)) + plugin_state_.get_logger().log(FC_LOG_MESSAGE(all, "resource: ${ep}", ("ep", resource))); std::string body = req.body(); auto content_type = handler_itr->second.content_type; set_content_type_header(content_type); - if (plugin_state_->update_metrics) - plugin_state_->update_metrics({resource}); + if (plugin_state_.update_metrics) + plugin_state_.update_metrics({resource}); - handler_itr->second.fn(derived().shared_from_this(), + handler_itr->second.fn(this->shared_from_this(), std::move(resource), std::move(body), - make_http_response_handler(plugin_state_, derived().shared_from_this(), content_type)); + make_http_response_handler(plugin_state_, this->shared_from_this(), content_type)); + } else if (resource == 
"/v1/node/get_supported_apis") { + http_plugin::get_supported_apis_result result; + for (const auto& handler : plugin_state_.url_handlers) { + if (categories_.contains(handler.second.category)) + result.apis.push_back(handler.first); + } + send_response(fc::json::to_string(fc::variant(result), fc::time_point::maximum()), 200); } else { - fc_dlog( plugin_state_->logger, "404 - not found: ${ep}", ("ep", resource) ); + fc_dlog( plugin_state_.get_logger(), "404 - not found: ${ep}", ("ep", resource) ); error_results results{static_cast(http::status::not_found), "Not Found", error_results::error_info( fc::exception( FC_LOG_MESSAGE( error, "Unknown Endpoint" ) ), http_plugin::verbose_errors() )}; @@ -210,12 +204,12 @@ class beast_http_session : public detail::abstract_conn { res->result(http::status::unauthorized); continue_state_ = continue_state_t::reject; } - res->set(http::field::server, plugin_state_->server_header); + res->set(http::field::server, plugin_state_.server_header); http::async_write( - derived().stream(), + socket_, *res, - [self = derived().shared_from_this(), res](beast::error_code ec, std::size_t bytes_transferred) { + [self = this->shared_from_this(), res](beast::error_code ec, std::size_t bytes_transferred) { self->on_write(ec, bytes_transferred, false); }); } @@ -233,42 +227,37 @@ class beast_http_session : public detail::abstract_conn { } virtual std::string verify_max_bytes_in_flight(size_t extra_bytes) final { - auto bytes_in_flight_size = plugin_state_->bytes_in_flight.load() + extra_bytes; - if(bytes_in_flight_size > plugin_state_->max_bytes_in_flight) { - fc_dlog(plugin_state_->logger, "429 - too many bytes in flight: ${bytes}", ("bytes", bytes_in_flight_size)); + auto bytes_in_flight_size = plugin_state_.bytes_in_flight.load() + extra_bytes; + if(bytes_in_flight_size > plugin_state_.max_bytes_in_flight) { + fc_dlog(plugin_state_.get_logger(), "429 - too many bytes in flight: ${bytes}", ("bytes", bytes_in_flight_size)); return "Too many bytes in flight: " + std::to_string( bytes_in_flight_size ); } return {}; } virtual std::string verify_max_requests_in_flight() final { - if(plugin_state_->max_requests_in_flight < 0) + if(plugin_state_.max_requests_in_flight < 0) return {}; - auto requests_in_flight_num = plugin_state_->requests_in_flight.load(); - if(requests_in_flight_num > plugin_state_->max_requests_in_flight) { - fc_dlog(plugin_state_->logger, "429 - too many requests in flight: ${requests}", ("requests", requests_in_flight_num)); + auto requests_in_flight_num = plugin_state_.requests_in_flight.load(); + if(requests_in_flight_num > plugin_state_.max_requests_in_flight) { + fc_dlog(plugin_state_.get_logger(), "429 - too many requests in flight: ${requests}", ("requests", requests_in_flight_num)); return "Too many requests in flight: " + std::to_string( requests_in_flight_num ); } return {}; } - // Access the derived class, this is part of - // the Curiously Recurring Template Pattern idiom. 
- Derived& derived() { - return static_cast<Derived&>(*this); - } - public: // shared_from_this() requires default constructor beast_http_session() = default; - beast_http_session(std::shared_ptr<http_plugin_state> plugin_state, std::string remote_endpoint) - : plugin_state_(std::move(plugin_state)), - remote_endpoint_(std::move(remote_endpoint)) { - plugin_state_->requests_in_flight += 1; + beast_http_session(Socket&& socket, http_plugin_state& plugin_state, std::string remote_endpoint, + api_category_set categories, const std::string& local_address) + : socket_(std::move(socket)), categories_(categories), plugin_state_(plugin_state), + remote_endpoint_(std::move(remote_endpoint)), local_address_(local_address) { + plugin_state_.requests_in_flight += 1; req_parser_.emplace(); - req_parser_->body_limit(plugin_state_->max_body_size); + req_parser_->body_limit(plugin_state_.max_body_size); res_.emplace(); session_begin_ = steady_clock::now(); @@ -280,14 +269,14 @@ class beast_http_session : public detail::abstract_conn { virtual ~beast_http_session() { is_send_exception_response_ = false; - plugin_state_->requests_in_flight -= 1; - if(plugin_state_->logger.is_enabled(fc::log_level::all)) { + plugin_state_.requests_in_flight -= 1; + if(plugin_state_.get_logger().is_enabled(fc::log_level::all)) { auto session_time = steady_clock::now() - session_begin_; auto session_time_us = std::chrono::duration_cast<std::chrono::microseconds>(session_time).count(); - plugin_state_->logger.log(FC_LOG_MESSAGE(all, "session time ${t}", ("t", session_time_us))); - plugin_state_->logger.log(FC_LOG_MESSAGE(all, " read ${t}", ("t", read_time_us_))); - plugin_state_->logger.log(FC_LOG_MESSAGE(all, " handle ${t}", ("t", handle_time_us_))); - plugin_state_->logger.log(FC_LOG_MESSAGE(all, " write ${t}", ("t", write_time_us_))); + plugin_state_.get_logger().log(FC_LOG_MESSAGE(all, "session time ${t}", ("t", session_time_us))); + plugin_state_.get_logger().log(FC_LOG_MESSAGE(all, " read ${t}", ("t", read_time_us_))); + plugin_state_.get_logger().log(FC_LOG_MESSAGE(all, " handle ${t}", ("t", handle_time_us_))); + plugin_state_.get_logger().log(FC_LOG_MESSAGE(all, " write ${t}", ("t", write_time_us_))); } } @@ -296,10 +285,10 @@ class beast_http_session : public detail::abstract_conn { // Read a request http::async_read_header( - derived().stream(), + socket_, buffer_, *req_parser_, - [self = derived().shared_from_this()](beast::error_code ec, std::size_t bytes_transferred) { + [self = this->shared_from_this()](beast::error_code ec, std::size_t bytes_transferred) { self->on_read_header(ec, bytes_transferred); }); } @@ -307,9 +296,9 @@ class beast_http_session : public detail::abstract_conn { void on_read_header(beast::error_code ec, std::size_t /* bytes_transferred */) { if(ec) { if(ec == http::error::end_of_stream) // other side closed the connection - return derived().do_eof(); + return do_eof(); - return fail(ec, "read_header", plugin_state_->logger, "closing connection"); + return fail(ec, "read_header", plugin_state_.get_logger(), "closing connection"); } // Check for the Expect field value @@ -317,7 +306,7 @@ class beast_http_session : public detail::abstract_conn { bool do_continue = true; auto sv = req_parser_->get()[http::field::content_length]; if (uint64_t sz; !sv.empty() && std::from_chars(sv.data(), sv.data() + sv.size(), sz).ec == std::errc() && - sz > plugin_state_->max_body_size) { + sz > plugin_state_.max_body_size) { do_continue = false; } send_100_continue_response(do_continue); @@ -331,10 +320,10 @@ class beast_http_session : public detail::abstract_conn {
void do_read() { // Read a request http::async_read( - derived().stream(), + socket_, buffer_, *req_parser_, - [self = derived().shared_from_this()](beast::error_code ec, std::size_t bytes_transferred) { + [self = this->shared_from_this()](beast::error_code ec, std::size_t bytes_transferred) { self->on_read(ec, bytes_transferred); }); } @@ -347,9 +336,9 @@ class beast_http_session : public detail::abstract_conn { // on another read. If the client disconnects, we may get // http::error::end_of_stream or asio::error::connection_reset. if(ec == http::error::end_of_stream || ec == asio::error::connection_reset) - return derived().do_eof(); + return do_eof(); - return fail(ec, "read", plugin_state_->logger, "closing connection"); + return fail(ec, "read", plugin_state_.get_logger(), "closing connection"); } auto req = req_parser_->release(); @@ -368,7 +357,7 @@ class beast_http_session : public detail::abstract_conn { boost::ignore_unused(bytes_transferred); if(ec) { - return fail(ec, "write", plugin_state_->logger, "closing connection"); + return fail(ec, "write", plugin_state_.get_logger(), "closing connection"); } auto dt = steady_clock::now() - write_begin_; @@ -377,7 +366,7 @@ class beast_http_session : public detail::abstract_conn { if(close) { // This means we should close the connection, usually because // the response indicated the "Connection: close" semantic. - return derived().do_eof(); + return do_eof(); } // create a new response object @@ -393,7 +382,7 @@ class beast_http_session : public detail::abstract_conn { case continue_state_t::reject: // request body too large. After issuing 401 response, close connection continue_state_ = continue_state_t::none; - derived().do_eof(); + do_eof(); break; default: @@ -401,7 +390,7 @@ class beast_http_session : public detail::abstract_conn { // create a new parser to clear state req_parser_.emplace(); - req_parser_->body_limit(plugin_state_->max_body_size); + req_parser_->body_limit(plugin_state_.max_body_size); // Read another request do_read_header(); @@ -416,26 +405,26 @@ class beast_http_session : public detail::abstract_conn { throw; } catch(const fc::exception& e) { err_str = e.to_detail_string(); - fc_elog(plugin_state_->logger, "fc::exception: ${w}", ("w", err_str)); + fc_elog(plugin_state_.get_logger(), "fc::exception: ${w}", ("w", err_str)); if( is_send_exception_response_ ) { error_results results{static_cast<uint16_t>(http::status::internal_server_error), "Internal Service Error", error_results::error_info( e, http_plugin::verbose_errors() )}; - err_str = fc::json::to_string( results, fc::time_point::now() + plugin_state_->max_response_time ); + err_str = fc::json::to_string( results, fc::time_point::now().safe_add(plugin_state_.max_response_time) ); } } catch(std::exception& e) { err_str = e.what(); - fc_elog(plugin_state_->logger, "std::exception: ${w}", ("w", err_str)); + fc_elog(plugin_state_.get_logger(), "std::exception: ${w}", ("w", err_str)); if( is_send_exception_response_ ) { error_results results{static_cast<uint16_t>(http::status::internal_server_error), "Internal Service Error", error_results::error_info( fc::exception( FC_LOG_MESSAGE( error, err_str ) ), http_plugin::verbose_errors() )}; - err_str = fc::json::to_string( results, fc::time_point::now() + plugin_state_->max_response_time ); + err_str = fc::json::to_string( results, fc::time_point::now().safe_add(plugin_state_.max_response_time) ); } } catch(...)
{ err_str = "Unknown exception"; - fc_elog(plugin_state_->logger, err_str); + fc_elog(plugin_state_.get_logger(), err_str); if( is_send_exception_response_ ) { error_results results{static_cast<uint16_t>(http::status::internal_server_error), "Internal Service Error", @@ -446,10 +435,10 @@ class beast_http_session : public detail::abstract_conn { } } } catch (fc::timeout_exception& e) { - fc_elog( plugin_state_->logger, "Timeout exception ${te} attempting to handle exception: ${e}", ("te", e.to_detail_string())("e", err_str) ); + fc_elog( plugin_state_.get_logger(), "Timeout exception ${te} attempting to handle exception: ${e}", ("te", e.to_detail_string())("e", err_str) ); err_str = R"xxx({"message": "Internal Server Error"})xxx"; } catch (...) { - fc_elog( plugin_state_->logger, "Exception attempting to handle exception: ${e}", ("e", err_str) ); + fc_elog( plugin_state_.get_logger(), "Exception attempting to handle exception: ${e}", ("e", err_str) ); err_str = R"xxx({"message": "Internal Server Error"})xxx"; } @@ -460,16 +449,16 @@ class beast_http_session : public detail::abstract_conn { res_->set(http::field::server, BOOST_BEAST_VERSION_STRING); send_response(std::move(err_str), static_cast<unsigned int>(http::status::internal_server_error)); - derived().do_eof(); + do_eof(); } } void increment_bytes_in_flight(size_t sz) { - plugin_state_->bytes_in_flight += sz; + plugin_state_.bytes_in_flight += sz; } void decrement_bytes_in_flight(size_t sz) { - plugin_state_->bytes_in_flight -= sz; + plugin_state_.bytes_in_flight -= sz; } virtual void send_response(std::string&& json, unsigned int code) final { @@ -484,16 +473,16 @@ class beast_http_session : public detail::abstract_conn { res_->prepare_payload(); // Determine if we should close the connection after - bool close = !(plugin_state_->keep_alive) || res_->need_eof(); + bool close = !(plugin_state_.keep_alive) || res_->need_eof(); - fc_dlog( plugin_state_->logger, "Response: ${ep} ${b}", + fc_dlog( plugin_state_.get_logger(), "Response: ${ep} ${b}", ("ep", remote_endpoint_)("b", to_log_string(*res_)) ); // Write the response http::async_write( - derived().stream(), + socket_, *res_, - [self = derived().shared_from_this(), payload_size, close](beast::error_code ec, std::size_t bytes_transferred) { + [self = this->shared_from_this(), payload_size, close](beast::error_code ec, std::size_t bytes_transferred) { self->decrement_bytes_in_flight(payload_size); self->on_write(ec, bytes_transferred, close); }); @@ -502,103 +491,34 @@ class beast_http_session : public detail::abstract_conn { void run_session() { if(auto error_str = verify_max_requests_in_flight(); !error_str.empty()) { send_busy_response(std::move(error_str)); - return derived().do_eof(); + return do_eof(); } - derived().run(); - } -};// end class beast_http_session --// Handles a plain HTTP connection -class plain_session - : public beast_http_session<plain_session>, - public std::enable_shared_from_this<plain_session> { - tcp_socket_t socket_; - -public: - // Create the session - plain_session( - tcp_socket_t socket, - std::shared_ptr<http_plugin_state> plugin_state, - std::string remote_endpoint) - : beast_http_session(std::move(plugin_state), std::move(remote_endpoint)), socket_(std::move(socket)) {} - - tcp_socket_t& stream() { return socket_; } - tcp_socket_t& socket() { return socket_; } - - // Start the asynchronous operation - void run() { do_read_header(); } void do_eof() { is_send_exception_response_ = false; try { - // Send a TCP shutdown + // Send a shutdown signal beast::error_code ec; - socket_.shutdown(tcp::socket::shutdown_send, ec); + 
socket_.shutdown(Socket::shutdown_send, ec); // At this point the connection is closed gracefully } catch(...) { handle_exception(); } } - bool is_secure() { return false; }; - - bool allow_host(const http::request& req) { - return eosio::allow_host(req, *this, plugin_state_); - } - - static constexpr auto name() { - return "plain_session"; - } -};// end class plain_session - -// unix domain sockets -class unix_socket_session - : public std::enable_shared_from_this, - public beast_http_session { - - // The socket used to communicate with the client. - stream_protocol::socket socket_; - -public: - unix_socket_session(stream_protocol::socket sock, - std::shared_ptr plugin_state, - std::string remote_endpoint) - : beast_http_session(std::move(plugin_state), std::move(remote_endpoint)), socket_(std::move(sock)) {} - - virtual ~unix_socket_session() = default; bool allow_host(const http::request& req) { - // always allow local hosts - return true; - } - - void do_eof() { - is_send_exception_response_ = false; - try { - // Send a shutdown signal - boost::system::error_code ec; - socket_.shutdown(stream_protocol::socket::shutdown_send, ec); - // At this point the connection is closed gracefully - } catch(...) { - handle_exception(); + if constexpr(std::is_same_v) { + const std::string host_str(req["host"]); + if (host_str != local_address_) + return eosio::allow_host(host_str, socket_, plugin_state_); } + return true; } - bool is_secure() { return false; }; - - void run() { - do_read_header(); - } - - stream_protocol::socket& stream() { return socket_; } - stream_protocol::socket& socket() { return socket_; } - - static constexpr auto name() { - return "unix_socket_session"; - } -};// end class unix_socket_session +}; // end class beast_http_session }// namespace eosio diff --git a/plugins/http_plugin/include/eosio/http_plugin/common.hpp b/plugins/http_plugin/include/eosio/http_plugin/common.hpp index d39e257adc..5f12134086 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/common.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/common.hpp @@ -2,18 +2,12 @@ #include // for thread pool #include -#include - -#include -#include -#include -#include -#include -#include #include #include #include +#include +#include #include #include @@ -33,6 +27,14 @@ #include #include +#include +#include +#include +#include +#include +#include + + namespace eosio { static uint16_t const uri_default_port = 80; /// Default port for wss:// @@ -70,6 +72,7 @@ using abstract_conn_ptr = std::shared_ptr; using internal_url_handler_fn = std::function; struct internal_url_handler { internal_url_handler_fn fn; + api_category category; http_content_type content_type = http_content_type::json; }; /** @@ -134,8 +137,11 @@ struct http_plugin_state { fc::logger& logger; std::function update_metrics; + fc::logger& get_logger() { return logger; } + explicit http_plugin_state(fc::logger& log) : logger(log) {} + }; /** @@ -146,29 +152,24 @@ struct http_plugin_state { * @param session_ptr - beast_http_session object on which to invoke send_response * @return lambda suitable for url_response_callback */ -auto make_http_response_handler(std::shared_ptr plugin_state, detail::abstract_conn_ptr session_ptr, http_content_type content_type) { - return [plugin_state{std::move(plugin_state)}, - session_ptr{std::move(session_ptr)}, content_type](int code, fc::time_point deadline, std::optional response) { +inline auto make_http_response_handler(http_plugin_state& plugin_state, detail::abstract_conn_ptr session_ptr, http_content_type 
content_type) { + return [&plugin_state, + session_ptr{std::move(session_ptr)}, content_type](int code, std::optional response) { auto payload_size = detail::in_flight_sizeof(response); if(auto error_str = session_ptr->verify_max_bytes_in_flight(payload_size); !error_str.empty()) { session_ptr->send_busy_response(std::move(error_str)); return; } - auto start = fc::time_point::now(); - if (deadline == fc::time_point::maximum()) { // no caller supplied deadline so use http configured deadline - deadline = start + plugin_state->max_response_time; - } - - plugin_state->bytes_in_flight += payload_size; + plugin_state.bytes_in_flight += payload_size; // post back to an HTTP thread to allow the response handler to be called from any thread - boost::asio::post(plugin_state->thread_pool.get_executor(), - [plugin_state, session_ptr, code, deadline, start, payload_size, response = std::move(response), content_type]() { + boost::asio::post(plugin_state.thread_pool.get_executor(), + [&plugin_state, session_ptr, code, payload_size, response = std::move(response), content_type]() { try { - plugin_state->bytes_in_flight -= payload_size; + plugin_state.bytes_in_flight -= payload_size; if (response.has_value()) { - std::string json = (content_type == http_content_type::plaintext) ? response->as_string() : fc::json::to_string(*response, deadline + (fc::time_point::now() - start)); + std::string json = (content_type == http_content_type::plaintext) ? response->as_string() : fc::json::to_string(*response, fc::time_point::maximum()); if (auto error_str = session_ptr->verify_max_bytes_in_flight(json.size()); error_str.empty()) session_ptr->send_response(std::move(json), code); else @@ -184,30 +185,21 @@ auto make_http_response_handler(std::shared_ptr plugin_state, } -bool host_port_is_valid(const http_plugin_state& plugin_state, - const std::string& header_host_port, - const string& endpoint_local_host_port) { - return !plugin_state.validate_host || header_host_port == endpoint_local_host_port || plugin_state.valid_hosts.find(header_host_port) != plugin_state.valid_hosts.end(); -} - -bool host_is_valid(const http_plugin_state& plugin_state, - const std::string& host, - const string& endpoint_local_host_port, - bool secure) { +inline bool host_is_valid(const http_plugin_state& plugin_state, + const std::string& header_host_port, + const asio::ip::address& addr) { if(!plugin_state.validate_host) { return true; } - // normalise the incoming host so that it always has the explicit port - static auto has_port_expr = std::regex("[^:]:[0-9]+$");/// ends in : without a preceeding colon which implies ipv6 - if(std::regex_search(host, has_port_expr)) { - return host_port_is_valid(plugin_state, host, endpoint_local_host_port); - } else { - // according to RFC 2732 ipv6 addresses should always be enclosed with brackets so we shouldn't need to special case here - return host_port_is_valid(plugin_state, - host + ":" + std::to_string(secure ? 
uri_default_secure_port : uri_default_port), - endpoint_local_host_port); + auto [hostname, port] = fc::split_host_port(header_host_port); + boost::system::error_code ec; + auto header_addr = boost::asio::ip::make_address(hostname, ec); + if (ec) + return plugin_state.valid_hosts.count(hostname); + if (header_addr.is_v4() && addr.is_v6()) { + header_addr = boost::asio::ip::address_v6::v4_mapped(header_addr.to_v4()); } + return header_addr == addr; } - }// end namespace eosio diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index 5de6eb3efa..ac308fee03 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -2,10 +2,10 @@ #include #include +#include #include #include #include - namespace eosio { using namespace appbase; @@ -13,9 +13,9 @@ namespace eosio { * @brief A callback function provided to a URL handler to * allow it to specify the HTTP response code and body * - * Arguments: response_code, deadline, response_body + * Arguments: response_code, response_body */ - using url_response_callback = std::function)>; + using url_response_callback = std::function)>; /** * @brief Callback type for a URL handler @@ -36,7 +36,12 @@ namespace eosio { * a handler. The URL is the path on the web server that triggers the * call, and the handler is the function which implements the API call */ - using api_entry = std::pair; + struct api_entry { + string path; + api_category category; + url_handler handler; + }; + using api_description = std::vector; enum class http_content_type { @@ -54,6 +59,7 @@ namespace eosio { uint16_t default_http_port{0}; //If set, a Server header will be added to the HTTP reply with this value string server_header; + bool support_categories = true; }; /** @@ -88,16 +94,16 @@ namespace eosio { void plugin_shutdown(); void handle_sighup() override; - void add_handler(const string& url, const url_handler&, appbase::exec_queue q, int priority = appbase::priority::medium_low, http_content_type content_type = http_content_type::json); - void add_api(const api_description& api, appbase::exec_queue q, int priority = appbase::priority::medium_low, http_content_type content_type = http_content_type::json) { - for (const auto& call : api) - add_handler(call.first, call.second, q, priority, content_type); + void add_handler(api_entry&& entry, appbase::exec_queue q, int priority = appbase::priority::medium_low, http_content_type content_type = http_content_type::json); + void add_api(api_description&& api, appbase::exec_queue q, int priority = appbase::priority::medium_low, http_content_type content_type = http_content_type::json) { + for (auto& call : api) + add_handler(std::move(call), q, priority, content_type); } - void add_async_handler(const string& url, const url_handler& handler, http_content_type content_type = http_content_type::json); - void add_async_api(const api_description& api, http_content_type content_type = http_content_type::json) { - for (const auto& call : api) - add_async_handler(call.first, call.second, content_type); + void add_async_handler(api_entry&& entry, http_content_type content_type = http_content_type::json); + void add_async_api(api_description&& api, http_content_type content_type = http_content_type::json) { + for (auto& call : api) + add_async_handler(std::move(call), content_type); } // standard exception handling for api handlers @@ -105,8 +111,7 @@ namespace eosio { void 
post_http_thread_pool(std::function f); - bool is_on_loopback() const; - bool is_secure() const; + bool is_on_loopback(api_category category) const; static bool verbose_errors(); @@ -114,8 +119,6 @@ namespace eosio { vector apis; }; - get_supported_apis_result get_supported_apis()const; - /// @return the configured http-max-response-time-ms fc::microseconds get_max_response_time()const; @@ -127,6 +130,7 @@ namespace eosio { void register_update_metrics(std::function&& fun); + std::atomic& listening(); private: std::shared_ptr my; }; diff --git a/plugins/http_plugin/include/eosio/http_plugin/macros.hpp b/plugins/http_plugin/include/eosio/http_plugin/macros.hpp index 82cd8cddcd..b7848e870c 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/macros.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/macros.hpp @@ -1,21 +1,14 @@ #pragma once -struct async_result_visitor : public fc::visitor { - template - fc::variant operator()(const T& v) const { - return fc::variant(v); - } -}; - -#define CALL_ASYNC_WITH_400(api_name, api_handle, api_namespace, call_name, call_result, http_resp_code, params_type) \ +#define CALL_ASYNC_WITH_400(api_name, category, api_handle, api_namespace, call_name, call_result, http_resp_code, params_type) \ { std::string("/v1/" #api_name "/" #call_name), \ + api_category::category, \ [api_handle, &_http_plugin](string&&, string&& body, url_response_callback&& cb) mutable { \ - auto deadline = api_handle.start(); \ + api_handle.start(); \ try { \ auto params = parse_params(body); \ - FC_CHECK_DEADLINE(deadline); \ using http_fwd_t = std::function()>; \ - api_handle.call_name( std::move(params), \ + api_handle.call_name( std::move(params), /* called on main application thread */ \ [&_http_plugin, cb=std::move(cb), body=std::move(body)] \ (const chain::next_function_variant& result) mutable { \ if (std::holds_alternative(result)) { \ @@ -25,8 +18,7 @@ struct async_result_visitor : public fc::visitor { http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ } else if (std::holds_alternative(result)) { \ - cb(http_resp_code, fc::time_point::maximum(), \ - fc::variant(std::get(std::move(result)))); \ + cb(http_resp_code, fc::variant(std::get(std::move(result)))); \ } else { \ /* api returned a function to be processed on the http_plugin thread pool */ \ assert(std::holds_alternative(result)); \ @@ -41,8 +33,7 @@ struct async_result_visitor : public fc::visitor { http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ } else { \ - cb(resp_code, fc::time_point::maximum(), \ - fc::variant(std::get(std::move(result)))) ; \ + cb(resp_code, fc::variant(std::get(std::move(result)))); \ } \ }); \ } \ @@ -57,16 +48,16 @@ struct async_result_visitor : public fc::visitor { // call an API which returns either fc::exception_ptr, or a function to be posted on the http thread pool // for execution (typically doing the final serialization) // ------------------------------------------------------------------------------------------------------ -#define CALL_WITH_400_POST(api_name, api_handle, api_namespace, call_name, call_result, http_resp_code, params_type) \ +#define CALL_WITH_400_POST(api_name, category, api_handle, api_namespace, call_name, call_result, http_resp_code, params_type) \ {std::string("/v1/" #api_name "/" #call_name), \ + api_category::category, \ [api_handle, &_http_plugin](string&&, string&& body, url_response_callback&& cb) { \ auto deadline = api_handle.start(); \ try { \ auto params = parse_params(body); \ - 
FC_CHECK_DEADLINE(deadline); \ using http_fwd_t = std::function()>; \ + /* called on main application thread */ \ http_fwd_t http_fwd(api_handle.call_name(std::move(params), deadline)); \ - FC_CHECK_DEADLINE(deadline); \ _http_plugin.post_http_thread_pool([resp_code=http_resp_code, cb=std::move(cb), \ body=std::move(body), \ http_fwd = std::move(http_fwd)]() { \ @@ -78,8 +69,7 @@ struct async_result_visitor : public fc::visitor { http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ } else { \ - cb(resp_code, fc::time_point::maximum(), \ - fc::variant(std::get(std::move(result)))) ; \ + cb(resp_code, fc::variant(std::get(std::move(result)))); \ } \ }); \ } catch (...) { \ diff --git a/plugins/http_plugin/tests/unit_tests.cpp b/plugins/http_plugin/tests/unit_tests.cpp index b7a801334f..15fc50bef9 100644 --- a/plugins/http_plugin/tests/unit_tests.cpp +++ b/plugins/http_plugin/tests/unit_tests.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -7,6 +8,8 @@ #include #include +#include +#include #define BOOST_TEST_MODULE http_plugin unit tests #include @@ -33,29 +36,32 @@ using tcp = net::ip::tcp; // from // ------------------------------------------------------------------------- class Db { -public: + public: void add_api(http_plugin& p) { p.add_api({ { std::string("/hello"), + api_category::node, [&](string&&, string&& body, url_response_callback&& cb) { - cb(200, fc::time_point::maximum(), fc::variant("world!")); + cb(200, fc::variant("world!")); } }, { std::string("/echo"), + api_category::node, [&](string&&, string&& body, url_response_callback&& cb) { - cb(200, fc::time_point::maximum(), fc::variant(body)); + cb(200, fc::variant(body)); } }, { std::string("/check_ones"), // returns "yes" if body only has only '1' chars, "no" otherwise + api_category::node, [&](string&&, string&& body, url_response_callback&& cb) { bool ok = std::all_of(body.begin(), body.end(), [](char c) { return c == '1'; }); - cb(200, fc::time_point::maximum(), fc::variant(ok ? string("yes") : string("no"))); + cb(200, fc::variant(ok ? 
string("yes") : string("no"))); } - }, + }, }, appbase::exec_queue::read_write); } -private: + private: }; // -------------------------------------------------------------------------- @@ -128,7 +134,7 @@ struct Expect100ContinueProtocol : public ProtocolCommon req.set(http::field::expect, "100-continue"); req.body() = body; req.prepare_payload(); - + http::request_serializer sr{req}; beast::error_code ec; http::write_header(this->stream, sr, ec); @@ -136,7 +142,7 @@ struct Expect100ContinueProtocol : public ProtocolCommon BOOST_CHECK_MESSAGE(expect_fail, "write_header failed"); return false; } - + { http::response res {}; beast::flat_buffer buffer; @@ -159,7 +165,7 @@ struct Expect100ContinueProtocol : public ProtocolCommon // Server is OK with the request, send the body http::write(this->stream, sr, ec); return !ec; - } + } return http::write(this->stream, req) != 0; } catch(std::exception const& e) @@ -217,50 +223,98 @@ void run_test(Protocol& p, size_t max_body_size) test_str.resize(max_body_size + 1, '1'); check_request(p, "/check_ones", test_str.c_str(), {}); // we don't expect a response } -} +} + +namespace eosio { +class chain_api_plugin : public appbase::plugin { + public: + APPBASE_PLUGIN_REQUIRES(); + virtual void set_program_options(options_description& cli, options_description& cfg) override {} + void plugin_initialize(const variables_map& options) {} + void plugin_startup() {} + void plugin_shutdown() {} +}; + +class net_api_plugin : public appbase::plugin { + public: + APPBASE_PLUGIN_REQUIRES(); + virtual void set_program_options(options_description& cli, options_description& cfg) override {} + void plugin_initialize(const variables_map& options) {} + void plugin_startup() {} + void plugin_shutdown() {} +}; + +class producer_api_plugin : public appbase::plugin { + public: + APPBASE_PLUGIN_REQUIRES(); + virtual void set_program_options(options_description& cli, options_description& cfg) override {} + void plugin_initialize(const variables_map& options) {} + void plugin_startup() {} + void plugin_shutdown() {} +}; + +static auto _chain_api_plugin = application::register_plugin(); +static auto _net_api_plugin = application::register_plugin(); +static auto _producer_api_plugin = application::register_plugin(); +} // namespace eosio + +struct http_plugin_test_fixture { + appbase::scoped_app app; + std::thread app_thread; + + http_plugin* init(std::initializer_list args) { + if (app->initialize(args.size(), const_cast(args.begin()))) { + auto plugin = app->find_plugin(); + std::atomic& listening_or_failed = plugin->listening(); + app_thread = std::thread([&]() { + try { + app->startup(); + app->exec(); + } catch (...) 
{ + plugin = nullptr; + listening_or_failed.store(true); + } + }); + + while (!listening_or_failed.load()) { + using namespace std::chrono_literals; + std::this_thread::sleep_for(10ms); + } + return plugin; + } + return nullptr; + } + + ~http_plugin_test_fixture() { + if (app_thread.joinable()) { + app->quit(); + app_thread.join(); + } + } +}; // ------------------------------------------------------------------------- // ------------------------------------------------------------------------- -BOOST_AUTO_TEST_CASE(http_plugin_unit_tests) -{ - appbase::scoped_app app; +BOOST_FIXTURE_TEST_CASE(http_plugin_unit_tests, http_plugin_test_fixture) { - - const uint16_t default_port { 8888 }; - const char* port = "8888"; - const char* host = "127.0.0.1"; - - http_plugin::set_defaults({ - .default_unix_socket_path = "", - .default_http_port = default_port, - .server_header = "/" - }); - - const char* argv[] = { bu::framework::current_test_case().p_name->c_str(), - "--http-validate-host", "false", - "--http-threads", "4", - "--http-max-response-time-ms", "50" }; - - BOOST_CHECK(app->initialize(sizeof(argv) / sizeof(char*), const_cast(argv))); + const uint16_t default_port{8888}; + const char* port = "8888"; + const char* host = "127.0.0.1"; - std::promise plugin_promise; - std::future plugin_fut = plugin_promise.get_future(); - std::thread app_thread( [&]() { - app->startup(); - plugin_promise.set_value(app->get_plugin()); - app->exec(); - } ); + http_plugin::set_defaults({.default_unix_socket_path = "", .default_http_port = default_port, .server_header = "/"}); - auto http_plugin = plugin_fut.get(); - BOOST_CHECK(http_plugin.get_state() == abstract_plugin::started); + auto http_plugin = + init({bu::framework::current_test_case().p_name->c_str(), "--plugin", "eosio::http_plugin", + "--http-validate-host", "false", "--http-threads", "4", "--http-max-response-time-ms", "50"}); + + BOOST_REQUIRE(http_plugin); + BOOST_CHECK(http_plugin->get_state() == abstract_plugin::started); - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - Db db; - db.add_api(http_plugin); + db.add_api(*http_plugin); + + size_t max_body_size = http_plugin->get_max_body_size(); - size_t max_body_size = http_plugin.get_max_body_size(); - try { net::io_context ioc; @@ -291,9 +345,231 @@ BOOST_AUTO_TEST_CASE(http_plugin_unit_tests) } catch(std::exception const& e) { - std::cerr << "Error: " << e.what() << std::endl; + std::cerr << "Error: " << e.what() << "\n"; + } +} + +class app_log { + std::string result; + int fork_app_and_redirect_stderr(const char* redirect_filename, std::initializer_list args) { + int pid = fork(); + if (pid == 0) { + (void) freopen(redirect_filename, "w", stderr); + bool ret = 0; + try { + appbase::scoped_app app; + ret = app->initialize(args.size(), const_cast(args.begin())); + } catch (...) { + } + fclose(stderr); + exit(ret ? 
0 : 1); + } else { + int chld_state; + waitpid(pid, &chld_state, 0); + BOOST_CHECK(WIFEXITED(chld_state)); + return WEXITSTATUS(chld_state); + } + } + + public: + app_log(std::initializer_list args) { + fc::temp_directory dir; + std::filesystem::path log = dir.path()/"test.stderr"; + BOOST_CHECK(fork_app_and_redirect_stderr(log.c_str(), args)); + std::ifstream file(log.c_str()); + result.assign(std::istreambuf_iterator(file), std::istreambuf_iterator()); + std::filesystem::remove(log); + } + + boost::test_tools::predicate_result contains(const char* str) const { + if (result.find(str) == std::string::npos) { + boost::test_tools::predicate_result res(false); + res.message() << "\nlog result: " << result << "\n"; + return res; + } + return true; } +}; + +BOOST_AUTO_TEST_CASE(invalid_category_addresses) { + + const char* test_name = bu::framework::current_test_case().p_name->c_str(); + + BOOST_TEST(app_log({test_name, "--plugin=eosio::http_plugin", "--http-server-address", + "http-category-address", "--http-category-address", "chain_ro,localhost:8889"}) + .contains("--plugin=eosio::chain_api_plugin is required")); + + BOOST_TEST(app_log({test_name, "--plugin=eosio::chain_api_plugin", "--http-category-address", + "chain_ro,localhost:8889"}) + .contains("http-server-address must be set as `http-category-address`")); - app->quit(); - app_thread.join(); + BOOST_TEST(app_log({test_name, "--plugin=eosio::chain_api_plugin", "--http-server-address", + "http-category-address", "--unix-socket-path", "/tmp/tmp.sock", + "--http-category-address", "chain_ro,localhost:8889"}) + .contains("`unix-socket-path` must be left unspecified")); + + BOOST_TEST(app_log({test_name, "--plugin=eosio::chain_api_plugin", "--http-server-address", + "http-category-address", "--http-category-address", "node,localhost:8889"}) + .contains("invalid category name")); + + BOOST_TEST(app_log({test_name, "--plugin=eosio::chain_api_plugin", "--http-server-address", + "http-category-address", "--http-category-address", "chain_ro,127.0.0.1:8889", + "--http-category-address", "chain_rw,localhost:8889"}) + .contains("unable to listen to port 8889")); } + +struct http_response_for { + net::io_context ioc; + http::response response; + http_response_for(const char* addr, const char* path) { + auto [host, port] = fc::split_host_port(addr); + // These objects perform our I/O + tcp::resolver resolver(ioc); + beast::tcp_stream stream(ioc); + + // Look up IP and connect to it + auto const results = resolver.resolve(host, port); + stream.connect(results); + initiate(stream, addr, path); + } + + http_response_for(std::filesystem::path addr, const char* path) { + using unix_stream = beast::basic_stream; + + unix_stream stream(ioc); + stream.connect(addr.c_str()); + initiate(stream, "", path); + } + + template + void initiate(Stream&& stream, const char* addr, const char* path) { + int http_version = 11; + http::request req{http::verb::post, path, http_version}; + if (addr) + req.set(http::field::host, addr); + req.set(http::field::user_agent, BOOST_BEAST_VERSION_STRING); + BOOST_CHECK(http::write(stream, req) != 0); + + beast::flat_buffer buffer; + http::read(stream, buffer, response); + } + + http::status status() const { return response.result(); } + + std::string body() const { return beast::buffers_to_string(response.body().data()); } +}; + +BOOST_FIXTURE_TEST_CASE(valid_category_addresses, http_plugin_test_fixture) { + fc::temp_directory dir; + auto data_dir = dir.path() / "data"; + + // clang-format off + auto http_plugin = 
init({bu::framework::current_test_case().p_name->c_str(), + "--data-dir", data_dir.c_str(), + "--plugin=eosio::chain_api_plugin", + "--plugin=eosio::net_api_plugin", + "--plugin=eosio::producer_api_plugin", + "--http-server-address", "http-category-address", + "--http-category-address", "chain_ro,127.0.0.1:8890", + "--http-category-address", "chain_rw,:8889", + "--http-category-address", "net_ro,127.0.0.1:8890", + "--http-category-address", "net_rw,:8889", + "--http-category-address", "producer_ro,./producer_ro.sock", + "--http-category-address", "producer_rw,../producer_rw.sock" + }); + // clang-format on + + BOOST_REQUIRE(http_plugin); + + http_plugin->add_api({{std::string("/v1/node/hello"), api_category::node, + [&](string&&, string&& body, url_response_callback&& cb) { + cb(200, fc::variant("world!")); + }}, + {std::string("/v1/chain_ro/hello"), api_category::chain_ro, + [&](string&&, string&& body, url_response_callback&& cb) { + cb(200, fc::variant("world!")); + }}, + {std::string("/v1/chain_rw/hello"), api_category::chain_rw, + [&](string&&, string&& body, url_response_callback&& cb) { + cb(200, fc::variant("world!")); + }}, + {std::string("/v1/net_ro/hello"), api_category::net_ro, + [&](string&&, string&& body, url_response_callback&& cb) { + cb(200, fc::variant("world!")); + }}, + {std::string("/v1/net_rw/hello"), api_category::net_rw, + [&](string&&, string&& body, url_response_callback&& cb) { + cb(200, fc::variant("world!")); + }}, + {std::string("/v1/producer_ro/hello"), api_category::producer_ro, + [&](string&&, string&& body, url_response_callback&& cb) { + cb(200, fc::variant("world!")); + }}, + {std::string("/v1/producer_rw/hello"), api_category::producer_rw, + [&](string&&, string&& body, url_response_callback&& cb) { + cb(200, fc::variant("world!")); + }}}, + appbase::exec_queue::read_write); + + BOOST_CHECK(http_plugin->is_on_loopback(api_category::chain_ro)); + BOOST_CHECK(http_plugin->is_on_loopback(api_category::net_ro)); + BOOST_CHECK(http_plugin->is_on_loopback(api_category::producer_ro)); + BOOST_CHECK(http_plugin->is_on_loopback(api_category::producer_rw)); + BOOST_CHECK(!http_plugin->is_on_loopback(api_category::chain_rw)); + BOOST_CHECK(!http_plugin->is_on_loopback(api_category::net_rw)); + + std::string world_string = "\"world!\""; + + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8890", "/v1/node/hello").body(), world_string); + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8889", "/v1/node/hello").body(), world_string); + + bool ip_v6_enabled = [] { + try { + net::io_context ioc; + tcp::socket s(ioc, tcp::endpoint{net::ip::address::from_string("::1"), 9999}); + return true; + } catch (...) 
{ + return false; + } + }(); + + if (ip_v6_enabled) { + BOOST_CHECK_EQUAL(http_response_for("[::1]:8889", "/v1/node/hello").body(), world_string); + } + + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8890", "/v1/chain_ro/hello").body(), world_string); + BOOST_CHECK_EQUAL(http_response_for("localhost:8890", "/v1/chain_ro/hello").status(), http::status::bad_request); + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8890", "/v1/net_ro/hello").body(), world_string); + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8890", "/v1/chain_rw/hello").status(), http::status::not_found); + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8890", "/v1/net_rw/hello").status(), http::status::not_found); + + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8889", "/v1/chain_ro/hello").status(), http::status::not_found); + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8889", "/v1/net_ro/hello").status(), http::status::not_found); + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8889", "/v1/chain_rw/hello").body(), world_string); + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8889", "/v1/net_rw/hello").body(), world_string); + + BOOST_CHECK_EQUAL(http_response_for(data_dir / "./producer_ro.sock", "/v1/producer_ro/hello").body(), world_string); + BOOST_CHECK_EQUAL(http_response_for(data_dir / "../producer_rw.sock", "/v1/producer_rw/hello").body(), world_string); + + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8890", "/v1/node/get_supported_apis").body(), + R"({"apis":["/v1/chain_ro/hello","/v1/net_ro/hello","/v1/node/hello"]})"); + + BOOST_CHECK_EQUAL(http_response_for("127.0.0.1:8889", "/v1/node/get_supported_apis").body(), + R"({"apis":["/v1/chain_rw/hello","/v1/net_rw/hello","/v1/node/hello"]})"); +} + + +bool on_loopback(std::initializer_list args){ + appbase::scoped_app app; + BOOST_REQUIRE(app->initialize(args.size(), const_cast(args.begin()))); + return app->get_plugin().is_on_loopback(api_category::chain_rw); +} + +BOOST_AUTO_TEST_CASE(test_on_loopback) { + BOOST_CHECK(on_loopback({"test", "--plugin=eosio::http_plugin", "--http-server-address", "", "--unix-socket-path=a"})); + BOOST_CHECK(on_loopback({"test", "--plugin=eosio::http_plugin", "--http-server-address", "127.0.0.1:8888"})); + BOOST_CHECK(on_loopback({"test", "--plugin=eosio::http_plugin", "--http-server-address", "localhost:8888"})); + BOOST_CHECK(!on_loopback({"test", "--plugin=eosio::http_plugin", "--http-server-address", ":8888"})); + BOOST_CHECK(!on_loopback({"test", "--plugin=eosio::http_plugin", "--http-server-address", "example.com:8888"})); +} \ No newline at end of file diff --git a/plugins/net_api_plugin/net_api_plugin.cpp b/plugins/net_api_plugin/net_api_plugin.cpp index bbbbec8c07..7c97b4ff61 100644 --- a/plugins/net_api_plugin/net_api_plugin.cpp +++ b/plugins/net_api_plugin/net_api_plugin.cpp @@ -19,12 +19,13 @@ namespace eosio { using namespace eosio; -#define CALL_WITH_400(api_name, api_handle, call_name, INVOKE, http_response_code) \ +#define CALL_WITH_400(api_name, category, api_handle, call_name, INVOKE, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ + api_category::category, \ [&api_handle](string&&, string&& body, url_response_callback&& cb) mutable { \ try { \ INVOKE \ - cb(http_response_code, fc::time_point::maximum(), fc::variant(result)); \ + cb(http_response_code, fc::variant(result)); \ } catch (...) 
{ \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ @@ -54,13 +55,13 @@ void net_api_plugin::plugin_startup() { // lifetime of plugin is lifetime of application auto& net_mgr = app().get_plugin(); app().get_plugin().add_async_api({ - CALL_WITH_400(net, net_mgr, connect, + CALL_WITH_400(net, net_rw, net_mgr, connect, INVOKE_R_R(net_mgr, connect, std::string), 201), - CALL_WITH_400(net, net_mgr, disconnect, + CALL_WITH_400(net, net_rw, net_mgr, disconnect, INVOKE_R_R(net_mgr, disconnect, std::string), 201), - CALL_WITH_400(net, net_mgr, status, + CALL_WITH_400(net, net_ro, net_mgr, status, INVOKE_R_R(net_mgr, status, std::string), 201), - CALL_WITH_400(net, net_mgr, connections, + CALL_WITH_400(net, net_ro, net_mgr, connections, INVOKE_R_V(net_mgr, connections), 201), } ); } @@ -68,11 +69,11 @@ void net_api_plugin::plugin_startup() { void net_api_plugin::plugin_initialize(const variables_map& options) { try { const auto& _http_plugin = app().get_plugin(); - if( !_http_plugin.is_on_loopback()) { + if( !_http_plugin.is_on_loopback(api_category::net_rw)) { wlog( "\n" "**********SECURITY WARNING**********\n" "* *\n" - "* -- Net API -- *\n" + "* -- Net RW API -- *\n" "* - EXPOSED to the LOCAL NETWORK - *\n" "* - USE ONLY ON SECURE NETWORKS! - *\n" "* *\n" diff --git a/plugins/net_plugin/CMakeLists.txt b/plugins/net_plugin/CMakeLists.txt index d204117ff7..ec459d1387 100644 --- a/plugins/net_plugin/CMakeLists.txt +++ b/plugins/net_plugin/CMakeLists.txt @@ -3,6 +3,10 @@ add_library( net_plugin net_plugin.cpp ${HEADERS} ) +if(CMAKE_CXX_COMPILER_ID MATCHES "Clang" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 14.0) + target_compile_options(net_plugin PUBLIC -Wthread-safety) +endif() + target_link_libraries( net_plugin chain_plugin producer_plugin appbase fc ) target_include_directories( net_plugin PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/appbase/include") diff --git a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp index 7f138ef1d7..b5122f80aa 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/auto_bp_peering.hpp @@ -145,7 +145,7 @@ class bp_connection_manager { // Only called from connection strand std::size_t num_established_clients() const { uint32_t num_clients = 0; - self()->for_each_connection([&num_clients](auto&& conn) { + self()->connections.for_each_connection([&num_clients](auto&& conn) { if (established_client_connection(conn)) { ++num_clients; } @@ -158,8 +158,8 @@ class bp_connection_manager { // This should only be called after the first handshake message is received to check if an incoming connection // has exceeded the pre-configured max_client_count limit. 
bool exceeding_connection_limit(Connection* new_connection) const { - return auto_bp_peering_enabled() && self()->max_client_count != 0 && - established_client_connection(new_connection) && num_established_clients() > self()->max_client_count; + return auto_bp_peering_enabled() && self()->connections.get_max_client_count() != 0 && + established_client_connection(new_connection) && num_established_clients() > self()->connections.get_max_client_count(); } // Only called from main thread @@ -182,7 +182,7 @@ class bp_connection_manager { fc_dlog(self()->get_logger(), "pending_downstream_neighbors: ${pending_downstream_neighbors}", ("pending_downstream_neighbors", to_string(pending_downstream_neighbors))); - for (auto neighbor : pending_downstream_neighbors) { self()->connect(config.bp_peer_addresses[neighbor]); } + for (auto neighbor : pending_downstream_neighbors) { self()->connections.connect(config.bp_peer_addresses[neighbor], *self()->p2p_addresses.begin() ); } pending_neighbors = std::move(pending_downstream_neighbors); finder.add_upstream_neighbors(pending_neighbors); @@ -222,7 +222,7 @@ class bp_connection_manager { std::back_inserter(peers_to_drop)); fc_dlog(self()->get_logger(), "peers to drop: ${peers_to_drop}", ("peers_to_drop", to_string(peers_to_drop))); - for (auto account : peers_to_drop) { self()->disconnect(config.bp_peer_addresses[account]); } + for (auto account : peers_to_drop) { self()->connections.disconnect(config.bp_peer_addresses[account]); } active_schedule_version = schedule.version; } } diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index 8eafaba2e5..5d5d12ef40 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -9,9 +9,14 @@ namespace eosio { struct connection_status { string peer; - bool connecting = false; - bool syncing = false; - bool is_bp_peer = false; + string remote_ip; + string remote_port; + bool connecting = false; + bool syncing = false; + bool is_bp_peer = false; + bool is_socket_open = false; + bool is_blocks_only = false; + bool is_transactions_only = false; handshake_message last_handshake; }; @@ -49,4 +54,4 @@ namespace eosio { } -FC_REFLECT( eosio::connection_status, (peer)(connecting)(syncing)(is_bp_peer)(last_handshake) ) +FC_REFLECT( eosio::connection_status, (peer)(remote_ip)(remote_port)(connecting)(syncing)(is_bp_peer)(is_socket_open)(is_blocks_only)(is_transactions_only)(last_handshake) ) diff --git a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp index 2e7245c180..5ca2ba1456 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/protocol.hpp @@ -7,9 +7,6 @@ namespace eosio { using namespace chain; using namespace fc; - static_assert(sizeof(std::chrono::system_clock::duration::rep) >= 8, "system_clock is expected to be at least 64 bits"); - typedef std::chrono::system_clock::duration::rep tstamp; - struct chain_size_message { uint32_t last_irreversible_block_num = 0; block_id_type last_irreversible_block_id; @@ -83,10 +80,10 @@ namespace eosio { }; struct time_message { - tstamp org{0}; //!< origin timestamp - tstamp rec{0}; //!< receive timestamp - tstamp xmt{0}; //!< transmit timestamp - mutable tstamp dst{0}; //!< destination timestamp + int64_t org{0}; //!< origin timestamp, in nanoseconds + int64_t rec{0}; //!< receive timestamp, in 
nanoseconds + int64_t xmt{0}; //!< transmit timestamp, in nanoseconds + mutable int64_t dst{0}; //!< destination timestamp, in nanoseconds }; enum id_list_modes { diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7be60236cc..1328e1195a 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -17,6 +18,8 @@ #include #include #include +#include +#include #include #include @@ -25,9 +28,7 @@ #include #include #include -#include #include -#include // should be defined for c++17, but clang++16 still has not implemented it #ifdef __cpp_lib_hardware_interference_size @@ -41,6 +42,21 @@ using namespace eosio::chain::plugin_interface; +namespace boost +{ + /// @brief Overload for boost::lexical_cast to convert vector of strings to string + /// + /// Used by boost::program_options to print the default value of an std::vector option + /// + /// @param v the vector to convert + /// @return the contents of the vector as a comma-separated string + template<> + inline std::string lexical_cast(const std::vector& v) + { + return boost::join(v, ","); + } +} + namespace eosio { static auto _net_plugin = application::register_plugin(); @@ -61,6 +77,9 @@ namespace eosio { using connection_ptr = std::shared_ptr; using connection_wptr = std::weak_ptr; + static constexpr int64_t block_interval_ns = + std::chrono::duration_cast(std::chrono::milliseconds(config::block_interval_ms)).count(); + const std::string logger_name("net_plugin_impl"); fc::logger logger; std::string peer_log_format; @@ -156,8 +175,8 @@ namespace eosio { >; alignas(hardware_destructive_interference_size) - mutable std::mutex unlinkable_blk_state_mtx; - unlinkable_block_state_index unlinkable_blk_state; + mutable fc::mutex unlinkable_blk_state_mtx; + unlinkable_block_state_index unlinkable_blk_state GUARDED_BY(unlinkable_blk_state_mtx); // 30 should be plenty large enough as any unlinkable block that will be usable is likely to be usable // almost immediately (blocks came in from multiple peers out of order). 30 allows for one block per // producer round until lib. When queue larger than max, remove by block timestamp farthest in the past. 
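// A minimal sketch, with hypothetical names, of the Clang thread-safety pattern the
// GUARDED_BY/REQUIRES annotations above rely on. std::mutex carries no annotations,
// which is why the patch switches to an annotated wrapper (fc::mutex / fc::lock_guard
// in the real tree); with clang's -Wthread-safety (enabled for net_plugin in the
// CMakeLists change above), accessing guarded data without the lock becomes a
// compile-time warning. The types below are stand-ins, not the fc implementations.
#include <cstdint>
#include <mutex>

#define CAPABILITY(x)     __attribute__((capability(x)))
#define SCOPED_CAPABILITY __attribute__((scoped_lockable))
#define GUARDED_BY(x)     __attribute__((guarded_by(x)))
#define REQUIRES(x)       __attribute__((requires_capability(x)))
#define ACQUIRE(...)      __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)      __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") annotated_mutex {      // stand-in for fc::mutex
   std::mutex m_;
public:
   void lock()   ACQUIRE() { m_.lock(); }
   void unlock() RELEASE() { m_.unlock(); }
};

class SCOPED_CAPABILITY annotated_lock_guard {   // stand-in for fc::lock_guard
   annotated_mutex& m_;
public:
   explicit annotated_lock_guard(annotated_mutex& m) ACQUIRE(m) : m_(m) { m_.lock(); }
   ~annotated_lock_guard() RELEASE() { m_.unlock(); }
};

// Hypothetical class mirroring the sync_manager/queued_buffer layout above.
class sync_state_sketch {
   mutable annotated_mutex mtx_;
   uint32_t last_requested_ GUARDED_BY(mtx_) {0};

   // May only be called with mtx_ held; an unlocked caller triggers a warning.
   uint32_t last_requested_i() const REQUIRES(mtx_) { return last_requested_; }
public:
   uint32_t last_requested() const {
      annotated_lock_guard g(mtx_);   // acquires mtx_ for this scope
      return last_requested_i();      // ok: the analysis sees mtx_ as held here
   }
};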
@@ -166,7 +185,7 @@ namespace eosio { public: // returns block id of any block removed because of a full cache std::optional add_unlinkable_block( signed_block_ptr b, const block_id_type& id ) { - std::lock_guard g(unlinkable_blk_state_mtx); + fc::lock_guard g(unlinkable_blk_state_mtx); unlinkable_blk_state.insert( {id, std::move(b)} ); // does not insert if already there if (unlinkable_blk_state.size() > max_unlinkable_cache_size) { auto& index = unlinkable_blk_state.get(); @@ -179,7 +198,7 @@ namespace eosio { } unlinkable_block_state pop_possible_linkable_block(const block_id_type& blkid) { - std::lock_guard g(unlinkable_blk_state_mtx); + fc::lock_guard g(unlinkable_blk_state_mtx); auto& index = unlinkable_blk_state.get(); auto blk_itr = index.find( blkid ); if (blk_itr != index.end()) { @@ -191,7 +210,7 @@ namespace eosio { } void expire_blocks( uint32_t lib_num ) { - std::lock_guard g(unlinkable_blk_state_mtx); + fc::lock_guard g(unlinkable_blk_state_mtx); auto& stale_blk = unlinkable_blk_state.get(); stale_blk.erase( stale_blk.lower_bound( 1 ), stale_blk.upper_bound( lib_num ) ); } @@ -205,54 +224,50 @@ namespace eosio { in_sync }; - static constexpr int64_t block_interval_ns = - std::chrono::duration_cast(std::chrono::milliseconds(config::block_interval_ms)).count(); - alignas(hardware_destructive_interference_size) - std::mutex sync_mtx; - uint32_t sync_known_lib_num{0}; - uint32_t sync_last_requested_num{0}; - uint32_t sync_next_expected_num{0}; - uint32_t sync_req_span{0}; - connection_ptr sync_source; + fc::mutex sync_mtx; + uint32_t sync_known_lib_num GUARDED_BY(sync_mtx) {0}; // highest known lib num from currently connected peers + uint32_t sync_last_requested_num GUARDED_BY(sync_mtx) {0}; // end block number of the last requested range, inclusive + uint32_t sync_next_expected_num GUARDED_BY(sync_mtx) {0}; // the next block number we need from peer + connection_ptr sync_source GUARDED_BY(sync_mtx); // connection we are currently syncing from + + const uint32_t sync_req_span {0}; + const uint32_t sync_peer_limit {0}; alignas(hardware_destructive_interference_size) std::atomic sync_state{in_sync}; + std::atomic sync_ordinal{0}; private: constexpr static auto stage_str( stages s ); bool set_state( stages newstate ); - bool is_sync_required( uint32_t fork_head_block_num ); - void request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn = connection_ptr() ); - void start_sync( const connection_ptr& c, uint32_t target ); - bool verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); + bool is_sync_required( uint32_t fork_head_block_num ); // call with locked mutex + void request_next_chunk( const connection_ptr& conn = connection_ptr() ) REQUIRES(sync_mtx); + connection_ptr find_next_sync_node(); // call with locked mutex + void start_sync( const connection_ptr& c, uint32_t target ); // locks mutex + bool verify_catchup( const connection_ptr& c, uint32_t num, const block_id_type& id ); // locks mutex public: - explicit sync_manager( uint32_t span ); + explicit sync_manager( uint32_t span, uint32_t sync_peer_limit ); static void send_handshakes(); - bool syncing_with_peer() const { return sync_state == lib_catchup; } + bool syncing_from_peer() const { return sync_state == lib_catchup; } bool is_in_sync() const { return sync_state == in_sync; } void sync_reset_lib_num( const connection_ptr& conn, bool closing ); void sync_reassign_fetch( const connection_ptr& c, go_away_reason reason ); void rejected_block( const connection_ptr& c, uint32_t 
blk_num ); void sync_recv_block( const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied ); - void sync_update_expected( const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied ); - void recv_handshake( const connection_ptr& c, const handshake_message& msg ); + void recv_handshake( const connection_ptr& c, const handshake_message& msg, uint32_t nblk_combined_latency ); void sync_recv_notice( const connection_ptr& c, const notice_message& msg ); - inline void reset_last_requested_num() { - std::lock_guard g(sync_mtx); - sync_last_requested_num = 0; - } }; class dispatch_manager { alignas(hardware_destructive_interference_size) - mutable std::mutex blk_state_mtx; - peer_block_state_index blk_state; + mutable fc::mutex blk_state_mtx; + peer_block_state_index blk_state GUARDED_BY(blk_state_mtx); alignas(hardware_destructive_interference_size) - mutable std::mutex local_txns_mtx; - node_transaction_index local_txns; + mutable fc::mutex local_txns_mtx; + node_transaction_index local_txns GUARDED_BY(local_txns_mtx); unlinkable_block_state_cache unlinkable_block_cache; @@ -308,30 +323,96 @@ namespace eosio { constexpr auto def_conn_retry_wait = 30; constexpr auto def_txn_expire_wait = std::chrono::seconds(3); constexpr auto def_resp_expected_wait = std::chrono::seconds(5); - constexpr auto def_sync_fetch_span = 100; + constexpr auto def_sync_fetch_span = 1000; constexpr auto def_keepalive_interval = 10000; constexpr auto message_header_size = sizeof(uint32_t); constexpr uint32_t signed_block_which = fc::get_index(); // see protocol net_message constexpr uint32_t packed_transaction_which = fc::get_index(); // see protocol net_message + class connections_manager { + alignas(hardware_destructive_interference_size) + mutable std::shared_mutex connections_mtx; + chain::flat_set connections; + chain::flat_set supplied_peers; + + alignas(hardware_destructive_interference_size) + fc::mutex connector_check_timer_mtx; + unique_ptr connector_check_timer GUARDED_BY(connector_check_timer_mtx); + + /// thread safe, only modified on startup + std::chrono::milliseconds heartbeat_timeout{def_keepalive_interval*2}; + fc::microseconds max_cleanup_time; + boost::asio::steady_timer::duration connector_period{0}; + uint32_t max_client_count{def_max_clients}; + std::function update_p2p_connection_metrics; + + private: // must call with held mutex + connection_ptr find_connection_i(const string& host) const; + void add_i(connection_ptr&& c); + void connect_i(const string& peer, const string& p2p_address); + + void connection_monitor(const std::weak_ptr& from_connection); + + public: + size_t number_connections() const; + void add_supplied_peers(const vector& peers ); + + // not thread safe, only call on startup + void init(std::chrono::milliseconds heartbeat_timeout_ms, + fc::microseconds conn_max_cleanup_time, + boost::asio::steady_timer::duration conn_period, + uint32_t maximum_client_count); + + uint32_t get_max_client_count() const { return max_client_count; } + + fc::microseconds get_connector_period() const; + + void register_update_p2p_connection_metrics(std::function&& fun); + + void connect_supplied_peers(const string& p2p_address); + + void start_conn_timer(); + void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); + void stop_conn_timer(); + + void add(connection_ptr c); + string connect(const string& host, const string& p2p_address); + string disconnect(const string& host); + void close_all(); + + 
std::optional status(const string& host) const; + vector connection_statuses() const; + + template + void for_each_connection(Function&& f) const; + + template + void for_each_block_connection(Function&& f) const; + + template + bool any_of_connections(UnaryPredicate&& p) const; + + template + bool any_of_block_connections(UnaryPredicate&& p) const; + }; + class net_plugin_impl : public std::enable_shared_from_this, public auto_bp_peering::bp_connection_manager { public: - unique_ptr acceptor; std::atomic current_connection_id{0}; unique_ptr< sync_manager > sync_master; unique_ptr< dispatch_manager > dispatcher; + connections_manager connections; /** * Thread safe, only updated in plugin initialize * @{ */ - string p2p_address; - string p2p_server_address; + vector p2p_addresses; + vector p2p_server_addresses; - chain::flat_set supplied_peers; vector allowed_peers; ///< peer keys allowed to connect std::map private_keys; ///< overlapping with producer keys, also authenticating non-producing nodes @@ -343,21 +424,14 @@ namespace eosio { }; possible_connections allowed_connections{None}; - boost::asio::steady_timer::duration connector_period{0}; boost::asio::steady_timer::duration txn_exp_period{0}; boost::asio::steady_timer::duration resp_expected_period{0}; std::chrono::milliseconds keepalive_interval{std::chrono::milliseconds{def_keepalive_interval}}; - std::chrono::milliseconds heartbeat_timeout{keepalive_interval * 2}; - int max_cleanup_time_ms = 0; - uint32_t max_client_count = 0; uint32_t max_nodes_per_host = 1; bool p2p_accept_transactions = true; fc::microseconds p2p_dedup_cache_expire_time_us{}; - /// Peer clock may be no more than 1 second skewed from our clock, including network latency. - const std::chrono::system_clock::duration peer_authentication_interval{std::chrono::seconds{1}}; - chain_id_type chain_id; fc::sha256 node_id; string user_agent_name; @@ -368,21 +442,12 @@ namespace eosio { /** @} */ alignas(hardware_destructive_interference_size) - mutable std::shared_mutex connections_mtx; - std::set< connection_ptr > connections; // todo: switch to a thread safe container to avoid big mutex over complete collection - - alignas(hardware_destructive_interference_size) - std::mutex connector_check_timer_mtx; - unique_ptr connector_check_timer; - int connector_checks_in_flight{0}; + fc::mutex expire_timer_mtx; + unique_ptr expire_timer GUARDED_BY(expire_timer_mtx); alignas(hardware_destructive_interference_size) - std::mutex expire_timer_mtx; - unique_ptr expire_timer; - - alignas(hardware_destructive_interference_size) - std::mutex keepalive_timer_mtx; - unique_ptr keepalive_timer; + fc::mutex keepalive_timer_mtx; + unique_ptr keepalive_timer GUARDED_BY(keepalive_timer_mtx); alignas(hardware_destructive_interference_size) std::atomic in_shutdown{false}; @@ -404,14 +469,13 @@ namespace eosio { }; - std::function update_p2p_connection_metrics; std::function increment_failed_p2p_connections; std::function increment_dropped_trxs; private: alignas(hardware_destructive_interference_size) - mutable std::mutex chain_info_mtx; // protects chain_info_t - chain_info_t chain_info; + mutable fc::mutex chain_info_mtx; // protects chain_info_t + chain_info_t chain_info GUARDED_BY(chain_info_mtx); public: void update_chain_info(); @@ -419,20 +483,16 @@ namespace eosio { uint32_t get_chain_lib_num() const; uint32_t get_chain_head_num() const; - void start_listen_loop(); - void on_accepted_block_header( const block_state_ptr& bs ); void on_accepted_block( const block_state_ptr& bs ); void 
transaction_ack(const std::pair&); void on_irreversible_block( const block_state_ptr& block ); - void start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection); void start_expire_timer(); void start_monitors(); void expire(); - void connection_monitor(const std::weak_ptr& from_connection, bool reschedule); /** \name Peer Timestamps * Time message handling * @{ @@ -467,16 +527,13 @@ namespace eosio { constexpr static uint16_t to_protocol_version(uint16_t v); - connection_ptr find_connection(const string& host)const; // must call with held mutex - string connect( const string& host ); - string disconnect( const string& host ); - - template - void for_each_connection(Function&& fun) const; - + void plugin_initialize(const variables_map& options); + void plugin_startup(); void plugin_shutdown(); bool in_sync() const; fc::logger& get_logger() { return logger; } + + void create_session(tcp::socket&& socket, const string listen_address); }; // peer_[x]log must be called from thread in connection strand @@ -549,7 +606,8 @@ namespace eosio { constexpr uint16_t proto_heartbeat_interval = 4; // eosio 2.1: supports configurable heartbeat interval constexpr uint16_t proto_dup_goaway_resolution = 5; // eosio 2.1: support peer address based duplicate connection resolution constexpr uint16_t proto_dup_node_id_goaway = 6; // eosio 2.1: support peer node_id based duplicate connection resolution - constexpr uint16_t proto_leap_initial = 7; // leap client, needed because none of the 2.1 versions are supported + constexpr uint16_t proto_leap_initial = 7; // leap client, needed because none of the 2.1 versions are supported + constexpr uint16_t proto_block_range = 8; // include block range in notice_message #pragma GCC diagnostic pop constexpr uint16_t net_version_max = proto_leap_initial; @@ -572,31 +630,31 @@ namespace eosio { class queued_buffer : boost::noncopyable { public: void clear_write_queue() { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); _write_queue.clear(); _sync_write_queue.clear(); _write_queue_size = 0; } void clear_out_queue() { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); while ( !_out_queue.empty() ) { _out_queue.pop_front(); } } uint32_t write_queue_size() const { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); return _write_queue_size; } bool is_out_queue_empty() const { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); return _out_queue.empty(); } bool ready_to_send() const { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); // if out_queue is not empty then async_write is in progress return ((!_sync_write_queue.empty() || !_write_queue.empty()) && _out_queue.empty()); } @@ -605,7 +663,7 @@ namespace eosio { bool add_write_queue( const std::shared_ptr>& buff, std::function callback, bool to_sync_queue ) { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); if( to_sync_queue ) { _sync_write_queue.push_back( {buff, std::move(callback)} ); } else { @@ -619,7 +677,7 @@ namespace eosio { } void fill_out_buffer( std::vector& bufs ) { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); if( !_sync_write_queue.empty() ) { // always send msgs from sync_write_queue first fill_out_buffer( bufs, _sync_write_queue ); } else { // postpone real_time write_queue if sync queue is not empty @@ -629,7 +687,7 @@ namespace eosio { } void out_callback( boost::system::error_code ec, std::size_t w ) { - std::lock_guard g( _mtx ); + fc::lock_guard g( _mtx ); for( auto& m : _out_queue ) { m.callback( ec, w ); } @@ -638,7 +696,7 
@@ namespace eosio { private: struct queued_write; void fill_out_buffer( std::vector& bufs, - deque& w_queue ) { + deque& w_queue ) REQUIRES(_mtx) { while ( !w_queue.empty() ) { auto& m = w_queue.front(); bufs.emplace_back( m.buff->data(), m.buff->size() ); @@ -655,11 +713,11 @@ namespace eosio { }; alignas(hardware_destructive_interference_size) - mutable std::mutex _mtx; - uint32_t _write_queue_size{0}; - deque _write_queue; - deque _sync_write_queue; // sync_write_queue will be sent first - deque _out_queue; + mutable fc::mutex _mtx; + uint32_t _write_queue_size GUARDED_BY(_mtx) {0}; + deque _write_queue GUARDED_BY(_mtx); + deque _sync_write_queue GUARDED_BY(_mtx); // sync_write_queue will be sent first + deque _out_queue GUARDED_BY(_mtx); }; // queued_buffer @@ -705,34 +763,52 @@ namespace eosio { class connection : public std::enable_shared_from_this { public: - explicit connection( const string& endpoint ); - connection(); + enum class connection_state { connecting, connected, closing, closed }; + explicit connection( const string& endpoint, const string& listen_address ); + /// @brief ctor + /// @param socket created by boost::asio in fc::listener + /// @param address identifier of listen socket which accepted this new connection + explicit connection( tcp::socket&& socket, const string& listen_address ); ~connection() = default; + connection( const connection& ) = delete; + connection( connection&& ) = delete; + connection& operator=( const connection& ) = delete; + connection& operator=( connection&& ) = delete; + bool start_session(); bool socket_is_open() const { return socket_open.load(); } // thread safe, atomic + connection_state state() const { return conn_state.load(); } // thread safe atomic + void set_state(connection_state s); + static std::string state_str(connection_state s); const string& peer_address() const { return peer_addr; } // thread safe, const void set_connection_type( const string& peer_addr ); - bool is_transactions_only_connection()const { return connection_type == transactions_only; } + bool is_transactions_only_connection()const { return connection_type == transactions_only; } // thread safe, atomic bool is_blocks_only_connection()const { return connection_type == blocks_only; } + bool is_transactions_connection() const { return connection_type != blocks_only; } // thread safe, atomic + bool is_blocks_connection() const { return connection_type != transactions_only; } // thread safe, atomic void set_heartbeat_timeout(std::chrono::milliseconds msec) { - std::chrono::system_clock::duration dur = msec; - hb_timeout = dur.count(); + hb_timeout = msec; } + uint64_t get_peer_ping_time_ns() const { return peer_ping_time_ns; } + private: static const string unknown; - void update_endpoints(); + std::atomic peer_ping_time_ns = std::numeric_limits::max(); std::optional peer_requested; // this peer is requesting info from us alignas(hardware_destructive_interference_size) std::atomic socket_open{false}; + std::atomic conn_state{connection_state::connecting}; + + string listen_address; // address sent to peer in handshake const string peer_addr; enum connection_types : char { both, @@ -740,7 +816,9 @@ namespace eosio { blocks_only }; - std::atomic connection_type{both}; + std::atomic connection_type{both}; + std::atomic peer_start_block_num{0}; + std::atomic peer_head_block_num{0}; public: boost::asio::io_context::strand strand; @@ -761,6 +839,10 @@ namespace eosio { // kept in sync with last_handshake_recv.last_irreversible_block_num, only accessed from connection 
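`fill_out_buffer` above (now annotated `REQUIRES(_mtx)`) flattens the pending message deques into `boost::asio` buffer views so that one scatter-gather `async_write` sends many queued messages. A simplified sketch of that pattern with stand-in types (the real `queued_write` also carries a completion callback, and the plugin keeps a separate sync queue):

```cpp
#include <boost/asio.hpp>
#include <deque>
#include <memory>
#include <vector>

struct queued_write {
   std::shared_ptr<std::vector<char>> buff;
};

// Messages move from w_queue to out_queue; out_queue keeps the buffers alive
// until async_write's completion handler runs (clear_out_queue in the plugin).
void fill_out_buffer(std::vector<boost::asio::const_buffer>& bufs,
                     std::deque<queued_write>& w_queue,
                     std::deque<queued_write>& out_queue) {
   while (!w_queue.empty()) {
      auto& m = w_queue.front();
      bufs.emplace_back(m.buff->data(), m.buff->size()); // views, not copies
      out_queue.emplace_back(std::move(m));
      w_queue.pop_front();
   }
}
// usage: boost::asio::async_write(socket, bufs, handler); the handler fires
// each queued callback and then clears out_queue.
```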
strand uint32_t peer_lib_num = 0; + std::atomic sync_ordinal{0}; + // when syncing from a peer, the last block expected of the current range + uint32_t sync_last_requested_block{0}; + alignas(hardware_destructive_interference_size) std::atomic trx_in_progress_size{0}; @@ -769,9 +851,7 @@ namespace eosio { int16_t sent_handshake_count = 0; alignas(hardware_destructive_interference_size) - std::atomic connecting{true}; - std::atomic syncing{false}; - std::atomic closing{false}; + std::atomic peer_syncing_from_us{false}; std::atomic protocol_version = 0; uint16_t net_version = net_version_max; @@ -780,21 +860,21 @@ namespace eosio { block_status_monitor block_status_monitor_; alignas(hardware_destructive_interference_size) - std::mutex response_expected_timer_mtx; - boost::asio::steady_timer response_expected_timer; + fc::mutex response_expected_timer_mtx; + boost::asio::steady_timer response_expected_timer GUARDED_BY(response_expected_timer_mtx); alignas(hardware_destructive_interference_size) - std::atomic no_retry{no_reason}; + std::atomic no_retry{no_reason}; alignas(hardware_destructive_interference_size) - mutable std::mutex conn_mtx; //< mtx for last_req .. remote_endpoint_ip - std::optional last_req; - handshake_message last_handshake_recv; - handshake_message last_handshake_sent; - block_id_type fork_head; - uint32_t fork_head_num{0}; - fc::time_point last_close; - string remote_endpoint_ip; + mutable fc::mutex conn_mtx; //< mtx for last_req .. remote_endpoint_ip + std::optional last_req GUARDED_BY(conn_mtx); + handshake_message last_handshake_recv GUARDED_BY(conn_mtx); + handshake_message last_handshake_sent GUARDED_BY(conn_mtx); + block_id_type fork_head GUARDED_BY(conn_mtx); + uint32_t fork_head_num GUARDED_BY(conn_mtx) {0}; + fc::time_point last_close GUARDED_BY(conn_mtx); + string remote_endpoint_ip GUARDED_BY(conn_mtx); connection_status get_status()const; @@ -802,28 +882,31 @@ namespace eosio { * Time message handling * @{ */ - // Members set from network data - tstamp org{0}; //!< originate timestamp - tstamp rec{0}; //!< receive timestamp - tstamp dst{0}; //!< destination timestamp - tstamp xmt{0}; //!< transmit timestamp + // See NTP protocol. https://datatracker.ietf.org/doc/rfc5905/ + std::chrono::nanoseconds org{0}; //!< origin timestamp. Time at the client when the request departed for the server. + // std::chrono::nanoseconds (not used) rec{0}; //!< receive timestamp. Time at the server when the request arrived from the client. + std::chrono::nanoseconds xmt{0}; //!< transmit timestamp, Time at the server when the response left for the client. + // std::chrono::nanoseconds (not used) dst{0}; //!< destination timestamp, Time at the client when the reply arrived from the server. 
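The four NTP timestamps referenced above support the standard RFC 5905 arithmetic, even though the plugin now stores only `org` and `xmt`. For reference, a sketch of the classic round-trip/offset formulas over `chrono` types (the `t1..t4` naming is RFC 5905's, not the plugin's):

```cpp
#include <chrono>

struct time_sample {
   std::chrono::nanoseconds t1; // org: time request left the client
   std::chrono::nanoseconds t2; // rec: time request arrived at the server
   std::chrono::nanoseconds t3; // xmt: time response left the server
   std::chrono::nanoseconds t4; // dst: time response arrived at the client
};

// network round trip, excluding server processing time
constexpr std::chrono::nanoseconds round_trip(const time_sample& s) {
   return (s.t4 - s.t1) - (s.t3 - s.t2);
}

// estimated offset of the server clock relative to the client clock
constexpr std::chrono::nanoseconds offset(const time_sample& s) {
   return ((s.t2 - s.t1) + (s.t3 - s.t4)) / 2;
}
```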
/** @} */ // timestamp for the lastest message - tstamp latest_msg_time{0}; - tstamp hb_timeout{std::chrono::milliseconds{def_keepalive_interval}.count()}; - tstamp latest_blk_time{0}; + std::chrono::system_clock::time_point latest_msg_time{std::chrono::system_clock::time_point::min()}; + std::chrono::milliseconds hb_timeout{std::chrono::milliseconds{def_keepalive_interval}}; + std::chrono::system_clock::time_point latest_blk_time{std::chrono::system_clock::time_point::min()}; bool connected() const; + bool closed() const; // socket is not open or is closed or closing, thread safe bool current() const; + bool should_sync_from(uint32_t sync_next_expected_num, uint32_t sync_known_lib_num) const; /// @param reconnect true if we should try and reconnect immediately after close /// @param shutdown true only if plugin is shutting down void close( bool reconnect = true, bool shutdown = false ); private: - static void _close( connection* self, bool reconnect, bool shutdown ); // for easy capture + void _close( bool reconnect, bool shutdown ); // for easy capture bool process_next_block_message(uint32_t message_length); bool process_next_trx_message(uint32_t message_length); + void update_endpoints(); public: bool populate_handshake( handshake_message& hello ) const; @@ -849,7 +932,7 @@ namespace eosio { */ /** \brief Check heartbeat time and send Time_message */ - void check_heartbeat( tstamp current_time ); + void check_heartbeat( std::chrono::system_clock::time_point current_time ); /** \brief Populate and queue time_message */ void send_time(); @@ -863,13 +946,13 @@ namespace eosio { * packet is placed on the send queue. Calls the kernel time of * day routine and converts to a (at least) 64 bit integer. */ - static tstamp get_time() { - return std::chrono::system_clock::now().time_since_epoch().count(); + static std::chrono::nanoseconds get_time() { + return std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()); } /** @} */ void blk_send_branch( const block_id_type& msg_head_id ); - void blk_send_branch_impl( uint32_t msg_head_num, uint32_t lib_num, uint32_t head_num ); + void blk_send_branch( uint32_t msg_head_num, uint32_t lib_num, uint32_t head_num ); void blk_send(const block_id_type& blkid); void stop_send(); @@ -878,7 +961,7 @@ namespace eosio { void enqueue_buffer( const std::shared_ptr>& send_buffer, go_away_reason close_after_send, bool to_sync_queue = false); - void cancel_sync(go_away_reason); + void cancel_sync(go_away_reason reason); void flush_queues(); bool enqueue_sync_block(); void request_sync_blocks(uint32_t start, uint32_t end); @@ -922,7 +1005,10 @@ namespace eosio { void handle_message( const packed_transaction& msg ) = delete; // packed_transaction_ptr overload used instead void handle_message( packed_transaction_ptr trx ); - void process_signed_block( const block_id_type& id, signed_block_ptr msg, block_state_ptr bsp ); + // returns calculated number of blocks combined latency + uint32_t calc_block_latency(); + + void process_signed_block( const block_id_type& id, signed_block_ptr block, block_state_ptr bsp ); fc::variant_object get_logger_variant() const { fc::mutable_variant_object mvo; @@ -937,13 +1023,13 @@ namespace eosio { return mvo; } - bool incoming() const { return peer_address().empty(); } // thread safe becuase of peer_address + bool incoming() const { return peer_address().empty(); } // thread safe because of peer_address bool incoming_and_handshake_received() const { if (!incoming()) return false; - std::lock_guard g_conn( conn_mtx 
);
+        fc::lock_guard g_conn( conn_mtx );
         return !last_handshake_recv.p2p_address.empty();
      }
-  };
+  }; // class connection

   const string connection::unknown = "";
@@ -1000,33 +1086,69 @@
      }
   };
+
-  template<typename Function>
-  void net_plugin_impl::for_each_connection( Function&& f ) const {
-     std::shared_lock g( connections_mtx );
-     for( auto& c :connections ) {
-        if( !f( c ) ) return;
+  std::tuple<std::string, std::string, std::string> split_host_port_type(const std::string& peer_add) {
+     // host:port:[<trx>|<blk>]
+     if (peer_add.empty()) return {};
+
+     string::size_type p = peer_add[0] == '[' ? peer_add.find(']') : 0;
+     if (p == string::npos) {
+        fc_wlog( logger, "Invalid peer address: ${peer}", ("peer", peer_add) );
+        return {};
      }
+     string::size_type colon = peer_add.find(':', p);
+     string::size_type colon2 = peer_add.find(':', colon + 1);
+     string::size_type end = colon2 == string::npos
+           ? string::npos : peer_add.find_first_of( " :+=.,<>!$%^&(*)|-#@\t", colon2 + 1 ); // future proof by including most symbols without using regex
+     string host = (p > 0) ? peer_add.substr( 1, p-1 ) : peer_add.substr( 0, colon );
+     string port = peer_add.substr( colon + 1, colon2 == string::npos ? string::npos : colon2 - (colon + 1));
+     string type = colon2 == string::npos ? "" : end == string::npos ?
+           peer_add.substr( colon2 + 1 ) : peer_add.substr( colon2 + 1, end - (colon2 + 1) );
+     return {std::move(host), std::move(port), std::move(type)};
   }

   template<typename Function>
-  void for_each_connection( Function&& f ) {
-     my_impl->for_each_connection(std::forward<Function>(f));
+  void connections_manager::for_each_connection( Function&& f ) const {
+     std::shared_lock g( connections_mtx );
+     std::for_each(connections.begin(), connections.end(), std::forward<Function>(f));
   }

   template<typename Function>
-  void for_each_block_connection( Function f ) {
-     std::shared_lock g( my_impl->connections_mtx );
-     for( auto& c : my_impl->connections ) {
-        if( c->is_transactions_only_connection() ) continue;
-        if( !f( c ) ) return;
+  void connections_manager::for_each_block_connection( Function&& f ) const {
+     std::shared_lock g( connections_mtx );
+     for( auto& c : connections ) {
+        if (c->is_blocks_connection()) {
+           f(c);
+        }
      }
   }

+  template <typename UnaryPredicate>
+  bool connections_manager::any_of_connections(UnaryPredicate&& p) const {
+     std::shared_lock g(connections_mtx);
+     return std::any_of(connections.cbegin(), connections.cend(), std::forward<UnaryPredicate>(p));
+  }
+
+  template <typename UnaryPredicate>
+  bool connections_manager::any_of_block_connections(UnaryPredicate&& p) const {
+     std::shared_lock g( connections_mtx );
+     for( auto& c : connections ) {
+        if( c->is_blocks_connection() ) {
+           if (p(c))
+              return true;
+        }
     }
+     return false;
+  }
+
+
   //---------------------------------------------------------------------------

-  connection::connection( const string& endpoint )
-     : peer_addr( endpoint ),
+  connection::connection( const string& endpoint, const string& listen_address )
+     : listen_address( listen_address ),
+       peer_addr( endpoint ),
        strand( my_impl->thread_pool.get_executor() ),
        socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ),
        log_p2p_address( endpoint ),
@@ -1039,16 +1161,18 @@ namespace eosio {
      fc_ilog( logger, "created connection ${c} to ${n}", ("c", connection_id)("n", endpoint) );
   }

-  connection::connection()
-     : peer_addr(),
+  connection::connection(tcp::socket&& s, const string& listen_address)
+     : listen_address( listen_address ),
+       peer_addr(),
        strand( my_impl->thread_pool.get_executor() ),
-       socket( new tcp::socket( my_impl->thread_pool.get_executor() ) ),
+       socket( new tcp::socket( std::move(s) ) ),
        connection_id(
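For clarity, expected results of `split_host_port_type` for representative peer addresses. This is a hypothetical test sketch, assuming it links against the translation unit above; the bracketed form is the new IPv6 handling the old inline parsing lacked:

```cpp
#include <cassert>
#include <string>
#include <tuple>

void split_host_port_type_examples() {
   {
      auto [host, port, type] = eosio::split_host_port_type("p2p.eos.io:9876");
      assert(host == "p2p.eos.io" && port == "9876" && type.empty());
   }
   {
      auto [host, port, type] = eosio::split_host_port_type("p2p.eos.io:9876:trx"); // transactions-only peer
      assert(type == "trx");
   }
   {
      auto [host, port, type] = eosio::split_host_port_type("[2001:db8::1]:9876");  // bracketed IPv6 host
      assert(host == "2001:db8::1" && port == "9876" && type.empty());
   }
}
```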
++my_impl->current_connection_id ), response_expected_timer( my_impl->thread_pool.get_executor() ), last_handshake_recv(), last_handshake_sent() { - fc_dlog( logger, "new connection object created" ); + update_endpoints(); + fc_dlog( logger, "new connection object created for peer ${address}:${port} from listener ${addr}", ("address", log_remote_endpoint_ip)("port", log_remote_endpoint_port)("addr", listen_address) ); } // called from connection strand @@ -1061,22 +1185,13 @@ namespace eosio { log_remote_endpoint_port = ec ? unknown : std::to_string(rep.port()); local_endpoint_ip = ec2 ? unknown : lep.address().to_string(); local_endpoint_port = ec2 ? unknown : std::to_string(lep.port()); - std::lock_guard g_conn( conn_mtx ); + fc::lock_guard g_conn( conn_mtx ); remote_endpoint_ip = log_remote_endpoint_ip; } // called from connection strand - void connection::set_connection_type( const string& peer_add ) { - // host:port:[|] - string::size_type colon = peer_add.find(':'); - string::size_type colon2 = peer_add.find(':', colon + 1); - string::size_type end = colon2 == string::npos - ? string::npos : peer_add.find_first_of( " :+=.,<>!$%^&(*)|-#@\t", colon2 + 1 ); // future proof by including most symbols without using regex - string host = peer_add.substr( 0, colon ); - string port = peer_add.substr( colon + 1, colon2 == string::npos ? string::npos : colon2 - (colon + 1)); - string type = colon2 == string::npos ? "" : end == string::npos ? - peer_add.substr( colon2 + 1 ) : peer_add.substr( colon2 + 1, end - (colon2 + 1) ); - + void connection::set_connection_type( const std::string& peer_add ) { + auto [host, port, type] = split_host_port_type(peer_add); if( type.empty() ) { fc_dlog( logger, "Setting connection ${c} type for: ${peer} to both transactions and blocks", ("c", connection_id)("peer", peer_add) ); connection_type = both; @@ -1091,13 +1206,41 @@ namespace eosio { } } + std::string connection::state_str(connection_state s) { + switch (s) { + case connection_state::connecting: + return "connecting"; + case connection_state::connected: + return "connected"; + case connection_state::closing: + return "closing"; + case connection_state::closed: + return "closed"; + } + return "unknown"; + } + + void connection::set_state(connection_state s) { + auto curr = state(); + if (curr == s) + return; + if (s == connection_state::connected && curr != connection_state::connecting) + return; + fc_dlog(logger, "old connection ${id} state ${os} becoming ${ns}", ("id", connection_id)("os", state_str(curr))("ns", state_str(s))); + + conn_state = s; + } + connection_status connection::get_status()const { connection_status stat; stat.peer = peer_addr; - stat.connecting = connecting; - stat.syncing = syncing; + stat.remote_ip = log_remote_endpoint_ip; + stat.remote_port = log_remote_endpoint_port; + stat.connecting = state() == connection_state::connecting; + stat.syncing = peer_syncing_from_us; stat.is_bp_peer = is_bp_connection; - std::lock_guard g( conn_mtx ); + stat.is_socket_open = socket_is_open(); + fc::lock_guard g( conn_mtx ); stat.last_handshake = last_handshake_recv; return stat; } @@ -1106,7 +1249,6 @@ namespace eosio { bool connection::start_session() { verify_strand_in_this_thread( strand, __func__, __LINE__ ); - update_endpoints(); boost::asio::ip::tcp::no_delay nodelay( true ); boost::system::error_code ec; socket->set_option( nodelay, ec ); @@ -1122,12 +1264,38 @@ namespace eosio { } } + // thread safe, all atomics bool connection::connected() const { - return socket_is_open() && 
!connecting && !closing; + return socket_is_open() && state() == connection_state::connected; + } + + bool connection::closed() const { + return !socket_is_open() + || state() == connection_state::closing + || state() == connection_state::closed; } + // thread safe, all atomics bool connection::current() const { - return (connected() && !syncing); + return (connected() && !peer_syncing_from_us); + } + + // thread safe + bool connection::should_sync_from(uint32_t sync_next_expected_num, uint32_t sync_known_lib_num) const { + fc_dlog(logger, "id: ${id} blocks conn: ${t} current: ${c} socket_open: ${so} syncing from us: ${s} state: ${con} peer_start_block: ${sb} peer_head: ${h} ping: ${p}us no_retry: ${g}", + ("id", connection_id)("t", is_blocks_connection()) + ("c", current())("so", socket_is_open())("s", peer_syncing_from_us.load())("con", state_str(state())) + ("sb", peer_start_block_num.load())("h", peer_head_block_num.load())("p", get_peer_ping_time_ns()/1000)("g", reason_str(no_retry))); + if (is_blocks_connection() && current()) { + if (no_retry == go_away_reason::no_reason) { + if (peer_start_block_num <= sync_next_expected_num) { // has blocks we want + if (peer_head_block_num >= sync_known_lib_num) { // is in sync + return true; + } + } + } + } + return false; } void connection::flush_queues() { @@ -1135,48 +1303,49 @@ namespace eosio { } void connection::close( bool reconnect, bool shutdown ) { - closing = true; + set_state(connection_state::closing); strand.post( [self = shared_from_this(), reconnect, shutdown]() { - connection::_close( self.get(), reconnect, shutdown ); + self->_close( reconnect, shutdown ); }); } // called from connection strand - void connection::_close( connection* self, bool reconnect, bool shutdown ) { - self->socket_open = false; + void connection::_close( bool reconnect, bool shutdown ) { + socket_open = false; boost::system::error_code ec; - if( self->socket->is_open() ) { - self->socket->shutdown( tcp::socket::shutdown_both, ec ); - self->socket->close( ec ); - } - self->socket.reset( new tcp::socket( my_impl->thread_pool.get_executor() ) ); - self->flush_queues(); - self->connecting = false; - self->syncing = false; - self->block_status_monitor_.reset(); - ++self->consecutive_immediate_connection_close; + socket->shutdown( tcp::socket::shutdown_both, ec ); + socket->close( ec ); + socket.reset( new tcp::socket( my_impl->thread_pool.get_executor() ) ); + flush_queues(); + peer_syncing_from_us = false; + block_status_monitor_.reset(); + ++consecutive_immediate_connection_close; bool has_last_req = false; { - std::lock_guard g_conn( self->conn_mtx ); - has_last_req = self->last_req.has_value(); - self->last_handshake_recv = handshake_message(); - self->last_handshake_sent = handshake_message(); - self->last_close = fc::time_point::now(); - self->conn_node_id = fc::sha256(); + fc::lock_guard g_conn( conn_mtx ); + has_last_req = last_req.has_value(); + last_handshake_recv = handshake_message(); + last_handshake_sent = handshake_message(); + last_close = fc::time_point::now(); + conn_node_id = fc::sha256(); } if( has_last_req && !shutdown ) { - my_impl->dispatcher->retry_fetch( self->shared_from_this() ); + my_impl->dispatcher->retry_fetch( shared_from_this() ); } - self->peer_lib_num = 0; - self->peer_requested.reset(); - self->sent_handshake_count = 0; - if( !shutdown) my_impl->sync_master->sync_reset_lib_num( self->shared_from_this(), true ); - peer_ilog( self, "closing" ); - self->cancel_wait(); - self->closing = false; + peer_lib_num = 0; + 
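`set_state` (shown earlier) replaces the old trio of `connecting`/`syncing`/`closing` flags with one atomic state, and its only hard rule is that a stale "connected" cannot overwrite a state that has already moved on to closing. A standalone sketch of just that guard logic:

```cpp
#include <atomic>

enum class connection_state { connecting, connected, closing, closed };

struct state_holder {
   std::atomic<connection_state> conn_state{connection_state::connecting};

   void set_state(connection_state s) {
      const auto curr = conn_state.load();
      if (curr == s)
         return;                            // redundant transition: no-op
      if (s == connection_state::connected && curr != connection_state::connecting)
         return;                            // late "connected" after close() started: ignored
      conn_state = s;                       // check-then-set race is benign here,
   }                                        // matching how the plugin uses it
};
```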
peer_requested.reset();
+     sent_handshake_count = 0;
      if( !shutdown) my_impl->sync_master->sync_reset_lib_num( shared_from_this(), true );
      peer_ilog( this, "closing" );
      cancel_wait();
+     sync_last_requested_block = 0;
+     org = std::chrono::nanoseconds{0};
+     latest_msg_time = std::chrono::system_clock::time_point::min();
+     latest_blk_time = std::chrono::system_clock::time_point::min();
+     set_state(connection_state::closed);

      if( reconnect && !shutdown ) {
-        my_impl->start_conn_timer( std::chrono::milliseconds( 100 ), connection_wptr() );
+        my_impl->connections.start_conn_timer( std::chrono::milliseconds( 100 ), connection_wptr() );
      }
   }
@@ -1194,7 +1363,7 @@
      }

      if( logger.is_enabled( fc::log_level::debug ) ) {
-        std::unique_lock g_conn( conn_mtx );
+        fc::unique_lock g_conn( conn_mtx );
         if( last_handshake_recv.generation >= 1 ) {
            peer_dlog( this, "maybe truncating branch at = ${h}:${id}",
                       ("h", block_header::num_from_id(last_handshake_recv.head_id))("id", last_handshake_recv.head_id) );
@@ -1224,12 +1393,12 @@
      } else {
         if( on_fork ) msg_head_num = 0;
         // if peer on fork, start at their last lib, otherwise we can start at msg_head+1
-        blk_send_branch_impl( msg_head_num, lib_num, head_num );
+        blk_send_branch( msg_head_num, lib_num, head_num );
      }
   }

   // called from connection strand
-  void connection::blk_send_branch_impl( uint32_t msg_head_num, uint32_t lib_num, uint32_t head_num ) {
+  void connection::blk_send_branch( uint32_t msg_head_num, uint32_t lib_num, uint32_t head_num ) {
      if( !peer_requested ) {
         auto last = msg_head_num != 0 ? msg_head_num : lib_num;
         peer_requested = peer_sync_state( last+1, head_num, last );
@@ -1266,14 +1435,14 @@
   }

   void connection::stop_send() {
-     syncing = false;
+     peer_syncing_from_us = false;
   }

   void connection::send_handshake() {
-     if (closing)
+     if (closed())
        return;
      strand.post( [c = shared_from_this()]() {
-        std::unique_lock g_conn( c->conn_mtx );
+        fc::unique_lock g_conn( c->conn_mtx );
         if( c->populate_handshake( c->last_handshake_sent ) ) {
            static_assert( std::is_same_v<decltype( c->sent_handshake_count ), int16_t>, "INT16_MAX based on int16_t" );
            if( c->sent_handshake_count == INT16_MAX ) c->sent_handshake_count = 1; // do not wrap
@@ -1290,8 +1459,8 @@
   }

   // called from connection strand
-  void connection::check_heartbeat( tstamp current_time ) {
-     if( latest_msg_time > 0 ) {
+  void connection::check_heartbeat( std::chrono::system_clock::time_point current_time ) {
+     if( latest_msg_time > std::chrono::system_clock::time_point::min() ) {
         if( current_time > latest_msg_time + hb_timeout ) {
            no_retry = benign_other;
            if( !peer_address().empty() ) {
@@ -1302,34 +1471,45 @@
            close(false);
         }
         return;
-     } else {
-        const tstamp timeout = std::max(hb_timeout/2, 2*std::chrono::milliseconds(config::block_interval_ms).count());
-        if ( current_time > latest_blk_time + timeout ) {
+     }
+     if (!my_impl->sync_master->syncing_from_peer()) {
+        const std::chrono::milliseconds timeout = std::max(hb_timeout/2, 2*std::chrono::milliseconds(config::block_interval_ms));
+        if (current_time > latest_blk_time + timeout) {
+           peer_wlog(this, "half heartbeat timed out, sending handshake");
           send_handshake();
           return;
        }
     }
+  }
+  org = std::chrono::nanoseconds{0};
      send_time();
   }

   // called from connection strand
   void connection::send_time() {
-     time_message xpkt;
-     xpkt.org = rec;
-     xpkt.rec = dst;
-     xpkt.xmt = get_time();
-     org = xpkt.xmt;
-     enqueue(xpkt);
+     if (org == std::chrono::nanoseconds{0}) { // do not send if there is
already a time loop in progress + org = get_time(); + // xpkt.org == 0 means we are initiating a ping. Actual origin time is in xpkt.xmt. + time_message xpkt{ + .org = 0, + .rec = 0, + .xmt = org.count(), + .dst = 0 }; + peer_dlog(this, "send init time_message: ${t}", ("t", xpkt)); + enqueue(xpkt); + } } // called from connection strand void connection::send_time(const time_message& msg) { - time_message xpkt; - xpkt.org = msg.xmt; - xpkt.rec = msg.dst; - xpkt.xmt = get_time(); + time_message xpkt{ + .org = msg.xmt, + .rec = msg.dst, + .xmt = get_time().count(), + .dst = 0 }; + peer_dlog( this, "send time_message: ${t}, org: ${o}", ("t", xpkt)("o", org.count()) ); enqueue(xpkt); } @@ -1347,7 +1527,7 @@ namespace eosio { // called from connection strand void connection::do_queue_write() { - if( !buffer_queue.ready_to_send() || closing ) + if( !buffer_queue.ready_to_send() || closed() ) return; connection_ptr c(shared_from_this()); @@ -1360,11 +1540,18 @@ namespace eosio { try { c->buffer_queue.clear_out_queue(); // May have closed connection and cleared buffer_queue - if( !c->socket_is_open() || socket != c->socket ) { - peer_ilog( c, "async write socket ${r} before callback", ("r", c->socket_is_open() ? "changed" : "closed") ); + if (!c->socket->is_open() && c->socket_is_open()) { // if socket_open then close not called + peer_ilog(c, "async write socket closed before callback"); c->close(); return; } + if (socket != c->socket ) { // different socket, c must have created a new socket, make sure previous is closed + peer_ilog( c, "async write socket changed before callback"); + boost::system::error_code ec; + socket->shutdown( tcp::socket::shutdown_both, ec ); + socket->close( ec ); + return; + } if( ec ) { if( ec.value() != boost::asio::error::eof ) { @@ -1400,6 +1587,7 @@ namespace eosio { peer_dlog( this, "cancel sync reason = ${m}, write queue size ${o} bytes", ("m", reason_str( reason ))("o", buffer_queue.write_queue_size()) ); cancel_wait(); + sync_last_requested_block = 0; flush_queues(); switch (reason) { case validation : @@ -1559,7 +1747,7 @@ namespace eosio { block_buffer_factory buff_factory; auto sb = buff_factory.get_send_buffer( b ); - latest_blk_time = get_time(); + latest_blk_time = std::chrono::system_clock::now(); enqueue_buffer( sb, no_reason, to_sync_queue); } @@ -1584,14 +1772,14 @@ namespace eosio { // thread safe void connection::cancel_wait() { - std::lock_guard g( response_expected_timer_mtx ); + fc::lock_guard g( response_expected_timer_mtx ); response_expected_timer.cancel(); } // thread safe void connection::sync_wait() { connection_ptr c(shared_from_this()); - std::lock_guard g( response_expected_timer_mtx ); + fc::lock_guard g( response_expected_timer_mtx ); response_expected_timer.expires_from_now( my_impl->resp_expected_period ); response_expected_timer.async_wait( boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { @@ -1602,7 +1790,7 @@ namespace eosio { // thread safe void connection::fetch_wait() { connection_ptr c( shared_from_this() ); - std::lock_guard g( response_expected_timer_mtx ); + fc::lock_guard g( response_expected_timer_mtx ); response_expected_timer.expires_from_now( my_impl->resp_expected_period ); response_expected_timer.async_wait( boost::asio::bind_executor( c->strand, [c]( boost::system::error_code ec ) { @@ -1631,6 +1819,7 @@ namespace eosio { // called from connection strand void connection::request_sync_blocks(uint32_t start, uint32_t end) { + sync_last_requested_block = end; sync_request_message srm = 
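`sync_wait` and `fetch_wait` above follow a common asio idiom: arm a timer under its mutex, then run the completion handler on the connection's strand while a `shared_ptr` keeps the object alive until the handler fires. A generic sketch of the idiom with stand-in types:

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <memory>

struct session : std::enable_shared_from_this<session> {
   boost::asio::io_context::strand strand;
   boost::asio::steady_timer       timer;

   explicit session(boost::asio::io_context& ctx) : strand(ctx), timer(ctx) {}

   // session must already be owned by a shared_ptr when arm() is called
   void arm(std::chrono::milliseconds timeout) {
      auto self = shared_from_this();        // pin lifetime until the handler runs
      timer.expires_from_now(timeout);
      timer.async_wait(boost::asio::bind_executor(strand,
         [self](const boost::system::error_code& ec) {
            if (ec == boost::asio::error::operation_aborted)
               return;                       // a cancel_wait()-style cancel fired
            // timeout path: close the peer or reassign the fetch here
         }));
   }
};
```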
{start,end}; enqueue( net_message(srm) ); sync_wait(); @@ -1663,12 +1852,13 @@ namespace eosio { } //----------------------------------------------------------- - sync_manager::sync_manager( uint32_t req_span ) + sync_manager::sync_manager( uint32_t span, uint32_t sync_peer_limit ) :sync_known_lib_num( 0 ) ,sync_last_requested_num( 0 ) ,sync_next_expected_num( 1 ) - ,sync_req_span( req_span ) ,sync_source() + ,sync_req_span( span ) + ,sync_peer_limit( sync_peer_limit ) ,sync_state(in_sync) { } @@ -1693,7 +1883,7 @@ namespace eosio { // called from c's connection strand void sync_manager::sync_reset_lib_num(const connection_ptr& c, bool closing) { - std::unique_lock g( sync_mtx ); + fc::unique_lock g( sync_mtx ); if( sync_state == in_sync ) { sync_source.reset(); } @@ -1706,102 +1896,107 @@ namespace eosio { // Closing connection, therefore its view of LIB can no longer be considered as we will no longer be connected. // Determine current LIB of remaining peers as our sync_known_lib_num. uint32_t highest_lib_num = 0; - for_each_block_connection( [&highest_lib_num]( const auto& cc ) { - std::lock_guard g_conn( cc->conn_mtx ); + my_impl->connections.for_each_block_connection( [&highest_lib_num]( const auto& cc ) { + fc::lock_guard g_conn( cc->conn_mtx ); if( cc->current() && cc->last_handshake_recv.last_irreversible_block_num > highest_lib_num ) { highest_lib_num = cc->last_handshake_recv.last_irreversible_block_num; } - return true; } ); sync_known_lib_num = highest_lib_num; - // if closing the connection we are currently syncing from, then reset our last requested and next expected. + // if closing the connection we are currently syncing from then request from a diff peer if( c == sync_source ) { sync_last_requested_num = 0; // if starting to sync need to always start from lib as we might be on our own fork uint32_t lib_num = my_impl->get_chain_lib_num(); - sync_next_expected_num = lib_num + 1; - request_next_chunk( std::move(g) ); + sync_next_expected_num = std::max( lib_num + 1, sync_next_expected_num ); + request_next_chunk(); + } + } + } + + connection_ptr sync_manager::find_next_sync_node() REQUIRES(sync_mtx) { + fc_dlog(logger, "Number connections ${s}, sync_next_expected_num: ${e}, sync_known_lib_num: ${l}", + ("s", my_impl->connections.number_connections())("e", sync_next_expected_num)("l", sync_known_lib_num)); + deque conns; + my_impl->connections.for_each_block_connection([sync_next_expected_num = sync_next_expected_num, + sync_known_lib_num = sync_known_lib_num, + &conns](const auto& c) { + if (c->should_sync_from(sync_next_expected_num, sync_known_lib_num)) { + conns.push_back(c); + } + }); + if (conns.size() > sync_peer_limit) { + std::partial_sort(conns.begin(), conns.begin() + sync_peer_limit, conns.end(), [](const connection_ptr& lhs, const connection_ptr& rhs) { + return lhs->get_peer_ping_time_ns() < rhs->get_peer_ping_time_ns(); + }); + conns.resize(sync_peer_limit); + } + + fc_dlog(logger, "Valid sync peers ${s}, sync_ordinal ${so}", ("s", conns.size())("so", sync_ordinal.load())); + + if (conns.empty()) { + return {}; + } + if (conns.size() == 1) { // only one available + ++sync_ordinal; + conns.front()->sync_ordinal = sync_ordinal.load(); + return conns.front(); + } + + // keep track of which node was synced from last; round-robin among the current (sync_peer_limit) lowest latency peers + ++sync_ordinal; + // example: sync_ordinal is 6 after inc above then there may be connections with 3,4,5 (5 being the last synced from) + // Choose from the lowest sync_ordinal of 
the sync_peer_limit of lowest latency, note 0 means not synced from yet + size_t the_one = 0; + uint32_t lowest_ordinal = std::numeric_limits::max(); + for (size_t i = 0; i < conns.size() && lowest_ordinal != 0; ++i) { + uint32_t sync_ord = conns[i]->sync_ordinal; + fc_dlog(logger, "compare sync ords, conn: ${lcid}, ord: ${l} < ${r}, ping: ${p}us", + ("lcid", conns[i]->connection_id)("l", sync_ord)("r", lowest_ordinal)("p", conns[i]->get_peer_ping_time_ns()/1000)); + if (sync_ord < lowest_ordinal) { + the_one = i; + lowest_ordinal = sync_ord; } } + fc_dlog(logger, "sync from ${c}", ("c", conns[the_one]->connection_id)); + conns[the_one]->sync_ordinal = sync_ordinal.load(); + return conns[the_one]; } // call with g_sync locked, called from conn's connection strand - void sync_manager::request_next_chunk( std::unique_lock g_sync, const connection_ptr& conn ) { + void sync_manager::request_next_chunk( const connection_ptr& conn ) REQUIRES(sync_mtx) { auto chain_info = my_impl->get_chain_info(); - fc_dlog( logger, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}", - ("r", sync_last_requested_num)("e", sync_next_expected_num)("k", sync_known_lib_num)("s", sync_req_span) ); + fc_dlog( logger, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}, head: ${h}", + ("r", sync_last_requested_num)("e", sync_next_expected_num)("k", sync_known_lib_num)("s", sync_req_span)("h", chain_info.head_num) ); - if( chain_info.head_num < sync_last_requested_num && sync_source && sync_source->current() ) { - fc_ilog( logger, "ignoring request, head is ${h} last req = ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}, source connection ${c}", + if( chain_info.head_num + sync_req_span < sync_last_requested_num && sync_source && sync_source->current() ) { + fc_dlog( logger, "ignoring request, head is ${h} last req = ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}, source connection ${c}", ("h", chain_info.head_num)("r", sync_last_requested_num)("e", sync_next_expected_num) ("k", sync_known_lib_num)("s", sync_req_span)("c", sync_source->connection_id) ); return; } + if (conn) { + // p2p_high_latency_test.py test depends on this exact log statement. + peer_ilog(conn, "Catching up with chain, our last req is ${cc}, theirs is ${t}, next expected ${n}, head ${h}", + ("cc", sync_last_requested_num)("t", sync_known_lib_num)("n", sync_next_expected_num)("h", chain_info.head_num)); + } + /* ---------- * next chunk provider selection criteria * a provider is supplied and able to be used, use it. * otherwise select the next available from the list, round-robin style. */ - connection_ptr new_sync_source = sync_source; - if (conn && conn->current() ) { - new_sync_source = conn; - } else { - std::shared_lock g( my_impl->connections_mtx ); - if( my_impl->connections.empty() ) { - new_sync_source.reset(); - } else if( my_impl->connections.size() == 1 ) { - if (!new_sync_source) { - new_sync_source = *my_impl->connections.begin(); - } - } else { - // init to a linear array search - auto cptr = my_impl->connections.begin(); - auto cend = my_impl->connections.end(); - // do we remember the previous source? - if (new_sync_source) { - //try to find it in the list - cptr = my_impl->connections.find( new_sync_source ); - cend = cptr; - if( cptr == my_impl->connections.end() ) { - //not there - must have been closed! 
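`find_next_sync_node` combines two selections: `std::partial_sort` keeps the `sync_peer_limit` lowest-latency candidates, then the smallest `sync_ordinal` (0 = never synced from) picks the least-recently-used of those. The same logic in self-contained form, with a stand-in peer type:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <deque>

struct peer { uint64_t ping_ns; uint32_t sync_ordinal; };

peer* pick_sync_peer(std::deque<peer*>& conns, std::size_t sync_peer_limit,
                     uint32_t& ordinal_counter) {
   if (conns.size() > sync_peer_limit) {     // keep the k lowest-latency peers
      std::partial_sort(conns.begin(), conns.begin() + sync_peer_limit, conns.end(),
                        [](const peer* l, const peer* r) { return l->ping_ns < r->ping_ns; });
      conns.resize(sync_peer_limit);
   }
   if (conns.empty()) return nullptr;

   ++ordinal_counter;                        // ordinal stamped on this sync attempt
   std::size_t the_one = 0;
   uint32_t lowest = UINT32_MAX;
   for (std::size_t i = 0; i < conns.size() && lowest != 0; ++i) {
      if (conns[i]->sync_ordinal < lowest) { // least-recently-used candidate wins
         the_one = i;
         lowest  = conns[i]->sync_ordinal;
      }
   }
   conns[the_one]->sync_ordinal = ordinal_counter;
   return conns[the_one];
}
```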
cend is now connections.end, so just flatten the ring. - new_sync_source.reset(); - cptr = my_impl->connections.begin(); - } else { - //was found - advance the start to the next. cend is the old source. - if( ++cptr == my_impl->connections.end() && cend != my_impl->connections.end() ) { - cptr = my_impl->connections.begin(); - } - } - } - - //scan the list of peers looking for another able to provide sync blocks. - if( cptr != my_impl->connections.end() ) { - auto cstart_it = cptr; - do { - //select the first one which is current and has valid lib and break out. - if( !(*cptr)->is_transactions_only_connection() && (*cptr)->current() ) { - std::lock_guard g_conn( (*cptr)->conn_mtx ); - if( (*cptr)->last_handshake_recv.last_irreversible_block_num >= sync_known_lib_num ) { - new_sync_source = *cptr; - break; - } - } - if( ++cptr == my_impl->connections.end() ) - cptr = my_impl->connections.begin(); - } while( cptr != cstart_it ); - } - // no need to check the result, either source advanced or the whole list was checked and the old source is reused. - } - } + connection_ptr new_sync_source = (conn && conn->current()) ? conn : + find_next_sync_node(); // verify there is an available source - if( !new_sync_source || !new_sync_source->current() || new_sync_source->is_transactions_only_connection() ) { + if( !new_sync_source ) { fc_elog( logger, "Unable to continue syncing at this time"); - if( !new_sync_source ) sync_source.reset(); + sync_source.reset(); sync_known_lib_num = chain_info.lib_num; sync_last_requested_num = 0; set_state( in_sync ); // probably not, but we can't do anything else @@ -1817,42 +2012,41 @@ namespace eosio { if( end > 0 && end >= start ) { sync_last_requested_num = end; sync_source = new_sync_source; - g_sync.unlock(); request_sent = true; - new_sync_source->strand.post( [new_sync_source, start, end]() { - peer_ilog( new_sync_source, "requesting range ${s} to ${e}", ("s", start)("e", end) ); + new_sync_source->strand.post( [new_sync_source, start, end, head_num=chain_info.head_num]() { + peer_ilog( new_sync_source, "requesting range ${s} to ${e}, head ${h}", ("s", start)("e", end)("h", head_num) ); new_sync_source->request_sync_blocks( start, end ); } ); } } if( !request_sent ) { - g_sync.unlock(); + sync_source.reset(); + fc_wlog(logger, "Unable to request range, sending handshakes to everyone"); send_handshakes(); } } // static, thread safe void sync_manager::send_handshakes() { - for_each_connection( []( auto& ci ) { + my_impl->connections.for_each_connection( []( auto& ci ) { if( ci->current() ) { ci->send_handshake(); } - return true; } ); } - bool sync_manager::is_sync_required( uint32_t fork_head_block_num ) { + bool sync_manager::is_sync_required( uint32_t fork_head_block_num ) REQUIRES(sync_mtx) { fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", ("req", sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) ("head", fork_head_block_num ) ); return( sync_last_requested_num < sync_known_lib_num || - fork_head_block_num < sync_last_requested_num ); + sync_next_expected_num < sync_last_requested_num ); } // called from c's connection strand void sync_manager::start_sync(const connection_ptr& c, uint32_t target) { - std::unique_lock g_sync( sync_mtx ); + fc::unique_lock g_sync( sync_mtx ); if( target > sync_known_lib_num) { sync_known_lib_num = target; } @@ -1861,7 +2055,6 @@ namespace eosio { if( !is_sync_required( chain_info.head_num ) || target <= chain_info.lib_num ) { peer_dlog( c, 
"We are already caught up, my irr = ${b}, head = ${h}, target = ${t}", ("b", chain_info.lib_num)( "h", chain_info.head_num )( "t", target ) ); - c->send_handshake(); return; } @@ -1870,49 +2063,38 @@ namespace eosio { } sync_next_expected_num = std::max( chain_info.lib_num + 1, sync_next_expected_num ); - // p2p_high_latency_test.py test depends on this exact log statement. - peer_ilog( c, "Catching up with chain, our last req is ${cc}, theirs is ${t}, next expected ${n}", - ("cc", sync_last_requested_num)("t", target)("n", sync_next_expected_num) ); - - request_next_chunk( std::move( g_sync ), c ); + request_next_chunk( c ); } // called from connection strand void sync_manager::sync_reassign_fetch(const connection_ptr& c, go_away_reason reason) { - std::unique_lock g( sync_mtx ); + fc::unique_lock g( sync_mtx ); peer_ilog( c, "reassign_fetch, our last req is ${cc}, next expected is ${ne}", ("cc", sync_last_requested_num)("ne", sync_next_expected_num) ); if( c == sync_source ) { c->cancel_sync(reason); sync_last_requested_num = 0; - request_next_chunk( std::move(g) ); + request_next_chunk(); } } + inline block_id_type make_block_id( uint32_t block_num ) { + chain::block_id_type block_id; + block_id._hash[0] = fc::endian_reverse_u32(block_num); + return block_id; + } + // called from c's connection strand - void sync_manager::recv_handshake( const connection_ptr& c, const handshake_message& msg ) { + void sync_manager::recv_handshake( const connection_ptr& c, const handshake_message& msg, uint32_t nblk_combined_latency ) { - if( c->is_transactions_only_connection() ) return; + if (!c->is_blocks_connection()) + return; auto chain_info = my_impl->get_chain_info(); sync_reset_lib_num(c, false); - auto current_time_ns = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); - int64_t network_latency_ns = current_time_ns - msg.time; // net latency in nanoseconds - if( network_latency_ns < 0 ) { - peer_wlog(c, "Peer sent a handshake with a timestamp skewed by at least ${t}ms", ("t", network_latency_ns/1000000)); - network_latency_ns = 0; - } - // number of blocks syncing node is behind from a peer node, round up - uint32_t nblk_behind_by_net_latency = std::lround( static_cast(network_latency_ns) / static_cast(block_interval_ns) ); - // 2x for time it takes for message to reach back to peer node - uint32_t nblk_combined_latency = 2 * nblk_behind_by_net_latency; - // message in the log below is used in p2p_high_latency_test.py test - peer_dlog(c, "Network latency is ${lat}ms, ${num} blocks discrepancy by network latency, ${tot_num} blocks discrepancy expected once message received", - ("lat", network_latency_ns/1000000)("num", nblk_behind_by_net_latency)("tot_num", nblk_combined_latency)); - //-------------------------------- // sync need checks; (lib == last irreversible block) // @@ -1930,19 +2112,14 @@ namespace eosio { if (chain_info.head_id == msg.head_id) { peer_ilog( c, "handshake lib ${lib}, head ${head}, head id ${id}.. sync 0, lib ${l}", ("lib", msg.last_irreversible_block_num)("head", msg.head_num)("id", msg.head_id.str().substr(8,16))("l", chain_info.lib_num) ); - c->syncing = false; - notice_message note; - note.known_blocks.mode = none; - note.known_trx.mode = catch_up; - note.known_trx.pending = 0; - c->enqueue( note ); + c->peer_syncing_from_us = false; return; } if (chain_info.head_num < msg.last_irreversible_block_num) { peer_ilog( c, "handshake lib ${lib}, head ${head}, head id ${id}.. 
sync 1, head ${h}, lib ${l}", ("lib", msg.last_irreversible_block_num)("head", msg.head_num)("id", msg.head_id.str().substr(8,16)) ("h", chain_info.head_num)("l", chain_info.lib_num) ); - c->syncing = false; + c->peer_syncing_from_us = false; if (c->sent_handshake_count > 0) { c->send_handshake(); } @@ -1953,14 +2130,20 @@ namespace eosio { ("lib", msg.last_irreversible_block_num)("head", msg.head_num)("id", msg.head_id.str().substr(8,16)) ("h", chain_info.head_num)("l", chain_info.lib_num) ); if (msg.generation > 1 || c->protocol_version > proto_base) { + controller& cc = my_impl->chain_plug->chain(); notice_message note; note.known_trx.pending = chain_info.lib_num; note.known_trx.mode = last_irr_catch_up; note.known_blocks.mode = last_irr_catch_up; note.known_blocks.pending = chain_info.head_num; + note.known_blocks.ids.push_back(chain_info.head_id); + if (c->protocol_version >= proto_block_range) { + // begin, more efficient to encode a block num instead of retrieving actual block id + note.known_blocks.ids.push_back(make_block_id(cc.earliest_available_block_num())); + } c->enqueue( note ); } - c->syncing = true; + c->peer_syncing_from_us = true; return; } @@ -1968,7 +2151,7 @@ namespace eosio { peer_ilog( c, "handshake lib ${lib}, head ${head}, head id ${id}.. sync 3, head ${h}, lib ${l}", ("lib", msg.last_irreversible_block_num)("head", msg.head_num)("id", msg.head_id.str().substr(8,16)) ("h", chain_info.head_num)("l", chain_info.lib_num) ); - c->syncing = false; + c->peer_syncing_from_us = false; verify_catchup(c, msg.head_num, msg.head_id); return; } else if(chain_info.head_num >= msg.head_num + nblk_combined_latency) { @@ -1976,14 +2159,19 @@ namespace eosio { ("lib", msg.last_irreversible_block_num)("head", msg.head_num)("id", msg.head_id.str().substr(8,16)) ("h", chain_info.head_num)("l", chain_info.lib_num) ); if (msg.generation > 1 || c->protocol_version > proto_base) { + controller& cc = my_impl->chain_plug->chain(); notice_message note; note.known_trx.mode = none; note.known_blocks.mode = catch_up; note.known_blocks.pending = chain_info.head_num; note.known_blocks.ids.push_back(chain_info.head_id); + if (c->protocol_version >= proto_block_range) { + // begin, more efficient to encode a block num instead of retrieving actual block id + note.known_blocks.ids.push_back(make_block_id(cc.earliest_available_block_num())); + } c->enqueue( note ); } - c->syncing = false; + c->peer_syncing_from_us = false; bool on_fork = true; try { controller& cc = my_impl->chain_plug->chain(); @@ -2005,17 +2193,20 @@ namespace eosio { bool sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { request_message req; req.req_blocks.mode = catch_up; - for_each_block_connection( [num, &id, &req]( const auto& cc ) { - std::lock_guard g_conn( cc->conn_mtx ); + auto is_fork_head_greater = [num, &id, &req]( const auto& cc ) { + fc::lock_guard g_conn( cc->conn_mtx ); if( cc->fork_head_num > num || cc->fork_head == id ) { req.req_blocks.mode = none; - return false; + return true; } - return true; - } ); + return false; + }; + if (my_impl->connections.any_of_block_connections(is_fork_head_greater)) { + req.req_blocks.mode = none; + } if( req.req_blocks.mode == catch_up ) { { - std::lock_guard g( sync_mtx ); + fc::lock_guard g( sync_mtx ); peer_ilog( c, "catch_up while in ${s}, fork head num = ${fhn} " "target LIB = ${lib} next_expected = ${ne}, id ${id}...", ("s", stage_str( sync_state ))("fhn", num)("lib", sync_known_lib_num) @@ -2026,7 +2217,7 @@ namespace eosio { 
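A `proto_block_range` peer receiving the notice built above can recover the sender's available range from the two-entry `known_blocks.ids`. An illustrative decoder of just that arithmetic (the plugin's actual handling lives in its `notice_message` processing; the id type here is the stand-in from the previous sketch):

```cpp
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

struct block_id { uint32_t words[8] {}; };            // stand-in for block_id_type

constexpr uint32_t num_from_id(const block_id& id) {  // big-endian prefix -> number
   uint32_t x = id.words[0];
   return ((x >> 24) & 0xFFu) | ((x >> 8) & 0xFF00u) |
          ((x << 8) & 0xFF0000u) | ((x << 24) & 0xFF000000u);
}

std::optional<std::pair<uint32_t, uint32_t>>          // {earliest, head}
decode_block_range(const std::vector<block_id>& ids) {
   if (ids.size() < 2)
      return std::nullopt;                            // peer predates proto_block_range
   return std::make_pair(num_from_id(ids[1]),         // ids[1]: earliest available, encoded
                         num_from_id(ids[0]));        // ids[0]: actual head block id
}
```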
return false; set_state( head_catchup ); { - std::lock_guard g_conn( c->conn_mtx ); + fc::lock_guard g_conn( c->conn_mtx ); c->fork_head = id; c->fork_head_num = num; } @@ -2035,7 +2226,7 @@ namespace eosio { } else { peer_ilog( c, "none notice while in ${s}, fork head num = ${fhn}, id ${id}...", ("s", stage_str( sync_state ))("fhn", num)("id", id.str().substr(8,16)) ); - std::lock_guard g_conn( c->conn_mtx ); + fc::lock_guard g_conn( c->conn_mtx ); c->fork_head = block_id_type(); c->fork_head_num = 0; } @@ -2060,13 +2251,14 @@ namespace eosio { verify_catchup( c, msg.known_blocks.pending, id ); } else { // we already have the block, so update peer with our view of the world + peer_dlog(c, "Already have block, sending handshake"); c->send_handshake(); } } } else if (msg.known_blocks.mode == last_irr_catch_up) { { c->peer_lib_num = msg.known_trx.pending; - std::lock_guard g_conn( c->conn_mtx ); + fc::lock_guard g_conn( c->conn_mtx ); c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending; } sync_reset_lib_num(c, false); @@ -2077,8 +2269,11 @@ namespace eosio { // called from connection strand void sync_manager::rejected_block( const connection_ptr& c, uint32_t blk_num ) { c->block_status_monitor_.rejected(); - std::unique_lock g( sync_mtx ); + fc::unique_lock g( sync_mtx ); sync_last_requested_num = 0; + if (blk_num < sync_next_expected_num) { + sync_next_expected_num = my_impl->get_chain_lib_num(); + } if( c->block_status_monitor_.max_events_violated()) { peer_wlog( c, "block ${bn} not accepted, closing connection", ("bn", blk_num) ); sync_source.reset(); @@ -2086,82 +2281,85 @@ namespace eosio { c->close(); } else { g.unlock(); + peer_dlog(c, "rejected block ${bn}, sending handshake", ("bn", blk_num)); c->send_handshake(); } } - // called from connection strand - void sync_manager::sync_update_expected( const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied ) { - std::unique_lock g_sync( sync_mtx ); - if( blk_num <= sync_last_requested_num ) { - peer_dlog( c, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}", - ("r", sync_last_requested_num)("e", sync_next_expected_num)("k", sync_known_lib_num)("s", sync_req_span) ); - if (blk_num != sync_next_expected_num && !blk_applied) { - auto sync_next_expected = sync_next_expected_num; - g_sync.unlock(); - peer_dlog( c, "expected block ${ne} but got ${bn}", ("ne", sync_next_expected)("bn", blk_num) ); - return; - } - sync_next_expected_num = blk_num + 1; - } - } - // called from c's connection strand void sync_manager::sync_recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num, bool blk_applied) { - peer_dlog( c, "got block ${bn}", ("bn", blk_num) ); + peer_dlog( c, "${d} block ${bn}", ("d", blk_applied ? 
"applied" : "got")("bn", blk_num) ); if( app().is_quiting() ) { c->close( false, true ); return; } + c->latest_blk_time = std::chrono::system_clock::now(); c->block_status_monitor_.accepted(); - sync_update_expected( c, blk_id, blk_num, blk_applied ); - std::unique_lock g_sync( sync_mtx ); stages state = sync_state; peer_dlog( c, "state ${s}", ("s", stage_str( state )) ); if( state == head_catchup ) { + fc::unique_lock g_sync( sync_mtx ); peer_dlog( c, "sync_manager in head_catchup state" ); sync_source.reset(); g_sync.unlock(); block_id_type null_id; bool set_state_to_head_catchup = false; - for_each_block_connection( [&null_id, blk_num, &blk_id, &c, &set_state_to_head_catchup]( const auto& cp ) { - std::unique_lock g_cp_conn( cp->conn_mtx ); + my_impl->connections.for_each_block_connection( [&null_id, blk_num, &blk_id, &c, &set_state_to_head_catchup]( const auto& cp ) { + fc::unique_lock g_cp_conn( cp->conn_mtx ); uint32_t fork_head_num = cp->fork_head_num; block_id_type fork_head_id = cp->fork_head; g_cp_conn.unlock(); if( fork_head_id == null_id ) { // continue } else if( fork_head_num < blk_num || fork_head_id == blk_id ) { - std::lock_guard g_conn( c->conn_mtx ); + fc::lock_guard g_conn( c->conn_mtx ); c->fork_head = null_id; c->fork_head_num = 0; } else { set_state_to_head_catchup = true; } - return true; } ); if( set_state_to_head_catchup ) { if( set_state( head_catchup ) ) { - send_handshakes(); + peer_dlog( c, "Switching to head_catchup, sending handshakes" ); + send_handshakes(); } } else { set_state( in_sync ); + peer_dlog( c, "Switching to in_sync, sending handshakes" ); send_handshakes(); } } else if( state == lib_catchup ) { - if( blk_num >= sync_known_lib_num ) { + fc::unique_lock g_sync( sync_mtx ); + if( blk_applied && blk_num >= sync_known_lib_num ) { peer_dlog( c, "All caught up with last known last irreversible block resending handshake" ); set_state( in_sync ); g_sync.unlock(); send_handshakes(); - } else if( blk_num >= sync_last_requested_num ) { - request_next_chunk( std::move( g_sync) ); } else { - g_sync.unlock(); - peer_dlog( c, "calling sync_wait" ); - c->sync_wait(); + if (!blk_applied) { + if (blk_num >= c->sync_last_requested_block) { + peer_dlog(c, "calling cancel_wait, block ${b}", ("b", blk_num)); + c->cancel_wait(); + } else { + peer_dlog(c, "calling sync_wait, block ${b}", ("b", blk_num)); + c->sync_wait(); + } + + sync_next_expected_num = blk_num + 1; + } + + uint32_t head = my_impl->get_chain_head_num(); + if (head + sync_req_span > sync_last_requested_num) { // don't allow to get too far head (one sync_req_span) + if (sync_next_expected_num > sync_last_requested_num && sync_last_requested_num < sync_known_lib_num) { + fc_dlog(logger, "Requesting range ahead, head: ${h} blk_num: ${bn} sync_next_expected_num ${nen} sync_last_requested_num: ${lrn}", + ("h", head)("bn", blk_num)("nen", sync_next_expected_num)("lrn", sync_last_requested_num)); + request_next_chunk(); + } + } + } } } @@ -2171,7 +2369,7 @@ namespace eosio { bool dispatch_manager::add_peer_block( const block_id_type& blkid, uint32_t connection_id) { uint32_t block_num = block_header::num_from_id(blkid); - std::lock_guard g( blk_state_mtx ); + fc::lock_guard g( blk_state_mtx ); auto bptr = blk_state.get().find( std::make_tuple(block_num, std::ref(blkid), connection_id) ); bool added = (bptr == blk_state.end()); if( added ) { @@ -2182,14 +2380,14 @@ namespace eosio { bool dispatch_manager::peer_has_block( const block_id_type& blkid, uint32_t connection_id ) const { uint32_t block_num = 
block_header::num_from_id(blkid); - std::lock_guard g(blk_state_mtx); + fc::lock_guard g(blk_state_mtx); const auto blk_itr = blk_state.get().find( std::make_tuple(block_num, std::ref(blkid), connection_id) ); return blk_itr != blk_state.end(); } bool dispatch_manager::have_block( const block_id_type& blkid ) const { uint32_t block_num = block_header::num_from_id(blkid); - std::lock_guard g(blk_state_mtx); + fc::lock_guard g(blk_state_mtx); const auto& index = blk_state.get(); auto blk_itr = index.find( std::make_tuple(block_num, std::ref(blkid)) ); return blk_itr != index.end(); @@ -2198,7 +2396,7 @@ namespace eosio { void dispatch_manager::rm_block( const block_id_type& blkid ) { uint32_t block_num = block_header::num_from_id(blkid); fc_dlog( logger, "rm_block ${n}, id: ${id}", ("n", block_num)("id", blkid)); - std::lock_guard g(blk_state_mtx); + fc::lock_guard g(blk_state_mtx); auto& index = blk_state.get(); auto p = index.equal_range( std::make_tuple(block_num, std::ref(blkid)) ); index.erase(p.first, p.second); @@ -2206,7 +2404,7 @@ namespace eosio { bool dispatch_manager::add_peer_txn( const transaction_id_type& id, const time_point_sec& trx_expires, uint32_t connection_id, const time_point_sec& now ) { - std::lock_guard g( local_txns_mtx ); + fc::lock_guard g( local_txns_mtx ); auto tptr = local_txns.get().find( std::make_tuple( std::ref( id ), connection_id ) ); bool added = (tptr == local_txns.end()); if( added ) { @@ -2222,7 +2420,7 @@ namespace eosio { } bool dispatch_manager::have_txn( const transaction_id_type& tid ) const { - std::lock_guard g( local_txns_mtx ); + fc::lock_guard g( local_txns_mtx ); const auto tptr = local_txns.get().find( tid ); return tptr != local_txns.end(); } @@ -2231,7 +2429,7 @@ namespace eosio { size_t start_size = 0, end_size = 0; fc::time_point_sec now{time_point::now()}; - std::unique_lock g( local_txns_mtx ); + fc::unique_lock g( local_txns_mtx ); start_size = local_txns.size(); auto& old = local_txns.get(); auto ex_lo = old.lower_bound( fc::time_point_sec( 0 ) ); @@ -2245,7 +2443,7 @@ namespace eosio { void dispatch_manager::expire_blocks( uint32_t lib_num ) { unlinkable_block_cache.expire_blocks( lib_num ); - std::lock_guard g( blk_state_mtx ); + fc::lock_guard g( blk_state_mtx ); auto& stale_blk = blk_state.get(); stale_blk.erase( stale_blk.lower_bound( 1 ), stale_blk.upper_bound( lib_num ) ); } @@ -2254,37 +2452,36 @@ namespace eosio { void dispatch_manager::bcast_block(const signed_block_ptr& b, const block_id_type& id) { fc_dlog( logger, "bcast block ${b}", ("b", b->block_num()) ); - if( my_impl->sync_master->syncing_with_peer() ) return; + if(my_impl->sync_master->syncing_from_peer() ) return; block_buffer_factory buff_factory; const auto bnum = b->block_num(); - for_each_block_connection( [this, &id, &bnum, &b, &buff_factory]( auto& cp ) { - fc_dlog( logger, "socket_is_open ${s}, connecting ${c}, syncing ${ss}, connection ${cid}", - ("s", cp->socket_is_open())("c", cp->connecting.load())("ss", cp->syncing.load())("cid", cp->connection_id) ); - if( !cp->current() ) return true; + my_impl->connections.for_each_block_connection( [this, &id, &bnum, &b, &buff_factory]( auto& cp ) { + fc_dlog( logger, "socket_is_open ${s}, state ${c}, syncing ${ss}, connection ${cid}", + ("s", cp->socket_is_open())("c", connection::state_str(cp->state()))("ss", cp->peer_syncing_from_us.load())("cid", cp->connection_id) ); + if( !cp->current() ) return; if( !add_peer_block( id, cp->connection_id ) ) { fc_dlog( logger, "not bcast block ${b} to connection ${cid}", 
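`bcast_block` above serializes a block once via `buff_factory` and then hands the same immutable buffer to every peer's strand, so one encode serves N sends. The shape of that pattern, with stand-in types (the real strand defers to the net thread pool rather than running inline):

```cpp
#include <deque>
#include <functional>
#include <memory>
#include <vector>

using send_buffer_type = std::shared_ptr<std::vector<char>>;

struct executor {                               // stand-in for the connection's asio strand
   void post(std::function<void()> f) { f(); }  // real code defers to the io thread pool
};

struct conn {
   executor strand;
   bool current() const { return true; }
   void enqueue_buffer(const send_buffer_type&) {}
};

// serialize once, share the immutable buffer with every live peer
void broadcast(const std::deque<std::shared_ptr<conn>>& peers, send_buffer_type sb) {
   for (const auto& cp : peers) {
      if (!cp->current()) continue;             // skip connecting/closing peers
      cp->strand.post([cp, sb]() {              // shared_ptr copies keep the buffer alive
         cp->enqueue_buffer(sb);
      });
   }
}
```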
("b", bnum)("cid", cp->connection_id) ); - return true; + return; } send_buffer_type sb = buff_factory.get_send_buffer( b ); cp->strand.post( [cp, bnum, sb{std::move(sb)}]() { - cp->latest_blk_time = cp->get_time(); + cp->latest_blk_time = std::chrono::system_clock::now(); bool has_block = cp->peer_lib_num >= bnum; if( !has_block ) { peer_dlog( cp, "bcast block ${b}", ("b", bnum) ); cp->enqueue_buffer( sb, no_reason ); } }); - return true; } ); } // called from c's connection strand void dispatch_manager::recv_block(const connection_ptr& c, const block_id_type& id, uint32_t bnum) { - std::unique_lock g( c->conn_mtx ); + fc::unique_lock g( c->conn_mtx ); if (c && c->last_req && c->last_req->req_blocks.mode != none && @@ -2307,12 +2504,12 @@ namespace eosio { void dispatch_manager::bcast_transaction(const packed_transaction_ptr& trx) { trx_buffer_factory buff_factory; const fc::time_point_sec now{fc::time_point::now()}; - for_each_connection( [this, &trx, &now, &buff_factory]( auto& cp ) { - if( cp->is_blocks_only_connection() || !cp->current() ) { - return true; + my_impl->connections.for_each_connection( [this, &trx, &now, &buff_factory]( auto& cp ) { + if( !cp->is_transactions_connection() || !cp->current() ) { + return; } if( !add_peer_txn(trx->id(), trx->expiration(), cp->connection_id, now) ) { - return true; + return; } send_buffer_type sb = buff_factory.get_send_buffer( trx ); @@ -2320,7 +2517,6 @@ namespace eosio { cp->strand.post( [cp, sb{std::move(sb)}]() { cp->enqueue_buffer( sb, no_reason ); } ); - return true; } ); } @@ -2356,7 +2552,7 @@ namespace eosio { request_message last_req; block_id_type bid; { - std::lock_guard g_c_conn( c->conn_mtx ); + fc::lock_guard g_c_conn( c->conn_mtx ); if( !c->last_req ) { return; } @@ -2370,14 +2566,14 @@ namespace eosio { } last_req = *c->last_req; } - for_each_block_connection( [this, &c, &last_req, &bid]( auto& conn ) { + auto request_from_peer = [this, &c, &last_req, &bid]( auto& conn ) { if( conn == c ) - return true; + return false; { - std::lock_guard guard( conn->conn_mtx ); + fc::lock_guard guard( conn->conn_mtx ); if( conn->last_req ) { - return true; + return false; } } @@ -2386,19 +2582,21 @@ namespace eosio { conn->strand.post( [conn, last_req{std::move(last_req)}]() { conn->enqueue( last_req ); conn->fetch_wait(); - std::lock_guard g_conn_conn( conn->conn_mtx ); + fc::lock_guard g_conn_conn( conn->conn_mtx ); conn->last_req = last_req; } ); - return false; + return true; } - return true; - } ); + return false; + }; - // at this point no other peer has it, re-request or do nothing? - peer_wlog( c, "no peer has last_req" ); - if( c->connected() ) { - c->enqueue( last_req ); - c->fetch_wait(); + if (!my_impl->connections.any_of_block_connections(request_from_peer)) { + // at this point no other peer has it, re-request or do nothing? 
+ peer_wlog(c, "no peer has last_req"); + if (c->connected()) { + c->enqueue(last_req); + c->fetch_wait(); + } } } @@ -2426,25 +2624,22 @@ namespace eosio { connection_ptr c = shared_from_this(); if( consecutive_immediate_connection_close > def_max_consecutive_immediate_connection_close || no_retry == benign_other ) { - auto connector_period_us = std::chrono::duration_cast( my_impl->connector_period ); - std::lock_guard g( c->conn_mtx ); - if( last_close == fc::time_point() || last_close > fc::time_point::now() - fc::microseconds( connector_period_us.count() ) ) { + fc::microseconds connector_period = my_impl->connections.get_connector_period(); + fc::lock_guard g( conn_mtx ); + if( last_close == fc::time_point() || last_close > fc::time_point::now() - connector_period ) { return true; // true so doesn't remove from valid connections } } strand.post([c]() { - string::size_type colon = c->peer_address().find(':'); - string::size_type colon2 = c->peer_address().find(':', colon + 1); - string host = c->peer_address().substr( 0, colon ); - string port = c->peer_address().substr( colon + 1, colon2 == string::npos ? string::npos : colon2 - (colon + 1)); + auto [host, port, type] = split_host_port_type(c->peer_address()); c->set_connection_type( c->peer_address() ); auto resolver = std::make_shared( my_impl->thread_pool.get_executor() ); connection_wptr weak_conn = c; // Note: need to add support for IPv6 too - resolver->async_resolve( tcp::v4(), host, port, boost::asio::bind_executor( c->strand, - [resolver, weak_conn, host, port]( const boost::system::error_code& err, const tcp::resolver::results_type& endpoints ) { + resolver->async_resolve(host, port, boost::asio::bind_executor( c->strand, + [resolver, weak_conn, host = host, port = port]( const boost::system::error_code& err, const tcp::resolver::results_type& endpoints ) { auto c = weak_conn.lock(); if( !c ) return; if( !err ) { @@ -2452,7 +2647,7 @@ namespace eosio { } else { fc_elog( logger, "Unable to resolve ${host}:${port} ${error}", ("host", host)("port", port)( "error", err.message() ) ); - c->connecting = false; + c->set_state(connection_state::closed); ++c->consecutive_immediate_connection_close; } } ) ); @@ -2462,7 +2657,7 @@ namespace eosio { // called from connection strand void connection::connect( const std::shared_ptr& resolver, const tcp::resolver::results_type& endpoints ) { - connecting = true; + set_state(connection_state::connecting); pending_message_buffer.reset(); buffer_queue.clear_out_queue(); boost::asio::async_connect( *socket, endpoints, @@ -2471,6 +2666,7 @@ namespace eosio { if( !err && socket->is_open() && socket == c->socket ) { if( c->start_session() ) { c->send_handshake(); + c->send_time(); } } else { fc_elog( logger, "connection failed to ${a}, ${error}", ("a", c->peer_address())( "error", err.message())); @@ -2482,84 +2678,53 @@ namespace eosio { } ) ); } - void net_plugin_impl::start_listen_loop() { - connection_ptr new_connection = std::make_shared(); - new_connection->connecting = true; - new_connection->strand.post( [this, new_connection = std::move( new_connection )](){ - acceptor->async_accept( *new_connection->socket, - boost::asio::bind_executor( new_connection->strand, [new_connection, socket=new_connection->socket, this]( boost::system::error_code ec ) { - if( !ec ) { - uint32_t visitors = 0; - uint32_t from_addr = 0; - boost::system::error_code rec; - const auto& paddr_add = socket->remote_endpoint( rec ).address(); - string paddr_str; - if( rec ) { - fc_elog( logger, "Error getting remote 
endpoint: ${m}", ("m", rec.message())); - } else { - paddr_str = paddr_add.to_string(); - for_each_connection( [&visitors, &from_addr, &paddr_str]( auto& conn ) { - if( conn->socket_is_open()) { - if( conn->peer_address().empty()) { - ++visitors; - std::lock_guard g_conn( conn->conn_mtx ); - if( paddr_str == conn->remote_endpoint_ip ) { - ++from_addr; - } - } - } - return true; - } ); - if( from_addr < max_nodes_per_host && (auto_bp_peering_enabled() || max_client_count == 0 || visitors < max_client_count)) { - fc_ilog( logger, "Accepted new connection: " + paddr_str ); - new_connection->set_heartbeat_timeout( heartbeat_timeout ); - if( new_connection->start_session()) { - std::lock_guard g_unique( connections_mtx ); - connections.insert( new_connection ); - } - } else { - if( from_addr >= max_nodes_per_host ) { - fc_dlog( logger, "Number of connections (${n}) from ${ra} exceeds limit ${l}", - ("n", from_addr + 1)( "ra", paddr_str )( "l", max_nodes_per_host )); - } else { - fc_dlog( logger, "max_client_count ${m} exceeded", ("m", max_client_count)); - } - // new_connection never added to connections and start_session not called, lifetime will end - boost::system::error_code ec; - socket->shutdown( tcp::socket::shutdown_both, ec ); - socket->close( ec ); + void net_plugin_impl::create_session(tcp::socket&& socket, const string listen_address) { + uint32_t visitors = 0; + uint32_t from_addr = 0; + boost::system::error_code rec; + const auto& paddr_add = socket.remote_endpoint(rec).address(); + string paddr_str; + if (rec) { + fc_elog(logger, "Error getting remote endpoint: ${m}", ("m", rec.message())); + } else { + paddr_str = paddr_add.to_string(); + connections.for_each_connection([&visitors, &from_addr, &paddr_str](auto& conn) { + if (conn->socket_is_open()) { + if (conn->peer_address().empty()) { + ++visitors; + fc::lock_guard g_conn(conn->conn_mtx); + if (paddr_str == conn->remote_endpoint_ip) { + ++from_addr; } } - } else { - fc_elog( logger, "Error accepting connection: ${m}", ("m", ec.message())); - // For the listed error codes below, recall start_listen_loop() - switch (ec.value()) { - case EMFILE: // same as boost::system::errc::too_many_files_open - { - // no file descriptors available to accept the connection. Wait on async_timer - // and retry listening using shorter 100ms timer than SHiP or http_plugin - // as net_pluging is more critical - accept_error_timer.expires_from_now(boost::posix_time::milliseconds(100)); - accept_error_timer.async_wait([this]( const boost::system::error_code &ec) { - if (!ec) - start_listen_loop(); - }); - return; // wait for timer!! 
- } - case ECONNABORTED: - case ENFILE: - case ENOBUFS: - case ENOMEM: - case EPROTO: - break; - default: - return; + } + }); + if (from_addr < max_nodes_per_host && + (auto_bp_peering_enabled() || connections.get_max_client_count() == 0 || + visitors < connections.get_max_client_count())) { + fc_ilog(logger, "Accepted new connection: " + paddr_str); + + connection_ptr new_connection = std::make_shared(std::move(socket), listen_address); + new_connection->strand.post([new_connection, this]() { + if (new_connection->start_session()) { + connections.add(new_connection); } + }); + + } else { + if (from_addr >= max_nodes_per_host) { + fc_dlog(logger, "Number of connections (${n}) from ${ra} exceeds limit ${l}", + ("n", from_addr + 1)("ra", paddr_str)("l", max_nodes_per_host)); + } else { + fc_dlog(logger, "max_client_count ${m} exceeded", ("m", connections.get_max_client_count())); } - start_listen_loop(); - })); - } ); + // new_connection never added to connections and start_session not called, lifetime will end + boost::system::error_code ec; + socket.shutdown(tcp::socket::shutdown_both, ec); + socket.close(ec); + } + } } // only called from strand thread @@ -2599,7 +2764,18 @@ namespace eosio { boost::asio::bind_executor( strand, [conn = shared_from_this(), socket=socket]( boost::system::error_code ec, std::size_t bytes_transferred ) { // may have closed connection and cleared pending_message_buffer - if( !conn->socket_is_open() || socket != conn->socket ) return; + if (!conn->socket->is_open() && conn->socket_is_open()) { // if socket_open then close not called + peer_dlog( conn, "async_read socket not open, closing"); + conn->close(); + return; + } + if (socket != conn->socket ) { // different socket, conn must have created a new socket, make sure previous is closed + peer_dlog( conn, "async_read diff socket closing"); + boost::system::error_code ec; + socket->shutdown( tcp::socket::shutdown_both, ec ); + socket->close( ec ); + return; + } bool close_connection = false; try { @@ -2692,14 +2868,14 @@ namespace eosio { // called from connection strand bool connection::process_next_message( uint32_t message_length ) { try { - latest_msg_time = get_time(); + latest_msg_time = std::chrono::system_clock::now(); // if next message is a block we already have, exit early auto peek_ds = pending_message_buffer.create_peek_datastream(); unsigned_int which{}; fc::raw::unpack( peek_ds, which ); if( which == signed_block_which ) { - latest_blk_time = get_time(); + latest_blk_time = std::chrono::system_clock::now(); return process_next_block_message( message_length ); } else if( which == packed_transaction_which ) { @@ -2741,19 +2917,19 @@ namespace eosio { pending_message_buffer.advance_read_ptr( message_length ); return true; } - peer_dlog( this, "received block ${num}, id ${id}..., latency: ${latency}", + peer_dlog( this, "received block ${num}, id ${id}..., latency: ${latency}ms, head ${h}", ("num", bh.block_num())("id", blk_id.str().substr(8,16)) - ("latency", (fc::time_point::now() - bh.timestamp).count()/1000) ); - if( !my_impl->sync_master->syncing_with_peer() ) { // guard against peer thinking it needs to send us old blocks + ("latency", (fc::time_point::now() - bh.timestamp).count()/1000) + ("h", my_impl->get_chain_head_num())); + if( !my_impl->sync_master->syncing_from_peer() ) { // guard against peer thinking it needs to send us old blocks uint32_t lib_num = my_impl->get_chain_lib_num(); if( blk_num < lib_num ) { - std::unique_lock g( conn_mtx ); + fc::unique_lock g( conn_mtx ); const auto 
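Stepping back to create_session() above: the admission decision (per-host cap first, then the global client cap, which auto BP peering may bypass) reduces to a pure function over the current connection set. A minimal sketch with illustrative names:

    #include <cstdint>
    #include <string>
    #include <vector>

    struct conn_info { bool open; bool inbound; std::string remote_ip; };

    bool admit(const std::vector<conn_info>& conns, const std::string& peer_ip,
               uint32_t max_nodes_per_host, uint32_t max_client_count) {
       uint32_t visitors = 0, from_addr = 0;
       for (const auto& c : conns) {
          if (c.open && c.inbound) {                // "visitors" are inbound connections
             ++visitors;
             if (c.remote_ip == peer_ip)
                ++from_addr;
          }
       }
       // max_client_count == 0 still means "no limit"; the BP-peering bypass is not modeled here
       return from_addr < max_nodes_per_host &&
              (max_client_count == 0 || visitors < max_client_count);
    }

The only behavioral difference from the removed start_listen_loop() is where the work happens: accepting sockets (and retrying on EMFILE-style errors) is now fc::create_listener's job, and create_session() only decides whether to keep an already-accepted socket.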
last_sent_lib = last_handshake_sent.last_irreversible_block_num; g.unlock(); peer_ilog( this, "received block ${n} less than ${which}lib ${lib}", ("n", blk_num)("which", blk_num < last_sent_lib ? "sent " : "") ("lib", blk_num < last_sent_lib ? last_sent_lib : lib_num) ); - my_impl->sync_master->reset_last_requested_num(); enqueue( (sync_request_message) {0, 0} ); send_handshake(); cancel_wait(); @@ -2761,6 +2937,8 @@ namespace eosio { pending_message_buffer.advance_read_ptr( message_length ); return true; } + } else { + my_impl->sync_master->sync_recv_block(shared_from_this(), blk_id, blk_num, false); } auto ds = pending_message_buffer.create_datastream(); @@ -2793,10 +2971,15 @@ namespace eosio { // called from connection strand bool connection::process_next_trx_message(uint32_t message_length) { if( !my_impl->p2p_accept_transactions ) { - peer_dlog( this, "p2p-accept-transaction=false - dropping txn" ); + peer_dlog( this, "p2p-accept-transactions=false - dropping trx" ); pending_message_buffer.advance_read_ptr( message_length ); return true; } + if (my_impl->sync_master->syncing_from_peer()) { + peer_wlog(this, "syncing, dropping trx"); + pending_message_buffer.advance_read_ptr( message_length ); + return true; + } const unsigned long trx_in_progress_sz = this->trx_in_progress_size.load(); @@ -2832,39 +3014,21 @@ namespace eosio { void net_plugin_impl::plugin_shutdown() { in_shutdown = true; + + connections.stop_conn_timer(); { - std::lock_guard g( connector_check_timer_mtx ); - if( connector_check_timer ) - connector_check_timer->cancel(); - } - { - std::lock_guard g( expire_timer_mtx ); + fc::lock_guard g( expire_timer_mtx ); if( expire_timer ) expire_timer->cancel(); } { - std::lock_guard g( keepalive_timer_mtx ); + fc::lock_guard g( keepalive_timer_mtx ); if( keepalive_timer ) keepalive_timer->cancel(); } - { - fc_ilog( logger, "close ${s} connections", ("s", connections.size()) ); - std::lock_guard g( connections_mtx ); - for( auto& con : connections ) { - fc_dlog( logger, "close: ${cid}", ("cid", con->connection_id) ); - con->close( false, true ); - } - connections.clear(); - } - + connections.close_all(); thread_pool.stop(); - - if( acceptor ) { - boost::system::error_code ec; - acceptor->cancel( ec ); - acceptor->close( ec ); - } } // call only from main application thread @@ -2872,7 +3036,7 @@ namespace eosio { controller& cc = chain_plug->chain(); uint32_t lib_num = 0, head_num = 0; { - std::lock_guard g( chain_info_mtx ); + fc::lock_guard g( chain_info_mtx ); chain_info.lib_num = lib_num = cc.last_irreversible_block_num(); chain_info.lib_id = cc.last_irreversible_block_id(); chain_info.head_num = head_num = cc.fork_db_head_block_num(); @@ -2882,17 +3046,17 @@ } net_plugin_impl::chain_info_t net_plugin_impl::get_chain_info() const { - std::lock_guard g( chain_info_mtx ); + fc::lock_guard g( chain_info_mtx ); return chain_info; } uint32_t net_plugin_impl::get_chain_lib_num() const { - std::lock_guard g( chain_info_mtx ); + fc::lock_guard g( chain_info_mtx ); return chain_info.lib_num; } uint32_t net_plugin_impl::get_chain_head_num() const { - std::lock_guard g( chain_info_mtx ); + fc::lock_guard g( chain_info_mtx ); return chain_info.head_num; } @@ -2939,8 +3103,8 @@ namespace eosio { peer_dlog(this, "received chain_size_message"); } + // called from connection strand void connection::handle_message( const handshake_message& msg ) { - peer_dlog( this, "received handshake_message" ); if( !is_valid( msg ) ) { peer_elog( this, "bad handshake message"); no_retry = go_away_reason::fatal_other; @@ -2951,11 +3115,13 @@ namespace
eosio { ("g", msg.generation)("lib", msg.last_irreversible_block_num)("head", msg.head_num) ); peer_lib_num = msg.last_irreversible_block_num; - std::unique_lock g_conn( conn_mtx ); + peer_head_block_num = msg.head_num; + fc::unique_lock g_conn( conn_mtx ); last_handshake_recv = msg; + auto c_time = last_handshake_sent.time; g_conn.unlock(); - connecting = false; + set_state(connection_state::connected); if (msg.generation == 1) { if( msg.node_id == my_impl->node_id) { peer_elog( this, "Self connection detected node_id ${id}. Closing connection", ("id", msg.node_id) ); @@ -2968,30 +3134,26 @@ namespace eosio { my_impl->mark_bp_connection(this); if (my_impl->exceeding_connection_limit(this)) { - // When auto bp peering is enabled, the start_listen_loop check doesn't have enough information to determine - // if a client is a BP peer. In start_listen_loop, it only has the peer address which a node is connecting + // When auto bp peering is enabled, create_session() check doesn't have enough information to determine + // if a client is a BP peer. In create_session(), it only has the peer address which a node is connecting // from, but it would be different from the address it is listening. The only way to make sure is when the // first handshake message is received with the p2p_address information in the message. Thus the connection // limit checking has to be here when auto bp peering is enabled. - fc_dlog(logger, "max_client_count ${m} exceeded", ("m", my_impl->max_client_count)); - my_impl->disconnect(peer_address()); + fc_dlog(logger, "max_client_count ${m} exceeded", ("m", my_impl->connections.get_max_client_count())); + my_impl->connections.disconnect(peer_address()); return; } - if( peer_address().empty() ) { - set_connection_type( msg.p2p_address ); - } + if( incoming() ) { + auto [host, port, type] = split_host_port_type(msg.p2p_address); + if (host.size()) + set_connection_type( msg.p2p_address ); - g_conn.lock(); - if( peer_address().empty() || last_handshake_recv.node_id == fc::sha256()) { - auto c_time = last_handshake_sent.time; - g_conn.unlock(); peer_dlog( this, "checking for duplicate" ); - std::shared_lock g_cnts( my_impl->connections_mtx ); - for(const auto& check : my_impl->connections) { + auto is_duplicate = [&](const auto& check) { if(check.get() == this) - continue; - std::unique_lock g_check_conn( check->conn_mtx ); + return false; + fc::unique_lock g_check_conn( check->conn_mtx ); fc_dlog( logger, "dup check: connected ${c}, ${l} =? 
${r}", ("c", check->connected())("l", check->last_handshake_recv.node_id)("r", msg.node_id) ); if(check->connected() && check->last_handshake_recv.node_id == msg.node_id) { @@ -3004,36 +3166,36 @@ namespace eosio { auto check_time = check->last_handshake_sent.time + check->last_handshake_recv.time; g_check_conn.unlock(); if (msg.time + c_time <= check_time) - continue; + return false; } else if (net_version < proto_dup_node_id_goaway || msg.network_version < proto_dup_node_id_goaway) { - if (my_impl->p2p_address < msg.p2p_address) { - fc_dlog( logger, "my_impl->p2p_address '${lhs}' < msg.p2p_address '${rhs}'", - ("lhs", my_impl->p2p_address)( "rhs", msg.p2p_address ) ); + if (listen_address < msg.p2p_address) { + fc_dlog( logger, "listen_address '${lhs}' < msg.p2p_address '${rhs}'", + ("lhs", listen_address)( "rhs", msg.p2p_address ) ); // only the connection from lower p2p_address to higher p2p_address will be considered as a duplicate, // so there is no chance for both connections to be closed - continue; + return false; } } else if (my_impl->node_id < msg.node_id) { fc_dlog( logger, "not duplicate, my_impl->node_id '${lhs}' < msg.node_id '${rhs}'", ("lhs", my_impl->node_id)("rhs", msg.node_id) ); // only the connection from lower node_id to higher node_id will be considered as a duplicate, // so there is no chance for both connections to be closed - continue; + return false; } - - g_cnts.unlock(); - peer_dlog( this, "sending go_away duplicate, msg.p2p_address: ${add}", ("add", msg.p2p_address) ); - go_away_message gam(duplicate); - gam.node_id = conn_node_id; - enqueue(gam); - no_retry = duplicate; - return; + return true; } + return false; + }; + if (my_impl->connections.any_of_connections(std::move(is_duplicate))) { + peer_dlog( this, "sending go_away duplicate, msg.p2p_address: ${add}", ("add", msg.p2p_address) ); + go_away_message gam(duplicate); + gam.node_id = conn_node_id; + enqueue(gam); + no_retry = duplicate; + return; } } else { - peer_dlog( this, "skipping duplicate check, addr == ${pa}, id = ${ni}", - ("pa", peer_address())( "ni", last_handshake_recv.node_id ) ); - g_conn.unlock(); + peer_dlog(this, "skipping duplicate check, addr == ${pa}, id = ${ni}", ("pa", peer_address())("ni", msg.node_id)); } if( msg.chain_id != my_impl->chain_id ) { @@ -3098,7 +3260,23 @@ namespace eosio { } } - my_impl->sync_master->recv_handshake( shared_from_this(), msg ); + uint32_t nblk_combined_latency = calc_block_latency(); + my_impl->sync_master->recv_handshake( shared_from_this(), msg, nblk_combined_latency ); + } + + // called from connection strand + uint32_t connection::calc_block_latency() { + uint32_t nblk_combined_latency = 0; + if (peer_ping_time_ns != std::numeric_limits::max()) { + // number of blocks syncing node is behind from a peer node, round up + uint32_t nblk_behind_by_net_latency = std::lround(static_cast(peer_ping_time_ns.load()) / static_cast(block_interval_ns)); + // peer_ping_time_ns includes time there and back, include round trip time as the block latency is used to compensate for communication back + nblk_combined_latency = nblk_behind_by_net_latency; + // message in the log below is used in p2p_high_latency_test.py test + peer_dlog(this, "Network latency is ${lat}ms, ${num} blocks discrepancy by network latency, ${tot_num} blocks discrepancy expected once message received", + ("lat", peer_ping_time_ns / 2 / 1000000)("num", nblk_behind_by_net_latency)("tot_num", nblk_combined_latency)); + } + return nblk_combined_latency; } void connection::handle_message( const 
go_away_message& msg ) { @@ -3123,53 +3301,83 @@ namespace eosio { close( retry ); // reconnect if wrong_version } - void connection::handle_message( const time_message& msg ) { - peer_ilog( this, "received time_message" ); + // some clients before leap 5.0 provided microsecond epoch instead of nanosecond epoch + std::chrono::nanoseconds normalize_epoch_to_ns(int64_t x) { + // 1686211688888 milliseconds - 2023-06-08T08:08:08.888, 5yrs from EOS genesis 2018-06-08T08:08:08.888 + // 1686211688888000 microseconds + // 1686211688888000000 nanoseconds + if (x >= 1686211688888000000) // nanoseconds + return std::chrono::nanoseconds{x}; + if (x >= 1686211688888000) // microseconds + return std::chrono::nanoseconds{x*1000}; + if (x >= 1686211688888) // milliseconds + return std::chrono::nanoseconds{x*1000*1000}; + if (x >= 1686211688) // seconds + return std::chrono::nanoseconds{x*1000*1000*1000}; + return std::chrono::nanoseconds{0}; // unknown or is zero + } - /* We've already lost however many microseconds it took to dispatch - * the message, but it can't be helped. - */ - msg.dst = get_time(); + void connection::handle_message( const time_message& msg ) { + peer_dlog( this, "received time_message: ${t}, org: ${o}", ("t", msg)("o", org.count()) ); // If the transmit timestamp is zero, the peer is horribly broken. if(msg.xmt == 0) - return; /* invalid timestamp */ + return; // invalid timestamp + + // We've already lost however many microseconds it took to dispatch the message, but it can't be helped. + msg.dst = get_time().count(); + + if (msg.org != 0) { + if (msg.org == org.count()) { + auto ping = msg.dst - msg.org; + peer_dlog(this, "send_time ping ${p}us", ("p", ping / 1000)); + peer_ping_time_ns = ping; + } else { + // a diff time loop is in progress, ignore this message as it is not the one we want + return; + } + } - if(msg.xmt == xmt) - return; /* duplicate packet */ + auto msg_xmt = normalize_epoch_to_ns(msg.xmt); + if (msg_xmt == xmt) + return; // duplicate packet - xmt = msg.xmt; - rec = msg.rec; - dst = msg.dst; + xmt = msg_xmt; if( msg.org == 0 ) { send_time( msg ); return; // We don't have enough data to perform the calculation yet. } - double offset = (double(rec - org) + double(msg.xmt - dst)) / 2; - double NsecPerUsec{1000}; + if (org != std::chrono::nanoseconds{0}) { + auto rec = normalize_epoch_to_ns(msg.rec); + int64_t offset = (double((rec - org).count()) + double(msg_xmt.count() - msg.dst)) / 2.0; - if( logger.is_enabled( fc::log_level::all ) ) - logger.log( FC_LOG_MESSAGE( all, "Clock offset is ${o}ns (${us}us)", - ("o", offset)( "us", offset / NsecPerUsec ) ) ); - org = 0; - rec = 0; + if (std::abs(offset) > block_interval_ns) { + peer_wlog(this, "Clock offset is ${of}us, calculation: (rec ${r} - org ${o} + xmt ${x} - dst ${d})/2", + ("of", offset / 1000)("r", rec.count())("o", org.count())("x", msg_xmt.count())("d", msg.dst)); + } + } + org = std::chrono::nanoseconds{0}; - std::unique_lock g_conn( conn_mtx ); + fc::unique_lock g_conn( conn_mtx ); if( last_handshake_recv.generation == 0 ) { g_conn.unlock(); send_handshake(); } + + // make sure we also get the latency we need + if (peer_ping_time_ns == std::numeric_limits::max()) { + send_time(); + } } void connection::handle_message( const notice_message& msg ) { // peer tells us about one or more blocks or txns. 
When done syncing, forward on // notices of previously unknown blocks or txns, // - peer_dlog( this, "received notice_message" ); - connecting = false; - if( msg.known_blocks.ids.size() > 1 ) { + set_state(connection_state::connected); + if( msg.known_blocks.ids.size() > 2 ) { peer_elog( this, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection", ("s", msg.known_blocks.ids.size()) ); close( false ); @@ -3185,16 +3393,14 @@ namespace eosio { } switch (msg.known_trx.mode) { case none: - break; case last_irr_catch_up: { - std::unique_lock g_conn( conn_mtx ); - last_handshake_recv.head_num = msg.known_blocks.pending; + fc::unique_lock g_conn( conn_mtx ); + last_handshake_recv.head_num = std::max(msg.known_blocks.pending, last_handshake_recv.head_num); g_conn.unlock(); break; } - case catch_up : { + case catch_up: break; - } case normal: { my_impl->dispatcher->recv_notice( shared_from_this(), msg, false ); } @@ -3210,6 +3416,12 @@ namespace eosio { } case last_irr_catch_up: case catch_up: { + if (msg.known_blocks.ids.size() > 1) { + peer_start_block_num = block_header::num_from_id(msg.known_blocks.ids[1]); + } + if (msg.known_blocks.ids.size() > 0) { + peer_head_block_num = block_header::num_from_id(msg.known_blocks.ids[0]); + } my_impl->sync_master->sync_recv_notice( shared_from_this(), msg ); break; } @@ -3292,6 +3504,7 @@ namespace eosio { // called from connection strand void connection::handle_message( packed_transaction_ptr trx ) { const auto& tid = trx->id(); + peer_dlog( this, "received packed_transaction ${id}", ("id", tid) ); size_t trx_size = calc_trx_size( trx ); @@ -3318,8 +3531,6 @@ namespace eosio { // called from connection strand void connection::handle_message( const block_id_type& id, signed_block_ptr ptr ) { - peer_dlog( this, "received signed_block ${num}, id ${id}", ("num", block_header::num_from_id(id))("id", id) ); - // post to dispatcher strand so that we don't have multiple threads validating the block header my_impl->dispatcher->strand.post([id, c{shared_from_this()}, ptr{std::move(ptr)}, cid=connection_id]() mutable { controller& cc = my_impl->chain_plug->chain(); @@ -3388,7 +3599,7 @@ namespace eosio { c->strand.post( [sync_master = my_impl->sync_master.get(), dispatcher = my_impl->dispatcher.get(), c, blk_id, blk_num]() { dispatcher->add_peer_block( blk_id, c->connection_id ); - sync_master->sync_recv_block( c, blk_id, blk_num, false ); + sync_master->sync_recv_block( c, blk_id, blk_num, true ); }); return; } @@ -3463,32 +3674,10 @@ namespace eosio { } } - // called from any thread - void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { - if( in_shutdown ) return; - std::lock_guard g( connector_check_timer_mtx ); - ++connector_checks_in_flight; - connector_check_timer->expires_from_now( du ); - connector_check_timer->async_wait( [my = shared_from_this(), from_connection{std::move(from_connection)}](boost::system::error_code ec) mutable { - std::unique_lock g( my->connector_check_timer_mtx ); - int num_in_flight = --my->connector_checks_in_flight; - g.unlock(); - if( !ec ) { - my->connection_monitor(from_connection, num_in_flight == 0 ); - } else { - if( num_in_flight == 0 ) { - if( my->in_shutdown ) return; - fc_elog( logger, "Error from connection check monitor: ${m}", ("m", ec.message())); - my->start_conn_timer( my->connector_period, std::weak_ptr() ); - } - } - }); - } - // thread safe void net_plugin_impl::start_expire_timer() { if( in_shutdown ) return; - std::lock_guard g( 
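On the catch_up branch above: by convention in this patch, known_blocks.ids[0] carries the peer's head block id and, when a second entry is present, ids[1] the first block of the range it can serve, so peer_head_block_num and peer_start_block_num fall straight out of the ids. That works because an Antelope block id embeds its block number, which is what block_header::num_from_id() recovers, roughly:

    #include <cstdint>

    // sketch of block_header::num_from_id(): the first four bytes of a
    // block id are the block number, big-endian
    uint32_t num_from_id_sketch(const unsigned char id[32]) {
       return (uint32_t(id[0]) << 24) | (uint32_t(id[1]) << 16) |
              (uint32_t(id[2]) <<  8) |  uint32_t(id[3]);
    }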
expire_timer_mtx ); + fc::lock_guard g( expire_timer_mtx ); expire_timer->expires_from_now( txn_exp_period); expire_timer->async_wait( [my = shared_from_this()]( boost::system::error_code ec ) { if( !ec ) { @@ -3504,7 +3693,7 @@ namespace eosio { // thread safe void net_plugin_impl::ticker() { if( in_shutdown ) return; - std::lock_guard g( keepalive_timer_mtx ); + fc::lock_guard g( keepalive_timer_mtx ); keepalive_timer->expires_from_now(keepalive_interval); keepalive_timer->async_wait( [my = shared_from_this()]( boost::system::error_code ec ) { my->ticker(); @@ -3513,28 +3702,23 @@ namespace eosio { fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - tstamp current_time = connection::get_time(); - my->for_each_connection( [current_time]( auto& c ) { + auto current_time = std::chrono::system_clock::now(); + my->connections.for_each_connection( [current_time]( auto& c ) { if( c->socket_is_open() ) { c->strand.post([c, current_time]() { c->check_heartbeat(current_time); } ); } - return true; } ); } ); } void net_plugin_impl::start_monitors() { { - std::lock_guard g( connector_check_timer_mtx ); - connector_check_timer = std::make_unique( my_impl->thread_pool.get_executor() ); - } - { - std::lock_guard g( expire_timer_mtx ); + fc::lock_guard g( expire_timer_mtx ); expire_timer = std::make_unique( my_impl->thread_pool.get_executor() ); } - start_conn_timer(connector_period, std::weak_ptr()); + connections.start_conn_timer(); start_expire_timer(); } @@ -3548,70 +3732,13 @@ namespace eosio { start_expire_timer(); } - // called from any thread - void net_plugin_impl::connection_monitor(const std::weak_ptr& from_connection, bool reschedule ) { - auto max_time = fc::time_point::now(); - max_time += fc::milliseconds(max_cleanup_time_ms); - auto from = from_connection.lock(); - std::unique_lock g( connections_mtx ); - auto it = (from ? 
connections.find(from) : connections.begin()); - if (it == connections.end()) it = connections.begin(); - size_t num_rm = 0, num_clients = 0, num_peers = 0, num_bp_peers = 0; - while (it != connections.end()) { - if (fc::time_point::now() >= max_time) { - connection_wptr wit = *it; - g.unlock(); - fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) ); - fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}", - ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size()) ); - if( reschedule ) { - start_conn_timer( std::chrono::milliseconds( 1 ), wit ); // avoid exhausting - } - return; - } - if ((*it)->is_bp_connection) - ++num_bp_peers; - else if ((*it)->incoming()) - ++num_clients; - else - ++num_peers; - - if( !(*it)->socket_is_open() && !(*it)->connecting) { - if( !(*it)->incoming() ) { - if( !(*it)->resolve_and_connect() ) { - it = connections.erase(it); - --num_peers; ++num_rm; - continue; - } - } else { - --num_clients; ++num_rm; - it = connections.erase(it); - continue; - } - } - ++it; - } - g.unlock(); - - if (update_p2p_connection_metrics) { - update_p2p_connection_metrics({.num_peers = num_peers, .num_clients = num_clients}); - } - - if( num_clients > 0 || num_peers > 0 ) - fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}, block producer peers: ${num_bp_peers}", - ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size())("num_bp_peers", num_bp_peers) ); - fc_dlog( logger, "connection monitor, removed ${n} connections", ("n", num_rm) ); - if( reschedule ) { - start_conn_timer( connector_period, std::weak_ptr()); - } - } - // called from application thread void net_plugin_impl::on_accepted_block_header(const block_state_ptr& bs) { update_chain_info(); - dispatcher->strand.post( [bs]() { - fc_dlog( logger, "signaled accepted_block_header, blk num = ${num}, id = ${id}", ("num", bs->block_num)("id", bs->id) ); - my_impl->dispatcher->bcast_block( bs->block, bs->id ); + + dispatcher->strand.post([bs]() { + fc_dlog(logger, "signaled accepted_block_header, blk num = ${num}, id = ${id}", ("num", bs->block_num)("id", bs->id)); + my_impl->dispatcher->bcast_block(bs->block, bs->id); }); } @@ -3705,8 +3832,13 @@ namespace eosio { // call from connection strand bool connection::populate_handshake( handshake_message& hello ) const { namespace sc = std::chrono; - hello.network_version = net_version_base + net_version; auto chain_info = my_impl->get_chain_info(); + auto now = sc::duration_cast(sc::system_clock::now().time_since_epoch()).count(); + constexpr int64_t hs_delay = sc::duration_cast(sc::milliseconds(50)).count(); + // nothing has changed since last handshake and one was sent recently, so skip sending + if (chain_info.head_id == hello.head_id && (hello.time + hs_delay > now)) + return false; + hello.network_version = net_version_base + net_version; hello.last_irreversible_block_num = chain_info.lib_num; hello.last_irreversible_block_id = chain_info.lib_id; hello.head_num = chain_info.head_num; @@ -3720,7 +3852,7 @@ namespace eosio { // If we couldn't sign, don't send a token.
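The early-out added to populate_handshake() above is a small send-rate limiter: if the chain head id is unchanged since the previous handshake and that one went out within the last 50ms, the function returns false and the caller skips the send. Reduced to a pure predicate, with struct fields standing in for the handshake_message members:

    #include <cstdint>

    struct hs_state { uint64_t head_id; int64_t time_ns; };  // last handshake sent

    bool should_send(const hs_state& last_sent, uint64_t current_head_id, int64_t now_ns) {
       constexpr int64_t hs_delay_ns = 50'000'000;           // 50ms, as in the patch
       if (current_head_id == last_sent.head_id && last_sent.time_ns + hs_delay_ns > now_ns)
          return false;                                      // nothing changed, one sent recently
       return true;
    }

This matters because handshakes are triggered from several recovery paths (duplicate detection, stale-block notices, time sync), and without the throttle a flapping peer could provoke bursts of redundant handshakes.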
if(hello.sig == chain::signature_type()) hello.token = sha256(); - hello.p2p_address = my_impl->p2p_address; + hello.p2p_address = listen_address; if( is_transactions_only_connection() ) hello.p2p_address += ":trx"; // if we are not accepting transactions tell peer we are blocks only if( is_blocks_only_connection() || !my_impl->p2p_accept_transactions ) hello.p2p_address += ":blk"; @@ -3752,8 +3884,8 @@ namespace eosio { void net_plugin::set_program_options( options_description& /*cli*/, options_description& cfg ) { cfg.add_options() - ( "p2p-listen-endpoint", bpo::value()->default_value( "0.0.0.0:9876" ), "The actual host:port used to listen for incoming p2p connections.") - ( "p2p-server-address", bpo::value(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint.") + ( "p2p-listen-endpoint", bpo::value< vector >()->default_value( vector(1, string("0.0.0.0:9876")) ), "The actual host:port used to listen for incoming p2p connections. May be used multiple times.") + ( "p2p-server-address", bpo::value< vector >(), "An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. May be used as many times as p2p-listen-endpoint. If provided, the first address will be used in handshakes with other nodes. Otherwise the default is used.") ( "p2p-peer-address", bpo::value< vector >()->composing(), "The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network.\n" " Syntax: host:port[:|]\n" @@ -3774,15 +3906,18 @@ namespace eosio { ( "agent-name", bpo::value()->default_value("EOS Test Agent"), "The name supplied to identify this node amongst the peers.") ( "allowed-connection", bpo::value>()->multitoken()->default_value({"any"}, "any"), "Can be 'any' or 'producers' or 'specified' or 'none'. If 'specified', peer-key must be specified at least once. If only 'producers', peer-key is not required. 'producers' and 'specified' may be combined.") ( "peer-key", bpo::value>()->composing()->multitoken(), "Optional public key of peer allowed to connect. 
May be used multiple times.") - ( "peer-private-key", boost::program_options::value>()->composing()->multitoken(), + ( "peer-private-key", bpo::value>()->composing()->multitoken(), "Tuple of [PublicKey, WIF private key] (may specify multiple times)") - ( "max-clients", bpo::value()->default_value(def_max_clients), "Maximum number of clients from which connections are accepted, use 0 for no limit") + ( "max-clients", bpo::value()->default_value(def_max_clients), "Maximum number of clients from which connections are accepted, use 0 for no limit") ( "connection-cleanup-period", bpo::value()->default_value(def_conn_retry_wait), "number of seconds to wait before cleaning up dead connections") - ( "max-cleanup-time-msec", bpo::value()->default_value(10), "max connection cleanup time per cleanup call in milliseconds") + ( "max-cleanup-time-msec", bpo::value()->default_value(10), "max connection cleanup time per cleanup call in milliseconds") ( "p2p-dedup-cache-expire-time-sec", bpo::value()->default_value(10), "Maximum time to track transaction for duplicate optimization") ( "net-threads", bpo::value()->default_value(my->thread_pool_size), "Number of worker threads in net_plugin thread pool" ) - ( "sync-fetch-span", bpo::value()->default_value(def_sync_fetch_span), "number of blocks to retrieve in a chunk from any individual peer during synchronization") + ( "sync-fetch-span", bpo::value()->default_value(def_sync_fetch_span), + "Number of blocks to retrieve in a chunk from any individual peer during synchronization") + ( "sync-peer-limit", bpo::value()->default_value(3), + "Number of peers to sync from") ( "use-socket-read-watermark", bpo::value()->default_value(false), "Enable experimental socket read watermark optimization") ( "peer-log-format", bpo::value()->default_value( "[\"${_name}\" - ${_cid} ${_ip}:${_port}] " ), "The string used to format peers when logging messages about them. 
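With p2p-listen-endpoint and p2p-server-address now multi-valued, a node can listen on several sockets at once; an illustrative config.ini excerpt (hostnames invented):

    # two listen sockets, one explicitly advertised address
    p2p-listen-endpoint = 0.0.0.0:9876
    p2p-listen-endpoint = 127.0.0.1:9877
    p2p-server-address  = node.example.com:9876

Entries pair up positionally: duplicate listen endpoints are dropped with a warning, p2p-server-address may not be given more times than p2p-listen-endpoint, and any missing server-address slots are left empty and resolved at startup by the address transform later in this patch.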
Variables are escaped with ${}.\n" @@ -3805,62 +3940,81 @@ namespace eosio { return fc::json::from_string(s).as(); } - void net_plugin::plugin_initialize( const variables_map& options ) { + void net_plugin_impl::plugin_initialize( const variables_map& options ) { try { - handle_sighup(); fc_ilog( logger, "Initialize net plugin" ); peer_log_format = options.at( "peer-log-format" ).as(); - my->sync_master = std::make_unique( options.at( "sync-fetch-span" ).as()); + sync_master = std::make_unique( + options.at( "sync-fetch-span" ).as(), + options.at( "sync-peer-limit" ).as() ); - my->connector_period = std::chrono::seconds( options.at( "connection-cleanup-period" ).as()); - my->max_cleanup_time_ms = options.at("max-cleanup-time-msec").as(); - my->txn_exp_period = def_txn_expire_wait; - my->p2p_dedup_cache_expire_time_us = fc::seconds( options.at( "p2p-dedup-cache-expire-time-sec" ).as() ); - my->resp_expected_period = def_resp_expected_wait; - my->max_client_count = options.at( "max-clients" ).as(); - my->max_nodes_per_host = options.at( "p2p-max-nodes-per-host" ).as(); - my->p2p_accept_transactions = options.at( "p2p-accept-transactions" ).as(); + txn_exp_period = def_txn_expire_wait; + p2p_dedup_cache_expire_time_us = fc::seconds( options.at( "p2p-dedup-cache-expire-time-sec" ).as() ); + resp_expected_period = def_resp_expected_wait; + max_nodes_per_host = options.at( "p2p-max-nodes-per-host" ).as(); + p2p_accept_transactions = options.at( "p2p-accept-transactions" ).as(); - my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as(); - my->keepalive_interval = std::chrono::milliseconds( options.at( "p2p-keepalive-interval-ms" ).as() ); - EOS_ASSERT( my->keepalive_interval.count() > 0, chain::plugin_config_exception, + use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as(); + keepalive_interval = std::chrono::milliseconds( options.at( "p2p-keepalive-interval-ms" ).as() ); + EOS_ASSERT( keepalive_interval.count() > 0, chain::plugin_config_exception, "p2p-keepalive_interval-ms must be greater than 0" ); - if( options.count( "p2p-keepalive-interval-ms" )) { - my->heartbeat_timeout = std::chrono::milliseconds( options.at( "p2p-keepalive-interval-ms" ).as() * 2 ); - } - - if( options.count( "p2p-listen-endpoint" ) && options.at("p2p-listen-endpoint").as().length()) { - my->p2p_address = options.at( "p2p-listen-endpoint" ).as(); - EOS_ASSERT( my->p2p_address.length() <= max_p2p_address_length, chain::plugin_config_exception, - "p2p-listen-endpoint too long, must be less than ${m}", ("m", max_p2p_address_length) ); + connections.init( std::chrono::milliseconds( options.at("p2p-keepalive-interval-ms").as() * 2 ), + fc::milliseconds( options.at("max-cleanup-time-msec").as() ), + std::chrono::seconds( options.at("connection-cleanup-period").as() ), + options.at("max-clients").as() ); + + if( options.count( "p2p-listen-endpoint" )) { + auto p2ps = options.at("p2p-listen-endpoint").as>(); + if (!p2ps.front().empty()) { + p2p_addresses = p2ps; + auto addr_count = p2p_addresses.size(); + std::sort(p2p_addresses.begin(), p2p_addresses.end()); + auto last = std::unique(p2p_addresses.begin(), p2p_addresses.end()); + p2p_addresses.erase(last, p2p_addresses.end()); + if( size_t addr_diff = addr_count - p2p_addresses.size(); addr_diff != 0) { + fc_wlog( logger, "Removed ${count} duplicate p2p-listen-endpoint entries", ("count", addr_diff)); + } + for( const auto& addr : p2p_addresses ) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, 
chain::plugin_config_exception, + "p2p-listen-endpoint ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } + } } if( options.count( "p2p-server-address" ) ) { - my->p2p_server_address = options.at( "p2p-server-address" ).as(); - EOS_ASSERT( my->p2p_server_address.length() <= max_p2p_address_length, chain::plugin_config_exception, - "p2p_server_address too long, must be less than ${m}", ("m", max_p2p_address_length) ); + p2p_server_addresses = options.at( "p2p-server-address" ).as>(); + EOS_ASSERT( p2p_server_addresses.size() <= p2p_addresses.size(), chain::plugin_config_exception, + "p2p-server-address may not be specified more times than p2p-listen-endpoint" ); + for( const auto& addr: p2p_server_addresses ) { + EOS_ASSERT( addr.length() <= max_p2p_address_length, chain::plugin_config_exception, + "p2p-server-address ${a} too long, must be less than ${m}", + ("a", addr)("m", max_p2p_address_length) ); + } } + p2p_server_addresses.resize(p2p_addresses.size()); // extend with empty entries as needed - my->thread_pool_size = options.at( "net-threads" ).as(); - EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception, - "net-threads ${num} must be greater than 0", ("num", my->thread_pool_size) ); + thread_pool_size = options.at( "net-threads" ).as(); + EOS_ASSERT( thread_pool_size > 0, chain::plugin_config_exception, + "net-threads ${num} must be greater than 0", ("num", thread_pool_size) ); + std::vector peers; if( options.count( "p2p-peer-address" )) { - auto v = options.at( "p2p-peer-address" ).as >(); - my->supplied_peers.insert(v.begin(), v.end()); + peers = options.at( "p2p-peer-address" ).as>(); + connections.add_supplied_peers(peers); } if( options.count( "agent-name" )) { - my->user_agent_name = options.at( "agent-name" ).as(); - EOS_ASSERT( my->user_agent_name.length() <= max_handshake_str_length, chain::plugin_config_exception, + user_agent_name = options.at( "agent-name" ).as(); + EOS_ASSERT( user_agent_name.length() <= max_handshake_str_length, chain::plugin_config_exception, "agent-name too long, must be less than ${m}", ("m", max_handshake_str_length) ); } if ( options.count( "p2p-auto-bp-peer")) { - my->set_bp_peers(options.at( "p2p-auto-bp-peer" ).as>()); - my->for_each_bp_peer_address([this](const auto& addr) { - EOS_ASSERT(my->supplied_peers.count(addr) == 0, chain::plugin_config_exception, + set_bp_peers(options.at( "p2p-auto-bp-peer" ).as>()); + for_each_bp_peer_address([&peers](const auto& addr) { + EOS_ASSERT(std::find(peers.begin(), peers.end(), addr) == peers.end(), chain::plugin_config_exception, "\"${addr}\" should only appear in either p2p-peer-address or p2p-auto-bp-peer option, not both.", ("addr",addr)); }); @@ -3870,17 +4024,17 @@ namespace eosio { const std::vector allowed_remotes = options["allowed-connection"].as>(); for( const std::string& allowed_remote : allowed_remotes ) { if( allowed_remote == "any" ) - my->allowed_connections |= net_plugin_impl::Any; + allowed_connections |= net_plugin_impl::Any; else if( allowed_remote == "producers" ) - my->allowed_connections |= net_plugin_impl::Producers; + allowed_connections |= net_plugin_impl::Producers; else if( allowed_remote == "specified" ) - my->allowed_connections |= net_plugin_impl::Specified; + allowed_connections |= net_plugin_impl::Specified; else if( allowed_remote == "none" ) - my->allowed_connections = net_plugin_impl::None; + allowed_connections = net_plugin_impl::None; } } - if( my->allowed_connections & net_plugin_impl::Specified ) + if( 
allowed_connections & net_plugin_impl::Specified ) EOS_ASSERT( options.count( "peer-key" ), plugin_config_exception, "At least one peer-key must accompany 'allowed-connection=specified'" ); @@ -3888,7 +4042,7 @@ namespace eosio { if( options.count( "peer-key" )) { const std::vector key_strings = options["peer-key"].as>(); for( const std::string& key_string : key_strings ) { - my->allowed_peers.push_back( dejsonify( key_string )); + allowed_peers.push_back( dejsonify( key_string )); } } @@ -3897,45 +4051,43 @@ namespace eosio { for( const std::string& key_id_to_wif_pair_string : key_id_to_wif_pair_strings ) { auto key_id_to_wif_pair = dejsonify>( key_id_to_wif_pair_string ); - my->private_keys[key_id_to_wif_pair.first] = fc::crypto::private_key( key_id_to_wif_pair.second ); + private_keys[key_id_to_wif_pair.first] = fc::crypto::private_key( key_id_to_wif_pair.second ); } } - my->chain_plug = app().find_plugin(); - EOS_ASSERT( my->chain_plug, chain::missing_chain_plugin_exception, "" ); - my->chain_id = my->chain_plug->get_chain_id(); - fc::rand_pseudo_bytes( my->node_id.data(), my->node_id.data_size()); - const controller& cc = my->chain_plug->chain(); + chain_plug = app().find_plugin(); + EOS_ASSERT( chain_plug, chain::missing_chain_plugin_exception, "" ); + chain_id = chain_plug->get_chain_id(); + fc::rand_pseudo_bytes( node_id.data(), node_id.data_size()); + const controller& cc = chain_plug->chain(); if( cc.get_read_mode() == db_read_mode::IRREVERSIBLE ) { - if( my->p2p_accept_transactions ) { - my->p2p_accept_transactions = false; + if( p2p_accept_transactions ) { + p2p_accept_transactions = false; fc_wlog( logger, "p2p-accept-transactions set to false due to read-mode: irreversible" ); } } - if( my->p2p_accept_transactions ) { - my->chain_plug->enable_accept_transactions(); + if( p2p_accept_transactions ) { + chain_plug->enable_accept_transactions(); } } FC_LOG_AND_RETHROW() } - void net_plugin::plugin_startup() { - try { - - fc_ilog( logger, "my node_id is ${id}", ("id", my->node_id )); + void net_plugin_impl::plugin_startup() { + fc_ilog( logger, "my node_id is ${id}", ("id", node_id )); - my->producer_plug = app().find_plugin(); - my->set_producer_accounts(my->producer_plug->producer_accounts()); + producer_plug = app().find_plugin(); + set_producer_accounts(producer_plug->producer_accounts()); - my->thread_pool.start( my->thread_pool_size, []( const fc::exception& e ) { + thread_pool.start( thread_pool_size, []( const fc::exception& e ) { fc_elog( logger, "Exception in net plugin thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); app().quit(); } ); - my->dispatcher = std::make_unique( my_impl->thread_pool.get_executor() ); + dispatcher = std::make_unique( my_impl->thread_pool.get_executor() ); - if( !my->p2p_accept_transactions && my->p2p_address.size() ) { + if( !p2p_accept_transactions && p2p_addresses.size() ) { fc_ilog( logger, "\n" "***********************************\n" "* p2p-accept-transactions = false *\n" @@ -3943,87 +4095,95 @@ namespace eosio { "***********************************\n" ); } - tcp::endpoint listen_endpoint; - if( !my->p2p_address.empty() ) { - auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); - auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); - tcp::resolver resolver( my->thread_pool.get_executor() ); - // Note: need to add support for IPv6 too? 
- listen_endpoint = *resolver.resolve( tcp::v4(), host, port ); + std::vector listen_addresses = p2p_addresses; - my->acceptor = std::make_unique( my_impl->thread_pool.get_executor() ); - - if( !my->p2p_server_address.empty() ) { - my->p2p_address = my->p2p_server_address; - } else { - if( listen_endpoint.address().to_v4() == address_v4::any()) { - boost::system::error_code ec; - host = host_name( ec ); - if( ec.value() != boost::system::errc::success ) { + EOS_ASSERT( p2p_addresses.size() == p2p_server_addresses.size(), chain::plugin_config_exception, "" ); + std::transform(p2p_addresses.begin(), p2p_addresses.end(), p2p_server_addresses.begin(), + p2p_addresses.begin(), [](const string& p2p_address, const string& p2p_server_address) { + auto [host, port] = fc::split_host_port(p2p_address); + + if( !p2p_server_address.empty() ) { + return p2p_server_address; + } else if( host.empty() || host == "0.0.0.0" || host == "[::]") { + boost::system::error_code ec; + auto hostname = host_name( ec ); + if( ec.value() != boost::system::errc::success ) { - FC_THROW_EXCEPTION( fc::invalid_arg_exception, - "Unable to retrieve host_name. ${msg}", ("msg", ec.message())); + FC_THROW_EXCEPTION( fc::invalid_arg_exception, + "Unable to retrieve host_name. ${msg}", ("msg", ec.message())); - } - port = my->p2p_address.substr( my->p2p_address.find( ':' ), my->p2p_address.size()); - my->p2p_address = host + port; } + return hostname + ":" + port; } - } + return p2p_address; + }); { - chain::controller& cc = my->chain_plug->chain(); - cc.accepted_block_header.connect( [my = my]( const block_state_ptr& s ) { + chain::controller& cc = chain_plug->chain(); + cc.accepted_block_header.connect( [my = shared_from_this()]( const block_state_ptr& s ) { my->on_accepted_block_header( s ); } ); - cc.accepted_block.connect( [my = my]( const block_state_ptr& s ) { + cc.accepted_block.connect( [my = shared_from_this()]( const block_state_ptr& s ) { my->on_accepted_block( s ); } ); - cc.irreversible_block.connect( [my = my]( const block_state_ptr& s ) { + cc.irreversible_block.connect( [my = shared_from_this()]( const block_state_ptr& s ) { my->on_irreversible_block( s ); } ); } { - std::lock_guard g( my->keepalive_timer_mtx ); - my->keepalive_timer = std::make_unique( my->thread_pool.get_executor() ); + fc::lock_guard g( keepalive_timer_mtx ); + keepalive_timer = std::make_unique( thread_pool.get_executor() ); } - my->incoming_transaction_ack_subscription = app().get_channel().subscribe( - [me = my.get()](auto&& t) { me->transaction_ack(std::forward(t)); }); + incoming_transaction_ack_subscription = app().get_channel().subscribe( + [this](auto&& t) { transaction_ack(std::forward(t)); }); - app().executor().post(priority::highest, [my=my, listen_endpoint](){ - if( my->acceptor ) { + for(auto listen_itr = listen_addresses.begin(), p2p_iter = p2p_addresses.begin(); + listen_itr != listen_addresses.end(); + ++listen_itr, ++p2p_iter) { + app().executor().post(priority::highest, [my=shared_from_this(), address = std::move(*listen_itr), p2p_addr = *p2p_iter](){ try { - my->acceptor->open(listen_endpoint.protocol()); - my->acceptor->set_option(tcp::acceptor::reuse_address(true)); - my->acceptor->bind(listen_endpoint); - my->acceptor->listen(); + const boost::posix_time::milliseconds accept_timeout(100); + + std::string extra_listening_log_info = + ", max clients is " + std::to_string(my->connections.get_max_client_count()); + + fc::create_listener( + my->thread_pool.get_executor(), logger, accept_timeout, address, 
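The std::transform above computes, per listen endpoint, the address advertised in handshakes: an explicit p2p-server-address wins, a wildcard host is replaced by the machine's hostname, and anything concrete passes through unchanged. Restated as a standalone function, with a split on the last ':' standing in for fc::split_host_port:

    #include <string>

    std::string advertised_address(const std::string& p2p_address,
                                   const std::string& p2p_server_address,
                                   const std::string& hostname) {
       auto colon = p2p_address.rfind(':');
       std::string host = p2p_address.substr(0, colon);
       std::string port = colon == std::string::npos ? "" : p2p_address.substr(colon + 1);
       if (!p2p_server_address.empty())
          return p2p_server_address;                   // explicit override
       if (host.empty() || host == "0.0.0.0" || host == "[::]")
          return hostname + ":" + port;                // wildcard: advertise the local hostname
       return p2p_address;                             // already concrete
    }
    // e.g. advertised_address("0.0.0.0:9876", "", "node1") == "node1:9876"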
extra_listening_log_info, + [my = my, addr = p2p_addr](tcp::socket&& socket) { my->create_session(std::move(socket), addr); }); } catch (const std::exception& e) { - fc_elog( logger, "net_plugin::plugin_startup failed to bind to port ${port}, ${what}", - ("port", listen_endpoint.port())("what", e.what()) ); + fc_elog( logger, "net_plugin::plugin_startup failed to listen on ${addr}, ${what}", + ("addr", address)("what", e.what()) ); app().quit(); return; } - fc_ilog( logger, "starting listener, max clients is ${mc}",("mc",my->max_client_count) ); - my->start_listen_loop(); - } - + }); + } + app().executor().post(priority::highest, [my=shared_from_this()](){ my->ticker(); my->start_monitors(); my->update_chain_info(); - for( const auto& seed_node : my->supplied_peers ) { - my->connect( seed_node ); - } + my->connections.connect_supplied_peers(*my->p2p_addresses.begin()); // attribute every outbound connection to the first listen port }); + } + + void net_plugin::plugin_initialize( const variables_map& options ) { + handle_sighup(); + my->plugin_initialize( options ); + } + void net_plugin::plugin_startup() { + try { + my->plugin_startup(); } catch( ... ) { // always want plugin_shutdown even on exception plugin_shutdown(); throw; } } + void net_plugin::handle_sighup() { fc::logger::update( logger_name, logger ); @@ -4040,92 +4200,253 @@ namespace eosio { FC_CAPTURE_AND_RETHROW() } - /** - * Used to trigger a new connection from RPC API - */ + /// RPC API string net_plugin::connect( const string& host ) { - return my->connect( host ); + return my->connections.connect( host, *my->p2p_addresses.begin() ); } - string net_plugin_impl::connect( const string& host ) { - std::lock_guard g( connections_mtx ); - if( find_connection( host ) ) - return "already connected"; + /// RPC API + string net_plugin::disconnect( const string& host ) { + return my->connections.disconnect(host); + } - connection_ptr c = std::make_shared( host ); - fc_dlog( logger, "calling active connector: ${h}", ("h", host) ); - if( c->resolve_and_connect() ) { - fc_dlog( logger, "adding new connection to the list: ${host} ${cid}", ("host", host)("cid", c->connection_id) ); - c->set_heartbeat_timeout( heartbeat_timeout ); - connections.insert( c ); + /// RPC API + std::optional net_plugin::status( const string& host )const { + return my->connections.status(host); + } + + /// RPC API + vector net_plugin::connections()const { + return my->connections.connection_statuses(); + } + + constexpr uint16_t net_plugin_impl::to_protocol_version(uint16_t v) { + if (v >= net_version_base) { + v -= net_version_base; + return (v > net_version_range) ? 
0 : v; + } + return 0; + } + + bool net_plugin_impl::in_sync() const { + return sync_master->is_in_sync(); + } + + void net_plugin::register_update_p2p_connection_metrics(std::function&& fun){ + my->connections.register_update_p2p_connection_metrics(std::move(fun)); + } + + void net_plugin::register_increment_failed_p2p_connections(std::function&& fun){ + my->increment_failed_p2p_connections = std::move(fun); + } + + void net_plugin::register_increment_dropped_trxs(std::function&& fun){ + my->increment_dropped_trxs = std::move(fun); + } + + //---------------------------------------------------------------------------- + + size_t connections_manager::number_connections() const { + std::lock_guard g(connections_mtx); + return connections.size(); + } + + void connections_manager::add_supplied_peers(const vector& peers ) { + std::lock_guard g(connections_mtx); + supplied_peers.insert( peers.begin(), peers.end() ); + } + + // not thread safe, only call on startup + void connections_manager::init( std::chrono::milliseconds heartbeat_timeout_ms, + fc::microseconds conn_max_cleanup_time, + boost::asio::steady_timer::duration conn_period, + uint32_t maximum_client_count ) { + heartbeat_timeout = heartbeat_timeout_ms; + max_cleanup_time = conn_max_cleanup_time; + connector_period = conn_period; + max_client_count = maximum_client_count; + } + + fc::microseconds connections_manager::get_connector_period() const { + auto connector_period_us = std::chrono::duration_cast( connector_period ); + return fc::microseconds{ connector_period_us.count() }; + } + + void connections_manager::register_update_p2p_connection_metrics(std::function&& fun){ + update_p2p_connection_metrics = std::move(fun); + } + + void connections_manager::connect_supplied_peers(const string& p2p_address) { + std::lock_guard g(connections_mtx); + for (const auto& peer : supplied_peers) { + connect_i(peer, p2p_address); } + } + + void connections_manager::add( connection_ptr c ) { + std::lock_guard g( connections_mtx ); + add_i( std::move(c) ); + } + + // called by API + string connections_manager::connect( const string& host, const string& p2p_address ) { + std::lock_guard g( connections_mtx ); + if( find_connection_i( host ) ) + return "already connected"; + + connect_i( host, p2p_address ); + supplied_peers.insert(host); return "added connection"; } - string net_plugin_impl::disconnect( const string& host ) { - std::lock_guard g( connections_mtx ); - for( auto itr = connections.begin(); itr != connections.end(); ++itr ) { - if( (*itr)->peer_address() == host ) { - fc_ilog( logger, "disconnecting: ${cid}", ("cid", (*itr)->connection_id) ); - (*itr)->close(); - connections.erase(itr); - return "connection removed"; - } + // called by API + string connections_manager::disconnect( const string& host ) { + std::lock_guard g( connections_mtx ); + if( auto c = find_connection_i( host ) ) { + fc_ilog( logger, "disconnecting: ${cid}", ("cid", c->connection_id) ); + c->close(); + connections.erase(c); + supplied_peers.erase(host); + return "connection removed"; } return "no known connection for host"; } - string net_plugin::disconnect( const string& host ) { - return my->disconnect(host); + void connections_manager::close_all() { + fc_ilog( logger, "close all ${s} connections", ("s", connections.size()) ); + std::lock_guard g( connections_mtx ); + for( auto& con : connections ) { + fc_dlog( logger, "close: ${cid}", ("cid", con->connection_id) ); + con->close( false, true ); + } + connections.clear(); } - std::optional net_plugin::status( const 
string& host )const { - std::shared_lock g( my->connections_mtx ); - auto con = my->find_connection( host ); - if( con ) + std::optional connections_manager::status( const string& host )const { + std::shared_lock g( connections_mtx ); + auto con = find_connection_i( host ); + if( con ) { return con->get_status(); + } return {}; } - vector net_plugin::connections()const { + vector connections_manager::connection_statuses()const { vector result; - std::shared_lock g( my->connections_mtx ); - result.reserve( my->connections.size() ); - for( const auto& c : my->connections ) { + std::shared_lock g( connections_mtx ); + result.reserve( connections.size() ); + for( const auto& c : connections ) { result.push_back( c->get_status() ); } return result; } // call with connections_mtx - connection_ptr net_plugin_impl::find_connection( const string& host )const { - for( const auto& c : connections ) - if( c->peer_address() == host ) return c; + connection_ptr connections_manager::find_connection_i( const string& host )const { + for( const auto& c : connections ) { + if (c->peer_address() == host) + return c; + } return {}; } - constexpr uint16_t net_plugin_impl::to_protocol_version(uint16_t v) { - if (v >= net_version_base) { - v -= net_version_base; - return (v > net_version_range) ? 0 : v; + // call with connections_mtx + void connections_manager::connect_i( const string& host, const string& p2p_address ) { + connection_ptr c = std::make_shared( host, p2p_address ); + fc_dlog( logger, "calling active connector: ${h}", ("h", host) ); + if( c->resolve_and_connect() ) { + fc_dlog( logger, "adding new connection to the list: ${host} ${cid}", ("host", host)("cid", c->connection_id) ); + add_i( std::move(c) ); } - return 0; } - bool net_plugin_impl::in_sync() const { - return sync_master->is_in_sync(); + // call with connections_mtx + void connections_manager::add_i(connection_ptr&& c) { + c->set_heartbeat_timeout( heartbeat_timeout ); + connections.insert( std::move(c) ); } - void net_plugin::register_update_p2p_connection_metrics(std::function&& fun){ - my->update_p2p_connection_metrics = std::move(fun); + // called from any thread + void connections_manager::start_conn_timer() { + start_conn_timer(connector_period, {}); // this locks mutex } - void net_plugin::register_increment_failed_p2p_connections(std::function&& fun){ - my->increment_failed_p2p_connections = std::move(fun); + // called from any thread + void connections_manager::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr from_connection) { + fc::lock_guard g( connector_check_timer_mtx ); + if (!connector_check_timer) { + connector_check_timer = std::make_unique( my_impl->thread_pool.get_executor() ); + } + connector_check_timer->expires_from_now( du ); + connector_check_timer->async_wait( [this, from_connection{std::move(from_connection)}](boost::system::error_code ec) mutable { + if( !ec ) { + connection_monitor(from_connection); + } + }); } - void net_plugin::register_increment_dropped_trxs(std::function&& fun){ - my->increment_dropped_trxs = std::move(fun); + void connections_manager::stop_conn_timer() { + fc::lock_guard g( connector_check_timer_mtx ); + if (connector_check_timer) { + connector_check_timer->cancel(); + } } -} + // called from any thread + void connections_manager::connection_monitor(const std::weak_ptr& from_connection) { + auto max_time = fc::time_point::now().safe_add(max_cleanup_time); + auto from = from_connection.lock(); + std::unique_lock g( connections_mtx ); + auto it = (from ? 
connections.find(from) : connections.begin()); + if (it == connections.end()) it = connections.begin(); + size_t num_rm = 0, num_clients = 0, num_peers = 0, num_bp_peers = 0; + while (it != connections.end()) { + if (fc::time_point::now() >= max_time) { + connection_wptr wit = *it; + g.unlock(); + fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) ); + fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}", + ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size()) ); + start_conn_timer( std::chrono::milliseconds( 1 ), wit ); // avoid exhausting + return; + } + if ((*it)->is_bp_connection) { + ++num_bp_peers; + } else if ((*it)->incoming()) { + ++num_clients; + } else { + ++num_peers; + } + + if (!(*it)->socket_is_open() && (*it)->state() != connection::connection_state::connecting) { + if (!(*it)->incoming()) { + if (!(*it)->resolve_and_connect()) { + it = connections.erase(it); + --num_peers; + ++num_rm; + continue; + } + } else { + --num_clients; + ++num_rm; + it = connections.erase(it); + continue; + } + } + ++it; + } + g.unlock(); + + if (update_p2p_connection_metrics) { + update_p2p_connection_metrics({.num_peers = num_peers, .num_clients = num_clients}); + } + + if( num_clients > 0 || num_peers > 0 ) { + fc_ilog(logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}, block producer peers: ${num_bp_peers}", + ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size())("num_bp_peers", num_bp_peers)); + } + fc_dlog( logger, "connection monitor, removed ${n} connections", ("n", num_rm) ); + start_conn_timer( connector_period, {}); + } + +} // namespace eosio diff --git a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp index 485234e4d8..6aa7fbebd6 100644 --- a/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp +++ b/plugins/net_plugin/tests/auto_bp_peering_unittest.cpp @@ -14,13 +14,14 @@ struct mock_connection { using namespace eosio::chain::literals; using namespace std::literals::string_literals; -struct mock_net_plugin : eosio::auto_bp_peering::bp_connection_manager { - - uint32_t max_client_count; - bool is_in_sync = false; +struct mock_connections_manager { + uint32_t max_client_count = 0; std::vector connections; - bool in_sync() { return is_in_sync; } + std::function connect; + std::function disconnect; + + uint32_t get_max_client_count() const { return max_client_count; } template void for_each_connection(Function&& func) const { @@ -29,9 +30,15 @@ struct mock_net_plugin : eosio::auto_bp_peering::bp_connection_manager connect; - std::function disconnect; +struct mock_net_plugin : eosio::auto_bp_peering::bp_connection_manager { + + bool is_in_sync = false; + mock_connections_manager connections; + std::vector p2p_addresses{"0.0.0.0:9876"}; + + bool in_sync() { return is_in_sync; } void setup_test_peers() { set_bp_peers({ "proda,127.0.0.1:8001:blk"s, "prodb,127.0.0.1:8002:trx"s, "prodc,127.0.0.1:8003"s, @@ -159,7 +166,7 @@ BOOST_AUTO_TEST_CASE(test_on_pending_schedule) { std::vector connected_hosts; - plugin.connect = [&connected_hosts](std::string host) { connected_hosts.push_back(host); }; + plugin.connections.connect = [&connected_hosts](std::string host, std::string p2p_address) { connected_hosts.push_back(host); }; // make sure nothing happens when it is not in_sync plugin.is_in_sync = false; @@ -203,10 +210,10 
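For context on the timer logic in the hunk above: connection_monitor re-arms itself through start_conn_timer, and when its cleanup budget (max_cleanup_time) runs out it re-arms with a short 1 ms delay and a weak pointer to the current connection so the scan can resume where it left off. A minimal self-contained sketch of that self-re-arming, time-budgeted pattern, assuming only Boost.Asio; the class and member names (monitor, scan_budget) are illustrative, not the plugin's:

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

namespace asio = boost::asio;
using namespace std::chrono_literals;

class monitor {
public:
   explicit monitor(asio::io_context& ctx) : timer_(ctx) {}

   void start(std::chrono::milliseconds period) {
      period_ = period;
      arm(period_);
   }

private:
   void arm(std::chrono::milliseconds du) {
      timer_.expires_after(du);
      timer_.async_wait([this](boost::system::error_code ec) {
         if (!ec) scan(); // timer fired normally; cancelled timers skip the scan
      });
   }

   void scan() {
      auto deadline = std::chrono::steady_clock::now() + scan_budget_;
      // ... walk the connection set here; the real code erases dead entries
      // and counts clients/peers as it goes ...
      bool ran_out_of_time = std::chrono::steady_clock::now() >= deadline;
      // re-arm quickly (the PR uses 1ms) if the budget was exhausted,
      // otherwise wait the full period before the next pass
      arm(ran_out_of_time ? 1ms : period_);
   }

   asio::steady_timer        timer_;
   std::chrono::milliseconds period_{2500};
   std::chrono::milliseconds scan_budget_{10};
};

int main() {
   asio::io_context ctx;
   monitor m(ctx);
   m.start(250ms);
   ctx.run_for(1s); // run a few monitor passes, then exit
   std::cout << "done\n";
}
```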
@@ BOOST_AUTO_TEST_CASE(test_on_active_schedule1) { plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n }; plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n }; - plugin.connect = [](std::string host) {}; + plugin.connections.connect = [](std::string host, std::string p2p_address) {}; std::vector disconnected_hosts; - plugin.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; + plugin.connections.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; // make sure nothing happens when it is not in_sync plugin.is_in_sync = false; @@ -239,9 +246,9 @@ BOOST_AUTO_TEST_CASE(test_on_active_schedule2) { plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n }; plugin.active_neighbors = { "proda"_n, "prodh"_n, "prodn"_n }; - plugin.connect = [](std::string host) {}; + plugin.connections.connect = [](std::string host, std::string p2p_address) {}; std::vector disconnected_hosts; - plugin.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; + plugin.connections.disconnect = [&disconnected_hosts](std::string host) { disconnected_hosts.push_back(host); }; // when pending and active schedules are changed simultaneously plugin.is_in_sync = true; @@ -263,8 +270,8 @@ BOOST_AUTO_TEST_CASE(test_exceeding_connection_limit) { mock_net_plugin plugin; plugin.setup_test_peers(); plugin.config.my_bp_accounts = { "prodd"_n, "produ"_n }; - plugin.max_client_count = 1; - plugin.connections = { + plugin.connections.max_client_count = 1; + plugin.connections.connections = { { .is_bp_connection = true, .is_open = true, .handshake_received = true }, // 0 { .is_bp_connection = true, .is_open = true, .handshake_received = false }, // 1 { .is_bp_connection = true, .is_open = false, .handshake_received = true }, // 2 @@ -277,12 +284,12 @@ BOOST_AUTO_TEST_CASE(test_exceeding_connection_limit) { BOOST_CHECK_EQUAL(plugin.num_established_clients(), 2); - BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections[0])); - BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections[1])); - BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections[2])); - BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections[3])); - BOOST_CHECK(plugin.exceeding_connection_limit(&plugin.connections[4])); - BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections[5])); - BOOST_CHECK(plugin.exceeding_connection_limit(&plugin.connections[6])); - BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections[7])); + BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[0])); + BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[1])); + BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[2])); + BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[3])); + BOOST_CHECK(plugin.exceeding_connection_limit(&plugin.connections.connections[4])); + BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[5])); + BOOST_CHECK(plugin.exceeding_connection_limit(&plugin.connections.connections[6])); + BOOST_CHECK(!plugin.exceeding_connection_limit(&plugin.connections.connections[7])); } \ No newline at end of file diff --git a/plugins/producer_api_plugin/producer.swagger.yaml b/plugins/producer_api_plugin/producer.swagger.yaml index 2b53598319..25c7b70e90 100644 --- a/plugins/producer_api_plugin/producer.swagger.yaml +++ 
b/plugins/producer_api_plugin/producer.swagger.yaml @@ -735,7 +735,7 @@ paths: $ref: "https://docs.eosnetwork.com/openapi/v2.0/Sha256.yaml" time_limit_ms: type: integer - default: 10 + default: http-max-response-time-ms example: 10 responses: "200": diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index 9d477ed425..65cbe2f58b 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -1,8 +1,8 @@ #include #include +#include #include -#include #include @@ -20,27 +20,21 @@ namespace eosio { using namespace eosio; -struct async_result_visitor : public fc::visitor { - template - fc::variant operator()(const T& v) const { - return fc::variant(v); - } -}; - -#define CALL_WITH_400(api_name, api_handle, call_name, INVOKE, http_response_code) \ +#define CALL_WITH_400(api_name, category, api_handle, call_name, INVOKE, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ - [&api_handle, http_max_response_time](string&&, string&& body, url_response_callback&& cb) mutable { \ + api_category::category, \ + [&](string&&, string&& body, url_response_callback&& cb) mutable { \ try { \ - auto deadline = fc::time_point::now() + http_max_response_time; \ INVOKE \ - cb(http_response_code, deadline, fc::variant(result)); \ + cb(http_response_code, fc::variant(result)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ }} -#define CALL_ASYNC(api_name, api_handle, call_name, call_result, INVOKE, http_response_code) \ +#define CALL_ASYNC(api_name, category, api_handle, call_name, call_result, INVOKE, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ + api_category::category, \ [&api_handle](string&&, string&& body, url_response_callback&& cb) mutable { \ if (body.empty()) body = "{}"; \ auto next = [cb=std::move(cb), body=std::move(body)](const chain::next_function_variant& result){ \ @@ -51,7 +45,7 @@ struct async_result_visitor : public fc::visitor { http_plugin::handle_exception(#api_name, #call_name, body, cb);\ }\ } else if (std::holds_alternative(result)) { \ - cb(http_response_code, fc::time_point::maximum(), fc::variant(std::get(result)));\ + cb(http_response_code, fc::variant(std::get(result)));\ } else { \ assert(0); \ } \ @@ -69,6 +63,8 @@ struct async_result_visitor : public fc::visitor { auto result = api_handle.call_name(std::move(params)); #define INVOKE_R_R_D(api_handle, call_name, in_param) \ + auto deadline = http_max_response_time == fc::microseconds::maximum() ? 
fc::time_point::maximum() \ + : fc::time_point::now() + http_max_response_time; \ auto params = parse_params(body);\ auto result = api_handle.call_name(std::move(params), deadline); @@ -84,11 +80,6 @@ struct async_result_visitor : public fc::visitor { api_handle.call_name(std::move(params)); \ eosio::detail::producer_api_plugin_response result{"ok"}; -#define INVOKE_V_R_II(api_handle, call_name, in_param) \ - auto params = parse_params(body);\ - api_handle.call_name(std::move(params)); \ - eosio::detail::producer_api_plugin_response result{"ok"}; - #define INVOKE_V_V(api_handle, call_name) \ body = parse_params(body); \ api_handle.call_name(); \ @@ -103,50 +94,50 @@ void producer_api_plugin::plugin_startup() { fc::microseconds http_max_response_time = http.get_max_response_time(); app().get_plugin().add_api({ - CALL_WITH_400(producer, producer, paused, + CALL_WITH_400(producer, producer_ro, producer, paused, INVOKE_R_V(producer, paused), 201), - CALL_WITH_400(producer, producer, get_runtime_options, + CALL_WITH_400(producer, producer_ro, producer, get_runtime_options, INVOKE_R_V(producer, get_runtime_options), 201), - CALL_WITH_400(producer, producer, get_greylist, + CALL_WITH_400(producer, producer_ro, producer, get_greylist, INVOKE_R_V(producer, get_greylist), 201), - CALL_WITH_400(producer, producer, get_whitelist_blacklist, + CALL_WITH_400(producer, producer_ro, producer, get_whitelist_blacklist, INVOKE_R_V(producer, get_whitelist_blacklist), 201), - CALL_WITH_400(producer, producer, get_scheduled_protocol_feature_activations, + CALL_WITH_400(producer, producer_ro, producer, get_scheduled_protocol_feature_activations, INVOKE_R_V(producer, get_scheduled_protocol_feature_activations), 201), - CALL_WITH_400(producer, producer, get_supported_protocol_features, + CALL_WITH_400(producer, producer_ro, producer, get_supported_protocol_features, INVOKE_R_R_II(producer, get_supported_protocol_features, producer_plugin::get_supported_protocol_features_params), 201), - CALL_WITH_400(producer, producer, get_account_ram_corrections, + CALL_WITH_400(producer, producer_ro, producer, get_account_ram_corrections, INVOKE_R_R(producer, get_account_ram_corrections, producer_plugin::get_account_ram_corrections_params), 201), - CALL_WITH_400(producer, producer, get_unapplied_transactions, + CALL_WITH_400(producer, producer_ro, producer, get_unapplied_transactions, INVOKE_R_R_D(producer, get_unapplied_transactions, producer_plugin::get_unapplied_transactions_params), 200), - CALL_WITH_400(producer, producer, get_snapshot_requests, + CALL_WITH_400(producer, producer_ro, producer, get_snapshot_requests, INVOKE_R_V(producer, get_snapshot_requests), 201), }, appbase::exec_queue::read_only, appbase::priority::medium_high); // Not safe to run in parallel app().get_plugin().add_api({ - CALL_WITH_400(producer, producer, pause, + CALL_WITH_400(producer, producer_rw, producer, pause, INVOKE_V_V(producer, pause), 201), - CALL_WITH_400(producer, producer, resume, + CALL_WITH_400(producer, producer_rw, producer, resume, INVOKE_V_V(producer, resume), 201), - CALL_WITH_400(producer, producer, update_runtime_options, + CALL_WITH_400(producer, producer_rw, producer, update_runtime_options, INVOKE_V_R(producer, update_runtime_options, producer_plugin::runtime_options), 201), - CALL_WITH_400(producer, producer, add_greylist_accounts, + CALL_WITH_400(producer, producer_rw, producer, add_greylist_accounts, INVOKE_V_R(producer, add_greylist_accounts, producer_plugin::greylist_params), 201), - CALL_WITH_400(producer, producer, 
remove_greylist_accounts, + CALL_WITH_400(producer, producer_rw, producer, remove_greylist_accounts, INVOKE_V_R(producer, remove_greylist_accounts, producer_plugin::greylist_params), 201), - CALL_WITH_400(producer, producer, set_whitelist_blacklist, + CALL_WITH_400(producer, producer_rw, producer, set_whitelist_blacklist, INVOKE_V_R(producer, set_whitelist_blacklist, producer_plugin::whitelist_blacklist), 201), - CALL_ASYNC(producer, producer, create_snapshot, chain::snapshot_scheduler::snapshot_information, + CALL_ASYNC(producer, snapshot, producer, create_snapshot, chain::snapshot_scheduler::snapshot_information, INVOKE_R_V_ASYNC(producer, create_snapshot), 201), - CALL_WITH_400(producer, producer, schedule_snapshot, - INVOKE_R_R_II(producer, schedule_snapshot, chain::snapshot_scheduler::snapshot_request_information), 201), - CALL_WITH_400(producer, producer, unschedule_snapshot, + CALL_WITH_400(producer, snapshot, producer, schedule_snapshot, + INVOKE_R_R_II(producer, schedule_snapshot, chain::snapshot_scheduler::snapshot_request_params), 201), + CALL_WITH_400(producer, snapshot, producer, unschedule_snapshot, INVOKE_R_R(producer, unschedule_snapshot, chain::snapshot_scheduler::snapshot_request_id_information), 201), - CALL_WITH_400(producer, producer, get_integrity_hash, + CALL_WITH_400(producer, producer_rw, producer, get_integrity_hash, INVOKE_R_V(producer, get_integrity_hash), 201), - CALL_WITH_400(producer, producer, schedule_protocol_feature_activations, + CALL_WITH_400(producer, producer_rw, producer, schedule_protocol_feature_activations, INVOKE_V_R(producer, schedule_protocol_feature_activations, producer_plugin::scheduled_protocol_feature_activations), 201), }, appbase::exec_queue::read_write, appbase::priority::medium_high); } @@ -154,11 +145,22 @@ void producer_api_plugin::plugin_startup() { void producer_api_plugin::plugin_initialize(const variables_map& options) { try { const auto& _http_plugin = app().get_plugin(); - if( !_http_plugin.is_on_loopback()) { + if( !_http_plugin.is_on_loopback(api_category::producer_rw)) { + wlog( "\n" + "**********SECURITY WARNING**********\n" + "* *\n" + "* -- Producer RW API -- *\n" + "* - EXPOSED to the LOCAL NETWORK - *\n" + "* - USE ONLY ON SECURE NETWORKS! - *\n" + "* *\n" + "************************************\n" ); + + } + if( !_http_plugin.is_on_loopback(api_category::snapshot)) { wlog( "\n" "**********SECURITY WARNING**********\n" "* *\n" - "* -- Producer API -- *\n" + "* -- Snapshot API -- *\n" "* - EXPOSED to the LOCAL NETWORK - *\n" "* - USE ONLY ON SECURE NETWORKS! 
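The CALL_WITH_400 changes above thread an api_category (producer_ro, producer_rw, snapshot) through every registered endpoint, which is what lets http_plugin warn per category when a sensitive API is reachable beyond loopback. A toy sketch of the macro shape, where api_category, url_handler, and the api_entry table are simplified stand-ins for the real plugin types:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

enum class api_category { producer_ro, producer_rw, snapshot };
using url_handler = std::function<void(std::string&& body)>;

struct api_entry {
   std::string  path;
   api_category category;
   url_handler  handler;
};

// Stamps out a {path, category, handler} entry; the real macro also parses
// params, invokes the API object, and routes errors through handle_exception.
#define CALL_WITH_400(api_name, category, call_name)                            \
   api_entry{std::string("/v1/" #api_name "/" #call_name),                      \
             api_category::category,                                            \
             [](std::string&& body) {                                           \
                std::cout << #api_name "." #call_name << ": " << body << '\n';  \
             }}

int main() {
   // mirrors the split into read-only and read-write registration tables
   std::vector<api_entry> read_only  = {CALL_WITH_400(producer, producer_ro, paused)};
   std::vector<api_entry> read_write = {CALL_WITH_400(producer, producer_rw, pause)};
   for (auto& e : read_only)  e.handler("{}");
   for (auto& e : read_write) e.handler("{}");
}
```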
- *\n" "* *\n" diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 74b8c02f95..823266d1fa 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -98,7 +98,7 @@ class producer_plugin : public appbase::plugin { integrity_hash_information get_integrity_hash() const; void create_snapshot(next_function next); - chain::snapshot_scheduler::snapshot_schedule_result schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_information& schedule); + chain::snapshot_scheduler::snapshot_schedule_result schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_params& srp); chain::snapshot_scheduler::snapshot_schedule_result unschedule_snapshot(const chain::snapshot_scheduler::snapshot_request_id_information& schedule); chain::snapshot_scheduler::get_snapshot_requests_result get_snapshot_requests() const; @@ -112,7 +112,7 @@ class producer_plugin : public appbase::plugin { struct get_unapplied_transactions_params { string lower_bound; /// transaction id std::optional limit = 100; - std::optional time_limit_ms; // defaults to 10ms + std::optional time_limit_ms; // defaults to http-max-response-time-ms }; struct unapplied_trx { @@ -171,9 +171,9 @@ class producer_plugin : public appbase::plugin { void register_update_produced_block_metrics(std::function&&); void register_update_incoming_block_metrics(std::function&&); - private: inline static bool test_mode_{false}; // to be moved into appbase (application_base) + private: std::shared_ptr my; }; diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index e317f92d25..dd0b944b0a 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -18,10 +17,6 @@ #include #include - -#include -#include -#include #include #include #include @@ -30,87 +25,94 @@ #include #include +#include +#include +#include +#include + namespace bmi = boost::multi_index; +using bmi::hashed_unique; using bmi::indexed_by; -using bmi::ordered_non_unique; using bmi::member; +using bmi::ordered_non_unique; using bmi::tag; -using bmi::hashed_unique; using boost::multi_index_container; +using boost::signals2::scoped_connection; using std::string; using std::vector; -using boost::signals2::scoped_connection; #undef FC_LOG_AND_DROP -#define LOG_AND_DROP() \ - catch ( const guard_exception& e ) { \ - chain_plugin::handle_guard_exception(e); \ - } catch ( const std::bad_alloc& ) { \ - chain_apis::api_base::handle_bad_alloc(); \ - } catch ( boost::interprocess::bad_alloc& ) { \ - chain_apis::api_base::handle_db_exhaustion(); \ - } catch( fc::exception& er ) { \ - wlog( "${details}", ("details",er.to_detail_string()) ); \ - } catch( const std::exception& e ) { \ - fc::exception fce( \ - FC_LOG_MESSAGE( warn, "std::exception: ${what}: ",("what",e.what()) ), \ - fc::std_exception_code,\ - BOOST_CORE_TYPEID(e).name(), \ - e.what() ) ; \ - wlog( "${details}", ("details",fce.to_detail_string()) ); \ - } catch( ... 
) { \ - fc::unhandled_exception e( \ - FC_LOG_MESSAGE( warn, "unknown: ", ), \ - std::current_exception() ); \ - wlog( "${details}", ("details",e.to_detail_string()) ); \ +#define LOG_AND_DROP() \ + catch (const guard_exception& e) { \ + chain_plugin::handle_guard_exception(e); \ + } \ + catch (const std::bad_alloc&) { \ + chain_apis::api_base::handle_bad_alloc(); \ + } \ + catch (boost::interprocess::bad_alloc&) { \ + chain_apis::api_base::handle_db_exhaustion(); \ + } \ + catch (fc::exception & er) { \ + wlog("${details}", ("details", er.to_detail_string())); \ + } \ + catch (const std::exception& e) { \ + fc::exception fce(FC_LOG_MESSAGE(warn, "std::exception: ${what}: ", ("what", e.what())), \ + fc::std_exception_code, \ + BOOST_CORE_TYPEID(e).name(), \ + e.what()); \ + wlog("${details}", ("details", fce.to_detail_string())); \ + } \ + catch (...) { \ + fc::unhandled_exception e(FC_LOG_MESSAGE(warn, "unknown: ", ), std::current_exception());\ + wlog("${details}", ("details", e.to_detail_string())); \ } const std::string logger_name("producer_plugin"); -fc::logger _log; +fc::logger _log; const std::string trx_successful_trace_logger_name("transaction_success_tracing"); -fc::logger _trx_successful_trace_log; +fc::logger _trx_successful_trace_log; const std::string trx_failed_trace_logger_name("transaction_failure_tracing"); -fc::logger _trx_failed_trace_log; +fc::logger _trx_failed_trace_log; const std::string trx_trace_success_logger_name("transaction_trace_success"); -fc::logger _trx_trace_success_log; +fc::logger _trx_trace_success_log; const std::string trx_trace_failure_logger_name("transaction_trace_failure"); -fc::logger _trx_trace_failure_log; +fc::logger _trx_trace_failure_log; const std::string trx_logger_name("transaction"); -fc::logger _trx_log; +fc::logger _trx_log; const std::string transient_trx_successful_trace_logger_name("transient_trx_success_tracing"); -fc::logger _transient_trx_successful_trace_log; +fc::logger _transient_trx_successful_trace_log; const std::string transient_trx_failed_trace_logger_name("transient_trx_failure_tracing"); -fc::logger _transient_trx_failed_trace_log; +fc::logger _transient_trx_failed_trace_log; namespace eosio { - static auto _producer_plugin = application::register_plugin(); +static auto _producer_plugin = application::register_plugin(); using namespace eosio::chain; using namespace eosio::chain::plugin_interface; namespace { - bool exception_is_exhausted(const fc::exception& e) { - auto code = e.code(); - return (code == block_cpu_usage_exceeded::code_value) || - (code == block_net_usage_exceeded::code_value) || - (code == deadline_exception::code_value) || - (code == ro_trx_vm_oc_compile_temporary_failure::code_value); - } +bool exception_is_exhausted(const fc::exception& e) { + auto code = e.code(); + return (code == block_cpu_usage_exceeded::code_value) || + (code == block_net_usage_exceeded::code_value) || + (code == deadline_exception::code_value) || + (code == ro_trx_vm_oc_compile_temporary_failure::code_value); } +} // namespace struct transaction_id_with_expiry { - transaction_id_type trx_id; - fc::time_point expiry; + transaction_id_type trx_id; + fc::time_point expiry; }; struct by_id; @@ -118,11 +120,8 @@ struct by_expiry; using transaction_id_with_expiry_index = multi_index_container< transaction_id_with_expiry, - indexed_by< - hashed_unique, BOOST_MULTI_INDEX_MEMBER(transaction_id_with_expiry, transaction_id_type, trx_id)>, - ordered_non_unique, BOOST_MULTI_INDEX_MEMBER(transaction_id_with_expiry, fc::time_point, expiry)> 
- > ->; + indexed_by, BOOST_MULTI_INDEX_MEMBER(transaction_id_with_expiry, transaction_id_type, trx_id)>, + ordered_non_unique, BOOST_MULTI_INDEX_MEMBER(transaction_id_with_expiry, fc::time_point, expiry)>>>; namespace { @@ -131,21 +130,21 @@ class account_failures { public: account_failures() = default; - void set_max_failures_per_account( uint32_t max_failures, uint32_t size ) { - max_failures_per_account = max_failures; + void set_max_failures_per_account(uint32_t max_failures, uint32_t size) { + max_failures_per_account = max_failures; reset_window_size_in_num_blocks = size; } - void add( const account_name& n, const fc::exception& e ) { + void add(const account_name& n, const fc::exception& e) { auto& fa = failed_accounts[n]; ++fa.num_failures; - fa.add( n, e ); + fa.add(n, e); } // return true if exceeds max_failures_per_account and should be dropped - bool failure_limit( const account_name& n ) { - auto fitr = failed_accounts.find( n ); - if( fitr != failed_accounts.end() && fitr->second.num_failures >= max_failures_per_account ) { + bool failure_limit(const account_name& n) { + auto fitr = failed_accounts.find(n); + if (fitr != failed_accounts.end() && fitr->second.num_failures >= max_failures_per_account) { ++fitr->second.num_failures; return true; } @@ -153,7 +152,7 @@ class account_failures { } void report_and_clear(uint32_t block_num, const chain::subjective_billing& sub_bill) { - if (last_reset_block_num != block_num && (block_num % reset_window_size_in_num_blocks == 0) ) { + if (last_reset_block_num != block_num && (block_num % reset_window_size_in_num_blocks == 0)) { report(block_num, sub_bill); failed_accounts.clear(); last_reset_block_num = block_num; @@ -167,624 +166,843 @@ class account_failures { private: void report(uint32_t block_num, const chain::subjective_billing& sub_bill) const { - if( _log.is_enabled(fc::log_level::debug)) { + if (_log.is_enabled(fc::log_level::debug)) { auto now = fc::time_point::now(); - for ( const auto& e : failed_accounts ) { + for (const auto& e : failed_accounts) { std::string reason; - if( e.second.is_deadline() ) reason += "deadline"; - if( e.second.is_tx_cpu_usage() ) { - if( !reason.empty() ) reason += ", "; + if (e.second.is_deadline()) + reason += "deadline"; + if (e.second.is_tx_cpu_usage()) { + if (!reason.empty()) + reason += ", "; reason += "tx_cpu_usage"; } - if( e.second.is_eosio_assert() ) { - if( !reason.empty() ) reason += ", "; + if (e.second.is_eosio_assert()) { + if (!reason.empty()) + reason += ", "; reason += "assert"; } - if( e.second.is_other() ) { - if( !reason.empty() ) reason += ", "; + if (e.second.is_other()) { + if (!reason.empty()) + reason += ", "; reason += "other"; } - fc_dlog( _log, "Failed ${n} trxs, account: ${a}, sub bill: ${b}us, reason: ${r}", - ("n", e.second.num_failures)("b", sub_bill.get_subjective_bill(e.first, now)) - ("a", e.first)("r", reason) ); + fc_dlog(_log, "Failed ${n} trxs, account: ${a}, sub bill: ${b}us, reason: ${r}", + ("n", e.second.num_failures)("b", sub_bill.get_subjective_bill(e.first, now))("a", e.first)("r", reason)); } } } struct account_failure { enum class ex_fields : uint8_t { - ex_deadline_exception = 1, - ex_tx_cpu_usage_exceeded = 2, + ex_deadline_exception = 1, + ex_tx_cpu_usage_exceeded = 2, ex_eosio_assert_exception = 4, - ex_other_exception = 8 + ex_other_exception = 8 }; - void add( const account_name& n, const fc::exception& e ) { + void add(const account_name& n, const fc::exception& e) { auto exception_code = e.code(); - if( exception_code == 
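The reformatted transaction_id_with_expiry_index above pairs a hashed_unique index (constant-time de-dupe by transaction id) with an ordered_non_unique index (expiry-ordered iteration for cheap pruning). A compile-ready sketch of the same container shape, with std::string and int64_t standing in for the chain's id and time types:

```cpp
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/member.hpp>
#include <cstdint>
#include <iostream>
#include <string>

namespace bmi = boost::multi_index;

struct id_with_expiry {
   std::string id;
   int64_t     expiry;
};

struct by_id;
struct by_expiry;

using expiry_index = boost::multi_index_container<
   id_with_expiry,
   bmi::indexed_by<
      bmi::hashed_unique<bmi::tag<by_id>,
                         bmi::member<id_with_expiry, std::string, &id_with_expiry::id>>,
      bmi::ordered_non_unique<bmi::tag<by_expiry>,
                              bmi::member<id_with_expiry, int64_t, &id_with_expiry::expiry>>>>;

int main() {
   expiry_index idx;
   idx.insert({"trx-a", 10});
   idx.insert({"trx-b", 5});
   // prune everything expired at t=7 by walking the ordered index
   auto& by_exp = idx.get<by_expiry>();
   by_exp.erase(by_exp.begin(), by_exp.upper_bound(7));
   std::cout << idx.size() << '\n'; // 1: only trx-a survives
}
```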
tx_cpu_usage_exceeded::code_value ) { - ex_flags = set_field( ex_flags, ex_fields::ex_tx_cpu_usage_exceeded ); - } else if( exception_code == deadline_exception::code_value ) { - ex_flags = set_field( ex_flags, ex_fields::ex_deadline_exception ); - } else if( exception_code == eosio_assert_message_exception::code_value || - exception_code == eosio_assert_code_exception::code_value ) { - ex_flags = set_field( ex_flags, ex_fields::ex_eosio_assert_exception ); + if (exception_code == tx_cpu_usage_exceeded::code_value) { + ex_flags = set_field(ex_flags, ex_fields::ex_tx_cpu_usage_exceeded); + } else if (exception_code == deadline_exception::code_value) { + ex_flags = set_field(ex_flags, ex_fields::ex_deadline_exception); + } else if (exception_code == eosio_assert_message_exception::code_value || + exception_code == eosio_assert_code_exception::code_value) { + ex_flags = set_field(ex_flags, ex_fields::ex_eosio_assert_exception); } else { - ex_flags = set_field( ex_flags, ex_fields::ex_other_exception ); - fc_dlog( _log, "Failed trx, account: ${a}, reason: ${r}, except: ${e}", - ("a", n)("r", exception_code)("e", e) ); + ex_flags = set_field(ex_flags, ex_fields::ex_other_exception); + fc_dlog(_log, "Failed trx, account: ${a}, reason: ${r}, except: ${e}", ("a", n)("r", exception_code)("e", e)); } } - bool is_deadline() const { return has_field( ex_flags, ex_fields::ex_deadline_exception ); } - bool is_tx_cpu_usage() const { return has_field( ex_flags, ex_fields::ex_tx_cpu_usage_exceeded ); } - bool is_eosio_assert() const { return has_field( ex_flags, ex_fields::ex_eosio_assert_exception ); } - bool is_other() const { return has_field( ex_flags, ex_fields::ex_other_exception ); } + bool is_deadline() const { return has_field(ex_flags, ex_fields::ex_deadline_exception); } + bool is_tx_cpu_usage() const { return has_field(ex_flags, ex_fields::ex_tx_cpu_usage_exceeded); } + bool is_eosio_assert() const { return has_field(ex_flags, ex_fields::ex_eosio_assert_exception); } + bool is_other() const { return has_field(ex_flags, ex_fields::ex_other_exception); } uint32_t num_failures = 0; - uint8_t ex_flags = 0; + uint8_t ex_flags = 0; }; std::map failed_accounts; - uint32_t max_failures_per_account = 3; - uint32_t last_reset_block_num = 0; - uint32_t reset_window_size_in_num_blocks = 1; + uint32_t max_failures_per_account = 3; + uint32_t last_reset_block_num = 0; + uint32_t reset_window_size_in_num_blocks = 1; }; struct block_time_tracker { - void add_idle_time( const fc::microseconds& idle ) { - block_idle_time += idle; - } + struct trx_time_tracker { + enum class time_status { success, fail, other }; - void add_fail_time( const fc::microseconds& fail_time, bool is_transient ) { - if( is_transient ) { - // transient time includes both success and fail time - transient_trx_time += fail_time; - ++transient_trx_num; - } else { - trx_fail_time += fail_time; - ++trx_fail_num; + trx_time_tracker(block_time_tracker& btt, bool transient) + : _block_time_tracker(btt), _is_transient(transient) {} + + trx_time_tracker(trx_time_tracker&&) = default; + + trx_time_tracker() = delete; + trx_time_tracker(const trx_time_tracker&) = delete; + trx_time_tracker& operator=(const trx_time_tracker&) = delete; + trx_time_tracker& operator=(trx_time_tracker&&) = delete; + + void trx_success() { _time_status = time_status::success; } + + // Neither success nor fail, will be reported as other + void cancel() { _time_status = time_status::other; } + + // updates block_time_tracker + ~trx_time_tracker() { + switch (_time_status) { 
+ case time_status::success: + _block_time_tracker.add_success_time(_is_transient); + break; + case time_status::fail: + _block_time_tracker.add_fail_time(_is_transient); + break; + case time_status::other: + _block_time_tracker.add_other_time(); + break; + } } + + private: + block_time_tracker& _block_time_tracker; + time_status _time_status = time_status::fail; + bool _is_transient; + }; + + trx_time_tracker start_trx(bool is_transient, fc::time_point now = fc::time_point::now()) { + assert(!paused); + add_other_time(now); + return {*this, is_transient}; } - void add_success_time( const fc::microseconds& time, bool is_transient ) { - if( is_transient ) { - transient_trx_time += time; - ++transient_trx_num; - } else { - trx_success_time += time; - ++trx_success_num; - } + void add_other_time(fc::time_point now = fc::time_point::now()) { + assert(!paused); + other_time += now - last_time_point; + last_time_point = now; } - void report( const fc::time_point& idle_trx_time, uint32_t block_num ) { + fc::microseconds add_idle_time(fc::time_point now = fc::time_point::now()) { + assert(!paused); + auto dur = now - last_time_point; + block_idle_time += dur; + last_time_point = now; // guard against calling add_idle_time() twice in a row. + return dur; + } + + // assumes idle time before pause + void pause(fc::time_point now = fc::time_point::now()) { + assert(!paused); + add_idle_time(now); + paused = true; + } + + // assumes last call was to pause + void unpause(fc::time_point now = fc::time_point::now()) { + assert(paused); + paused = false; + auto pause_time = now - last_time_point; + clear_time_point += pause_time; + last_time_point = now; + } + + void report(uint32_t block_num, account_name producer) { + using namespace std::string_literals; + assert(!paused); if( _log.is_enabled( fc::log_level::debug ) ) { auto now = fc::time_point::now(); - add_idle_time( now - idle_trx_time ); - fc_dlog( _log, "Block #${n} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, transient: ${trans_trx_num}, ${trans_trx_time}us, other: ${o}us", - ("n", block_num) - ("i", block_idle_time)("t", now - clear_time)("sn", trx_success_num)("s", trx_success_time) + auto diff = now - clear_time_point - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time - other_time; + fc_dlog( _log, "Block #${n} ${p} trx idle: ${i}us out of ${t}us, success: ${sn}, ${s}us, fail: ${fn}, ${f}us, " + "transient: ${ttn}, ${tt}us, other: ${o}us${rest}", + ("n", block_num)("p", producer) + ("i", block_idle_time)("t", now - clear_time_point)("sn", trx_success_num)("s", trx_success_time) ("fn", trx_fail_num)("f", trx_fail_time) - ("trans_trx_num", transient_trx_num)("trans_trx_time", transient_trx_time) - ("o", (now - clear_time) - block_idle_time - trx_success_time - trx_fail_time - transient_trx_time) ); + ("ttn", transient_trx_num)("tt", transient_trx_time) + ("o", other_time)("rest", diff.count() > 5 ? 
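The trx_time_tracker introduced above is an RAII guard: start_trx hands one out, the status defaults to fail, and the destructor books the elapsed time to the success, fail, or other bucket depending on what was marked before scope exit. A stripped-down sketch of that accounting pattern with simplified timekeeping; scoped_trx_timer and time_buckets are illustrative names, not the plugin's:

```cpp
#include <chrono>
#include <iostream>

struct time_buckets {
   std::chrono::microseconds success{0}, fail{0}, other{0};
};

class scoped_trx_timer {
public:
   explicit scoped_trx_timer(time_buckets& b)
      : buckets_(b), start_(std::chrono::steady_clock::now()) {}

   void trx_success() { status_ = status::success; }
   void cancel()      { status_ = status::other;   }

   // destructor charges the elapsed time to the bucket chosen above
   ~scoped_trx_timer() {
      auto dur = std::chrono::duration_cast<std::chrono::microseconds>(
         std::chrono::steady_clock::now() - start_);
      switch (status_) {
         case status::success: buckets_.success += dur; break;
         case status::fail:    buckets_.fail    += dur; break;
         case status::other:   buckets_.other   += dur; break;
      }
   }

private:
   enum class status { success, fail, other };
   time_buckets&                         buckets_;
   std::chrono::steady_clock::time_point start_;
   status status_ = status::fail; // anything not marked success counts as failed
};

int main() {
   time_buckets b;
   {
      scoped_trx_timer t(b);
      t.trx_success(); // simulate a successfully pushed transaction
   }
   std::cout << "success us: " << b.success.count() << '\n';
}
```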
", diff: "s + std::to_string(diff.count()) + "us"s : ""s ) ); } } void clear() { - block_idle_time = trx_fail_time = trx_success_time = transient_trx_time = fc::microseconds{}; + assert(!paused); + block_idle_time = trx_fail_time = trx_success_time = transient_trx_time = other_time = fc::microseconds{}; trx_fail_num = trx_success_num = transient_trx_num = 0; - clear_time = fc::time_point::now(); + clear_time_point = last_time_point = fc::time_point::now(); } + private: + void add_success_time(bool is_transient) { + assert(!paused); + auto now = fc::time_point::now(); + if( is_transient ) { + // transient time includes both success and fail time + transient_trx_time += now - last_time_point; + ++transient_trx_num; + } else { + trx_success_time += now - last_time_point; + ++trx_success_num; + } + last_time_point = now; + } + + void add_fail_time(bool is_transient) { + assert(!paused); + auto now = fc::time_point::now(); + if( is_transient ) { + // transient time includes both success and fail time + transient_trx_time += now - last_time_point; + ++transient_trx_num; + } else { + trx_fail_time += now - last_time_point; + ++trx_fail_num; + } + last_time_point = now; + } + + private: fc::microseconds block_idle_time; - uint32_t trx_success_num = 0; - uint32_t trx_fail_num = 0; - uint32_t transient_trx_num = 0; + uint32_t trx_success_num = 0; + uint32_t trx_fail_num = 0; + uint32_t transient_trx_num = 0; fc::microseconds trx_success_time; fc::microseconds trx_fail_time; fc::microseconds transient_trx_time; - fc::time_point clear_time{fc::time_point::now()}; + fc::microseconds other_time; + fc::time_point last_time_point{fc::time_point::now()}; + fc::time_point clear_time_point{fc::time_point::now()}; + bool paused = false; }; } // anonymous namespace class producer_plugin_impl : public std::enable_shared_from_this { - public: - producer_plugin_impl(boost::asio::io_service& io) - :_timer(io) - ,_transaction_ack_channel(app().get_channel()) - ,_ro_timer(io) - { - } - - uint32_t calculate_next_block_slot(const account_name& producer_name, uint32_t current_block_slot) const; - void schedule_production_loop(); - void schedule_maybe_produce_block( bool exhausted ); - void produce_block(); - bool maybe_produce_block(); - bool block_is_exhausted() const; - bool remove_expired_trxs( const fc::time_point& deadline ); - bool remove_expired_blacklisted_trxs( const fc::time_point& deadline ); - bool process_unapplied_trxs( const fc::time_point& deadline ); - void process_scheduled_and_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ); - bool process_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ); - - struct push_result { - bool block_exhausted = false; - bool trx_exhausted = false; - bool failed = false; - }; - push_result push_transaction( const fc::time_point& block_deadline, - const transaction_metadata_ptr& trx, - bool api_trx, bool return_failure_trace, - const next_function& next ); - push_result handle_push_result( const transaction_metadata_ptr& trx, - const next_function& next, - const fc::time_point& start, - chain::controller& chain, - const transaction_trace_ptr& trace, - bool return_failure_trace, - bool disable_subjective_enforcement, - account_name first_auth, - int64_t sub_bill, - uint32_t prev_billed_cpu_time_us ); - void log_trx_results( const transaction_metadata_ptr& trx, const transaction_trace_ptr& trace, const fc::time_point& start ); - void log_trx_results( const transaction_metadata_ptr& trx, const 
fc::exception_ptr& except_ptr ); - void log_trx_results( const packed_transaction_ptr& trx, const transaction_trace_ptr& trace, - const fc::exception_ptr& except_ptr, uint32_t billed_cpu_us, const fc::time_point& start, bool is_transient ); - - boost::program_options::variables_map _options; - bool _production_enabled = false; - bool _pause_production = false; - - using signature_provider_type = signature_provider_plugin::signature_provider_type; - std::map _signature_providers; - std::set _producers; - boost::asio::deadline_timer _timer; - using producer_watermark = std::pair; - std::map _producer_watermarks; - pending_block_mode _pending_block_mode = pending_block_mode::speculating; - unapplied_transaction_queue _unapplied_transactions; - size_t _thread_pool_size = config::default_controller_thread_pool_size; - named_thread_pool _thread_pool; - - std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool - std::atomic _received_block{0}; // modified by net_plugin thread pool - fc::microseconds _max_irreversible_block_age_us; - int32_t _cpu_effort_us = 0; - fc::time_point _pending_block_deadline; - uint32_t _max_block_cpu_usage_threshold_us = 0; - uint32_t _max_block_net_usage_threshold_bytes = 0; - int32_t _max_scheduled_transaction_time_per_block_ms = 0; - bool _disable_subjective_p2p_billing = true; - bool _disable_subjective_api_billing = true; - fc::time_point _irreversible_block_time; - fc::time_point _idle_trx_time{fc::time_point::now()}; - - std::vector _protocol_features_to_activate; - bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block - - chain_plugin* chain_plug = nullptr; - - compat::channels::transaction_ack::channel_type& _transaction_ack_channel; - - incoming::methods::block_sync::method_type::handle _incoming_block_sync_provider; - incoming::methods::transaction_async::method_type::handle _incoming_transaction_async_provider; - - transaction_id_with_expiry_index _blacklisted_transactions; - account_failures _account_fails; - block_time_tracker _time_tracker; - - std::optional _accepted_block_connection; - std::optional _accepted_block_header_connection; - std::optional _irreversible_block_connection; - std::optional _block_start_connection; - - /* - * HACK ALERT - * Boost timers can be in a state where a handler has not yet executed but is not abortable. - * As this method needs to mutate state handlers depend on for proper functioning to maintain - * invariants for other code (namely accepting incoming transactions in a nearly full block) - * the handlers capture a corelation ID at the time they are set. When they are executed - * they must check that correlation_id against the global ordinal. If it does not match that - * implies that this method has been called with the handler in the state where it should be - * cancelled but wasn't able to be. 
- */ - uint32_t _timer_corelation_id = 0; - - // keep a expected ratio between defer txn and incoming txn - double _incoming_defer_ratio = 1.0; // 1:1 - - // path to write the snapshots to - std::filesystem::path _snapshots_dir; - - // async snapshot scheduler - snapshot_scheduler _snapshot_scheduler; - - std::function _update_produced_block_metrics; - std::function _update_incoming_block_metrics; - - // ro for read-only - struct ro_trx_t { - transaction_metadata_ptr trx; - next_func_t next; - }; - // The queue storing previously exhausted read-only transactions to be re-executed by read-only threads - // thread-safe - class ro_trx_queue_t { - public: - void push_front(ro_trx_t&& t) { - std::lock_guard g(mtx); - queue.push_front(std::move(t)); - } +public: + producer_plugin_impl(boost::asio::io_service& io) + : _timer(io) + , _transaction_ack_channel(app().get_channel()) + , _ro_timer(io) {} + + uint32_t calculate_next_block_slot(const account_name& producer_name, uint32_t current_block_slot) const; + void schedule_production_loop(); + void schedule_maybe_produce_block(bool exhausted); + void produce_block(); + bool maybe_produce_block(); + bool block_is_exhausted() const; + bool remove_expired_trxs(const fc::time_point& deadline); + bool remove_expired_blacklisted_trxs(const fc::time_point& deadline); + bool process_unapplied_trxs(const fc::time_point& deadline); + void process_scheduled_and_incoming_trxs(const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr); + bool process_incoming_trxs(const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr); + + struct push_result { + bool block_exhausted = false; + bool trx_exhausted = false; + bool failed = false; + }; + push_result push_transaction(const fc::time_point& block_deadline, + const transaction_metadata_ptr& trx, + bool api_trx, + bool return_failure_trace, + block_time_tracker::trx_time_tracker& trx_tracker, + const next_function& next); + push_result handle_push_result(const transaction_metadata_ptr& trx, + const next_function& next, + const fc::time_point& start, + chain::controller& chain, + const transaction_trace_ptr& trace, + bool return_failure_trace, + bool disable_subjective_enforcement, + account_name first_auth, + int64_t sub_bill, + uint32_t prev_billed_cpu_time_us); + + void log_trx_results(const transaction_metadata_ptr& trx, const transaction_trace_ptr& trace, const fc::time_point& start); + void log_trx_results(const transaction_metadata_ptr& trx, const fc::exception_ptr& except_ptr); + void log_trx_results(const packed_transaction_ptr& trx, + const transaction_trace_ptr& trace, + const fc::exception_ptr& except_ptr, + uint32_t billed_cpu_us, + const fc::time_point& start, + bool is_transient); + + void add_greylist_accounts(const producer_plugin::greylist_params& params) { + EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); - bool empty() const { - std::lock_guard g(mtx); - return queue.empty(); - } + chain::controller& chain = chain_plug->chain(); + for (auto& acc : params.accounts) { + chain.add_resource_greylist(acc); + } + } - bool pop_front(ro_trx_t& t) { - std::unique_lock g(mtx); - if (queue.empty()) - return false; - t = queue.front(); - queue.pop_front(); - return true; - } + void remove_greylist_accounts(const producer_plugin::greylist_params& params) { + EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); - private: - mutable std::mutex mtx; - deque queue; // 
boost deque which is faster than std::deque - }; + chain::controller& chain = chain_plug->chain(); + for (auto& acc : params.accounts) { + chain.remove_resource_greylist(acc); + } + } - uint32_t _ro_thread_pool_size{ 0 }; - // Due to uncertainty to get total virtual memory size on a 5-level paging system for eos-vm-oc and - // possible memory exhuastion for large number of contract usage for non-eos-vm-oc, set a hard limit - static constexpr uint32_t _ro_max_threads_allowed{ 8 }; - named_thread_pool _ro_thread_pool; - fc::microseconds _ro_write_window_time_us{ 200000 }; - fc::microseconds _ro_read_window_time_us{ 60000 }; - static constexpr fc::microseconds _ro_read_window_minimum_time_us{ 10000 }; - fc::microseconds _ro_read_window_effective_time_us{ 0 }; // calculated during option initialization - std::atomic _ro_all_threads_exec_time_us; // total time spent by all threads executing transactions. use atomic for simplicity and performance - fc::time_point _ro_read_window_start_time; - fc::time_point _ro_window_deadline; // only modified on app thread, read-window deadline or write-window deadline - boost::asio::deadline_timer _ro_timer; // only accessible from the main thread - fc::microseconds _ro_max_trx_time_us{ 0 }; // calculated during option initialization - ro_trx_queue_t _ro_exhausted_trx_queue; - std::atomic _ro_num_active_exec_tasks{ 0 }; - std::vector> _ro_exec_tasks_fut; - - void start_write_window(); - void switch_to_write_window(); - void switch_to_read_window(); - bool read_only_execution_task(uint32_t pending_block_num); - void repost_exhausted_transactions(const fc::time_point& deadline); - bool push_read_only_transaction(transaction_metadata_ptr trx, next_function next); - - void consider_new_watermark( account_name producer, uint32_t block_num, block_timestamp_type timestamp) { - auto itr = _producer_watermarks.find( producer ); - if( itr != _producer_watermarks.end() ) { - itr->second.first = std::max( itr->second.first, block_num ); - itr->second.second = std::max( itr->second.second, timestamp ); - } else if( _producers.count( producer ) > 0 ) { - _producer_watermarks.emplace( producer, std::make_pair(block_num, timestamp) ); - } + producer_plugin::greylist_params get_greylist() const { + chain::controller& chain = chain_plug->chain(); + producer_plugin::greylist_params result; + const auto& list = chain.get_resource_greylist(); + result.accounts.reserve(list.size()); + for (auto& acc : list) { + result.accounts.push_back(acc); } + return result; + } - std::optional get_watermark( account_name producer ) const { - auto itr = _producer_watermarks.find( producer ); + producer_plugin::integrity_hash_information get_integrity_hash() { + chain::controller& chain = chain_plug->chain(); - if( itr == _producer_watermarks.end() ) return {}; + auto reschedule = fc::make_scoped_exit([this]() { schedule_production_loop(); }); - return itr->second; + if (chain.is_building_block()) { + // abort the pending block + abort_block(); + } else { + reschedule.cancel(); } - void on_block( const block_state_ptr& bsp ) { - auto& chain = chain_plug->chain(); - auto before = _unapplied_transactions.size(); - _unapplied_transactions.clear_applied( bsp ); - chain.get_mutable_subjective_billing().on_block( _log, bsp, fc::time_point::now() ); - if (before > 0) { - fc_dlog( _log, "Removed applied transactions before: ${before}, after: ${after}", - ("before", before)("after", _unapplied_transactions.size()) ); + return {chain.head_block_id(), chain.calculate_integrity_hash()}; + } + + void 
create_snapshot(producer_plugin::next_function next) { + chain::controller& chain = chain_plug->chain(); + + auto reschedule = fc::make_scoped_exit([this]() { schedule_production_loop(); }); + + auto predicate = [&]() -> void { + if (chain.is_building_block()) { + // abort the pending block + abort_block(); + } else { + reschedule.cancel(); } + }; + + _snapshot_scheduler.create_snapshot(std::move(next), chain, predicate); + } + + void update_runtime_options(const producer_plugin::runtime_options& options); + + producer_plugin::runtime_options get_runtime_options() const { + return {_max_transaction_time_ms, + _max_irreversible_block_age_us.count() < 0 ? -1 : _max_irreversible_block_age_us.count() / 1'000'000, + _cpu_effort_us, + _max_scheduled_transaction_time_per_block_ms, + chain_plug->chain().get_subjective_cpu_leeway() ? chain_plug->chain().get_subjective_cpu_leeway()->count() + : std::optional(), + _incoming_defer_ratio, + chain_plug->chain().get_greylist_limit()}; + } + + void schedule_protocol_feature_activations(const producer_plugin::scheduled_protocol_feature_activations& schedule); + + void plugin_shutdown(); + void plugin_startup(); + void plugin_initialize(const boost::program_options::variables_map& options); + + boost::program_options::variables_map _options; + bool _production_enabled = false; + bool _pause_production = false; + + using signature_provider_type = signature_provider_plugin::signature_provider_type; + std::map _signature_providers; + std::set _producers; + boost::asio::deadline_timer _timer; + using producer_watermark = std::pair; + std::map _producer_watermarks; + pending_block_mode _pending_block_mode = pending_block_mode::speculating; + unapplied_transaction_queue _unapplied_transactions; + size_t _thread_pool_size = config::default_controller_thread_pool_size; + named_thread_pool _thread_pool; + std::atomic _max_transaction_time_ms; // modified by app thread, read by net_plugin thread pool + std::atomic _received_block{0}; // modified by net_plugin thread pool + fc::microseconds _max_irreversible_block_age_us; + int32_t _cpu_effort_us = 0; + fc::time_point _pending_block_deadline; + uint32_t _max_block_cpu_usage_threshold_us = 0; + uint32_t _max_block_net_usage_threshold_bytes = 0; + int32_t _max_scheduled_transaction_time_per_block_ms = 0; + bool _disable_subjective_p2p_billing = true; + bool _disable_subjective_api_billing = true; + fc::time_point _irreversible_block_time; + fc::time_point _idle_trx_time{fc::time_point::now()}; + + std::vector _protocol_features_to_activate; + bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block + + chain_plugin* chain_plug = nullptr; + + compat::channels::transaction_ack::channel_type& _transaction_ack_channel; + + incoming::methods::block_sync::method_type::handle _incoming_block_sync_provider; + incoming::methods::transaction_async::method_type::handle _incoming_transaction_async_provider; + + transaction_id_with_expiry_index _blacklisted_transactions; + account_failures _account_fails; + block_time_tracker _time_tracker; + + std::optional _accepted_block_connection; + std::optional _accepted_block_header_connection; + std::optional _irreversible_block_connection; + std::optional _block_start_connection; + + /* + * HACK ALERT + * Boost timers can be in a state where a handler has not yet executed but is not abortable. 
+ * As this method needs to mutate state handlers depend on for proper functioning to maintain + * invariants for other code (namely accepting incoming transactions in a nearly full block) + * the handlers capture a correlation ID at the time they are set. When they are executed + * they must check that correlation_id against the global ordinal. If it does not match that + * implies that this method has been called with the handler in the state where it should be + * cancelled but wasn't able to be. + */ + uint32_t _timer_corelation_id = 0; + + // keep an expected ratio between defer txn and incoming txn + double _incoming_defer_ratio = 1.0; // 1:1 + + // path to write the snapshots to + std::filesystem::path _snapshots_dir; + + // async snapshot scheduler + snapshot_scheduler _snapshot_scheduler; + + std::function _update_produced_block_metrics; + std::function _update_incoming_block_metrics; + + // ro for read-only + struct ro_trx_t { + transaction_metadata_ptr trx; + next_func_t next; + }; + // The queue storing previously exhausted read-only transactions to be re-executed by read-only threads + // thread-safe + class ro_trx_queue_t { + public: + void push_front(ro_trx_t&& t) { + std::lock_guard g(mtx); + queue.push_front(std::move(t)); } - void on_block_header( const block_state_ptr& bsp ) { - consider_new_watermark( bsp->header.producer, bsp->block_num, bsp->block->timestamp ); + bool empty() const { + std::lock_guard g(mtx); + return queue.empty(); } - void on_irreversible_block( const signed_block_ptr& lib ) { - const chain::controller& chain = chain_plug->chain(); - EOS_ASSERT(chain.is_write_window(), producer_exception, "write window is expected for on_irreversible_block signal"); - _irreversible_block_time = lib->timestamp.to_time_point(); - _snapshot_scheduler.on_irreversible_block(lib, chain); + bool pop_front(ro_trx_t& t) { + std::unique_lock g(mtx); + if (queue.empty()) + return false; + t = queue.front(); + queue.pop_front(); + return true; } - void abort_block() { - auto& chain = chain_plug->chain(); + private: + mutable std::mutex mtx; + deque queue; // boost deque which is faster than std::deque + }; - if( chain.is_building_block() ) { - _time_tracker.report( _idle_trx_time, chain.pending_block_num() ); - } - _unapplied_transactions.add_aborted( chain.abort_block() ); - _idle_trx_time = fc::time_point::now(); + uint32_t _ro_thread_pool_size{0}; + // Due to uncertainty in getting total virtual memory size on a 5-level paging system for eos-vm-oc and + // possible memory exhaustion under heavy contract usage for non-eos-vm-oc, set a hard limit + static constexpr uint32_t _ro_max_threads_allowed{8}; + named_thread_pool _ro_thread_pool; + fc::microseconds _ro_write_window_time_us{200000}; + fc::microseconds _ro_read_window_time_us{60000}; + static constexpr fc::microseconds _ro_read_window_minimum_time_us{10000}; + fc::microseconds _ro_read_window_effective_time_us{0}; // calculated during option initialization + std::atomic _ro_all_threads_exec_time_us; // total time spent by all threads executing transactions.
+ // use atomic for simplicity and performance + fc::time_point _ro_read_window_start_time; + fc::time_point _ro_window_deadline; // only modified on app thread, read-window deadline or write-window deadline + boost::asio::deadline_timer _ro_timer; // only accessible from the main thread + fc::microseconds _ro_max_trx_time_us{0}; // calculated during option initialization + ro_trx_queue_t _ro_exhausted_trx_queue; + std::atomic _ro_num_active_exec_tasks{0}; + std::vector> _ro_exec_tasks_fut; + + void start_write_window(); + void switch_to_write_window(); + void switch_to_read_window(); + bool read_only_execution_task(uint32_t pending_block_num); + void repost_exhausted_transactions(const fc::time_point& deadline); + bool push_read_only_transaction(transaction_metadata_ptr trx, next_function next); + + void consider_new_watermark(account_name producer, uint32_t block_num, block_timestamp_type timestamp) { + auto itr = _producer_watermarks.find(producer); + if (itr != _producer_watermarks.end()) { + itr->second.first = std::max(itr->second.first, block_num); + itr->second.second = std::max(itr->second.second, timestamp); + } else if (_producers.count(producer) > 0) { + _producer_watermarks.emplace(producer, std::make_pair(block_num, timestamp)); } + } - bool on_incoming_block(const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { - auto& chain = chain_plug->chain(); - if ( _pending_block_mode == pending_block_mode::producing ) { - fc_wlog( _log, "dropped incoming block #${num} id: ${id}", - ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN") ); - return false; - } + std::optional get_watermark(account_name producer) const { + auto itr = _producer_watermarks.find(producer); - // start a new speculative block, speculative start_block may have been interrupted - auto ensure = fc::make_scoped_exit([this](){ - schedule_production_loop(); - }); + if (itr == _producer_watermarks.end()) + return {}; - const auto& id = block_id ? *block_id : block->calculate_id(); - auto blk_num = block->block_num(); + return itr->second; + } - auto now = fc::time_point::now(); - if (now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0)) // only log every 1000 during sync - fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); + void on_block(const block_state_ptr& bsp) { + auto& chain = chain_plug->chain(); + auto before = _unapplied_transactions.size(); + _unapplied_transactions.clear_applied(bsp); + chain.get_mutable_subjective_billing().on_block(_log, bsp, fc::time_point::now()); + if (before > 0) { + fc_dlog(_log, "Removed applied transactions before: ${before}, after: ${after}", ("before", before)("after", _unapplied_transactions.size())); + } + } - EOS_ASSERT( block->timestamp < (now + fc::seconds( 7 )), block_from_the_future, - "received a block from the future, ignoring it: ${id}", ("id", id) ); + void on_block_header(const block_state_ptr& bsp) { consider_new_watermark(bsp->header.producer, bsp->block_num, bsp->block->timestamp); } - /* de-dupe here... 
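The ro_trx_queue_t shown above is deliberately simple: a mutex-guarded deque onto which exhausted read-only transactions are pushed back (at the front) for re-execution in a later read window. A minimal generic sketch of the same shape; it uses std::deque and an std::optional return where the plugin prefers boost's deque and an out-parameter:

```cpp
#include <deque>
#include <iostream>
#include <mutex>
#include <optional>
#include <string>

template <typename T>
class retry_queue {
public:
   void push_front(T&& t) {
      std::lock_guard<std::mutex> g(mtx_);
      q_.push_front(std::move(t));
   }

   // pops the front element if present; empty optional means nothing queued
   std::optional<T> pop_front() {
      std::lock_guard<std::mutex> g(mtx_);
      if (q_.empty())
         return std::nullopt;
      T t = std::move(q_.front());
      q_.pop_front();
      return t;
   }

private:
   mutable std::mutex mtx_;
   std::deque<T>      q_;
};

int main() {
   retry_queue<std::string> q;
   q.push_front("exhausted-trx");
   if (auto t = q.pop_front())
      std::cout << *t << '\n';
}
```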
no point in aborting block if we already know the block */ - auto existing = chain.fetch_block_by_id( id ); - if( existing ) { return true; } // return true because the block is valid + void on_irreversible_block(const signed_block_ptr& lib) { + const chain::controller& chain = chain_plug->chain(); + EOS_ASSERT(chain.is_write_window(), producer_exception, "write window is expected for on_irreversible_block signal"); + _irreversible_block_time = lib->timestamp.to_time_point(); + _snapshot_scheduler.on_irreversible_block(lib, chain); + } - // start processing of block - std::future bsf; - if( !bsp ) { - bsf = chain.create_block_state_future( id, block ); - } + void abort_block() { + auto& chain = chain_plug->chain(); - // abort the pending block - abort_block(); + std::optional> block_info; + if( chain.is_building_block() ) { + block_info = std::make_tuple(chain.pending_block_num(), chain.pending_block_producer()); + } + _unapplied_transactions.add_aborted( chain.abort_block() ); + _time_tracker.add_other_time(); - // push the new block - auto handle_error = [&](const auto& e) - { - elog((e.to_detail_string())); - app().get_channel().publish( priority::medium, block ); - throw; - }; + if (block_info) { + auto[block_num, block_producer] = *block_info; + _time_tracker.report(block_num, block_producer); + } + _time_tracker.clear(); + } - controller::block_report br; - try { - const block_state_ptr& bspr = bsp ? bsp : bsf.get(); - chain.push_block( br, bspr, [this]( const branch_type& forked_branch ) { - _unapplied_transactions.add_forked( forked_branch ); - }, [this]( const transaction_id_type& id ) { - return _unapplied_transactions.get_trx( id ); - } ); - } catch ( const guard_exception& e ) { - chain_plugin::handle_guard_exception(e); - return false; - } catch ( const std::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch ( boost::interprocess::bad_alloc& ) { - chain_apis::api_base::handle_db_exhaustion(); - } catch ( const fork_database_exception& e ) { - elog("Cannot recover from ${e}. Shutting down.", ("e", e.to_detail_string())); - appbase::app().quit(); - return false; - } catch( const fc::exception& e ) { - handle_error(e); - } catch (const std::exception& e) { - handle_error(fc::std_exception_wrapper::from_current_exception(e)); - } + bool on_incoming_block(const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { + auto& chain = chain_plug->chain(); + if (in_producing_mode()) { + fc_wlog(_log, "dropped incoming block #${num} id: ${id}", ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN")); + return false; + } - const auto& hbs = chain.head_block_state(); - now = fc::time_point::now(); - if( hbs->header.timestamp.next().to_time_point() >= now ) { - _production_enabled = true; - } + // start a new speculative block, speculative start_block may have been interrupted + auto ensure = fc::make_scoped_exit([this]() { schedule_production_loop(); }); - if( now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0) ) { - ilog("Received block ${id}... 
#${n} @ ${t} signed by ${p} " - "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", - ("p",block->producer)("id",id.str().substr(8,16))("n",blk_num)("t",block->timestamp) - ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) - ("confs", block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) - ("elapsed", br.total_elapsed_time)("time", br.total_time) - ("latency", (now - block->timestamp).count()/1000 ) ); - if( chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr ) { // not applied to head - ilog("Block not applied to head ${id}... #${n} @ ${t} signed by ${p} " - "[trxs: ${count}, dpos: ${dpos}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: ${latency} ms]", - ("p",hbs->block->producer)("id",hbs->id.str().substr(8,16))("n",hbs->block_num)("t",hbs->block->timestamp) - ("count",hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum) - ("confs", hbs->block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) - ("elapsed", br.total_elapsed_time)("time", br.total_time) - ("latency", (now - hbs->block->timestamp).count()/1000 ) ); - } - } - if (_update_incoming_block_metrics) { - _update_incoming_block_metrics({.trxs_incoming_total = block->transactions.size(), - .cpu_usage_us = br.total_cpu_usage_us, - .net_usage_us = br.total_net_usage, - .last_irreversible = chain.last_irreversible_block_num(), - .head_block_num = chain.head_block_num()}); - } + auto now = fc::time_point::now(); + const auto& id = block_id ? *block_id : block->calculate_id(); + auto blk_num = block->block_num(); - return true; - } + if (now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0)) // only log every 1000 during sync + fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); - void restart_speculative_block() { - // abort the pending block - abort_block(); + _time_tracker.add_idle_time(now); - schedule_production_loop(); + EOS_ASSERT(block->timestamp < (now + fc::seconds(7)), block_from_the_future, "received a block from the future, ignoring it: ${id}", ("id", id)); + + /* de-dupe here... no point in aborting block if we already know the block */ + auto existing = chain.fetch_block_by_id(id); + if (existing) { + return true; // return true because the block is valid + } + + // start processing of block + std::future bsf; + if (!bsp) { + bsf = chain.create_block_state_future(id, block); } - void on_incoming_transaction_async(const packed_transaction_ptr& trx, - bool api_trx, - transaction_metadata::trx_type trx_type, - bool return_failure_traces, - next_function next) { - if ( trx_type == transaction_metadata::trx_type::read_only ) { - // Post all read only trxs to read_only queue for execution. - auto trx_metadata = transaction_metadata::create_no_recover_keys( trx, transaction_metadata::trx_type::read_only ); - app().executor().post(priority::low, exec_queue::read_only, [this, trx{std::move(trx_metadata)}, next{std::move(next)}]() mutable { - push_read_only_transaction( std::move(trx), std::move(next) ); - } ); - return; - } + // abort the pending block + abort_block(); - chain::controller& chain = chain_plug->chain(); - const auto max_trx_time_ms = ( trx_type == transaction_metadata::trx_type::read_only ) ? -1 : _max_transaction_time_ms.load(); - fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds( max_trx_time_ms ); - - auto future = transaction_metadata::start_recover_keys( trx, _thread_pool.get_executor(), - chain.get_chain_id(), fc::microseconds( max_trx_cpu_usage ), - trx_type, - chain.configured_subjective_signature_length_limit() ); - - auto is_transient = (trx_type == transaction_metadata::trx_type::read_only || trx_type == transaction_metadata::trx_type::dry_run); - if( !is_transient ) { - next = [this, trx, next{std::move(next)}]( const next_function_variant& response ) { - next( response ); - - fc::exception_ptr except_ptr; // rejected - if( std::holds_alternative( response ) ) { - except_ptr = std::get( response ); - } else if( std::get( response )->except ) { - except_ptr = std::get( response )->except->dynamic_copy_exception(); - } + // push the new block + auto handle_error = [&](const auto& e) { + elog("Exception on block ${bn}: ${e}", ("bn", blk_num)("e", e.to_detail_string())); + app().get_channel().publish(priority::medium, block); + throw; + }; - _transaction_ack_channel.publish( priority::low, std::pair( except_ptr, trx ) ); - }; + controller::block_report br; + try { + const block_state_ptr& bspr = bsp ? bsp : bsf.get(); + chain.push_block( + br, + bspr, + [this](const branch_type& forked_branch) { _unapplied_transactions.add_forked(forked_branch); }, + [this](const transaction_id_type& id) { return _unapplied_transactions.get_trx(id); }); + } catch (const guard_exception& e) { + chain_plugin::handle_guard_exception(e); + return false; + } catch (const std::bad_alloc&) { + chain_apis::api_base::handle_bad_alloc(); + } catch (boost::interprocess::bad_alloc&) { + chain_apis::api_base::handle_db_exhaustion(); + } catch (const fork_database_exception& e) { + elog("Cannot recover from ${e}. Shutting down.", ("e", e.to_detail_string())); + appbase::app().quit(); + return false; + } catch (const fc::exception& e) { + handle_error(e); + } catch (const std::exception& e) { + handle_error(fc::std_exception_wrapper::from_current_exception(e)); + } + + const auto& hbs = chain.head_block_state(); + now = fc::time_point::now(); + if (hbs->header.timestamp.next().to_time_point() >= now) { + _production_enabled = true; + } + + if (now - block->timestamp < fc::minutes(5) || (blk_num % 1000 == 0)) { + ilog("Received block ${id}... #${n} @ ${t} signed by ${p} " + "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, latency: " + "${latency} ms]", + ("p", block->producer)("id", id.str().substr(8, 16))("n", blk_num)("t", block->timestamp) + ("count", block->transactions.size())("lib", chain.last_irreversible_block_num()) + ("confs", block->confirmed)("net", br.total_net_usage)("cpu", br.total_cpu_usage_us) + ("elapsed", br.total_elapsed_time)("time", br.total_time)("latency", (now - block->timestamp).count() / 1000)); + if (chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr) { // not applied to head + ilog("Block not applied to head ${id}... 
#${n} @ ${t} signed by ${p} " + "[trxs: ${count}, dpos: ${dpos}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${elapsed}, time: ${time}, " + "latency: ${latency} ms]", + ("p", hbs->block->producer)("id", hbs->id.str().substr(8, 16))("n", hbs->block_num)("t", hbs->block->timestamp) + ("count", hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum)("confs", hbs->block->confirmed) + ("net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("elapsed", br.total_elapsed_time)("time", br.total_time) + ("latency", (now - hbs->block->timestamp).count() / 1000)); } - - boost::asio::post(_thread_pool.get_executor(), [self = this, future{std::move(future)}, api_trx, is_transient, return_failure_traces, - next{std::move(next)}, trx=trx]() mutable { - if( future.valid() ) { - future.wait(); - app().executor().post( priority::low, exec_queue::read_write, [self, future{std::move(future)}, api_trx, is_transient, next{std::move( next )}, trx{std::move(trx)}, return_failure_traces]() mutable { - auto start = fc::time_point::now(); - auto idle_time = start - self->_idle_trx_time; - self->_time_tracker.add_idle_time( idle_time ); - fc_tlog( _log, "Time since last trx: ${t}us", ("t", idle_time) ); - - auto exception_handler = [self, is_transient, &next, trx{std::move(trx)}, &start](fc::exception_ptr ex) { - self->_time_tracker.add_idle_time( start - self->_idle_trx_time ); - self->log_trx_results( trx, nullptr, ex, 0, start, is_transient ); - next( std::move(ex) ); - self->_idle_trx_time = fc::time_point::now(); - auto dur = self->_idle_trx_time - start; - self->_time_tracker.add_fail_time(dur, is_transient); - }; - try { - auto result = future.get(); - if( !self->process_incoming_transaction_async( result, api_trx, return_failure_traces, next) ) { - if( self->_pending_block_mode == pending_block_mode::producing ) { - self->schedule_maybe_produce_block( true ); - } else { - self->restart_speculative_block(); - } - } - self->_idle_trx_time = fc::time_point::now(); - } CATCH_AND_CALL(exception_handler); - } ); - } - }); + } + if (_update_incoming_block_metrics) { + _update_incoming_block_metrics({.trxs_incoming_total = block->transactions.size(), + .cpu_usage_us = br.total_cpu_usage_us, + .net_usage_us = br.total_net_usage, + .last_irreversible = chain.last_irreversible_block_num(), + .head_block_num = chain.head_block_num()}); } - bool process_incoming_transaction_async(const transaction_metadata_ptr& trx, - bool api_trx, - bool return_failure_trace, - const next_function& next) { - bool exhausted = false; - chain::controller& chain = chain_plug->chain(); - try { - const auto& id = trx->id(); - - fc::time_point bt = chain.is_building_block() ? 
chain.pending_block_time() : chain.head_block_time(); - const fc::time_point expire = trx->packed_trx()->expiration().to_time_point(); - if( expire < bt ) { - auto except_ptr = std::static_pointer_cast( - std::make_shared( - FC_LOG_MESSAGE( error, "expired transaction ${id}, expiration ${e}, block time ${bt}", - ("id", id)("e", expire)("bt", bt)))); - log_trx_results( trx, except_ptr ); - next( std::move(except_ptr) ); - return true; - } + return true; + } - if( chain.is_known_unexpired_transaction( id )) { - auto except_ptr = std::static_pointer_cast( std::make_shared( - FC_LOG_MESSAGE( error, "duplicate transaction ${id}", ("id", id)))); - next( std::move(except_ptr) ); - return true; - } + void restart_speculative_block() { + // abort the pending block + abort_block(); + + schedule_production_loop(); + } - if( !chain.is_building_block()) { - _unapplied_transactions.add_incoming( trx, api_trx, return_failure_trace, next ); - return true; + void on_incoming_transaction_async(const packed_transaction_ptr& trx, + bool api_trx, + transaction_metadata::trx_type trx_type, + bool return_failure_traces, + next_function next) { + if (trx_type == transaction_metadata::trx_type::read_only) { + // Post all read only trxs to read_only queue for execution. + auto trx_metadata = transaction_metadata::create_no_recover_keys(trx, transaction_metadata::trx_type::read_only); + app().executor().post(priority::low, exec_queue::read_only, [this, trx{std::move(trx_metadata)}, next{std::move(next)}]() mutable { + push_read_only_transaction(std::move(trx), std::move(next)); + }); + return; + } + + chain::controller& chain = chain_plug->chain(); + const auto max_trx_time_ms = (trx_type == transaction_metadata::trx_type::read_only) ? -1 : _max_transaction_time_ms.load(); + fc::microseconds max_trx_cpu_usage = max_trx_time_ms < 0 ? 
fc::microseconds::maximum() : fc::milliseconds(max_trx_time_ms); + + auto future = transaction_metadata::start_recover_keys(trx, + _thread_pool.get_executor(), + chain.get_chain_id(), + fc::microseconds(max_trx_cpu_usage), + trx_type, + chain.configured_subjective_signature_length_limit()); + + auto is_transient = (trx_type == transaction_metadata::trx_type::read_only || trx_type == transaction_metadata::trx_type::dry_run); + if (!is_transient) { + next = [this, trx, next{std::move(next)}](const next_function_variant& response) { + next(response); + + fc::exception_ptr except_ptr; // rejected + if (std::holds_alternative(response)) { + except_ptr = std::get(response); + } else if (std::get(response)->except) { + except_ptr = std::get(response)->except->dynamic_copy_exception(); } - const auto block_deadline = _pending_block_deadline; - push_result pr = push_transaction( block_deadline, trx, api_trx, return_failure_trace, next ); + _transaction_ack_channel.publish(priority::low, std::pair(except_ptr, trx)); + }; + } - exhausted = pr.block_exhausted; - if( pr.trx_exhausted ) { - _unapplied_transactions.add_incoming( trx, api_trx, return_failure_trace, next ); - } + boost::asio::post(_thread_pool.get_executor(), + [self = this, future{std::move(future)}, api_trx, is_transient, return_failure_traces, + next{std::move(next)}, trx = trx]() mutable { + if (future.valid()) { + future.wait(); + app().executor().post(priority::low, exec_queue::read_write, + [self, future{std::move(future)}, api_trx, is_transient, next{std::move(next)}, trx{std::move(trx)}, + return_failure_traces]() mutable { + auto start = fc::time_point::now(); + auto idle_time = self->_time_tracker.add_idle_time(start); + auto trx_tracker = self->_time_tracker.start_trx(is_transient, start); + fc_tlog(_log, "Time since last trx: ${t}us", ("t", idle_time)); + + auto exception_handler = + [self, is_transient, &next, trx{std::move(trx)}, &start](fc::exception_ptr ex) { + self->log_trx_results(trx, nullptr, ex, 0, start, is_transient); + next(std::move(ex)); + }; + try { + auto result = future.get(); + if (!self->process_incoming_transaction_async(result, api_trx, return_failure_traces, trx_tracker, next)) { + if (self->in_producing_mode()) { + self->schedule_maybe_produce_block(true); + } else { + self->restart_speculative_block(); + } + } + } + CATCH_AND_CALL(exception_handler); + }); + } + }); + } + + bool process_incoming_transaction_async(const transaction_metadata_ptr& trx, + bool api_trx, + bool return_failure_trace, + block_time_tracker::trx_time_tracker& trx_tracker, + const next_function& next) { + bool exhausted = false; + chain::controller& chain = chain_plug->chain(); + try { + const auto& id = trx->id(); + + fc::time_point bt = chain.is_building_block() ? 
chain.pending_block_time() : chain.head_block_time(); + const fc::time_point expire = trx->packed_trx()->expiration().to_time_point(); + if (expire < bt) { + auto except_ptr = std::static_pointer_cast(std::make_shared( + FC_LOG_MESSAGE(error, "expired transaction ${id}, expiration ${e}, block time ${bt}", ("id", id)("e", expire)("bt", bt)))); + log_trx_results(trx, except_ptr); + next(std::move(except_ptr)); + return true; + } - } catch ( const guard_exception& e ) { - chain_plugin::handle_guard_exception(e); - } catch ( boost::interprocess::bad_alloc& ) { - chain_apis::api_base::handle_db_exhaustion(); - } catch ( std::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } CATCH_AND_CALL(next); + if (chain.is_known_unexpired_transaction(id)) { + auto except_ptr = std::static_pointer_cast( + std::make_shared(FC_LOG_MESSAGE(error, "duplicate transaction ${id}", ("id", id)))); + next(std::move(except_ptr)); + return true; + } - return !exhausted; - } + if (!chain.is_building_block()) { + _unapplied_transactions.add_incoming(trx, api_trx, return_failure_trace, next); + trx_tracker.cancel(); + return true; + } + const auto block_deadline = _pending_block_deadline; + push_result pr = push_transaction(block_deadline, trx, api_trx, return_failure_trace, trx_tracker, next); - fc::microseconds get_irreversible_block_age() { - auto now = fc::time_point::now(); - if (now < _irreversible_block_time) { - return fc::microseconds(0); - } else { - return now - _irreversible_block_time; + if (pr.trx_exhausted) { + _unapplied_transactions.add_incoming(trx, api_trx, return_failure_trace, next); } + + exhausted = pr.block_exhausted; + + if ( !in_producing_mode() && pr.trx_exhausted ) + exhausted = true; // report transaction exhausted if trx was exhausted in non-producing mode (so we will restart + // a speculative block to retry it immediately, instead of waiting to receive a new block) + + } catch (const guard_exception& e) { + chain_plugin::handle_guard_exception(e); + } catch (boost::interprocess::bad_alloc&) { + chain_apis::api_base::handle_db_exhaustion(); + } catch (std::bad_alloc&) { + chain_apis::api_base::handle_bad_alloc(); } + CATCH_AND_CALL(next); - account_name get_pending_block_producer() { - auto& chain = chain_plug->chain(); - if (chain.is_building_block()) { - return chain.pending_block_producer(); - } else { - return {}; - } + return !exhausted; + } + + + fc::microseconds get_irreversible_block_age() { + auto now = fc::time_point::now(); + if (now < _irreversible_block_time) { + return fc::microseconds(0); + } else { + return now - _irreversible_block_time; + } + } + + account_name get_pending_block_producer() { + auto& chain = chain_plug->chain(); + if (chain.is_building_block()) { + return chain.pending_block_producer(); + } else { + return {}; } + } + + bool production_disabled_by_policy() { + return !_production_enabled || _pause_production || + (_max_irreversible_block_age_us.count() >= 0 && get_irreversible_block_age() >= _max_irreversible_block_age_us); + } - bool production_disabled_by_policy() { - return !_production_enabled || _pause_production || (_max_irreversible_block_age_us.count() >= 0 && get_irreversible_block_age() >= _max_irreversible_block_age_us); + bool is_producer_key(const chain::public_key_type& key) const { + return _signature_providers.find(key) != _signature_providers.end(); + } + + chain::signature_type sign_compact(const chain::public_key_type& key, const fc::sha256& digest) const { + if (key != chain::public_key_type()) { + auto private_key_itr = 
_signature_providers.find(key); + EOS_ASSERT(private_key_itr != _signature_providers.end(), producer_priv_key_not_found, + "Local producer has no private key in config.ini corresponding to public key ${key}", ("key", key)); + + return private_key_itr->second(digest); + } else { + return chain::signature_type(); } + } - enum class start_block_result { - succeeded, - failed, - waiting_for_block, - waiting_for_production, - exhausted - }; + void resume() { + _pause_production = false; + // it is possible that we are only speculating because of this policy which we have now changed + // re-evaluate that now + // + if (in_speculating_mode()) { + abort_block(); + fc_ilog(_log, "Producer resumed. Scheduling production."); + schedule_production_loop(); + } else { + fc_ilog(_log, "Producer resumed."); + } + } + + enum class start_block_result { + succeeded, + failed, + waiting_for_block, + waiting_for_production, + exhausted + }; - inline bool should_interrupt_start_block( const fc::time_point& deadline, uint32_t pending_block_num ) const; - start_block_result start_block(); + inline bool should_interrupt_start_block( const fc::time_point& deadline, uint32_t pending_block_num ) const; + start_block_result start_block(); - block_timestamp_type calculate_pending_block_time() const; - void schedule_delayed_production_loop(const std::weak_ptr& weak_this, std::optional wake_up_time); - std::optional calculate_producer_wake_up_time( const block_timestamp_type& ref_block_time ) const; + block_timestamp_type calculate_pending_block_time() const; + void schedule_delayed_production_loop(const std::weak_ptr& weak_this, std::optional wake_up_time); + std::optional calculate_producer_wake_up_time( const block_timestamp_type& ref_block_time ) const; + bool in_producing_mode() const { return _pending_block_mode == pending_block_mode::producing; } + bool in_speculating_mode() const { return _pending_block_mode == pending_block_mode::speculating; } }; void new_chain_banner(const eosio::chain::controller& db) @@ -862,8 +1080,6 @@ void producer_plugin::set_program_options( "ratio between incoming transactions and deferred transactions when both are queued for execution") ("incoming-transaction-queue-size-mb", bpo::value()->default_value( 1024 ), "Maximum size (in MiB) of the incoming transaction queue. 
Exceeding this value will subjectively drop transaction with resource exhaustion.") - ("disable-subjective-billing", bpo::value()->default_value(true), - "Disable subjective CPU billing for API/P2P transactions") ("disable-subjective-account-billing", boost::program_options::value>()->composing()->multitoken(), "Account which is excluded from subjective CPU billing") ("disable-subjective-p2p-billing", bpo::value()->default_value(true), @@ -886,23 +1102,12 @@ void producer_plugin::set_program_options( bool producer_plugin::is_producer_key(const chain::public_key_type& key) const { - auto private_key_itr = my->_signature_providers.find(key); - if(private_key_itr != my->_signature_providers.end()) - return true; - return false; + return my->is_producer_key(key); } chain::signature_type producer_plugin::sign_compact(const chain::public_key_type& key, const fc::sha256& digest) const { - if(key != chain::public_key_type()) { - auto private_key_itr = my->_signature_providers.find(key); - EOS_ASSERT(private_key_itr != my->_signature_providers.end(), producer_priv_key_not_found, "Local producer has no private key in config.ini corresponding to public key ${key}", ("key", key)); - - return private_key_itr->second(digest); - } - else { - return chain::signature_type(); - } + return my->sign_compact(key, digest); } template @@ -918,23 +1123,23 @@ if( options.count(op_name) ) { \ } \ } -void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) -{ try { - handle_sighup(); // Sets loggers +void producer_plugin_impl::plugin_initialize(const boost::program_options::variables_map& options) +{ + chain_plug = app().find_plugin(); + EOS_ASSERT(chain_plug, plugin_config_exception, "chain_plugin not found" ); + _options = &options; + LOAD_VALUE_SET(options, "producer-name", _producers) - my->chain_plug = app().find_plugin(); - EOS_ASSERT( my->chain_plug, plugin_config_exception, "chain_plugin not found" ); - my->_options = &options; - LOAD_VALUE_SET(options, "producer-name", my->_producers) + chain::controller& chain = chain_plug->chain(); - chain::controller& chain = my->chain_plug->chain(); + chain.set_producer_node(!_producers.empty()); - if( options.count("signature-provider") ) { + if (options.count("signature-provider")) { const std::vector key_spec_pairs = options["signature-provider"].as>(); for (const auto& key_spec_pair : key_spec_pairs) { try { const auto& [pubkey, provider] = app().get_plugin().signature_provider_for_specification(key_spec_pair); - my->_signature_providers[pubkey] = provider; + _signature_providers[pubkey] = provider; } catch(secure_enclave_exception& e) { elog("Error with Secure Enclave signature provider: ${e}; ignoring ${val}", ("e", e.top_message())("val", key_spec_pair)); } catch (fc::exception& e) { @@ -946,179 +1151,206 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ } auto subjective_account_max_failures_window_size = options.at("subjective-account-max-failures-window-size").as(); - EOS_ASSERT( subjective_account_max_failures_window_size > 0, plugin_config_exception, - "subjective-account-max-failures-window-size ${s} must be greater than 0", ("s", subjective_account_max_failures_window_size) ); + EOS_ASSERT(subjective_account_max_failures_window_size > 0, plugin_config_exception, + "subjective-account-max-failures-window-size ${s} must be greater than 0", ("s", subjective_account_max_failures_window_size)); - my->_account_fails.set_max_failures_per_account( 
options.at("subjective-account-max-failures").as(), - subjective_account_max_failures_window_size ); + _account_fails.set_max_failures_per_account(options.at("subjective-account-max-failures").as(), + subjective_account_max_failures_window_size); uint32_t cpu_effort_pct = options.at("cpu-effort-percent").as(); - EOS_ASSERT( cpu_effort_pct >= 0 && cpu_effort_pct <= 100, plugin_config_exception, - "cpu-effort-percent ${pct} must be 0 - 100", ("pct", cpu_effort_pct) ); - cpu_effort_pct *= config::percent_1; + EOS_ASSERT(cpu_effort_pct >= 0 && cpu_effort_pct <= 100, plugin_config_exception, + "cpu-effort-percent ${pct} must be 0 - 100", ("pct", cpu_effort_pct)); + cpu_effort_pct *= config::percent_1; - my->_cpu_effort_us = EOS_PERCENT( config::block_interval_us, cpu_effort_pct ); + _cpu_effort_us = EOS_PERCENT(config::block_interval_us, cpu_effort_pct); - my->_max_block_cpu_usage_threshold_us = options.at( "max-block-cpu-usage-threshold-us" ).as(); - EOS_ASSERT( my->_max_block_cpu_usage_threshold_us < config::block_interval_us, plugin_config_exception, - "max-block-cpu-usage-threshold-us ${t} must be 0 .. ${bi}", ("bi", config::block_interval_us)("t", my->_max_block_cpu_usage_threshold_us) ); + _max_block_cpu_usage_threshold_us = options.at("max-block-cpu-usage-threshold-us").as(); + EOS_ASSERT(_max_block_cpu_usage_threshold_us < config::block_interval_us, + plugin_config_exception, + "max-block-cpu-usage-threshold-us ${t} must be 0 .. ${bi}", + ("bi", config::block_interval_us)("t", _max_block_cpu_usage_threshold_us)); - my->_max_block_net_usage_threshold_bytes = options.at( "max-block-net-usage-threshold-bytes" ).as(); + _max_block_net_usage_threshold_bytes = options.at("max-block-net-usage-threshold-bytes").as(); - my->_max_scheduled_transaction_time_per_block_ms = options.at("max-scheduled-transaction-time-per-block-ms").as(); + _max_scheduled_transaction_time_per_block_ms = options.at("max-scheduled-transaction-time-per-block-ms").as(); - if( options.at( "subjective-cpu-leeway-us" ).as() != config::default_subjective_cpu_leeway_us ) { - chain.set_subjective_cpu_leeway( fc::microseconds( options.at( "subjective-cpu-leeway-us" ).as() ) ); + if (options.at("subjective-cpu-leeway-us").as() != config::default_subjective_cpu_leeway_us) { + chain.set_subjective_cpu_leeway(fc::microseconds(options.at("subjective-cpu-leeway-us").as())); } - fc::microseconds subjective_account_decay_time = fc::minutes(options.at( "subjective-account-decay-time-minutes" ).as()); - EOS_ASSERT( subjective_account_decay_time.count() > 0, plugin_config_exception, - "subjective-account-decay-time-minutes ${dt} must be greater than 0", ("dt", subjective_account_decay_time.to_seconds() / 60)); - chain.get_mutable_subjective_billing().set_expired_accumulator_average_window( subjective_account_decay_time ); + fc::microseconds subjective_account_decay_time = fc::minutes(options.at("subjective-account-decay-time-minutes").as()); + EOS_ASSERT(subjective_account_decay_time.count() > 0, + plugin_config_exception, + "subjective-account-decay-time-minutes ${dt} must be greater than 0", + ("dt", subjective_account_decay_time.to_seconds() / 60)); + chain.get_mutable_subjective_billing().set_expired_accumulator_average_window(subjective_account_decay_time); - my->_max_transaction_time_ms = options.at("max-transaction-time").as(); + _max_transaction_time_ms = options.at("max-transaction-time").as(); - my->_max_irreversible_block_age_us = fc::seconds(options.at("max-irreversible-block-age").as()); + _max_irreversible_block_age_us = 
fc::seconds(options.at("max-irreversible-block-age").as()); - auto max_incoming_transaction_queue_size = options.at("incoming-transaction-queue-size-mb").as() * 1024*1024; + auto max_incoming_transaction_queue_size = options.at("incoming-transaction-queue-size-mb").as() * 1024 * 1024; - EOS_ASSERT( max_incoming_transaction_queue_size > 0, plugin_config_exception, - "incoming-transaction-queue-size-mb ${mb} must be greater than 0", ("mb", max_incoming_transaction_queue_size) ); + EOS_ASSERT(max_incoming_transaction_queue_size > 0, plugin_config_exception, + "incoming-transaction-queue-size-mb ${mb} must be greater than 0", ("mb", max_incoming_transaction_queue_size)); - my->_unapplied_transactions.set_max_transaction_queue_size( max_incoming_transaction_queue_size ); + _unapplied_transactions.set_max_transaction_queue_size(max_incoming_transaction_queue_size); - my->_incoming_defer_ratio = options.at("incoming-defer-ratio").as(); + _incoming_defer_ratio = options.at("incoming-defer-ratio").as(); - bool disable_subjective_billing = options.at("disable-subjective-billing").as(); - my->_disable_subjective_p2p_billing = options.at("disable-subjective-p2p-billing").as(); - my->_disable_subjective_api_billing = options.at("disable-subjective-api-billing").as(); - dlog( "disable-subjective-billing: ${s}, disable-subjective-p2p-billing: ${p2p}, disable-subjective-api-billing: ${api}", - ("s", disable_subjective_billing)("p2p", my->_disable_subjective_p2p_billing)("api", my->_disable_subjective_api_billing) ); - if( !disable_subjective_billing ) { - my->_disable_subjective_p2p_billing = my->_disable_subjective_api_billing = false; - } else if( !my->_disable_subjective_p2p_billing || !my->_disable_subjective_api_billing ) { - disable_subjective_billing = false; - } - if( disable_subjective_billing ) { + _disable_subjective_p2p_billing = options.at("disable-subjective-p2p-billing").as(); + _disable_subjective_api_billing = options.at("disable-subjective-api-billing").as(); + dlog("disable-subjective-p2p-billing: ${p2p}, disable-subjective-api-billing: ${api}", + ("p2p", _disable_subjective_p2p_billing)("api", _disable_subjective_api_billing)); + if (_disable_subjective_p2p_billing && _disable_subjective_api_billing) { chain.get_mutable_subjective_billing().disable(); - ilog( "Subjective CPU billing disabled" ); - } else if( !my->_disable_subjective_p2p_billing && !my->_disable_subjective_api_billing ) { - ilog( "Subjective CPU billing enabled" ); + ilog("Subjective CPU billing disabled"); + } else if (!_disable_subjective_p2p_billing && !_disable_subjective_api_billing) { + ilog("Subjective CPU billing enabled"); } else { - if( my->_disable_subjective_p2p_billing ) ilog( "Subjective CPU billing of P2P trxs disabled " ); - if( my->_disable_subjective_api_billing ) ilog( "Subjective CPU billing of API trxs disabled " ); + if (_disable_subjective_p2p_billing) + ilog("Subjective CPU billing of P2P trxs disabled "); + if (_disable_subjective_api_billing) + ilog("Subjective CPU billing of API trxs disabled "); } - my->_thread_pool_size = options.at( "producer-threads" ).as(); - EOS_ASSERT( my->_thread_pool_size > 0, plugin_config_exception, - "producer-threads ${num} must be greater than 0", ("num", my->_thread_pool_size)); + _thread_pool_size = options.at("producer-threads").as(); + EOS_ASSERT(_thread_pool_size > 0, plugin_config_exception, "producer-threads ${num} must be greater than 0", ("num", _thread_pool_size)); - if( options.count( "snapshots-dir" )) { - auto sd = options.at( "snapshots-dir" ).as(); 
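// Editor's note — illustrative sketch, not part of the patch: the snapshots-dir
// handling below anchors relative paths under the node's data directory and uses
// absolute paths verbatim. The helper name and example paths here are hypothetical.
#include <filesystem>

inline std::filesystem::path resolve_snapshots_dir(const std::filesystem::path& data_dir,
                                                   const std::filesystem::path& sd) {
   // e.g. data_dir "/var/lib/nodeos" + "--snapshots-dir snapshots" -> "/var/lib/nodeos/snapshots",
   // while an absolute "--snapshots-dir /mnt/snapshots" is kept as-is
   return sd.is_relative() ? data_dir / sd : sd;
}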
- if( sd.is_relative()) { - my->_snapshots_dir = app().data_dir() / sd; - if (!std::filesystem::exists(my->_snapshots_dir)) { - std::filesystem::create_directories(my->_snapshots_dir); + if (options.count("snapshots-dir")) { + auto sd = options.at("snapshots-dir").as(); + if (sd.is_relative()) { + _snapshots_dir = app().data_dir() / sd; + if (!std::filesystem::exists(_snapshots_dir)) { + std::filesystem::create_directories(_snapshots_dir); } } else { - my->_snapshots_dir = sd; + _snapshots_dir = sd; } - EOS_ASSERT( std::filesystem::is_directory(my->_snapshots_dir), snapshot_directory_not_found_exception, - "No such directory '${dir}'", ("dir", my->_snapshots_dir) ); + EOS_ASSERT(std::filesystem::is_directory(_snapshots_dir), + snapshot_directory_not_found_exception, + "No such directory '${dir}'", + ("dir", _snapshots_dir)); if (auto resmon_plugin = app().find_plugin()) { - resmon_plugin->monitor_directory(my->_snapshots_dir); + resmon_plugin->monitor_directory(_snapshots_dir); } } - if ( options.count( "read-only-threads" ) ) { - my->_ro_thread_pool_size = options.at( "read-only-threads" ).as(); - } else if ( my->_producers.empty() ) { - if( options.count( "plugin" ) ) { - const auto& v = options.at( "plugin" ).as>(); - auto i = std::find_if( v.cbegin(), v.cend(), []( const std::string& p ) { return p == "eosio::chain_api_plugin"; } ); - if( i != v.cend() ) { + if (options.count("read-only-threads")) { + _ro_thread_pool_size = options.at("read-only-threads").as(); + } else if (_producers.empty()) { + if (options.count("plugin")) { + const auto& v = options.at("plugin").as>(); + auto i = std::find_if(v.cbegin(), v.cend(), [](const std::string& p) { return p == "eosio::chain_api_plugin"; }); + if (i != v.cend()) { // default to 3 threads for non producer nodes running chain_api_plugin if not specified - my->_ro_thread_pool_size = 3; - ilog( "chain_api_plugin configured, defaulting read-only-threads to ${t}", ("t", my->_ro_thread_pool_size) ); + _ro_thread_pool_size = 3; + ilog("chain_api_plugin configured, defaulting read-only-threads to ${t}", ("t", _ro_thread_pool_size)); } } } - EOS_ASSERT( test_mode_ || my->_ro_thread_pool_size == 0 || my->_producers.empty(), plugin_config_exception, "read-only-threads not allowed on producer node" ); + EOS_ASSERT(producer_plugin::test_mode_ || _ro_thread_pool_size == 0 || _producers.empty(), plugin_config_exception, + "read-only-threads not allowed on producer node"); // only initialize other read-only options when read-only thread pool is enabled - if ( my->_ro_thread_pool_size > 0 ) { + if (_ro_thread_pool_size > 0) { #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED if (chain.is_eos_vm_oc_enabled()) { // EOS VM OC requires 4.2TB Virtual for each executing thread. Make sure the memory // required by configured read-only threads does not exceed the total system virtual memory. 
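// Editor's note — illustrative arithmetic, not part of the patch: the code below reads
// VmallocTotal/VmallocUsed (in kB) from /proc/meminfo and allows
// (total - used) / 4200000000 kB (~4.2 TB per thread), then reserves 2 threads.
// The VmallocTotal figure used here is only an example of a typical x86-64 value (~32 TiB).
static_assert(34359738367ull / 4200000000ull - 2 == 6,
              "~32 TiB of vmalloc space with negligible use supports up to 6 EOS VM OC read-only threads");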
- std::string attr_name; - size_t vm_total_kb { 0 }; - size_t vm_used_kb { 0 }; + std::string attr_name; + size_t vm_total_kb{0}; + size_t vm_used_kb{0}; std::ifstream meminfo_file("/proc/meminfo"); while (meminfo_file >> attr_name) { if (attr_name == "VmallocTotal:") { - if ( !(meminfo_file >> vm_total_kb) ) + if (!(meminfo_file >> vm_total_kb)) break; } else if (attr_name == "VmallocUsed:") { - if ( !(meminfo_file >> vm_used_kb) ) + if (!(meminfo_file >> vm_used_kb)) break; } meminfo_file.ignore(std::numeric_limits::max(), '\n'); } - EOS_ASSERT( vm_total_kb > 0, plugin_config_exception, "Unable to get system virtual memory size (not a Linux?), therefore cannot determine if the system has enough virtual memory for multi-threaded read-only transactions on EOS VM OC"); - EOS_ASSERT( vm_total_kb > vm_used_kb, plugin_config_exception, "vm total (${t}) must be greater than vm used (${u})", ("t", vm_total_kb)("u", vm_used_kb)); + EOS_ASSERT(vm_total_kb > 0, plugin_config_exception, + "Unable to get system virtual memory size (not a Linux?), therefore cannot determine if the system has enough " + "virtual memory for multi-threaded read-only transactions on EOS VM OC"); + EOS_ASSERT(vm_total_kb > vm_used_kb, plugin_config_exception, + "vm total (${t}) must be greater than vm used (${u})", + ("t", vm_total_kb)("u", vm_used_kb)); uint32_t num_threads_supported = (vm_total_kb - vm_used_kb) / 4200000000; // reserve 1 for the app thread, 1 for anything else which might use VM - EOS_ASSERT( num_threads_supported > 2, plugin_config_exception, "With the EOS VM OC configured, there is not enough system virtual memory to support the required minimum of 3 threads (1 for main thread, 1 for read-only, and 1 for anything else), vm total: ${t}, vm used: ${u}", ("t", vm_total_kb)("u", vm_used_kb)); + EOS_ASSERT(num_threads_supported > 2, plugin_config_exception, + "With the EOS VM OC configured, there is not enough system virtual memory to support the required minimum of " + "3 threads (1 for main thread, 1 for read-only, and 1 for anything else), vm total: ${t}, vm used: ${u}", + ("t", vm_total_kb)("u", vm_used_kb)); num_threads_supported -= 2; - auto actual_threads_allowed = std::min(my->_ro_max_threads_allowed, num_threads_supported); - ilog("vm total in kb: ${total}, vm used in kb: ${used}, number of EOS VM OC threads supported ((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", ("total", vm_total_kb) ("used", vm_used_kb) ("supp", num_threads_supported) ("max", my->_ro_max_threads_allowed)("actual", actual_threads_allowed)); - EOS_ASSERT( my->_ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", ("th", my->_ro_thread_pool_size) ("allowed", actual_threads_allowed) ); + auto actual_threads_allowed = std::min(_ro_max_threads_allowed, num_threads_supported); + ilog("vm total in kb: ${total}, vm used in kb: ${used}, number of EOS VM OC threads supported " + "((vm total - vm used)/4.2 TB - 2): ${supp}, max allowed: ${max}, actual allowed: ${actual}", + ("total", vm_total_kb)("used", vm_used_kb)("supp", num_threads_supported)("max", _ro_max_threads_allowed) + ("actual", actual_threads_allowed)); + EOS_ASSERT(_ro_thread_pool_size <= actual_threads_allowed, plugin_config_exception, + "read-only-threads (${th}) greater than number of threads allowed for EOS VM OC (${allowed})", + ("th", _ro_thread_pool_size)("allowed", actual_threads_allowed)); } #endif - 
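// Editor's note — illustrative check, not part of the patch: the read-only window
// options validated below must satisfy two constraints. The concrete numbers here are
// hypothetical: a 60000 us read window, a 10000 us built-in minimum window time, and a
// 30 ms max-transaction-time.
static_assert(60000 - 10000 == 50000, "effective read window = read window - minimum window time");
static_assert(60000 > 30000 + 10000, "read window must cover max-transaction-time plus the minimum margin");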
-      EOS_ASSERT( my->_ro_thread_pool_size <= my->_ro_max_threads_allowed, plugin_config_exception, "read-only-threads (${th}) greater than the number of threads allowed (${allowed})", ("th", my->_ro_thread_pool_size) ("allowed", my->_ro_max_threads_allowed) );
-
-      my->_ro_write_window_time_us = fc::microseconds( options.at( "read-only-write-window-time-us" ).as<uint32_t>() );
-      my->_ro_read_window_time_us = fc::microseconds( options.at( "read-only-read-window-time-us" ).as<uint32_t>() );
-      EOS_ASSERT( my->_ro_read_window_time_us > my->_ro_read_window_minimum_time_us, plugin_config_exception, "read-only-read-window-time-us (${read}) must be at least greater than ${min} us", ("read", my->_ro_read_window_time_us) ("min", my->_ro_read_window_minimum_time_us) );
-      my->_ro_read_window_effective_time_us = my->_ro_read_window_time_us - my->_ro_read_window_minimum_time_us;
+      EOS_ASSERT(_ro_thread_pool_size <= _ro_max_threads_allowed,
+                 plugin_config_exception,
+                 "read-only-threads (${th}) greater than the number of threads allowed (${allowed})",
+                 ("th", _ro_thread_pool_size)("allowed", _ro_max_threads_allowed));
+
+      _ro_write_window_time_us = fc::microseconds(options.at("read-only-write-window-time-us").as<uint32_t>());
+      _ro_read_window_time_us = fc::microseconds(options.at("read-only-read-window-time-us").as<uint32_t>());
+      EOS_ASSERT(_ro_read_window_time_us > _ro_read_window_minimum_time_us,
+                 plugin_config_exception,
+                 "read-only-read-window-time-us (${read}) must be at least greater than ${min} us",
+                 ("read", _ro_read_window_time_us)("min", _ro_read_window_minimum_time_us));
+      _ro_read_window_effective_time_us = _ro_read_window_time_us - _ro_read_window_minimum_time_us;
       // Make sure a read-only transaction can finish within the read
       // window if scheduled at the very beginning of the window.
       // Add _ro_read_window_minimum_time_us for safety margin.
-      if ( my->_max_transaction_time_ms.load() > 0 ) {
-         EOS_ASSERT( my->_ro_read_window_time_us > ( fc::milliseconds(my->_max_transaction_time_ms.load()) + my->_ro_read_window_minimum_time_us ), plugin_config_exception, "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).", ("read", my->_ro_read_window_time_us) ("trx_time", my->_max_transaction_time_ms.load() * 1000) ("min", my->_ro_read_window_minimum_time_us) );
+      if (_max_transaction_time_ms.load() > 0) {
+         EOS_ASSERT(
+            _ro_read_window_time_us > (fc::milliseconds(_max_transaction_time_ms.load()) + _ro_read_window_minimum_time_us),
+            plugin_config_exception,
+            "read-only-read-window-time-us (${read} us) must be greater than max-transaction-time (${trx_time} us) "
+            "plus ${min} us, required: ${read} us > (${trx_time} us + ${min} us).",
+            ("read", _ro_read_window_time_us)("trx_time", _max_transaction_time_ms.load() * 1000)("min", _ro_read_window_minimum_time_us));
       }
       ilog("read-only-write-window-time-us: ${ww} us, read-only-read-window-time-us: ${rw} us, effective read window time to be used: ${w} us",
-           ("ww", my->_ro_write_window_time_us)("rw", my->_ro_read_window_time_us)("w", my->_ro_read_window_effective_time_us));
+           ("ww", _ro_write_window_time_us)("rw", _ro_read_window_time_us)("w", _ro_read_window_effective_time_us));
    }

    // Make sure _ro_max_trx_time_us is always set.
- if ( my->_max_transaction_time_ms.load() > 0 ) { - my->_ro_max_trx_time_us = fc::milliseconds(my->_max_transaction_time_ms.load()); + if (_max_transaction_time_ms.load() > 0) { + _ro_max_trx_time_us = fc::milliseconds(_max_transaction_time_ms.load()); } else { // max-transaction-time can be set to negative for unlimited time - my->_ro_max_trx_time_us = fc::microseconds::maximum(); + _ro_max_trx_time_us = fc::microseconds::maximum(); } - ilog("read-only-threads ${s}, max read-only trx time to be enforced: ${t} us}", ("s", my->_ro_thread_pool_size)("t", my->_ro_max_trx_time_us)); + ilog("read-only-threads ${s}, max read-only trx time to be enforced: ${t} us", ("s", _ro_thread_pool_size)("t", _ro_max_trx_time_us)); - my->_incoming_block_sync_provider = app().get_method().register_provider( - [this](const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { - return my->on_incoming_block(block, block_id, bsp); - }); + _incoming_block_sync_provider = app().get_method().register_provider( + [this](const signed_block_ptr& block, const std::optional& block_id, const block_state_ptr& bsp) { + return on_incoming_block(block, block_id, bsp); + }); - my->_incoming_transaction_async_provider = app().get_method().register_provider( - [this](const packed_transaction_ptr& trx, bool api_trx, transaction_metadata::trx_type trx_type, bool return_failure_traces, next_function next) -> void { - return my->on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next ); - }); + _incoming_transaction_async_provider = + app().get_method().register_provider( + [this](const packed_transaction_ptr& trx, bool api_trx, transaction_metadata::trx_type trx_type, + bool return_failure_traces, next_function next) -> void { + return on_incoming_transaction_async(trx, api_trx, trx_type, return_failure_traces, next); + }); if (options.count("greylist-account")) { - std::vector greylist = options["greylist-account"].as>(); - greylist_params param; - for (auto &a : greylist) { + std::vector greylist = options["greylist-account"].as>(); + producer_plugin::greylist_params param; + for (auto& a : greylist) { param.accounts.push_back(account_name(a)); } add_greylist_accounts(param); @@ -1126,132 +1358,130 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ { uint32_t greylist_limit = options.at("greylist-limit").as(); - chain.set_greylist_limit( greylist_limit ); + chain.set_greylist_limit(greylist_limit); } - if( options.count("disable-subjective-account-billing") ) { + if (options.count("disable-subjective-account-billing")) { std::vector accounts = options["disable-subjective-account-billing"].as>(); - for( const auto& a : accounts ) { - chain.get_mutable_subjective_billing().disable_account( account_name(a) ); + for (const auto& a : accounts) { + chain.get_mutable_subjective_billing().disable_account(account_name(a)); } } - my->_snapshot_scheduler.set_db_path(my->_snapshots_dir); - my->_snapshot_scheduler.set_snapshots_path(my->_snapshots_dir); -} FC_LOG_AND_RETHROW() } + _snapshot_scheduler.set_db_path(_snapshots_dir); + _snapshot_scheduler.set_snapshots_path(_snapshots_dir); +} -using namespace std::chrono_literals; -void producer_plugin::plugin_startup() -{ try { +void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { - ilog("producer plugin: plugin_startup() begin"); + handle_sighup(); // Sets loggers + my->plugin_initialize(options); + } + FC_LOG_AND_RETHROW() +} - my->_thread_pool.start( 
my->_thread_pool_size, []( const fc::exception& e ) { - fc_elog( _log, "Exception in producer thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); - app().quit(); - } ); +using namespace std::chrono_literals; +void producer_plugin_impl::plugin_startup() { + try { + try { + ilog("producer plugin: plugin_startup() begin"); + _thread_pool.start(_thread_pool_size, [](const fc::exception& e) { + fc_elog(_log, "Exception in producer thread pool, exiting: ${e}", ("e", e.to_detail_string())); + app().quit(); + }); - chain::controller& chain = my->chain_plug->chain(); - EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, plugin_config_exception, - "node cannot have any producer-name configured because block production is impossible when read_mode is \"irreversible\"" ); - EOS_ASSERT( my->_producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, plugin_config_exception, - "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\"" ); + chain::controller& chain = chain_plug->chain(); + EOS_ASSERT(_producers.empty() || chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, plugin_config_exception, + "node cannot have any producer-name configured because block production is impossible when read_mode is \"irreversible\""); + + EOS_ASSERT(_producers.empty() || chain.get_validation_mode() == chain::validation_mode::FULL, plugin_config_exception, + "node cannot have any producer-name configured because block production is not safe when validation_mode is not \"full\""); + + EOS_ASSERT(_producers.empty() || chain_plug->accept_transactions(), plugin_config_exception, + "node cannot have any producer-name configured because no block production is possible with no [api|p2p]-accepted-transactions"); + + _accepted_block_connection.emplace(chain.accepted_block.connect([this](const auto& bsp) { on_block(bsp); })); + _accepted_block_header_connection.emplace(chain.accepted_block_header.connect([this](const auto& bsp) { on_block_header(bsp); })); + _irreversible_block_connection.emplace( + chain.irreversible_block.connect([this](const auto& bsp) { on_irreversible_block(bsp->block); })); + + _block_start_connection.emplace(chain.block_start.connect([this, &chain](uint32_t bs) { + try { + _snapshot_scheduler.on_start_block(bs, chain); + } catch (const snapshot_execution_exception& e) { + fc_elog(_log, "Exception during snapshot execution: ${e}", ("e", e.to_detail_string())); + app().quit(); + } + })); - EOS_ASSERT( my->_producers.empty() || my->chain_plug->accept_transactions(), plugin_config_exception, - "node cannot have any producer-name configured because no block production is possible with no [api|p2p]-accepted-transactions" ); + const auto lib_num = chain.last_irreversible_block_num(); + const auto lib = chain.fetch_block_by_number(lib_num); + if (lib) { + on_irreversible_block(lib); + } else { + _irreversible_block_time = fc::time_point::maximum(); + } - my->_accepted_block_connection.emplace(chain.accepted_block.connect( [this]( const auto& bsp ){ my->on_block( bsp ); } )); - my->_accepted_block_header_connection.emplace(chain.accepted_block_header.connect( [this]( const auto& bsp ){ my->on_block_header( bsp ); } )); - my->_irreversible_block_connection.emplace(chain.irreversible_block.connect( [this]( const auto& bsp ){ my->on_irreversible_block( bsp->block ); } )); - - my->_block_start_connection.emplace(chain.block_start.connect( [this, &chain]( uint32_t bs ) { - 
try { - my->_snapshot_scheduler.on_start_block(bs, chain); - } - catch (const snapshot_execution_exception & e) { - fc_elog( _log, "Exception during snapshot execution: ${e}", ("e", e.to_detail_string()) ); - app().quit(); - } - } )); - - const auto lib_num = chain.last_irreversible_block_num(); - const auto lib = chain.fetch_block_by_number(lib_num); - if (lib) { - my->on_irreversible_block(lib); - } else { - my->_irreversible_block_time = fc::time_point::maximum(); - } + if (!_producers.empty()) { + ilog("Launching block production for ${n} producers at ${time}.", ("n", _producers.size())("time", fc::time_point::now())); - if (!my->_producers.empty()) { - ilog("Launching block production for ${n} producers at ${time}.", ("n", my->_producers.size())("time",fc::time_point::now())); + if (_production_enabled) { + if (chain.head_block_num() == 0) { + new_chain_banner(chain); + } + } + } - if (my->_production_enabled) { - if (chain.head_block_num() == 0) { - new_chain_banner(chain); + if (_ro_thread_pool_size > 0) { + _ro_thread_pool.start( + _ro_thread_pool_size, + [](const fc::exception& e) { + fc_elog(_log, "Exception in read-only thread pool, exiting: ${e}", ("e", e.to_detail_string())); + app().quit(); + }, + [&]() { + chain.init_thread_local_data(); + }); + + _time_tracker.pause(); // start_write_window assumes time_tracker is paused + start_write_window(); } - } - } - if ( my->_ro_thread_pool_size > 0 ) { - std::atomic num_threads_started = 0; - my->_ro_thread_pool.start( my->_ro_thread_pool_size, - []( const fc::exception& e ) { - fc_elog( _log, "Exception in read-only thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); - app().quit(); - }, - [&]() { - chain.init_thread_local_data(); - ++num_threads_started; - }); + schedule_production_loop(); - // This will be changed with std::latch or std::atomic<>::wait - // when C++20 is used. - auto time_slept_ms = 0; - constexpr auto max_time_slept_ms = 1000; - while ( num_threads_started.load() < my->_ro_thread_pool_size && time_slept_ms < max_time_slept_ms ) { - std::this_thread::sleep_for( 1ms ); - ++time_slept_ms; + ilog("producer plugin: plugin_startup() end"); + } catch (...) { + // always call plugin_shutdown, even on exception + plugin_shutdown(); + throw; } - EOS_ASSERT(num_threads_started.load() == my->_ro_thread_pool_size, producer_exception, "read-only threads failed to start. num_threads_started: ${n}, time_slept_ms: ${t}ms", ("n", num_threads_started.load())("t", time_slept_ms)); - - my->start_write_window(); } + FC_CAPTURE_AND_RETHROW() +} - my->schedule_production_loop(); - - ilog("producer plugin: plugin_startup() end"); - } catch( ... 
) { - // always call plugin_shutdown, even on exception - plugin_shutdown(); - throw; - } -} FC_CAPTURE_AND_RETHROW() } - -void producer_plugin::plugin_shutdown() { - try { - my->_timer.cancel(); - } catch ( const std::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch ( const boost::interprocess::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch(const fc::exception& e) { - edump((e.to_detail_string())); - } catch(const std::exception& e) { - edump((fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); - } +void producer_plugin::plugin_startup() { + my->plugin_startup(); +} - my->_thread_pool.stop(); +void producer_plugin_impl::plugin_shutdown() { + boost::system::error_code ec; + _timer.cancel(ec); + _thread_pool.stop(); + _unapplied_transactions.clear(); - my->_unapplied_transactions.clear(); + app().executor().post(0, [me = shared_from_this()]() {}); // keep my pointer alive until queue is drained - app().executor().post( 0, [me = my](){} ); // keep my pointer alive until queue is drained fc_ilog(_log, "exit shutdown"); } +void producer_plugin::plugin_shutdown() { + my->plugin_shutdown(); +} + void producer_plugin::handle_sighup() { - fc::logger::update( logger_name, _log ); + fc::logger::update(logger_name, _log); fc::logger::update(trx_successful_trace_logger_name, _trx_successful_trace_log); fc::logger::update(trx_failed_trace_logger_name, _trx_failed_trace_log); fc::logger::update(trx_trace_success_logger_name, _trx_trace_success_log); @@ -1267,51 +1497,41 @@ void producer_plugin::pause() { } void producer_plugin::resume() { - my->_pause_production = false; - // it is possible that we are only speculating because of this policy which we have now changed - // re-evaluate that now - // - if (my->_pending_block_mode == pending_block_mode::speculating) { - my->abort_block(); - fc_ilog(_log, "Producer resumed. 
Scheduling production."); - my->schedule_production_loop(); - } else { - fc_ilog(_log, "Producer resumed."); - } + my->resume(); } bool producer_plugin::paused() const { return my->_pause_production; } -void producer_plugin::update_runtime_options(const runtime_options& options) { - chain::controller& chain = my->chain_plug->chain(); - bool check_speculating = false; +void producer_plugin_impl::update_runtime_options(const producer_plugin::runtime_options& options) { + chain::controller& chain = chain_plug->chain(); + bool check_speculating = false; if (options.max_transaction_time) { - my->_max_transaction_time_ms = *options.max_transaction_time; + _max_transaction_time_ms = *options.max_transaction_time; } if (options.max_irreversible_block_age) { - my->_max_irreversible_block_age_us = fc::seconds(*options.max_irreversible_block_age); - check_speculating = true; + _max_irreversible_block_age_us = fc::seconds(*options.max_irreversible_block_age); + check_speculating = true; } if (options.cpu_effort_us) { - my->_cpu_effort_us = *options.cpu_effort_us; + _cpu_effort_us = *options.cpu_effort_us; } if (options.max_scheduled_transaction_time_per_block_ms) { - my->_max_scheduled_transaction_time_per_block_ms = *options.max_scheduled_transaction_time_per_block_ms; + _max_scheduled_transaction_time_per_block_ms = *options.max_scheduled_transaction_time_per_block_ms; } if (options.incoming_defer_ratio) { - my->_incoming_defer_ratio = *options.incoming_defer_ratio; + _incoming_defer_ratio = *options.incoming_defer_ratio; } - if (check_speculating && my->_pending_block_mode == pending_block_mode::speculating) { - my->abort_block(); - my->schedule_production_loop(); + if (check_speculating && in_speculating_mode()) { + abort_block(); + schedule_production_loop(); } if (options.subjective_cpu_leeway_us) { @@ -1323,246 +1543,211 @@ void producer_plugin::update_runtime_options(const runtime_options& options) { } } +void producer_plugin::update_runtime_options(const runtime_options& options) { + my->update_runtime_options(options); +} + producer_plugin::runtime_options producer_plugin::get_runtime_options() const { - return { - my->_max_transaction_time_ms, - my->_max_irreversible_block_age_us.count() < 0 ? -1 : my->_max_irreversible_block_age_us.count() / 1'000'000, - my->_cpu_effort_us, - my->_max_scheduled_transaction_time_per_block_ms, - my->chain_plug->chain().get_subjective_cpu_leeway() ? 
- my->chain_plug->chain().get_subjective_cpu_leeway()->count() : - std::optional(), - my->_incoming_defer_ratio, - my->chain_plug->chain().get_greylist_limit() - }; + return my->get_runtime_options(); } void producer_plugin::add_greylist_accounts(const greylist_params& params) { - EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); - - chain::controller& chain = my->chain_plug->chain(); - for (auto &acc : params.accounts) { - chain.add_resource_greylist(acc); - } + my->add_greylist_accounts(params); } void producer_plugin::remove_greylist_accounts(const greylist_params& params) { - EOS_ASSERT(params.accounts.size() > 0, chain::invalid_http_request, "At least one account is required"); - - chain::controller& chain = my->chain_plug->chain(); - for (auto &acc : params.accounts) { - chain.remove_resource_greylist(acc); - } + my->remove_greylist_accounts(params); } producer_plugin::greylist_params producer_plugin::get_greylist() const { - chain::controller& chain = my->chain_plug->chain(); - greylist_params result; - const auto& list = chain.get_resource_greylist(); - result.accounts.reserve(list.size()); - for (auto &acc: list) { - result.accounts.push_back(acc); - } - return result; + return my->get_greylist(); } producer_plugin::whitelist_blacklist producer_plugin::get_whitelist_blacklist() const { chain::controller& chain = my->chain_plug->chain(); - return { - chain.get_actor_whitelist(), - chain.get_actor_blacklist(), - chain.get_contract_whitelist(), - chain.get_contract_blacklist(), - chain.get_action_blacklist(), - chain.get_key_blacklist() - }; + return {chain.get_actor_whitelist(), + chain.get_actor_blacklist(), + chain.get_contract_whitelist(), + chain.get_contract_blacklist(), + chain.get_action_blacklist(), + chain.get_key_blacklist()}; } void producer_plugin::set_whitelist_blacklist(const producer_plugin::whitelist_blacklist& params) { - EOS_ASSERT(params.actor_whitelist || params.actor_blacklist || params.contract_whitelist || params.contract_blacklist || params.action_blacklist || params.key_blacklist, + EOS_ASSERT(params.actor_whitelist || params.actor_blacklist || params.contract_whitelist || params.contract_blacklist || + params.action_blacklist || params.key_blacklist, chain::invalid_http_request, - "At least one of actor_whitelist, actor_blacklist, contract_whitelist, contract_blacklist, action_blacklist, and key_blacklist is required" - ); + "At least one of actor_whitelist, actor_blacklist, contract_whitelist, contract_blacklist, action_blacklist, and " + "key_blacklist is required"); chain::controller& chain = my->chain_plug->chain(); - if(params.actor_whitelist) chain.set_actor_whitelist(*params.actor_whitelist); - if(params.actor_blacklist) chain.set_actor_blacklist(*params.actor_blacklist); - if(params.contract_whitelist) chain.set_contract_whitelist(*params.contract_whitelist); - if(params.contract_blacklist) chain.set_contract_blacklist(*params.contract_blacklist); - if(params.action_blacklist) chain.set_action_blacklist(*params.action_blacklist); - if(params.key_blacklist) chain.set_key_blacklist(*params.key_blacklist); + if (params.actor_whitelist) + chain.set_actor_whitelist(*params.actor_whitelist); + if (params.actor_blacklist) + chain.set_actor_blacklist(*params.actor_blacklist); + if (params.contract_whitelist) + chain.set_contract_whitelist(*params.contract_whitelist); + if (params.contract_blacklist) + chain.set_contract_blacklist(*params.contract_blacklist); + if (params.action_blacklist) + 
chain.set_action_blacklist(*params.action_blacklist); + if (params.key_blacklist) + chain.set_key_blacklist(*params.key_blacklist); } producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash() const { - chain::controller& chain = my->chain_plug->chain(); - - auto reschedule = fc::make_scoped_exit([this](){ - my->schedule_production_loop(); - }); - - if (chain.is_building_block()) { - // abort the pending block - my->abort_block(); - } else { - reschedule.cancel(); - } - - return {chain.head_block_id(), chain.calculate_integrity_hash()}; + return my->get_integrity_hash(); } void producer_plugin::create_snapshot(producer_plugin::next_function<chain::snapshot_scheduler::snapshot_information> next) { - chain::controller& chain = my->chain_plug->chain(); - - auto reschedule = fc::make_scoped_exit([this](){ - my->schedule_production_loop(); - }); + my->create_snapshot(std::move(next)); +} - auto predicate = [&]() -> void { - if (chain.is_building_block()) { - // abort the pending block - my->abort_block(); - } else { - reschedule.cancel(); - } +chain::snapshot_scheduler::snapshot_schedule_result +producer_plugin::schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_params& srp) { + chain::controller& chain = my->chain_plug->chain(); + const auto head_block_num = chain.head_block_num(); + + // missing start is set to head block num + 1, missing end to UINT32_MAX + chain::snapshot_scheduler::snapshot_request_information sri = { + .block_spacing = srp.block_spacing ? *srp.block_spacing : 0, + .start_block_num = srp.start_block_num ? *srp.start_block_num : head_block_num + 1, + .end_block_num = srp.end_block_num ? *srp.end_block_num : std::numeric_limits<uint32_t>::max(), + .snapshot_description = srp.snapshot_description ? *srp.snapshot_description : "" }; - - my->_snapshot_scheduler.create_snapshot(next, chain, predicate); -} -chain::snapshot_scheduler::snapshot_schedule_result producer_plugin::schedule_snapshot(const chain::snapshot_scheduler::snapshot_request_information& sri) -{ return my->_snapshot_scheduler.schedule_snapshot(sri); } -chain::snapshot_scheduler::snapshot_schedule_result producer_plugin::unschedule_snapshot(const chain::snapshot_scheduler::snapshot_request_id_information& sri) -{ +chain::snapshot_scheduler::snapshot_schedule_result +producer_plugin::unschedule_snapshot(const chain::snapshot_scheduler::snapshot_request_id_information& sri) { return my->_snapshot_scheduler.unschedule_snapshot(sri.snapshot_request_id); } -chain::snapshot_scheduler::get_snapshot_requests_result producer_plugin::get_snapshot_requests() const -{ +chain::snapshot_scheduler::get_snapshot_requests_result producer_plugin::get_snapshot_requests() const { return my->_snapshot_scheduler.get_snapshot_requests(); } -producer_plugin::scheduled_protocol_feature_activations -producer_plugin::get_scheduled_protocol_feature_activations()const { +producer_plugin::scheduled_protocol_feature_activations producer_plugin::get_scheduled_protocol_feature_activations() const { return {my->_protocol_features_to_activate}; } -void producer_plugin::schedule_protocol_feature_activations( const scheduled_protocol_feature_activations& schedule ) { - const chain::controller& chain = my->chain_plug->chain(); - std::set<digest_type> set_of_features_to_activate( schedule.protocol_features_to_activate.begin(), - schedule.protocol_features_to_activate.end() ); - EOS_ASSERT( set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(), - invalid_protocol_features_to_activate, "duplicate digests" ); - chain.validate_protocol_features( 
schedule.protocol_features_to_activate ); +void producer_plugin_impl::schedule_protocol_feature_activations(const producer_plugin::scheduled_protocol_feature_activations& schedule) { + const chain::controller& chain = chain_plug->chain(); + std::set<digest_type> set_of_features_to_activate(schedule.protocol_features_to_activate.begin(), + schedule.protocol_features_to_activate.end()); + EOS_ASSERT(set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(), invalid_protocol_features_to_activate, + "duplicate digests"); + chain.validate_protocol_features(schedule.protocol_features_to_activate); const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set(); - for (auto &feature_digest : set_of_features_to_activate) { + for (auto& feature_digest : set_of_features_to_activate) { const auto& pf = pfs.get_protocol_feature(feature_digest); - EOS_ASSERT( !pf.preactivation_required, protocol_feature_exception, - "protocol feature requires preactivation: ${digest}", - ("digest", feature_digest)); + EOS_ASSERT(!pf.preactivation_required, protocol_feature_exception, + "protocol feature requires preactivation: ${digest}", ("digest", feature_digest)); } - my->_protocol_features_to_activate = schedule.protocol_features_to_activate; - my->_protocol_features_signaled = false; + _protocol_features_to_activate = schedule.protocol_features_to_activate; + _protocol_features_signaled = false; } -fc::variants producer_plugin::get_supported_protocol_features( const get_supported_protocol_features_params& params ) const { - fc::variants results; - const chain::controller& chain = my->chain_plug->chain(); - const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set(); - const auto next_block_time = chain.head_block_time() + fc::milliseconds(config::block_interval_ms); +void producer_plugin::schedule_protocol_feature_activations(const scheduled_protocol_feature_activations& schedule) { + my->schedule_protocol_feature_activations(schedule); +} + +fc::variants producer_plugin::get_supported_protocol_features(const get_supported_protocol_features_params& params) const { + fc::variants results; + const chain::controller& chain = my->chain_plug->chain(); + const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set(); + const auto next_block_time = chain.head_block_time() + fc::milliseconds(config::block_interval_ms); - flat_map<digest_type, bool> visited_protocol_features; - visited_protocol_features.reserve( pfs.size() ); + flat_map<digest_type, bool> visited_protocol_features; + visited_protocol_features.reserve(pfs.size()); std::function<bool(const protocol_feature&)> add_feature = - [&results, &pfs, &params, next_block_time, &visited_protocol_features, &add_feature] - ( const protocol_feature& pf ) -> bool { - if( ( params.exclude_disabled || params.exclude_unactivatable ) && !pf.enabled ) return false; - if( params.exclude_unactivatable && ( next_block_time < pf.earliest_allowed_activation_time ) ) return false; + [&results, &pfs, &params, next_block_time, &visited_protocol_features, &add_feature](const protocol_feature& pf) -> bool { + if ((params.exclude_disabled || params.exclude_unactivatable) && !pf.enabled) + return false; + if (params.exclude_unactivatable && (next_block_time < pf.earliest_allowed_activation_time)) + return false; - auto res = visited_protocol_features.emplace( pf.feature_digest, false ); - if( !res.second ) return res.first->second; + auto res = visited_protocol_features.emplace(pf.feature_digest, false); + if (!res.second) + return res.first->second; const auto original_size = results.size(); - 
for( const auto& dependency : pf.dependencies ) { - if( !add_feature( pfs.get_protocol_feature( dependency ) ) ) { - results.resize( original_size ); + for (const auto& dependency : pf.dependencies) { + if (!add_feature(pfs.get_protocol_feature(dependency))) { + results.resize(original_size); return false; } } res.first->second = true; - results.emplace_back( pf.to_variant(true) ); + results.emplace_back(pf.to_variant(true)); return true; }; - for( const auto& pf : pfs ) { - add_feature( pf ); + for (const auto& pf : pfs) { + add_feature(pf); } return results; } producer_plugin::get_account_ram_corrections_result -producer_plugin::get_account_ram_corrections( const get_account_ram_corrections_params& params ) const { +producer_plugin::get_account_ram_corrections(const get_account_ram_corrections_params& params) const { get_account_ram_corrections_result result; - const auto& db = my->chain_plug->chain().db(); + const auto& db = my->chain_plug->chain().db(); - const auto& idx = db.get_index<chain::account_ram_correction_index, chain::by_name>(); - account_name lower_bound_value{ std::numeric_limits<uint64_t>::lowest() }; - account_name upper_bound_value{ std::numeric_limits<uint64_t>::max() }; + const auto& idx = db.get_index<chain::account_ram_correction_index, chain::by_name>(); + account_name lower_bound_value{std::numeric_limits<uint64_t>::lowest()}; + account_name upper_bound_value{std::numeric_limits<uint64_t>::max()}; - if( params.lower_bound ) { + if (params.lower_bound) { lower_bound_value = *params.lower_bound; } - if( params.upper_bound ) { + if (params.upper_bound) { upper_bound_value = *params.upper_bound; } - if( upper_bound_value < lower_bound_value ) + if (upper_bound_value < lower_bound_value) return result; - auto walk_range = [&]( auto itr, auto end_itr ) { - for( unsigned int count = 0; - count < params.limit && itr != end_itr; - ++itr ) - { - result.rows.push_back( fc::variant( *itr ) ); + auto walk_range = [&](auto itr, auto end_itr) { + for (unsigned int count = 0; count < params.limit && itr != end_itr; ++itr) { + result.rows.push_back(fc::variant(*itr)); ++count; } - if( itr != end_itr ) { + if (itr != end_itr) { result.more = itr->name; } }; - auto lower = idx.lower_bound( lower_bound_value ); - auto upper = idx.upper_bound( upper_bound_value ); - if( params.reverse ) { - walk_range( boost::make_reverse_iterator(upper), boost::make_reverse_iterator(lower) ); + auto lower = idx.lower_bound(lower_bound_value); + auto upper = idx.upper_bound(upper_bound_value); + if (params.reverse) { + walk_range(boost::make_reverse_iterator(upper), boost::make_reverse_iterator(lower)); } else { - walk_range( lower, upper ); + walk_range(lower, upper); } return result; } -producer_plugin::get_unapplied_transactions_result -producer_plugin::get_unapplied_transactions( const get_unapplied_transactions_params& p, const fc::time_point& deadline ) const { +producer_plugin::get_unapplied_transactions_result producer_plugin::get_unapplied_transactions(const get_unapplied_transactions_params& p, + const fc::time_point& deadline) const { - fc::microseconds params_time_limit = p.time_limit_ms ? fc::milliseconds(*p.time_limit_ms) : fc::milliseconds(10); - fc::time_point params_deadline = fc::time_point::now() + params_time_limit; + fc::time_point params_deadline = + p.time_limit_ms ? std::min(fc::time_point::now().safe_add(fc::milliseconds(*p.time_limit_ms)), deadline) : deadline; auto& ua = my->_unapplied_transactions; - auto itr = ([&](){ + auto itr = ([&]() { if (!p.lower_bound.empty()) { try { - auto trx_id = transaction_id_type( p.lower_bound ); - return ua.lower_bound( trx_id ); - } catch( ... 
) { + auto trx_id = transaction_id_type(p.lower_bound); + return ua.lower_bound(trx_id); + } catch (...) { return ua.end(); } } else { @@ -1571,47 +1756,52 @@ producer_plugin::get_unapplied_transactions( const get_unapplied_transactions_pa })(); auto get_trx_type = [&](trx_enum_type t, transaction_metadata::trx_type type) { - if( type == transaction_metadata::trx_type::dry_run ) return "dry_run"; - if( type == transaction_metadata::trx_type::read_only ) return "read_only"; - switch( t ) { - case trx_enum_type::unknown: - return "unknown"; - case trx_enum_type::forked: - return "forked"; - case trx_enum_type::aborted: - return "aborted"; - case trx_enum_type::incoming_api: - return "incoming_api"; - case trx_enum_type::incoming_p2p: - return "incoming_p2p"; + if (type == transaction_metadata::trx_type::dry_run) + return "dry_run"; + if (type == transaction_metadata::trx_type::read_only) + return "read_only"; + switch (t) { + case trx_enum_type::unknown: + return "unknown"; + case trx_enum_type::forked: + return "forked"; + case trx_enum_type::aborted: + return "aborted"; + case trx_enum_type::incoming_api: + return "incoming_api"; + case trx_enum_type::incoming_p2p: + return "incoming_p2p"; } return "unknown type"; }; get_unapplied_transactions_result result; - result.size = ua.size(); + result.size = ua.size(); result.incoming_size = ua.incoming_size(); uint32_t remaining = p.limit ? *p.limit : std::numeric_limits<uint32_t>::max(); - while (itr != ua.end() && remaining > 0 && params_deadline > fc::time_point::now()) { - FC_CHECK_DEADLINE(deadline); - auto& r = result.trxs.emplace_back(); - r.trx_id = itr->id(); - r.expiration = itr->expiration(); - const auto& pt = itr->trx_meta->packed_trx(); - r.trx_type = get_trx_type( itr->trx_type, itr->trx_meta->get_trx_type() ); - r.first_auth = pt->get_transaction().first_authorizer(); + if (deadline != fc::time_point::maximum() && remaining > 1000) + remaining = 1000; + while (itr != ua.end() && remaining > 0) { + auto& r = result.trxs.emplace_back(); + r.trx_id = itr->id(); + r.expiration = itr->expiration(); + const auto& pt = itr->trx_meta->packed_trx(); + r.trx_type = get_trx_type(itr->trx_type, itr->trx_meta->get_trx_type()); + r.first_auth = pt->get_transaction().first_authorizer(); const auto& actions = pt->get_transaction().actions; - if( !actions.empty() ) { + if (!actions.empty()) { r.first_receiver = actions[0].account; - r.first_action = actions[0].name; + r.first_action = actions[0].name; } - r.total_actions = pt->get_transaction().total_actions(); + r.total_actions = pt->get_transaction().total_actions(); r.billed_cpu_time_us = itr->trx_meta->billed_cpu_time_us; - r.size = pt->get_estimated_size(); + r.size = pt->get_estimated_size(); ++itr; remaining--; + if (fc::time_point::now() >= params_deadline) + break; } if (itr != ua.end()) { @@ -1623,18 +1813,19 @@ producer_plugin::get_unapplied_transactions( const get_unapplied_transactions_pa uint32_t producer_plugin_impl::calculate_next_block_slot(const account_name& producer_name, uint32_t current_block_slot) const { - chain::controller& chain = chain_plug->chain(); - const auto& hbs = chain.head_block_state(); - const auto& active_schedule = hbs->active_schedule.producers; + chain::controller& chain = chain_plug->chain(); + const auto& hbs = chain.head_block_state(); + const auto& active_schedule = hbs->active_schedule.producers; // determine if this producer is in the active schedule and if so, where - auto itr = std::find_if(active_schedule.begin(), active_schedule.end(), [&](const auto& asp){ 
return asp.producer_name == producer_name; }); + auto itr = + std::find_if(active_schedule.begin(), active_schedule.end(), [&](const auto& asp) { return asp.producer_name == producer_name; }); if (itr == active_schedule.end()) { // this producer is not in the active producer set return UINT32_MAX; } - size_t producer_index = itr - active_schedule.begin(); + size_t producer_index = itr - active_schedule.begin(); uint32_t minimum_offset = 1; // must at least be the "next" block // account for a watermark in the future which is disqualifying this producer for now @@ -1645,7 +1836,7 @@ uint32_t producer_plugin_impl::calculate_next_block_slot(const account_name& pro auto current_watermark = get_watermark(producer_name); if (current_watermark) { const auto watermark = *current_watermark; - auto block_num = chain.head_block_state()->block_num; + auto block_num = chain.head_block_state()->block_num; if (chain.is_building_block()) { ++block_num; } @@ -1654,15 +1845,16 @@ uint32_t producer_plugin_impl::calculate_next_block_slot(const account_name& pro minimum_offset = watermark.first - block_num + 1; } if (watermark.second.slot > current_block_slot) { - // if I have a watermark block timestamp then I need to wait until after that watermark timestamp - minimum_offset = std::max(minimum_offset, watermark.second.slot - current_block_slot + 1); + // if I have a watermark block timestamp then I need to wait until after that watermark timestamp + minimum_offset = std::max(minimum_offset, watermark.second.slot - current_block_slot + 1); } } // this producer's next opportunity to produce is the next time its slot arrives after or at the calculated minimum uint32_t minimum_slot = current_block_slot + minimum_offset; - size_t minimum_slot_producer_index = (minimum_slot % (active_schedule.size() * config::producer_repetitions)) / config::producer_repetitions; - if ( producer_index == minimum_slot_producer_index ) { + size_t minimum_slot_producer_index = + (minimum_slot % (active_schedule.size() * config::producer_repetitions)) / config::producer_repetitions; + if (producer_index == minimum_slot_producer_index) { // this is the producer for the minimum slot, go with that return minimum_slot; } else { @@ -1677,20 +1869,20 @@ uint32_t producer_plugin_impl::calculate_next_block_slot(const account_name& pro uint32_t first_minimum_producer_slot = minimum_slot - (minimum_slot % config::producer_repetitions); // offset the aligned minimum to the *earliest* next set of slots for this producer - uint32_t next_block_slot = first_minimum_producer_slot + (producer_distance * config::producer_repetitions); + uint32_t next_block_slot = first_minimum_producer_slot + (producer_distance * config::producer_repetitions); return next_block_slot; } } block_timestamp_type producer_plugin_impl::calculate_pending_block_time() const { const chain::controller& chain = chain_plug->chain(); - const fc::time_point now = fc::time_point::now(); - const fc::time_point base = std::max(now, chain.head_block_time()); + const fc::time_point now = fc::time_point::now(); + const fc::time_point base = std::max(now, chain.head_block_time()); return block_timestamp_type(base).next(); } -bool producer_plugin_impl::should_interrupt_start_block( const fc::time_point& deadline, uint32_t pending_block_num ) const { - if( _pending_block_mode == pending_block_mode::producing ) { +bool producer_plugin_impl::should_interrupt_start_block(const fc::time_point& deadline, uint32_t pending_block_num) const { + if (in_producing_mode()) { return deadline <= 
fc::time_point::now(); } // if we can produce then honor deadline so production starts on time @@ -1700,20 +1892,20 @@ bool producer_plugin_impl::should_interrupt_start_block( const fc::time_point& d producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { chain::controller& chain = chain_plug->chain(); - if( !chain_plug->accept_transactions() ) + if (!chain_plug->accept_transactions()) return start_block_result::waiting_for_block; const auto& hbs = chain.head_block_state(); - if( chain.get_terminate_at_block() > 0 && chain.get_terminate_at_block() <= chain.head_block_num() ) { + if (chain.get_terminate_at_block() > 0 && chain.get_terminate_at_block() <= chain.head_block_num()) { ilog("Reached configured maximum block ${num}; terminating", ("num", chain.get_terminate_at_block())); app().quit(); return start_block_result::failed; } - const fc::time_point now = fc::time_point::now(); - const block_timestamp_type block_time = calculate_pending_block_time(); - const uint32_t pending_block_num = hbs->block_num + 1; + const fc::time_point now = fc::time_point::now(); + const block_timestamp_type block_time = calculate_pending_block_time(); + const uint32_t pending_block_num = hbs->block_num + 1; _pending_block_mode = pending_block_mode::producing; @@ -1723,9 +1915,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const auto current_watermark = get_watermark(scheduled_producer.producer_name); size_t num_relevant_signatures = 0; - scheduled_producer.for_each_key([&](const public_key_type& key){ + scheduled_producer.for_each_key([&](const public_key_type& key) { const auto& iter = _signature_providers.find(key); - if(iter != _signature_providers.end()) { + if (iter != _signature_providers.end()) { num_relevant_signatures++; } }); @@ -1733,67 +1925,66 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { auto irreversible_block_age = get_irreversible_block_age(); // If the next block production opportunity is in the present or future, we're synced. 
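+   // Summary of the mode selection below: the block starts in producing mode and drops to
+   // speculating when any disqualifier applies -- production not yet enabled (still syncing),
+   // the scheduled producer is not one of this node's producers, no configured signature
+   // provider matches the producer authority, production is explicitly paused, or the last
+   // irreversible block is older than max-irreversible-block-age allows.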
- if( !_production_enabled ) { + if (!_production_enabled) { _pending_block_mode = pending_block_mode::speculating; - } else if( _producers.find(scheduled_producer.producer_name) == _producers.end()) { + } else if (_producers.find(scheduled_producer.producer_name) == _producers.end()) { _pending_block_mode = pending_block_mode::speculating; } else if (num_relevant_signatures == 0) { - elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); + elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", + ("authority", scheduled_producer.authority)); _pending_block_mode = pending_block_mode::speculating; - } else if ( _pause_production ) { + } else if (_pause_production) { elog("Not producing block because production is explicitly paused"); _pending_block_mode = pending_block_mode::speculating; - } else if ( _max_irreversible_block_age_us.count() >= 0 && irreversible_block_age >= _max_irreversible_block_age_us ) { - elog("Not producing block because the irreversible block is too old [age:${age}s, max:${max}s]", ("age", irreversible_block_age.count() / 1'000'000)( "max", _max_irreversible_block_age_us.count() / 1'000'000 )); + } else if (_max_irreversible_block_age_us.count() >= 0 && irreversible_block_age >= _max_irreversible_block_age_us) { + elog("Not producing block because the irreversible block is too old [age:${age}s, max:${max}s]", + ("age", irreversible_block_age.count() / 1'000'000)("max", _max_irreversible_block_age_us.count() / 1'000'000)); _pending_block_mode = pending_block_mode::speculating; } - if (_pending_block_mode == pending_block_mode::producing) { + if (in_producing_mode()) { // determine if our watermark excludes us from producing at this point if (current_watermark) { const block_timestamp_type block_timestamp{block_time}; if (current_watermark->first > hbs->block_num) { - elog("Not producing block because \"${producer}\" signed a block at a higher block number (${watermark}) than the current fork's head (${head_block_num})", - ("producer", scheduled_producer.producer_name) - ("watermark", current_watermark->first) - ("head_block_num", hbs->block_num)); + elog("Not producing block because \"${producer}\" signed a block at a higher block number (${watermark}) than the current " + "fork's head (${head_block_num})", + ("producer", scheduled_producer.producer_name)("watermark", current_watermark->first)("head_block_num", hbs->block_num)); _pending_block_mode = pending_block_mode::speculating; } else if (current_watermark->second >= block_timestamp) { - elog("Not producing block because \"${producer}\" signed a block at the next block time or later (${watermark}) than the pending block time (${block_timestamp})", - ("producer", scheduled_producer.producer_name) - ("watermark", current_watermark->second) - ("block_timestamp", block_timestamp)); + elog("Not producing block because \"${producer}\" signed a block at the next block time or later (${watermark}) than the pending " + "block time (${block_timestamp})", + ("producer", scheduled_producer.producer_name)("watermark", current_watermark->second)("block_timestamp", block_timestamp)); _pending_block_mode = pending_block_mode::speculating; } } } - if (_pending_block_mode == pending_block_mode::speculating) { + if (in_speculating_mode()) { auto head_block_age = now - chain.head_block_time(); if (head_block_age > fc::seconds(5)) return start_block_result::waiting_for_block; } - 
_pending_block_deadline = block_timing_util::calculate_block_deadline(_cpu_effort_us, _pending_block_mode, block_time); - auto preprocess_deadline = _pending_block_deadline; + _pending_block_deadline = block_timing_util::calculate_block_deadline(_cpu_effort_us, _pending_block_mode, block_time); + auto preprocess_deadline = _pending_block_deadline; uint32_t production_round_index = block_timestamp_type(block_time).slot % chain::config::producer_repetitions; if (production_round_index == 0) { - // first block of our round, wait for block production window - const auto start_block_time = block_time.to_time_point() - fc::microseconds( config::block_interval_us ); + // first block of our round, wait for block production window + const auto start_block_time = block_time.to_time_point() - fc::microseconds(config::block_interval_us); if (now < start_block_time) { - fc_dlog( _log, "Not starting block until ${bt}", ("bt", start_block_time) ); - schedule_delayed_production_loop( weak_from_this(), start_block_time ); + fc_dlog(_log, "Not starting block until ${bt}", ("bt", start_block_time)); + schedule_delayed_production_loop(weak_from_this(), start_block_time); return start_block_result::waiting_for_production; } } - fc_dlog(_log, "Starting block #${n} at ${time} producer ${p}", - ("n", pending_block_num)("time", now)("p", scheduled_producer.producer_name)); + fc_dlog(_log, "Starting block #${n} at ${time} producer ${p}", ("n", pending_block_num)("time", now)("p", scheduled_producer.producer_name)); try { uint16_t blocks_to_confirm = 0; - if (_pending_block_mode == pending_block_mode::producing) { + if (in_producing_mode()) { // determine how many blocks this producer can confirm // 1) if it is not a producer from this node, assume no confirmations (we will discard this block anyway) // 2) if it is a producer on this node that has never produced, the conservative approach is to assume no @@ -1814,161 +2005,157 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { abort_block(); auto features_to_activate = chain.get_preactivated_protocol_features(); - if( _pending_block_mode == pending_block_mode::producing && _protocol_features_to_activate.size() > 0 ) { + if (in_producing_mode() && _protocol_features_to_activate.size() > 0) { bool drop_features_to_activate = false; try { - chain.validate_protocol_features( _protocol_features_to_activate ); - } catch ( const std::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch ( const boost::interprocess::bad_alloc& ) { - chain_apis::api_base::handle_bad_alloc(); - } catch( const fc::exception& e ) { - wlog( "protocol features to activate are no longer all valid: ${details}", - ("details",e.to_detail_string()) ); + chain.validate_protocol_features(_protocol_features_to_activate); + } catch (const std::bad_alloc&) { + chain_apis::api_base::handle_bad_alloc(); + } catch (const boost::interprocess::bad_alloc&) { + chain_apis::api_base::handle_bad_alloc(); + } catch (const fc::exception& e) { + wlog("protocol features to activate are no longer all valid: ${details}", ("details", e.to_detail_string())); drop_features_to_activate = true; - } catch( const std::exception& e ) { - wlog( "protocol features to activate are no longer all valid: ${details}", - ("details",fc::std_exception_wrapper::from_current_exception(e).to_detail_string()) ); + } catch (const std::exception& e) { + wlog("protocol features to activate are no longer all valid: ${details}", + ("details", 
fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); drop_features_to_activate = true; } - if( drop_features_to_activate ) { + if (drop_features_to_activate) { _protocol_features_to_activate.clear(); } else { auto protocol_features_to_activate = _protocol_features_to_activate; // do a copy as pending_block might be aborted - if( features_to_activate.size() > 0 ) { - protocol_features_to_activate.reserve( protocol_features_to_activate.size() - + features_to_activate.size() ); - std::set<digest_type> set_of_features_to_activate( protocol_features_to_activate.begin(), - protocol_features_to_activate.end() ); - for( const auto& f : features_to_activate ) { - auto res = set_of_features_to_activate.insert( f ); - if( res.second ) { - protocol_features_to_activate.push_back( f ); + if (features_to_activate.size() > 0) { + protocol_features_to_activate.reserve(protocol_features_to_activate.size() + features_to_activate.size()); + std::set<digest_type> set_of_features_to_activate(protocol_features_to_activate.begin(), + protocol_features_to_activate.end()); + for (const auto& f : features_to_activate) { + auto res = set_of_features_to_activate.insert(f); + if (res.second) { + protocol_features_to_activate.push_back(f); } } features_to_activate.clear(); } - std::swap( features_to_activate, protocol_features_to_activate ); + std::swap(features_to_activate, protocol_features_to_activate); _protocol_features_signaled = true; - ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", - ("num", pending_block_num)("features_to_activate", features_to_activate) ); + ilog("signaling activation of the following protocol features in block ${num}: ${features_to_activate}", + ("num", pending_block_num)("features_to_activate", features_to_activate)); } } - controller::block_status bs = _pending_block_mode == pending_block_mode::producing ? - controller::block_status::incomplete : controller::block_status::ephemeral; - chain.start_block( block_time, blocks_to_confirm, features_to_activate, bs, preprocess_deadline ); - } LOG_AND_DROP(); + controller::block_status bs = + in_producing_mode() ? controller::block_status::incomplete : controller::block_status::ephemeral; + chain.start_block(block_time, blocks_to_confirm, features_to_activate, bs, preprocess_deadline); + } + LOG_AND_DROP(); - if( chain.is_building_block() ) { + if (chain.is_building_block()) { const auto& pending_block_signing_authority = chain.pending_block_signing_authority(); - if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_authority != scheduled_producer.authority) { - elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); + if (in_producing_mode() && pending_block_signing_authority != scheduled_producer.authority) { + elog("Unexpected block signing authority, reverting to speculative mode! 
[expected: \"${expected}\", actual: \"${actual\"", + ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); _pending_block_mode = pending_block_mode::speculating; } try { chain::subjective_billing& subjective_bill = chain.get_mutable_subjective_billing(); _account_fails.report_and_clear(hbs->block_num, subjective_bill); - _time_tracker.clear(); - if( !remove_expired_trxs( preprocess_deadline ) ) + if (!remove_expired_trxs(preprocess_deadline)) return start_block_result::exhausted; - if( !remove_expired_blacklisted_trxs( preprocess_deadline ) ) + if (!remove_expired_blacklisted_trxs(preprocess_deadline)) return start_block_result::exhausted; - if( !subjective_bill.remove_expired( _log, chain.pending_block_time(), fc::time_point::now(), - [&](){ return should_interrupt_start_block( preprocess_deadline, pending_block_num ); } ) ) { + if (!subjective_bill.remove_expired(_log, chain.pending_block_time(), fc::time_point::now(), [&]() { + return should_interrupt_start_block(preprocess_deadline, pending_block_num); + })) { return start_block_result::exhausted; } // limit execution of pending incoming to once per block auto incoming_itr = _unapplied_transactions.incoming_begin(); - if (_pending_block_mode == pending_block_mode::producing) { - if( !process_unapplied_trxs( preprocess_deadline ) ) + if (in_producing_mode()) { + if (!process_unapplied_trxs(preprocess_deadline)) return start_block_result::exhausted; auto scheduled_trx_deadline = preprocess_deadline; if (_max_scheduled_transaction_time_per_block_ms >= 0) { scheduled_trx_deadline = std::min( - scheduled_trx_deadline, - fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms) - ); + scheduled_trx_deadline, fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms)); } // may exhaust scheduled_trx_deadline but not preprocess_deadline, exhausted preprocess_deadline checked below - process_scheduled_and_incoming_trxs( scheduled_trx_deadline, incoming_itr ); + process_scheduled_and_incoming_trxs(scheduled_trx_deadline, incoming_itr); } - repost_exhausted_transactions( preprocess_deadline ); + repost_exhausted_transactions(preprocess_deadline); - if( app().is_quiting() ) // db guard exception above in LOG_AND_DROP could have called app().quit() + if (app().is_quiting()) // db guard exception above in LOG_AND_DROP could have called app().quit() return start_block_result::failed; - if ( should_interrupt_start_block( preprocess_deadline, pending_block_num ) || block_is_exhausted() ) { + if (should_interrupt_start_block(preprocess_deadline, pending_block_num) || block_is_exhausted()) { return start_block_result::exhausted; } - if( !process_incoming_trxs( preprocess_deadline, incoming_itr ) ) + if (!process_incoming_trxs(preprocess_deadline, incoming_itr)) return start_block_result::exhausted; return start_block_result::succeeded; - } catch ( const guard_exception& e ) { + } catch (const guard_exception& e) { chain_plugin::handle_guard_exception(e); return start_block_result::failed; - } catch ( std::bad_alloc& ) { + } catch (std::bad_alloc&) { chain_apis::api_base::handle_bad_alloc(); - } catch ( boost::interprocess::bad_alloc& ) { + } catch (boost::interprocess::bad_alloc&) { chain_apis::api_base::handle_db_exhaustion(); } - } return start_block_result::failed; } -bool producer_plugin_impl::remove_expired_trxs( const fc::time_point& deadline ) -{ - chain::controller& chain = chain_plug->chain(); - auto pending_block_time = chain.pending_block_time(); - auto 
pending_block_num = chain.pending_block_num(); +bool producer_plugin_impl::remove_expired_trxs(const fc::time_point& deadline) { + chain::controller& chain = chain_plug->chain(); + auto pending_block_time = chain.pending_block_time(); + auto pending_block_num = chain.pending_block_num(); // remove all expired transactions size_t num_expired = 0; - size_t orig_count = _unapplied_transactions.size(); - bool exhausted = !_unapplied_transactions.clear_expired( pending_block_time, [&](){ return should_interrupt_start_block(deadline, pending_block_num); }, - [&num_expired]( const packed_transaction_ptr& packed_trx_ptr, trx_enum_type trx_type ) { - // expired exception is logged as part of next() call - ++num_expired; - }); + size_t orig_count = _unapplied_transactions.size(); + bool exhausted = !_unapplied_transactions.clear_expired( + pending_block_time, + [&]() { return should_interrupt_start_block(deadline, pending_block_num); }, + [&num_expired](const packed_transaction_ptr& packed_trx_ptr, trx_enum_type trx_type) { + // expired exception is logged as part of next() call + ++num_expired; + }); - if( exhausted && _pending_block_mode == pending_block_mode::producing ) { - fc_wlog( _log, "Unable to process all expired transactions of the ${n} transactions in the unapplied queue before deadline, " - "Expired ${expired}", ("n", orig_count)("expired", num_expired) ); + if (exhausted && in_producing_mode()) { + fc_wlog(_log, "Unable to process all expired transactions of the ${n} transactions in the unapplied queue before deadline, " + "Expired ${expired}", ("n", orig_count)("expired", num_expired)); } else { - fc_dlog( _log, "Processed ${ex} expired transactions of the ${n} transactions in the unapplied queue.", - ("n", orig_count)("ex", num_expired) ); + fc_dlog(_log, "Processed ${ex} expired transactions of the ${n} transactions in the unapplied queue.", ("n", orig_count)("ex", num_expired)); } return !exhausted; } -bool producer_plugin_impl::remove_expired_blacklisted_trxs( const fc::time_point& deadline ) -{ - bool exhausted = false; +bool producer_plugin_impl::remove_expired_blacklisted_trxs(const fc::time_point& deadline) { + bool exhausted = false; auto& blacklist_by_expiry = _blacklisted_transactions.get<by_expiry>(); - if(!blacklist_by_expiry.empty()) { - const chain::controller& chain = chain_plug->chain(); - const auto lib_time = chain.last_irreversible_block_time(); - const auto pending_block_num = chain.pending_block_num(); + if (!blacklist_by_expiry.empty()) { + const chain::controller& chain = chain_plug->chain(); + const auto lib_time = chain.last_irreversible_block_time(); + const auto pending_block_num = chain.pending_block_num(); int num_expired = 0; - int orig_count = _blacklisted_transactions.size(); + int orig_count = _blacklisted_transactions.size(); while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= lib_time) { - if ( should_interrupt_start_block( deadline, pending_block_num ) ) { + if (should_interrupt_start_block(deadline, pending_block_num)) { exhausted = true; break; } @@ -1976,66 +2163,59 @@ bool producer_plugin_impl::remove_expired_blacklisted_trxs( const fc::time_point num_expired++; } - fc_dlog(_log, "Processed ${n} blacklisted transactions, Expired ${expired}", - ("n", orig_count)("expired", num_expired)); + fc_dlog(_log, "Processed ${n} blacklisted transactions, Expired ${expired}", ("n", orig_count)("expired", num_expired)); } return !exhausted; } // Returns contract name, action name, and exception text of an exception that occurred in a contract 
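+// e.g. "action: eosio.token:transfer, assertion failure with message: ..." (account, action,
+// and message here are illustrative only); the names come from the last action trace when a
+// trace is available, otherwise from the transaction's first action.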
inline std::string get_detailed_contract_except_info(const packed_transaction_ptr& trx, - const transaction_trace_ptr& trace, - const fc::exception_ptr& except_ptr) -{ + const transaction_trace_ptr& trace, + const fc::exception_ptr& except_ptr) { std::string contract_name; std::string act_name; - if( trace && !trace->action_traces.empty() ) { + if (trace && !trace->action_traces.empty()) { auto last_action_ordinal = trace->action_traces.size() - 1; - contract_name = trace->action_traces[last_action_ordinal].receiver.to_string(); - act_name = trace->action_traces[last_action_ordinal].act.name.to_string(); - } else if ( trx ) { + contract_name = trace->action_traces[last_action_ordinal].receiver.to_string(); + act_name = trace->action_traces[last_action_ordinal].act.name.to_string(); + } else if (trx) { const auto& actions = trx->get_transaction().actions; - if( actions.empty() ) return {}; // should not be possible + if (actions.empty()) + return {}; // should not be possible contract_name = actions[0].account.to_string(); - act_name = actions[0].name.to_string(); + act_name = actions[0].name.to_string(); } - std::string details = except_ptr ? except_ptr->top_message() - : ((trace && trace->except) ? trace->except->top_message() - : std::string()); + std::string details = except_ptr ? except_ptr->top_message() : ((trace && trace->except) ? trace->except->top_message() : std::string()); fc::escape_str(details, fc::escape_control_chars::on, 1024); // this format is parsed by external tools return "action: " + contract_name + ":" + act_name + ", " + details; } -void producer_plugin_impl::log_trx_results( const transaction_metadata_ptr& trx, - const transaction_trace_ptr& trace, - const fc::time_point& start ) -{ +void producer_plugin_impl::log_trx_results(const transaction_metadata_ptr& trx, + const transaction_trace_ptr& trace, + const fc::time_point& start) { uint32_t billed_cpu_time_us = (trace && trace->receipt) ? trace->receipt->cpu_usage_us : 0; - log_trx_results( trx->packed_trx(), trace, nullptr, billed_cpu_time_us, start, trx->is_transient() ); + log_trx_results(trx->packed_trx(), trace, nullptr, billed_cpu_time_us, start, trx->is_transient()); } -void producer_plugin_impl::log_trx_results( const transaction_metadata_ptr& trx, - const fc::exception_ptr& except_ptr ) -{ +void producer_plugin_impl::log_trx_results(const transaction_metadata_ptr& trx, const fc::exception_ptr& except_ptr) { uint32_t billed_cpu_time_us = trx ? 
trx->billed_cpu_time_us : 0; - log_trx_results( trx->packed_trx(), nullptr, except_ptr, billed_cpu_time_us, fc::time_point::now(), trx->is_transient() ); + log_trx_results(trx->packed_trx(), nullptr, except_ptr, billed_cpu_time_us, fc::time_point::now(), trx->is_transient()); } -void producer_plugin_impl::log_trx_results( const packed_transaction_ptr& trx, - const transaction_trace_ptr& trace, - const fc::exception_ptr& except_ptr, - uint32_t billed_cpu_us, - const fc::time_point& start, - bool is_transient ) -{ +void producer_plugin_impl::log_trx_results(const packed_transaction_ptr& trx, + const transaction_trace_ptr& trace, + const fc::exception_ptr& except_ptr, + uint32_t billed_cpu_us, + const fc::time_point& start, + bool is_transient) { chain::controller& chain = chain_plug->chain(); auto get_trace = [&](const transaction_trace_ptr& trace, const fc::exception_ptr& except_ptr) -> fc::variant { - if( trace ) { - return chain_plug->get_log_trx_trace( trace ); + if (trace) { + return chain_plug->get_log_trx_trace(trace); } else { return fc::variant{except_ptr}; } @@ -2043,40 +2223,42 @@ void producer_plugin_impl::log_trx_results( const packed_transaction_ptr& trx, bool except = except_ptr || (trace && trace->except); if (except) { - if (_pending_block_mode == pending_block_mode::producing) { - fc_dlog( is_transient ? _transient_trx_failed_trace_log : _trx_failed_trace_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING ${desc}tx: ${txid}, auth: ${a}, ${details}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("desc", is_transient ? "transient " : "")("txid", trx->id()) - ("a", trx->get_transaction().first_authorizer()) - ("details", get_detailed_contract_except_info(trx, trace, except_ptr))); - - if ( !is_transient ) { + if (in_producing_mode()) { + fc_dlog(is_transient ? _transient_trx_failed_trace_log : _trx_failed_trace_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING ${desc}tx: ${txid}, auth: ${a}, ${details}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("desc", is_transient ? "transient " : "") + ("txid", trx->id())("a", trx->get_transaction().first_authorizer()) + ("details", get_detailed_contract_except_info(trx, trace, except_ptr))); + + if (!is_transient) { fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${trx}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("trx", chain_plug->get_log_trx(trx->get_transaction()))); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("entire_trace", get_trace(trace, except_ptr))); + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) + ("trx", chain_plug->get_log_trx(trx->get_transaction()))); + fc_dlog(_trx_trace_failure_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) + ("entire_trace", get_trace(trace, except_ptr))); } } else { - fc_dlog( is_transient ? _transient_trx_failed_trace_log : _trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING ${desc}tx: ${txid}, auth: ${a} : ${details}", - ("desc", is_transient ? 
"transient " : "") - ("txid", trx->id())("a", trx->get_transaction().first_authorizer()) - ("details", get_detailed_contract_except_info(trx, trace, except_ptr))); - if ( !is_transient ) { + fc_dlog(is_transient ? _transient_trx_failed_trace_log : _trx_failed_trace_log, + "[TRX_TRACE] Speculative execution is REJECTING ${desc}tx: ${txid}, auth: ${a} : ${details}", + ("desc", is_transient ? "transient " : "")("txid", trx->id()) + ("a", trx->get_transaction().first_authorizer())("details", get_detailed_contract_except_info(trx, trace, except_ptr))); + if (!is_transient) { fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx} ", ("trx", chain_plug->get_log_trx(trx->get_transaction()))); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trace} ", + fc_dlog(_trx_trace_failure_log, + "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trace} ", ("entire_trace", get_trace(trace, except_ptr))); } } } else { - if (_pending_block_mode == pending_block_mode::producing) { - fc_dlog( is_transient ? _transient_trx_successful_trace_log : _trx_successful_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING ${desc}tx: ${txid}, auth: ${a}, cpu: ${cpu}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("desc", is_transient ? "transient " : "")("txid", trx->id()) - ("a", trx->get_transaction().first_authorizer())("cpu", billed_cpu_us)); - if ( !is_transient ) { + if (in_producing_mode()) { + fc_dlog(is_transient ? _transient_trx_successful_trace_log : _trx_successful_trace_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING ${desc}tx: ${txid}, auth: ${a}, cpu: ${cpu}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("desc", is_transient ? "transient " : "") + ("txid", trx->id())("a", trx->get_transaction().first_authorizer())("cpu", billed_cpu_us)); + if (!is_transient) { fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("trx", chain_plug->get_log_trx(trx->get_transaction()))); @@ -2085,13 +2267,12 @@ void producer_plugin_impl::log_trx_results( const packed_transaction_ptr& trx, ("entire_trace", get_trace(trace, except_ptr))); } } else { - fc_dlog( is_transient ? _transient_trx_successful_trace_log : _trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING ${desc}tx: ${txid}, auth: ${a}, cpu: ${cpu}", - ("desc", is_transient ? "transient " : "") - ("txid", trx->id())("a", trx->get_transaction().first_authorizer()) - ("cpu", billed_cpu_us)); - if ( !is_transient ) { - fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", - ("trx", chain_plug->get_log_trx(trx->get_transaction()))); + fc_dlog(is_transient ? _transient_trx_successful_trace_log : _trx_successful_trace_log, + "[TRX_TRACE] Speculative execution is ACCEPTING ${desc}tx: ${txid}, auth: ${a}, cpu: ${cpu}", + ("desc", is_transient ? 
"transient " : "")("txid", trx->id())("a", trx->get_transaction().first_authorizer()) + ("cpu", billed_cpu_us)); + if (!is_transient) { + fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", ("trx", chain_plug->get_log_trx(trx->get_transaction()))); fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${entire_trace}", ("entire_trace", get_trace(trace, except_ptr))); } @@ -2100,212 +2281,204 @@ void producer_plugin_impl::log_trx_results( const packed_transaction_ptr& trx, } // Does not modify unapplied_transaction_queue -producer_plugin_impl::push_result -producer_plugin_impl::push_transaction( const fc::time_point& block_deadline, - const transaction_metadata_ptr& trx, - bool api_trx, - bool return_failure_trace, - const next_function& next ) -{ +producer_plugin_impl::push_result producer_plugin_impl::push_transaction(const fc::time_point& block_deadline, + const transaction_metadata_ptr& trx, + bool api_trx, + bool return_failure_trace, + block_time_tracker::trx_time_tracker& trx_tracker, + const next_function& next) { auto start = fc::time_point::now(); EOS_ASSERT(!trx->is_read_only(), producer_exception, "Unexpected read-only trx"); - chain::controller& chain = chain_plug->chain(); + chain::controller& chain = chain_plug->chain(); chain::subjective_billing& subjective_bill = chain.get_mutable_subjective_billing(); auto first_auth = trx->packed_trx()->get_transaction().first_authorizer(); - bool disable_subjective_enforcement = (api_trx && _disable_subjective_api_billing) - || (!api_trx && _disable_subjective_p2p_billing) - || subjective_bill.is_account_disabled( first_auth ) - || trx->is_transient(); - - if( !disable_subjective_enforcement && _account_fails.failure_limit( first_auth ) ) { - if( next ) { - auto except_ptr = std::static_pointer_cast( std::make_shared( - FC_LOG_MESSAGE( error, "transaction ${id} exceeded failure limit for account ${a} until ${next_reset_time}", - ("id", trx->id())( "a", first_auth ) - ("next_reset_time", _account_fails.next_reset_timepoint(chain.head_block_num(),chain.head_block_time()))) ) ); - log_trx_results( trx, except_ptr ); - next( except_ptr ); - } - _time_tracker.add_fail_time(fc::time_point::now() - start, trx->is_transient()); + bool disable_subjective_enforcement = (api_trx && _disable_subjective_api_billing) || + (!api_trx && _disable_subjective_p2p_billing) || + subjective_bill.is_account_disabled(first_auth) || + trx->is_transient(); + + if (!disable_subjective_enforcement && _account_fails.failure_limit(first_auth)) { + if (next) { + auto except_ptr = std::static_pointer_cast(std::make_shared( + FC_LOG_MESSAGE(error, "transaction ${id} exceeded failure limit for account ${a} until ${next_reset_time}", + ("id", trx->id())("a", first_auth) + ("next_reset_time", _account_fails.next_reset_timepoint(chain.head_block_num(), chain.head_block_time()))))); + log_trx_results(trx, except_ptr); + next(except_ptr); + } return push_result{.failed = true}; } - fc::microseconds max_trx_time = fc::milliseconds( _max_transaction_time_ms.load() ); - if( max_trx_time.count() < 0 ) max_trx_time = fc::microseconds::maximum(); + fc::microseconds max_trx_time = fc::milliseconds(_max_transaction_time_ms.load()); + if (max_trx_time.count() < 0) + max_trx_time = fc::microseconds::maximum(); int64_t sub_bill = 0; - if( !disable_subjective_enforcement ) - sub_bill = subjective_bill.get_subjective_bill( first_auth, fc::time_point::now() ); + if (!disable_subjective_enforcement) + sub_bill = 
subjective_bill.get_subjective_bill(first_auth, fc::time_point::now()); auto prev_billed_cpu_time_us = trx->billed_cpu_time_us; - if( _pending_block_mode == pending_block_mode::producing && prev_billed_cpu_time_us > 0 ) { + if (in_producing_mode() && prev_billed_cpu_time_us > 0) { const auto& rl = chain.get_resource_limits_manager(); - if ( !subjective_bill.is_account_disabled( first_auth ) && !rl.is_unlimited_cpu( first_auth ) ) { - int64_t prev_billed_plus100_us = prev_billed_cpu_time_us + EOS_PERCENT( prev_billed_cpu_time_us, 100 * config::percent_1 ); - if( prev_billed_plus100_us < max_trx_time.count() ) max_trx_time = fc::microseconds( prev_billed_plus100_us ); + if (!subjective_bill.is_account_disabled(first_auth) && !rl.is_unlimited_cpu(first_auth)) { + int64_t prev_billed_plus100_us = prev_billed_cpu_time_us + EOS_PERCENT(prev_billed_cpu_time_us, 100 * config::percent_1); + if (prev_billed_plus100_us < max_trx_time.count()) + max_trx_time = fc::microseconds(prev_billed_plus100_us); } } - auto trace = chain.push_transaction( trx, block_deadline, max_trx_time, prev_billed_cpu_time_us, false, sub_bill ); + auto trace = chain.push_transaction(trx, block_deadline, max_trx_time, prev_billed_cpu_time_us, false, sub_bill); + + auto pr = handle_push_result(trx, next, start, chain, trace, return_failure_trace, disable_subjective_enforcement, first_auth, sub_bill, prev_billed_cpu_time_us); - return handle_push_result(trx, next, start, chain, trace, return_failure_trace, disable_subjective_enforcement, first_auth, sub_bill, prev_billed_cpu_time_us); + if (!pr.failed) { + trx_tracker.trx_success(); + } + return pr; } producer_plugin_impl::push_result -producer_plugin_impl::handle_push_result( const transaction_metadata_ptr& trx, - const next_function<transaction_trace_ptr>& next, - const fc::time_point& start, - chain::controller& chain, - const transaction_trace_ptr& trace, - bool return_failure_trace, - bool disable_subjective_enforcement, - account_name first_auth, - int64_t sub_bill, - uint32_t prev_billed_cpu_time_us) { - auto end = fc::time_point::now(); +producer_plugin_impl::handle_push_result(const transaction_metadata_ptr& trx, + const next_function<transaction_trace_ptr>& next, + const fc::time_point& start, + chain::controller& chain, + const transaction_trace_ptr& trace, + bool return_failure_trace, + bool disable_subjective_enforcement, + account_name first_auth, + int64_t sub_bill, + uint32_t prev_billed_cpu_time_us) { + auto end = fc::time_point::now(); chain::subjective_billing& subjective_bill = chain.get_mutable_subjective_billing(); push_result pr; if( trace->except ) { - // Transient trxs are dry-run or read-only. - // Dry-run trxs only run in write window. Read-only trxs can run in - // both write and read windows; time spent in read window is counted - // by read window summary. - if ( chain.is_write_window() ) { - auto dur = end - start; - _time_tracker.add_fail_time(dur, trx->is_transient()); - } if( exception_is_exhausted( *trace->except ) ) { - if( _pending_block_mode == pending_block_mode::producing ) { - fc_dlog(trx->is_transient() ? 
_transient_trx_failed_trace_log : _trx_failed_trace_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx->id())); } else { - fc_dlog(trx->is_transient() ? _transient_trx_failed_trace_log : _trx_failed_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", ("txid", trx->id())); + fc_dlog(trx->is_transient() ? _transient_trx_failed_trace_log : _trx_failed_trace_log, + "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", ("txid", trx->id())); } - if ( !trx->is_read_only() ) + if (!trx->is_read_only()) pr.block_exhausted = block_is_exhausted(); // smaller trx might fit pr.trx_exhausted = true; } else { - pr.failed = true; + pr.failed = true; const fc::exception& e = *trace->except; - if( e.code() != tx_duplicate::code_value ) { - fc_tlog( _log, "Subjective bill for failed ${a}: ${b} elapsed ${t}us, time ${r}us", - ("a",first_auth)("b",sub_bill)("t",trace->elapsed)("r", end - start)); + if (e.code() != tx_duplicate::code_value) { + fc_tlog(_log, "Subjective bill for failed ${a}: ${b} elapsed ${t}us, time ${r}us", + ("a", first_auth)("b", sub_bill)("t", trace->elapsed)("r", end - start)); if (!disable_subjective_enforcement) // subjectively bill failure when producing since not in objective cpu account billing - subjective_bill.subjective_bill_failure( first_auth, trace->elapsed, fc::time_point::now() ); + subjective_bill.subjective_bill_failure(first_auth, trace->elapsed, fc::time_point::now()); - log_trx_results( trx, trace, start ); + log_trx_results(trx, trace, start); // this failed our configured maximum transaction time, we don't want to replay it - fc_tlog( _log, "Failed ${c} trx, auth: ${a}, prev billed: ${p}us, ran: ${r}us, id: ${id}, except: ${e}", - ("c", e.code())("a", first_auth)("p", prev_billed_cpu_time_us) - ( "r", end - start)("id", trx->id())("e", e) ); - if( !disable_subjective_enforcement ) - _account_fails.add( first_auth, e ); + fc_tlog(_log, "Failed ${c} trx, auth: ${a}, prev billed: ${p}us, ran: ${r}us, id: ${id}, except: ${e}", + ("c", e.code())("a", first_auth)("p", prev_billed_cpu_time_us)("r", end - start)("id", trx->id())("e", e)); + if (!disable_subjective_enforcement) + _account_fails.add(first_auth, e); } - if( next ) { - if( return_failure_trace ) { - next( trace ); + if (next) { + if (return_failure_trace) { + next(trace); } else { auto e_ptr = trace->except->dynamic_copy_exception(); - next( e_ptr ); + next(e_ptr); } } } } else { - fc_tlog( _log, "Subjective bill for success ${a}: ${b} elapsed ${t}us, time ${r}us", - ("a",first_auth)("b",sub_bill)("t",trace->elapsed)("r", end - start)); - // Transient trxs are dry-run or read-only. - // Dry-run trxs only run in write window. Read-only trxs can run in - // both write and read windows; time spent in read window is counted - // by read window summary. 
- if ( chain.is_write_window() ) { - auto dur = end - start; - _time_tracker.add_success_time(dur, trx->is_transient()); - } - log_trx_results( trx, trace, start ); + fc_tlog(_log, "Subjective bill for success ${a}: ${b} elapsed ${t}us, time ${r}us", + ("a", first_auth)("b", sub_bill)("t", trace->elapsed)("r", end - start)); + log_trx_results(trx, trace, start); // if producing then trx is in objective cpu account billing - if (!disable_subjective_enforcement && _pending_block_mode != pending_block_mode::producing) { - subjective_bill.subjective_bill( trx->id(), trx->packed_trx()->expiration(), first_auth, trace->elapsed ); + if (!disable_subjective_enforcement && !in_producing_mode()) { + subjective_bill.subjective_bill(trx->id(), trx->packed_trx()->expiration(), first_auth, trace->elapsed); } - if( next ) next( trace ); + if (next) + next(trace); } return pr; } -bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadline ) -{ +bool producer_plugin_impl::process_unapplied_trxs(const fc::time_point& deadline) { bool exhausted = false; - if( !_unapplied_transactions.empty() ) { - const chain::controller& chain = chain_plug->chain(); - const auto pending_block_num = chain.pending_block_num(); - int num_applied = 0, num_failed = 0, num_processed = 0; - auto unapplied_trxs_size = _unapplied_transactions.size(); - auto itr = _unapplied_transactions.unapplied_begin(); - auto end_itr = _unapplied_transactions.unapplied_end(); - while( itr != end_itr ) { - if( should_interrupt_start_block( deadline, pending_block_num ) ) { + if (!_unapplied_transactions.empty()) { + const chain::controller& chain = chain_plug->chain(); + const auto pending_block_num = chain.pending_block_num(); + int num_applied = 0, num_failed = 0, num_processed = 0; + auto unapplied_trxs_size = _unapplied_transactions.size(); + auto itr = _unapplied_transactions.unapplied_begin(); + auto end_itr = _unapplied_transactions.unapplied_end(); + while (itr != end_itr) { + if (should_interrupt_start_block(deadline, pending_block_num)) { exhausted = true; break; } ++num_processed; try { - push_result pr = push_transaction( deadline, itr->trx_meta, false, itr->return_failure_trace, itr->next ); + auto trx_tracker = _time_tracker.start_trx(itr->trx_meta->is_transient()); + push_result pr = push_transaction(deadline, itr->trx_meta, false, itr->return_failure_trace, trx_tracker, itr->next); exhausted = pr.block_exhausted; - if( exhausted ) { + if (exhausted) { break; } else { - if( pr.failed ) { + if (pr.failed) { ++num_failed; } else { ++num_applied; } } - if( !pr.trx_exhausted ) { - itr = _unapplied_transactions.erase( itr ); + if (!pr.trx_exhausted) { + itr = _unapplied_transactions.erase(itr); } else { ++itr; // keep exhausted } continue; - } LOG_AND_DROP(); + } + LOG_AND_DROP(); ++num_failed; ++itr; } - fc_dlog( _log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", - ("m", num_processed)( "n", unapplied_trxs_size )("applied", num_applied)("failed", num_failed) ); + fc_dlog(_log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", + ("m", num_processed)("n", unapplied_trxs_size)("applied", num_applied)("failed", num_failed)); } return !exhausted; } -void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ) -{ +void producer_plugin_impl::process_scheduled_and_incoming_trxs(const fc::time_point& deadline, 
unapplied_transaction_queue::iterator& itr) { // scheduled transactions - int num_applied = 0; - int num_failed = 0; - int num_processed = 0; - bool exhausted = false; + int num_applied = 0; + int num_failed = 0; + int num_processed = 0; + bool exhausted = false; double incoming_trx_weight = 0.0; - auto& blacklist_by_id = _blacklisted_transactions.get(); - chain::controller& chain = chain_plug->chain(); - time_point pending_block_time = chain.pending_block_time(); - auto end = _unapplied_transactions.incoming_end(); - const auto& sch_idx = chain.db().get_index(); - const auto scheduled_trxs_size = sch_idx.size(); - auto sch_itr = sch_idx.begin(); - while( sch_itr != sch_idx.end() ) { - if( sch_itr->delay_until > pending_block_time) break; // not scheduled yet - if( exhausted || deadline <= fc::time_point::now() ) { + auto& blacklist_by_id = _blacklisted_transactions.get(); + chain::controller& chain = chain_plug->chain(); + time_point pending_block_time = chain.pending_block_time(); + auto end = _unapplied_transactions.incoming_end(); + const auto& sch_idx = chain.db().get_index(); + const auto scheduled_trxs_size = sch_idx.size(); + auto sch_itr = sch_idx.begin(); + while (sch_itr != sch_idx.end()) { + if (sch_itr->delay_until > pending_block_time) + break; // not scheduled yet + if (exhausted || deadline <= fc::time_point::now()) { exhausted = true; break; } - if( sch_itr->published >= pending_block_time ) { + if (sch_itr->published >= pending_block_time) { ++sch_itr; continue; // do not allow schedule and execute in same block } @@ -2315,17 +2488,17 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p continue; } - const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated - const auto sch_expiration = sch_itr->expiration; - auto sch_itr_next = sch_itr; // save off next since sch_itr may be invalidated by loop + const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated + const auto sch_expiration = sch_itr->expiration; + auto sch_itr_next = sch_itr; // save off next since sch_itr may be invalidated by loop ++sch_itr_next; const auto next_delay_until = sch_itr_next != sch_idx.end() ? sch_itr_next->delay_until : sch_itr->delay_until; - const auto next_id = sch_itr_next != sch_idx.end() ? sch_itr_next->id : sch_itr->id; + const auto next_id = sch_itr_next != sch_idx.end() ? 
sch_itr_next->id : sch_itr->id; num_processed++; // configurable ratio of incoming txns vs deferred txns - while (incoming_trx_weight >= 1.0 && itr != end ) { + while (incoming_trx_weight >= 1.0 && itr != end) { if (deadline <= fc::time_point::now()) { exhausted = true; break; @@ -2334,18 +2507,20 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p incoming_trx_weight -= 1.0; auto trx_meta = itr->trx_meta; - bool api_trx = itr->trx_type == trx_enum_type::incoming_api; + bool api_trx = itr->trx_type == trx_enum_type::incoming_api; - push_result pr = push_transaction( deadline, trx_meta, api_trx, itr->return_failure_trace, itr->next ); + auto trx_tracker = _time_tracker.start_trx(trx_meta->is_transient()); + push_result pr = push_transaction(deadline, trx_meta, api_trx, itr->return_failure_trace, trx_tracker, itr->next); exhausted = pr.block_exhausted; - if( pr.trx_exhausted ) { + if (pr.trx_exhausted) { ++itr; // leave in incoming } else { - itr = _unapplied_transactions.erase( itr ); + itr = _unapplied_transactions.erase(itr); } - if( exhausted ) break; + if (exhausted) + break; } if (exhausted || deadline <= fc::time_point::now()) { @@ -2354,34 +2529,35 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p } auto get_first_authorizer = [&](const transaction_trace_ptr& trace) { - for( const auto& a : trace->action_traces ) { - for( const auto& u : a.act.authorization ) + for (const auto& a : trace->action_traces) { + for (const auto& u : a.act.authorization) return u.actor; } return account_name(); }; try { - auto start = fc::time_point::now(); - fc::microseconds max_trx_time = fc::milliseconds( _max_transaction_time_ms.load() ); - if( max_trx_time.count() < 0 ) max_trx_time = fc::microseconds::maximum(); + auto start = fc::time_point::now(); + auto trx_tracker = _time_tracker.start_trx(false, start); // delayed transaction cannot be transient + fc::microseconds max_trx_time = fc::milliseconds(_max_transaction_time_ms.load()); + if (max_trx_time.count() < 0) + max_trx_time = fc::microseconds::maximum(); auto trace = chain.push_scheduled_transaction(trx_id, deadline, max_trx_time, 0, false); - auto end = fc::time_point::now(); + auto end = fc::time_point::now(); if (trace->except) { - _time_tracker.add_fail_time(end - start, false); // delayed transaction cannot be transient if (exception_is_exhausted(*trace->except)) { - if( block_is_exhausted() ) { + if (block_is_exhausted()) { exhausted = true; break; } } else { fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${txid}, time: ${r}, auth: ${a} : ${details}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("txid", trx_id)("r", end - start)("a", get_first_authorizer(trace)) - ("details", get_detailed_contract_except_info(nullptr, trace, nullptr))); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${entire_trace}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx_id)("r", end - start) + ("a", get_first_authorizer(trace))("details", get_detailed_contract_except_info(nullptr, trace, nullptr))); + fc_dlog(_trx_trace_failure_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING scheduled tx: ${entire_trace}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("entire_trace", chain_plug->get_log_trx_trace(trace))); // this failed our 
configured maximum transaction time, we don't want to replay it; add it to a blacklist @@ -2389,75 +2565,79 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p num_failed++; } } else { - _time_tracker.add_success_time(end - start, false); // delayed transaction cannot be transient + trx_tracker.trx_success(); fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${txid}, time: ${r}, auth: ${a}, cpu: ${cpu}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("txid", trx_id)("r", end - start)("a", get_first_authorizer(trace)) - ("cpu", trace->receipt ? trace->receipt->cpu_usage_us : 0)); - fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${entire_trace}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer())("txid", trx_id)("r", end - start) + ("a", get_first_authorizer(trace))("cpu", trace->receipt ? trace->receipt->cpu_usage_us : 0)); + fc_dlog(_trx_trace_success_log, + "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING scheduled tx: ${entire_trace}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) ("entire_trace", chain_plug->get_log_trx_trace(trace))); num_applied++; } - } LOG_AND_DROP(); + } + LOG_AND_DROP(); incoming_trx_weight += _incoming_defer_ratio; - if( sch_itr_next == sch_idx.end() ) break; - sch_itr = sch_idx.lower_bound( boost::make_tuple( next_delay_until, next_id ) ); + if (sch_itr_next == sch_idx.end()) + break; + sch_itr = sch_idx.lower_bound(boost::make_tuple(next_delay_until, next_id)); } - if( scheduled_trxs_size > 0 ) { - fc_dlog( _log, - "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", - ( "m", num_processed )( "n", scheduled_trxs_size )( "applied", num_applied )( "failed", num_failed ) ); + if (scheduled_trxs_size > 0) { + fc_dlog(_log, "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", + ("m", num_processed)("n", scheduled_trxs_size)("applied", num_applied)("failed", num_failed)); } } -bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr ) -{ +bool producer_plugin_impl::process_incoming_trxs(const fc::time_point& deadline, unapplied_transaction_queue::iterator& itr) { bool exhausted = false; - auto end = _unapplied_transactions.incoming_end(); - if( itr != end ) { + auto end = _unapplied_transactions.incoming_end(); + if (itr != end) { size_t processed = 0; - fc_dlog( _log, "Processing ${n} pending transactions", ("n", _unapplied_transactions.incoming_size()) ); - const chain::controller& chain = chain_plug->chain(); - const auto pending_block_num = chain.pending_block_num(); - while( itr != end ) { - if ( should_interrupt_start_block( deadline, pending_block_num ) ) { + fc_dlog(_log, "Processing ${n} pending transactions", ("n", _unapplied_transactions.incoming_size())); + const chain::controller& chain = chain_plug->chain(); + const auto pending_block_num = chain.pending_block_num(); + while (itr != end) { + if (should_interrupt_start_block(deadline, pending_block_num)) { exhausted = true; break; } auto trx_meta = itr->trx_meta; - bool api_trx = itr->trx_type == trx_enum_type::incoming_api; + bool api_trx = itr->trx_type == trx_enum_type::incoming_api; - push_result pr = push_transaction( deadline, trx_meta, api_trx, itr->return_failure_trace,
itr->next ); + auto trx_tracker = _time_tracker.start_trx(trx_meta->is_transient()); + push_result pr = push_transaction(deadline, trx_meta, api_trx, itr->return_failure_trace, trx_tracker, itr->next); exhausted = pr.block_exhausted; - if( pr.trx_exhausted ) { + if (pr.trx_exhausted) { ++itr; // leave in incoming } else { - itr = _unapplied_transactions.erase( itr ); + itr = _unapplied_transactions.erase(itr); } - if( exhausted ) break; + if (exhausted) + break; ++processed; } - fc_dlog( _log, "Processed ${n} pending transactions, ${p} left", ("n", processed)("p", _unapplied_transactions.incoming_size()) ); + fc_dlog(_log, "Processed ${n} pending transactions, ${p} left", ("n", processed)("p", _unapplied_transactions.incoming_size())); } return !exhausted; } bool producer_plugin_impl::block_is_exhausted() const { const chain::controller& chain = chain_plug->chain(); - const auto& rl = chain.get_resource_limits_manager(); + const auto& rl = chain.get_resource_limits_manager(); const uint64_t cpu_limit = rl.get_block_cpu_limit(); - if( cpu_limit < _max_block_cpu_usage_threshold_us ) return true; + if (cpu_limit < _max_block_cpu_usage_threshold_us) + return true; const uint64_t net_limit = rl.get_block_net_limit(); - if( net_limit < _max_block_net_usage_threshold_bytes ) return true; + if (net_limit < _max_block_net_usage_threshold_bytes) + return true; return false; } @@ -2472,21 +2652,21 @@ void producer_plugin_impl::schedule_production_loop() { auto result = start_block(); - _idle_trx_time = fc::time_point::now(); - if (result == start_block_result::failed) { elog("Failed to start a pending block, will try again later"); - _timer.expires_from_now( boost::posix_time::microseconds( config::block_interval_us / 10 )); + _timer.expires_from_now(boost::posix_time::microseconds(config::block_interval_us / 10)); // we failed to start a block, so try again later? 
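The retry is armed just below with the guard pattern this plugin applies to every timer: the completion handler captures a weak_ptr to the impl plus a snapshot of a counter (_timer_corelation_id in the real code) that is bumped on each re-arm, so an expiry that raced with a newer schedule, or with shutdown, is silently dropped. A minimal sketch of the guard using plain boost::asio; the names here are illustrative, not the plugin's:

#include <boost/asio.hpp>
#include <memory>

struct production_loop_sketch : std::enable_shared_from_this<production_loop_sketch> {
   boost::asio::deadline_timer timer;
   uint32_t                    correlation_id = 0; // bumped on every re-arm

   explicit production_loop_sketch(boost::asio::io_context& ioc) : timer(ioc) {}

   void schedule(boost::posix_time::time_duration delay) {
      timer.expires_from_now(delay);
      timer.async_wait([weak = weak_from_this(), cid = ++correlation_id](const boost::system::error_code& ec) {
         auto self = weak.lock();
         // run only if the owner is alive, the wait was not cancelled, and no
         // newer schedule() superseded this expiry (cid would then mismatch)
         if (self && ec != boost::asio::error::operation_aborted && cid == self->correlation_id)
            self->schedule(boost::posix_time::milliseconds(500)); // stand-in for schedule_production_loop()
      });
   }
};
// note: the object must be owned by a std::shared_ptr (e.g. created with
// std::make_shared) for weak_from_this() to return a usable weak_ptr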
- _timer.async_wait( app().executor().wrap( priority::high, exec_queue::read_write, - [weak_this = weak_from_this(), cid = ++_timer_corelation_id]( const boost::system::error_code& ec ) { - auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { - self->schedule_production_loop(); - } - } ) ); - } else if (result == start_block_result::waiting_for_block){ + _timer.async_wait( + app().executor().wrap(priority::high, + exec_queue::read_write, + [weak_this = weak_from_this(), cid = ++_timer_corelation_id](const boost::system::error_code& ec) { + auto self = weak_this.lock(); + if (self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id) { + self->schedule_production_loop(); + } + })); + } else if (result == start_block_result::waiting_for_block) { if (!_producers.empty() && !production_disabled_by_policy()) { fc_dlog(_log, "Waiting till another block is received and scheduling Speculative/Production Change"); schedule_delayed_production_loop(weak_from_this(), calculate_producer_wake_up_time(calculate_pending_block_time())); @@ -2498,64 +2678,67 @@ void producer_plugin_impl::schedule_production_loop() { } else if (result == start_block_result::waiting_for_production) { // scheduled in start_block() - } else if (_pending_block_mode == pending_block_mode::producing) { - schedule_maybe_produce_block( result == start_block_result::exhausted ); + } else if (in_producing_mode()) { + schedule_maybe_produce_block(result == start_block_result::exhausted); - } else if (_pending_block_mode == pending_block_mode::speculating && !_producers.empty() && !production_disabled_by_policy()){ + } else if (in_speculating_mode() && !_producers.empty() && !production_disabled_by_policy()) { chain::controller& chain = chain_plug->chain(); fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change"); - EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state" ); + EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state"); schedule_delayed_production_loop(weak_from_this(), calculate_producer_wake_up_time(chain.pending_block_timestamp())); } else { fc_dlog(_log, "Speculative Block Created"); } + + _time_tracker.add_other_time(); } -void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { +void producer_plugin_impl::schedule_maybe_produce_block(bool exhausted) { chain::controller& chain = chain_plug->chain(); // we succeeded but block may be exhausted - static const boost::posix_time::ptime epoch( boost::gregorian::date( 1970, 1, 1 ) ); - auto deadline = block_timing_util::calculate_block_deadline(_cpu_effort_us, _pending_block_mode, chain.pending_block_time() ); + static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); + auto deadline = block_timing_util::calculate_block_deadline(_cpu_effort_us, _pending_block_mode, chain.pending_block_time()); - if( !exhausted && deadline > fc::time_point::now() ) { + if (!exhausted && deadline > fc::time_point::now()) { // ship this block off no later than its deadline - EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, - "producing without pending_block_state, start_block succeeded" ); - _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ) ); - fc_dlog( _log, "Scheduling Block Production on Normal Block #${num} for ${time}", - ("num", 
chain.head_block_num() + 1)( "time", deadline ) ); + EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded"); + _timer.expires_at(epoch + boost::posix_time::microseconds(deadline.time_since_epoch().count())); + fc_dlog(_log, "Scheduling Block Production on Normal Block #${num} for ${time}", + ("num", chain.head_block_num() + 1)("time", deadline)); } else { - EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); - _timer.expires_from_now( boost::posix_time::microseconds( 0 ) ); - fc_dlog( _log, "Scheduling Block Production on ${desc} Block #${num} immediately", - ("num", chain.head_block_num() + 1)("desc", block_is_exhausted() ? "Exhausted" : "Deadline exceeded") ); + EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state"); + _timer.expires_from_now(boost::posix_time::microseconds(0)); + fc_dlog(_log, "Scheduling Block Production on ${desc} Block #${num} immediately", + ("num", chain.head_block_num() + 1)("desc", block_is_exhausted() ? "Exhausted" : "Deadline exceeded")); } - _timer.async_wait( app().executor().wrap( priority::high, exec_queue::read_write, - [&chain, weak_this = weak_from_this(), cid=++_timer_corelation_id](const boost::system::error_code& ec) { - auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { - // pending_block_state expected, but can't assert inside async_wait - auto block_num = chain.is_building_block() ? chain.head_block_num() + 1 : 0; - fc_dlog( _log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now()) ); - auto res = self->maybe_produce_block(); - fc_dlog( _log, "Producing Block #${num} returned: ${res}", ("num", block_num)( "res", res ) ); - } - } ) ); + _timer.async_wait(app().executor().wrap( + priority::high, + exec_queue::read_write, + [&chain, weak_this = weak_from_this(), cid = ++_timer_corelation_id](const boost::system::error_code& ec) { + auto self = weak_this.lock(); + if (self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id) { + // pending_block_state expected, but can't assert inside async_wait + auto block_num = chain.is_building_block() ? 
chain.head_block_num() + 1 : 0; + fc_dlog(_log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now())); + auto res = self->maybe_produce_block(); + fc_dlog(_log, "Producing Block #${num} returned: ${res}", ("num", block_num)("res", res)); + } + })); } -std::optional producer_plugin_impl::calculate_producer_wake_up_time( const block_timestamp_type& ref_block_time ) const { +std::optional producer_plugin_impl::calculate_producer_wake_up_time(const block_timestamp_type& ref_block_time) const { auto ref_block_slot = ref_block_time.slot; // if we have any producers then we should at least set a timer for our next available slot uint32_t wake_up_slot = UINT32_MAX; for (const auto& p : _producers) { auto next_producer_block_slot = calculate_next_block_slot(p, ref_block_slot); - wake_up_slot = std::min(next_producer_block_slot, wake_up_slot); + wake_up_slot = std::min(next_producer_block_slot, wake_up_slot); } - if( wake_up_slot == UINT32_MAX ) { + if (wake_up_slot == UINT32_MAX) { fc_dlog(_log, "Not Scheduling Speculative/Production, no local producers had valid wake up times"); return {}; } @@ -2563,31 +2746,31 @@ std::optional producer_plugin_impl::calculate_producer_wake_up_t return block_timing_util::production_round_block_start_time(_cpu_effort_us, block_timestamp_type(wake_up_slot)); } -void producer_plugin_impl::schedule_delayed_production_loop(const std::weak_ptr& weak_this, std::optional wake_up_time) { +void producer_plugin_impl::schedule_delayed_production_loop(const std::weak_ptr& weak_this, + std::optional wake_up_time) { if (wake_up_time) { fc_dlog(_log, "Scheduling Speculative/Production Change at ${time}", ("time", wake_up_time)); static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); _timer.expires_at(epoch + boost::posix_time::microseconds(wake_up_time->time_since_epoch().count())); - _timer.async_wait( app().executor().wrap( priority::high, exec_queue::read_write, - [weak_this,cid=++_timer_corelation_id](const boost::system::error_code& ec) { + _timer.async_wait(app().executor().wrap( + priority::high, exec_queue::read_write, [weak_this, cid = ++_timer_corelation_id](const boost::system::error_code& ec) { auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { + if (self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id) { self->schedule_production_loop(); } - } ) ); + })); } } bool producer_plugin_impl::maybe_produce_block() { - auto reschedule = fc::make_scoped_exit([this]{ - schedule_production_loop(); - }); + auto reschedule = fc::make_scoped_exit([this] { schedule_production_loop(); }); try { produce_block(); return true; - } LOG_AND_DROP(); + } + LOG_AND_DROP(); fc_dlog(_log, "Aborting block due to produce_block error"); abort_block(); @@ -2596,13 +2779,11 @@ bool producer_plugin_impl::maybe_produce_block() { static auto make_debug_time_logger() { auto start = fc::time_point::now(); - return fc::make_scoped_exit([=](){ - fc_dlog(_log, "Signing took ${ms}us", ("ms", fc::time_point::now() - start) ); - }); + return fc::make_scoped_exit([=]() { fc_dlog(_log, "Signing took ${ms}us", ("ms", fc::time_point::now() - start)); }); } static auto maybe_make_debug_time_logger() -> std::optional { - if (_log.is_enabled( fc::log_level::debug ) ){ + if (_log.is_enabled(fc::log_level::debug)) { return make_debug_time_logger(); } else { return {}; @@ -2611,33 +2792,37 @@ static auto 
maybe_make_debug_time_logger() -> std::optionalchain(); - EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it"); + EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, + "pending_block_state does not exist but it should, another plugin may have corrupted it"); - const auto& auth = chain.pending_block_signing_authority(); + const auto& auth = chain.pending_block_signing_authority(); std::vector> relevant_providers; relevant_providers.reserve(_signature_providers.size()); - producer_authority::for_each_key(auth, [&](const public_key_type& key){ + producer_authority::for_each_key(auth, [&](const public_key_type& key) { const auto& iter = _signature_providers.find(key); if (iter != _signature_providers.end()) { relevant_providers.emplace_back(iter->second); } }); - EOS_ASSERT(relevant_providers.size() > 0, producer_priv_key_not_found, "Attempting to produce a block for which we don't have any relevant private keys"); + EOS_ASSERT(relevant_providers.size() > 0, producer_priv_key_not_found, + "Attempting to produce a block for which we don't have any relevant private keys"); if (_protocol_features_signaled) { _protocol_features_to_activate.clear(); // clear _protocol_features_to_activate as it is already set in pending_block _protocol_features_signaled = false; } - //idump( (fc::time_point::now() - chain.pending_block_time()) ); + // idump( (fc::time_point::now() - chain.pending_block_time()) ); controller::block_report br; - chain.finalize_block( br, [&]( const digest_type& d ) { - auto debug_logger = maybe_make_debug_time_logger(); + chain.finalize_block(br, [&](const digest_type& d) { + auto debug_logger = maybe_make_debug_time_logger(); vector sigs; sigs.reserve(relevant_providers.size()); @@ -2646,70 +2831,70 @@ void producer_plugin_impl::produce_block() { sigs.emplace_back(p.get()(d)); } return sigs; - } ); + }); chain.commit_block(); block_state_ptr new_bs = chain.head_block_state(); - _time_tracker.report(_idle_trx_time, new_bs->block_num); - br.total_time += fc::time_point::now() - start; if (_update_produced_block_metrics) { _update_produced_block_metrics( - {.unapplied_transactions_total = _unapplied_transactions.size(), - .blacklisted_transactions_total = _blacklisted_transactions.size(), - .subjective_bill_account_size_total = chain.get_subjective_billing().get_account_cache_size(), - .scheduled_trxs_total = chain.db().get_index().size(), - .trxs_produced_total = new_bs->block->transactions.size(), - .cpu_usage_us = br.total_cpu_usage_us, - .net_usage_us = br.total_net_usage, - .last_irreversible = chain.last_irreversible_block_num(), - .head_block_num = chain.head_block_num()}); + {.unapplied_transactions_total = _unapplied_transactions.size(), + .blacklisted_transactions_total = _blacklisted_transactions.size(), + .subjective_bill_account_size_total = chain.get_subjective_billing().get_account_cache_size(), + .scheduled_trxs_total = chain.db().get_index().size(), + .trxs_produced_total = new_bs->block->transactions.size(), + .cpu_usage_us = br.total_cpu_usage_us, + .net_usage_us = br.total_net_usage, + .last_irreversible = chain.last_irreversible_block_num(), + .head_block_num = chain.head_block_num()}); } ilog("Produced block ${id}... 
#${n} @ ${t} signed by ${p} " "[trxs: ${count}, lib: ${lib}, confirmed: ${confs}, net: ${net}, cpu: ${cpu}, elapsed: ${et}, time: ${tt}]", - ("p",new_bs->header.producer)("id",new_bs->id.str().substr(8,16)) - ("n",new_bs->block_num)("t",new_bs->header.timestamp) - ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num()) - ("net", br.total_net_usage)("cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time) - ("confs", new_bs->header.confirmed)); + ("p", new_bs->header.producer)("id", new_bs->id.str().substr(8, 16))("n", new_bs->block_num)("t", new_bs->header.timestamp) + ("count", new_bs->block->transactions.size())("lib", chain.last_irreversible_block_num())("net", br.total_net_usage) + ("cpu", br.total_cpu_usage_us)("et", br.total_elapsed_time)("tt", br.total_time)("confs", new_bs->header.confirmed)); + + _time_tracker.add_other_time(); + _time_tracker.report(new_bs->block_num, new_bs->block->producer); + _time_tracker.clear(); } void producer_plugin::received_block(uint32_t block_num) { my->_received_block = block_num; } -void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id, const packed_transaction_ptr& packed_trx_ptr, const char* reason) const { +void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id, + const packed_transaction_ptr& packed_trx_ptr, + const char* reason) const { fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); - fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why}", - ("txid", trx_id)("why", reason)); + fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why}", ("txid", trx_id)("why", reason)); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", - ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); + ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); } // Called from only one read_only thread void producer_plugin_impl::switch_to_write_window() { - if ( _log.is_enabled( fc::log_level::debug ) ) { + if (_log.is_enabled(fc::log_level::debug)) { auto now = fc::time_point::now(); - fc_dlog( _log, "Read-only threads ${n}, read window ${r}us, total all threads ${t}us", - ("n", _ro_thread_pool_size) - ("r", now - _ro_read_window_start_time) - ("t", _ro_all_threads_exec_time_us.load())); + fc_dlog(_log, "Read-only threads ${n}, read window ${r}us, total all threads ${t}us", + ("n", _ro_thread_pool_size)("r", now - _ro_read_window_start_time)("t", _ro_all_threads_exec_time_us.load())); } chain::controller& chain = chain_plug->chain(); // this method can be called from multiple places. it is possible // we are already in write window. 
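As the comment notes, switch_to_write_window() must tolerate redundant calls: it is reachable both from the read-window timer and from the last read-only thread draining its queue. The overall control flow is a two-phase alternation, paraphrased in the sketch below; the durations and field names are placeholders, not the plugin's actual configuration:

#include <chrono>
#include <cstdint>

// Sketch of the alternation: stay writable for the write-window duration, then
// open a read window only if read-only work is queued; the read window ends at
// its deadline or as soon as a block at the pending height arrives.
struct window_alternation_sketch {
   using clock = std::chrono::steady_clock;
   std::chrono::microseconds write_window_time{200000};
   std::chrono::microseconds read_window_time{60000};
   clock::time_point         window_deadline;
   bool                      in_write_window = true;

   void start_write_window() { // idempotent entry point, like the real one
      in_write_window = true;
      window_deadline = clock::now() + write_window_time;
   }
   void on_write_window_expired(bool read_only_queue_empty) {
      if (read_only_queue_empty) { // nothing to run read-only: stay writable
         start_write_window();
         return;
      }
      in_write_window = false;
      window_deadline = clock::now() + read_window_time;
   }
   // the should_exit() predicate handed to the read-only threads
   bool read_window_should_exit(uint32_t received_block, uint32_t pending_block_num) const {
      return clock::now() >= window_deadline || received_block >= pending_block_num;
   }
};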
- if ( chain.is_write_window() ) { + if (chain.is_write_window()) { return; } - EOS_ASSERT(_ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "no read-only tasks should be running before switching to write window"); + EOS_ASSERT(_ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, + "no read-only tasks should be running before switching to write window"); start_write_window(); } @@ -2722,17 +2907,18 @@ void producer_plugin_impl::start_write_window() { app().executor().set_to_write_window(); chain.set_to_write_window(); chain.unset_db_read_only_mode(); - _idle_trx_time = _ro_window_deadline = fc::time_point::now(); + auto now = fc::time_point::now(); + _time_tracker.unpause(now); - _ro_window_deadline += _ro_write_window_time_us; // not allowed on block producers, so no need to limit to block deadline + _ro_window_deadline = now + _ro_write_window_time_us; // not allowed on block producers, so no need to limit to block deadline auto expire_time = boost::posix_time::microseconds(_ro_write_window_time_us.count()); - _ro_timer.expires_from_now( expire_time ); - _ro_timer.async_wait( app().executor().wrap( // stay on app thread + _ro_timer.expires_from_now(expire_time); + _ro_timer.async_wait(app().executor().wrap( // stay on app thread priority::high, exec_queue::read_write, // placed in read_write so only called from main thread - [weak_this = weak_from_this()]( const boost::system::error_code& ec ) { + [weak_this = weak_from_this()](const boost::system::error_code& ec) { auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted ) { + if (self && ec != boost::asio::error::operation_aborted) { self->switch_to_read_window(); } })); @@ -2741,22 +2927,22 @@ void producer_plugin_impl::start_write_window() { // Called only from app thread void producer_plugin_impl::switch_to_read_window() { chain::controller& chain = chain_plug->chain(); - EOS_ASSERT(chain.is_write_window(), producer_exception, "expected to be in write window"); - EOS_ASSERT( _ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "_ro_exec_tasks_fut expected to be empty" ); + EOS_ASSERT(chain.is_write_window(), producer_exception, "expected to be in write window"); + EOS_ASSERT(_ro_num_active_exec_tasks.load() == 0 && _ro_exec_tasks_fut.empty(), producer_exception, "_ro_exec_tasks_fut expected to be empty"); - _time_tracker.add_idle_time( fc::time_point::now() - _idle_trx_time ); + _time_tracker.pause(); // we are in write window, so no read-only trx threads are processing transactions. - if ( app().executor().read_only_queue().empty() ) { // no read-only tasks to process. stay in write window - start_write_window(); // restart write window timer for next round + if (app().executor().read_only_queue().empty()) { // no read-only tasks to process. 
stay in write window + start_write_window(); // restart write window timer for next round return; } uint32_t pending_block_num = chain.head_block_num() + 1; _ro_read_window_start_time = fc::time_point::now(); - _ro_window_deadline = _ro_read_window_start_time + _ro_read_window_effective_time_us; - app().executor().set_to_read_window(_ro_thread_pool_size, - [received_block=&_received_block, pending_block_num, ro_window_deadline=_ro_window_deadline]() { + _ro_window_deadline = _ro_read_window_start_time + _ro_read_window_effective_time_us; + app().executor().set_to_read_window( + _ro_thread_pool_size, [received_block = &_received_block, pending_block_num, ro_window_deadline = _ro_window_deadline]() { return fc::time_point::now() >= ro_window_deadline || (received_block->load() >= pending_block_num); // should_exit() }); chain.set_to_read_window(); @@ -2766,32 +2952,29 @@ void producer_plugin_impl::switch_to_read_window() { // start a read-only execution task in each thread in the thread pool _ro_num_active_exec_tasks = _ro_thread_pool_size; _ro_exec_tasks_fut.resize(0); - for (uint32_t i = 0; i < _ro_thread_pool_size; ++i ) { - _ro_exec_tasks_fut.emplace_back( post_async_task( _ro_thread_pool.get_executor(), [self = this, pending_block_num] () { - return self->read_only_execution_task(pending_block_num); - }) ); + for (uint32_t i = 0; i < _ro_thread_pool_size; ++i) { + _ro_exec_tasks_fut.emplace_back(post_async_task( + _ro_thread_pool.get_executor(), [self = this, pending_block_num]() { return self->read_only_execution_task(pending_block_num); })); } auto expire_time = boost::posix_time::microseconds(_ro_read_window_time_us.count()); - _ro_timer.expires_from_now( expire_time ); + _ro_timer.expires_from_now(expire_time); // Needs to be on read_only because that is what is being processed until switch_to_write_window(). - _ro_timer.async_wait( app().executor().wrap( - priority::high, - exec_queue::read_only, - [weak_this = weak_from_this()]( const boost::system::error_code& ec ) { + _ro_timer.async_wait( + app().executor().wrap(priority::high, exec_queue::read_only, [weak_this = weak_from_this()](const boost::system::error_code& ec) { auto self = weak_this.lock(); - if( self && ec != boost::asio::error::operation_aborted ) { + if (self && ec != boost::asio::error::operation_aborted) { // use future to make sure all read-only tasks finished before switching to write window - for ( auto& task: self->_ro_exec_tasks_fut ) { + for (auto& task : self->_ro_exec_tasks_fut) { task.get(); } self->_ro_exec_tasks_fut.clear(); // will be executed from the main app thread because all read-only threads are idle now self->switch_to_write_window(); - } else if ( self ) { - self->_ro_exec_tasks_fut.clear(); - } - })); + } else if (self) { + self->_ro_exec_tasks_fut.clear(); + } + })); } // Called from a read only thread. Run in parallel with app and other read only threads @@ -2800,27 +2983,27 @@ bool producer_plugin_impl::read_only_execution_task(uint32_t pending_block_num) // 1. pass read window deadline // 2. net_plugin receives a block // 3. no read-only tasks to execute - while ( fc::time_point::now() < _ro_window_deadline && _received_block < pending_block_num ) { + while (fc::time_point::now() < _ro_window_deadline && _received_block < pending_block_num) { bool more = app().executor().execute_highest_read_only(); // blocks until all read only threads are idle - if ( !more ) { + if (!more) { break; } } // If all tasks are finished, do not wait until end of read window; switch to write window now. 
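The hunk below is the classic "last one out" idiom: each read-only thread decrements _ro_num_active_exec_tasks when its queue runs dry, and the single thread that observes zero posts the switch back to the write window and requeues any exhausted transactions. Stripped of appbase, the idiom reduces to this sketch:

#include <atomic>
#include <functional>
#include <thread>
#include <vector>

// N workers drain read-only work; the worker whose decrement reaches zero is
// guaranteed unique, so the switch-over action runs exactly once.
inline void run_read_window(unsigned n_threads,
                            const std::function<void()>& drain_tasks,
                            const std::function<void()>& switch_to_write) {
   std::atomic<unsigned> active{n_threads};
   std::vector<std::thread> pool;
   for (unsigned i = 0; i < n_threads; ++i) {
      pool.emplace_back([&] {
         drain_tasks();         // run queued read-only transactions until told to stop
         if (--active == 0)     // atomic fetch_sub: exactly one thread sees zero
            switch_to_write();  // single switch back to the write window
      });
   }
   for (auto& t : pool)
      t.join();
}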
- if ( --_ro_num_active_exec_tasks == 0 ) { + if (--_ro_num_active_exec_tasks == 0) { // Needs to be on read_only because that is what is being processed until switch_to_write_window(). - app().executor().post( priority::high, exec_queue::read_only, [self=this]() { + app().executor().post(priority::high, exec_queue::read_only, [self = this]() { self->_ro_exec_tasks_fut.clear(); // will be executed from the main app thread because all read-only threads are idle now self->switch_to_write_window(); - } ); + }); // last thread post any exhausted back into read_only queue with slightly higher priority (low+1) so they are executed first ro_trx_t t; - while( _ro_exhausted_trx_queue.pop_front(t) ) { - app().executor().post(priority::low+1, exec_queue::read_only, [this, trx{std::move(t.trx)}, next{std::move(t.next)}]() mutable { - push_read_only_transaction( std::move(trx), std::move(next) ); - } ); + while (_ro_exhausted_trx_queue.pop_front(t)) { + app().executor().post(priority::low + 1, exec_queue::read_only, [this, trx{std::move(t.trx)}, next{std::move(t.next)}]() mutable { + push_read_only_transaction(std::move(trx), std::move(next)); + }); } } @@ -2830,15 +3013,15 @@ bool producer_plugin_impl::read_only_execution_task(uint32_t pending_block_num) // Called from app thread during start block. // Reschedule any exhausted read-only transactions from the last block void producer_plugin_impl::repost_exhausted_transactions(const fc::time_point& deadline) { - if ( !_ro_exhausted_trx_queue.empty() ) { - chain::controller& chain = chain_plug->chain(); - uint32_t pending_block_num = chain.pending_block_num(); + if (!_ro_exhausted_trx_queue.empty()) { + chain::controller& chain = chain_plug->chain(); + uint32_t pending_block_num = chain.pending_block_num(); // post any exhausted back into read_only queue with slightly higher priority (low+1) so they are executed first ro_trx_t t; - while( !should_interrupt_start_block( deadline, pending_block_num ) && _ro_exhausted_trx_queue.pop_front(t) ) { - app().executor().post(priority::low+1, exec_queue::read_only, [this, trx{std::move(t.trx)}, next{std::move(t.next)}]() mutable { - push_read_only_transaction( std::move(trx), std::move(next) ); - } ); + while (!should_interrupt_start_block(deadline, pending_block_num) && _ro_exhausted_trx_queue.pop_front(t)) { + app().executor().post(priority::low + 1, exec_queue::read_only, [this, trx{std::move(t.trx)}, next{std::move(t.next)}]() mutable { + push_read_only_transaction(std::move(trx), std::move(next)); + }); } } } @@ -2849,50 +3032,56 @@ bool producer_plugin_impl::push_read_only_transaction(transaction_metadata_ptr t auto retry = false; try { - auto start = fc::time_point::now(); + auto start = fc::time_point::now(); chain::controller& chain = chain_plug->chain(); - if ( !chain.is_building_block() ) { - _ro_exhausted_trx_queue.push_front( {std::move(trx), std::move(next)} ); + if (!chain.is_building_block()) { + _ro_exhausted_trx_queue.push_front({std::move(trx), std::move(next)}); return true; } // When executing a read-only trx on the main thread while in the write window, // need to switch db mode to read only. 
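The mode switch in the hunk below is protected by fc::make_scoped_exit, so read-only mode is always unset on the way out, whether push_transaction returns or throws. A self-contained guard in the same spirit (a sketch, not a drop-in for the fc utility) looks like this:

#include <utility>

template <typename Callback>
struct scoped_exit_sketch {
   Callback callback;
   explicit scoped_exit_sketch(Callback&& c) : callback(std::move(c)) {}
   scoped_exit_sketch(const scoped_exit_sketch&) = delete;
   ~scoped_exit_sketch() { callback(); } // runs on return and on throw alike
};

template <typename Callback>
scoped_exit_sketch<Callback> make_scoped_exit_sketch(Callback&& c) {
   return scoped_exit_sketch<Callback>(std::move(c));
}

// usage mirroring the hunk: read-only mode is restored on every exit path,
// even if the transaction push throws
void push_one_read_only(bool is_write_window) {
   bool db_read_only = false;
   auto guard = make_scoped_exit_sketch([&] {
      if (is_write_window)
         db_read_only = false;
   });
   if (is_write_window)
      db_read_only = true;
   // ... chain.push_transaction(...) would go here ...
}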
- auto db_read_only_mode_guard = fc::make_scoped_exit([&]{ - if( chain.is_write_window() ) + auto db_read_only_mode_guard = fc::make_scoped_exit([&] { + if (chain.is_write_window()) chain.unset_db_read_only_mode(); }); + std::optional trx_tracker; if ( chain.is_write_window() ) { chain.set_db_read_only_mode(); - auto idle_time = fc::time_point::now() - _idle_trx_time; - _time_tracker.add_idle_time( idle_time ); + trx_tracker.emplace(_time_tracker.start_trx(true, start)); } // use read-window/write-window deadline if there are read/write windows, otherwise use block_deadline if only the app thread auto window_deadline = (_ro_thread_pool_size != 0) ? _ro_window_deadline : _pending_block_deadline; // Ensure the trx finishes by the end of the read window, write window, or block_deadline, depending on whether read-only threads are in use - auto trace = chain.push_transaction( trx, window_deadline, _ro_max_trx_time_us, 0, false, 0 ); + auto trace = chain.push_transaction(trx, window_deadline, _ro_max_trx_time_us, 0, false, 0); _ro_all_threads_exec_time_us += (fc::time_point::now() - start).count(); - auto pr = handle_push_result(trx, next, start, chain, trace, true /*return_failure_trace*/, true /*disable_subjective_enforcement*/, {} /*first_auth*/, 0 /*sub_bill*/, 0 /*prev_billed_cpu_time_us*/); + auto pr = handle_push_result(trx, next, start, chain, trace, + true, // return_failure_trace + true, // disable_subjective_enforcement + {}, // first_auth + 0, // sub_bill + 0); // prev_billed_cpu_time_us // If a transaction was exhausted, that indicates we are close to // the end of read window. Retry in next round. retry = pr.trx_exhausted; - if( retry ) { - _ro_exhausted_trx_queue.push_front( {std::move(trx), std::move(next)} ); + if (retry) { + _ro_exhausted_trx_queue.push_front({std::move(trx), std::move(next)}); } - if ( chain.is_write_window() ) { - _idle_trx_time = fc::time_point::now(); + if ( chain.is_write_window() && !pr.failed ) { + trx_tracker->trx_success(); } - } catch ( const guard_exception& e ) { + } catch (const guard_exception& e) { chain_plugin::handle_guard_exception(e); - } catch ( boost::interprocess::bad_alloc& ) { + } catch (boost::interprocess::bad_alloc&) { chain_apis::api_base::handle_db_exhaustion(); - } catch ( std::bad_alloc& ) { + } catch (std::bad_alloc&) { chain_apis::api_base::handle_bad_alloc(); - } CATCH_AND_CALL(next); + } + CATCH_AND_CALL(next); return retry; } @@ -2901,11 +3090,11 @@ const std::set& producer_plugin::producer_accounts() const { return my->_producers; } -void producer_plugin::register_update_produced_block_metrics(std::function&& fun){ +void producer_plugin::register_update_produced_block_metrics(std::function&& fun) { my->_update_produced_block_metrics = std::move(fun); } -void producer_plugin::register_update_incoming_block_metrics(std::function&& fun){ +void producer_plugin::register_update_incoming_block_metrics(std::function&& fun) { my->_update_incoming_block_metrics = std::move(fun); } diff --git a/plugins/producer_plugin/test/CMakeLists.txt b/plugins/producer_plugin/test/CMakeLists.txt index 2eee8c8b8f..42c42596f8 100644 --- a/plugins/producer_plugin/test/CMakeLists.txt +++ b/plugins/producer_plugin/test/CMakeLists.txt @@ -1,9 +1,8 @@ add_executable( test_producer_plugin test_trx_full.cpp test_options.cpp - test_read_only_trx.cpp test_block_timing_util.cpp main.cpp ) -target_link_libraries( test_producer_plugin producer_plugin eosio_testing ) +target_link_libraries( test_producer_plugin producer_plugin eosio_testing eosio_chain_wrap ) add_test(NAME test_producer_plugin COMMAND
plugins/producer_plugin/test/test_producer_plugin WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) \ No newline at end of file diff --git a/plugins/producer_plugin/test/test_options.cpp b/plugins/producer_plugin/test/test_options.cpp index 0f96dd2424..3fe429b6a9 100644 --- a/plugins/producer_plugin/test/test_options.cpp +++ b/plugins/producer_plugin/test/test_options.cpp @@ -30,17 +30,21 @@ BOOST_AUTO_TEST_CASE(state_dir) { std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", - "--data-dir", temp_dir_str.c_str(), - "--state-dir", custom_state_dir_str.c_str(), - "--config-dir", temp_dir_str.c_str(), - "-p", "eosio", "-e", "--max-transaction-time", "475", "--disable-subjective-billing=true" }; - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", + "--data-dir", temp_dir_str.c_str(), + "--state-dir", custom_state_dir_str.c_str(), + "--config-dir", temp_dir_str.c_str(), + "-p", "eosio", "-e" }; + app->initialize( argv.size(), (char**) &argv[0] ); + app->startup(); + plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); auto[prod_plug, chain_plug] = plugin_fut.get(); diff --git a/plugins/producer_plugin/test/test_read_only_trx.cpp b/plugins/producer_plugin/test/test_read_only_trx.cpp deleted file mode 100644 index 5030d9713d..0000000000 --- a/plugins/producer_plugin/test/test_read_only_trx.cpp +++ /dev/null @@ -1,200 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace eosio::test::detail { -using namespace eosio::chain::literals; -struct testit { - uint64_t id; - testit( uint64_t id = 0 ) - :id(id){} - static account_name get_account() { - return chain::config::system_account_name; - } - static action_name get_name() { - return "testit"_n; - } -}; -} -FC_REFLECT( eosio::test::detail::testit, (id) ) - -namespace { -using namespace eosio; -using namespace eosio::chain; -using namespace eosio::test::detail; - -auto make_unique_trx( const chain_id_type& chain_id ) { - static uint64_t nextid = 0; - ++nextid; - account_name creator = config::system_account_name; - signed_transaction trx; - trx.expiration = fc::time_point_sec{fc::time_point::now() + fc::seconds( nextid % 50 == 0 ? 
0 : 60 )}; // fail some transactions via expired - if( nextid % 10 == 0 ) { - // fail some for authorization (read-only transaction should not have authorization) - trx.actions.emplace_back( vector{{creator, config::active_name}}, testit{nextid} ); - } else { - vector no_auth{}; - trx.actions.emplace_back( no_auth, testit{nextid} ); - } - return std::make_shared( std::move(trx) ); -} -} - -BOOST_AUTO_TEST_SUITE(read_only_trxs) - -enum class app_init_status { failed, succeeded }; - -void test_configs_common(std::vector& specific_args, app_init_status expected_status) { - appbase::scoped_app app; - fc::temp_directory temp; - auto temp_dir_str = temp.path().string(); - - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; - argv.insert( argv.end(), specific_args.begin(), specific_args.end() ); - - // app->initialize() returns a boolean. BOOST_CHECK_EQUAL cannot compare - // a boolean with a app_init_status directly - bool rc = (expected_status == app_init_status::succeeded) ? true : false; - BOOST_CHECK_EQUAL( app->initialize( argv.size(), (char**) &argv[0]), rc ); -} - -// --read-only-thread not allowed on producer node -BOOST_AUTO_TEST_CASE(read_only_on_producer) { - std::vector specific_args = {"-p", "eosio", "-e", "--read-only-threads", "2" }; - test_configs_common(specific_args, app_init_status::failed); -} - -// read_window_time must be greater than max_transaction_time + 10ms -BOOST_AUTO_TEST_CASE(invalid_read_window_time) { - std::vector specific_args = { "--read-only-threads", "2", "--max-transaction-time", "10", "--read-only-write-window-time-us", "50000", "--read-only-read-window-time-us", "20000" }; // 20000 not greater than --max-transaction-time (10ms) + 10000us (minimum margin) - test_configs_common(specific_args, app_init_status::failed); -} - -// if --read-only-threads is not configured, read-only trx related configs should -// not be checked -BOOST_AUTO_TEST_CASE(not_check_configs_if_no_read_only_threads) { - std::vector specific_args = { "--max-transaction-time", "10", "--read-only-write-window-time-us", "50000", "--read-only-read-window-time-us", "20000" }; // 20000 not greater than --max-transaction-time (10ms) + 10000us (minimum margin) - test_configs_common(specific_args, app_init_status::succeeded); -} - -void test_trxs_common(std::vector& specific_args) { - using namespace std::chrono_literals; - appbase::scoped_app app; - fc::temp_directory temp; - auto temp_dir_str = temp.path().string(); - producer_plugin::set_test_mode(true); - - std::promise> plugin_promise; - std::future> plugin_fut = plugin_promise.get_future(); - std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; - argv.insert( argv.end(), specific_args.begin(), specific_args.end() ); - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value( {app->find_plugin(), app->find_plugin()} ); - app->exec(); - } ); - - auto[prod_plug, chain_plug] = plugin_fut.get(); - auto chain_id = chain_plug->get_chain_id(); - - std::atomic next_calls = 0; - std::atomic num_get_account_calls = 0; - std::atomic num_posts = 0; - std::atomic trace_with_except = 0; - std::atomic trx_match = true; - const size_t num_pushes = 4242; - - for( size_t i = 1; i <= num_pushes; ++i ) { - auto ptrx = make_unique_trx( chain_id ); - 
app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); - ++num_get_account_calls; - }); - app->executor().post( priority::low, exec_queue::read_write, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { - ++num_posts; - bool return_failure_traces = true; - app->get_method()(ptrx, - false, // api_trx - transaction_metadata::trx_type::read_only, // trx_type - return_failure_traces, - [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] - (const next_function_variant& result) { - if( !std::holds_alternative( result ) && !std::get( result )->except ) { - if( std::get( result )->id != ptrx->id() ) { - elog( "trace not for trx ${id}: ${t}", - ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); - trx_match = false; - } - } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { - elog( "trace with except ${e}", - ("e", fc::json::to_pretty_string( *std::get( result ) )) ); - ++trace_with_except; - } - ++next_calls; - }); - }); - app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { - chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); - }); - } - - // Wait long enough such that all transactions are executed - auto start = fc::time_point::now(); - auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever - while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ - std::this_thread::sleep_for( 100ms );; - } - - app->quit(); - app_thread.join(); - - BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it - BOOST_CHECK_EQUAL( num_pushes, num_posts ); - BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); - BOOST_CHECK_EQUAL( num_pushes, num_get_account_calls.load() ); - BOOST_CHECK( trx_match.load() ); // trace should match the transaction -} - -// test read-only trxs on main thread (no --read-only-threads) -BOOST_AUTO_TEST_CASE(no_read_only_threads) { - std::vector specific_args = { "-p", "eosio", "-e", "--abi-serializer-max-time-ms=999" }; - test_trxs_common(specific_args); -} - -// test read-only trxs on 1 threads (with --read-only-threads) -BOOST_AUTO_TEST_CASE(with_1_read_only_threads) { - std::vector specific_args = { "-p", "eosio", "-e", - "--read-only-threads=1", - "--max-transaction-time=10", - "--abi-serializer-max-time-ms=999", - "--read-only-write-window-time-us=100000", - "--read-only-read-window-time-us=40000", - "--disable-subjective-billing=true" }; - test_trxs_common(specific_args); -} - -// test read-only trxs on 8 separate threads (with --read-only-threads) -BOOST_AUTO_TEST_CASE(with_8_read_only_threads) { - std::vector specific_args = { "-p", "eosio", "-e", - "--read-only-threads=8", - "--max-transaction-time=10", - "--abi-serializer-max-time-ms=999", - "--read-only-write-window-time-us=100000", - "--read-only-read-window-time-us=40000", - "--disable-subjective-billing=true" }; - test_trxs_common(specific_args); -} - - -BOOST_AUTO_TEST_SUITE_END() diff --git a/plugins/producer_plugin/test/test_trx_full.cpp b/plugins/producer_plugin/test/test_trx_full.cpp index 
d63d27d4f9..129b135114 100644 --- a/plugins/producer_plugin/test/test_trx_full.cpp +++ b/plugins/producer_plugin/test/test_trx_full.cpp @@ -99,24 +99,28 @@ BOOST_AUTO_TEST_SUITE(ordered_trxs_full) // Test verifies that transactions are processed, reported to caller, and not lost // even when blocks are aborted and some transactions fail. BOOST_AUTO_TEST_CASE(producer) { + fc::temp_directory temp; appbase::scoped_app app; - fc::temp_directory temp; auto temp_dir_str = temp.path().string(); { std::promise> plugin_promise; std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread( [&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str(), - "-p", "eosio", "-e", "--disable-subjective-billing=true" }; - app->initialize( argv.size(), (char**) &argv[0] ); - app->startup(); - plugin_promise.set_value( - {app->find_plugin(), app->find_plugin()} ); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str(), + "-p", "eosio", "-e", "--disable-subjective-p2p-billing=true" }; + app->initialize( argv.size(), (char**) &argv[0] ); + app->startup(); + plugin_promise.set_value( + {app->find_plugin(), app->find_plugin()} ); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); } ); auto[prod_plug, chain_plug] = plugin_fut.get(); diff --git a/plugins/prometheus_plugin/include/eosio/prometheus_plugin/simple_rest_server.hpp b/plugins/prometheus_plugin/include/eosio/prometheus_plugin/simple_rest_server.hpp deleted file mode 100644 index c9bef7f05f..0000000000 --- a/plugins/prometheus_plugin/include/eosio/prometheus_plugin/simple_rest_server.hpp +++ /dev/null @@ -1,234 +0,0 @@ -#pragma once - -#include -#include -#include - -namespace eosio { namespace rest { - - // The majority of the code here are derived from boost source - // libs/beast/example/http/server/async/http_server_async.cpp - // with minimum modification and yet reusable. - - namespace beast = boost::beast; // from - namespace http = beast::http; // from - namespace net = boost::asio; // from - using tcp = boost::asio::ip::tcp; // from - template - class simple_server { - T* self() { return static_cast(this); } - - void fail(beast::error_code ec, char const* what) { self()->log_error(what, ec.message()); } - // Return a response for the given request. 
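handle_request in this deleted helper builds its error responses through small lambdas (bad_request, not_found, server_error) that capture the request so every response can echo the client's HTTP version and keep-alive preference. One of those builders, extracted as a free function over Boost.Beast types, is roughly:

#include <boost/beast/http.hpp>
#include <string>
#include <string_view>

namespace http = boost::beast::http;

inline http::response<http::string_body>
bad_request(const http::request<http::string_body>& req, std::string_view why) {
   http::response<http::string_body> res{http::status::bad_request, req.version()};
   res.set(http::field::content_type, "text/plain");
   res.keep_alive(req.keep_alive()); // mirror the client's connection semantics
   res.body() = std::string(why);
   res.prepare_payload();            // fills in Content-Length
   return res;
}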
- http::response handle_request(http::request&& req) { - auto server_header = self()->server_header(); - // Returns a bad request response - auto const bad_request = [&req, &server_header](std::string_view why) { - http::response res{ http::status::bad_request, req.version() }; - res.set(http::field::server, server_header); - res.set(http::field::content_type, "text/plain"); - res.keep_alive(req.keep_alive()); - res.body() = std::string(why); - res.prepare_payload(); - return res; - }; - - // Returns a not found response - auto const not_found = [&req, &server_header](std::string_view target) { - http::response res{ http::status::not_found, req.version() }; - res.set(http::field::server, server_header); - res.set(http::field::content_type, "text/plain"); - res.keep_alive(req.keep_alive()); - res.body() = "The resource '" + std::string(target) + "' was not found."; - res.prepare_payload(); - return res; - }; - - // Returns a server error response - auto const server_error = [&req, &server_header](std::string_view what) { - http::response res{ http::status::internal_server_error, req.version() }; - res.set(http::field::server, server_header); - res.set(http::field::content_type, "text/plain"); - res.keep_alive(req.keep_alive()); - res.body() = "An error occurred: '" + std::string(what) + "'"; - res.prepare_payload(); - return res; - }; - - // Make sure we can handle the method - if (!self()->allow_method(req.method())) - return bad_request("Unknown HTTP-method"); - - // Request path must be absolute and not contain "..". - std::string_view target{req.target().data(), req.target().size()}; - if (target.empty() || target[0] != '/' || target.find("..") != std::string_view::npos) - return bad_request("Illegal request-target"); - - try { - auto res = self()->on_request(std::move(req)); - if (!res) - return not_found(target); - return *res; - } catch (std::exception& ex) { return server_error(ex.what()); } - } - - class session : public std::enable_shared_from_this { - tcp::socket socket_; - boost::asio::io_context::strand strand_; - beast::flat_buffer buffer_; - http::request req_; - simple_server* server_; - std::shared_ptr> res_; - - public: - // Take ownership of the stream - session(net::io_context& ioc, tcp::socket&& socket, simple_server* server) - : socket_(std::move(socket)), strand_(ioc), server_(server) {} - - // Start the asynchronous operation - void run() { do_read(); } - - void do_read() { - // Make the request empty before reading, - // otherwise the operation behavior is undefined. - req_ = {}; - - // Read a request - http::async_read( - socket_, buffer_, req_, - boost::asio::bind_executor(strand_, [self = this->shared_from_this()](beast::error_code ec, - std::size_t bytes_transferred) { - self->on_read(ec, bytes_transferred); - })); - } - - void on_read(beast::error_code ec, std::size_t bytes_transferred) { - boost::ignore_unused(bytes_transferred); - - // This means they closed the connection - if (ec == http::error::end_of_stream) - return do_close(); - - if (ec) - return server_->fail(ec, "read"); - - // Send the response - send_response(server_->handle_request(std::move(req_))); - } - - void send_response(http::response&& msg) { - // The lifetime of the message has to extend - // for the duration of the async operation so - // we use a shared_ptr to manage it. 
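That comment marks the one subtle lifetime rule in the deleted server: http::async_write keeps reading from the response buffers until the operation completes, so the message is moved to the heap and the shared_ptr is captured by the completion handler, pinning it for exactly the duration of the write. Reduced to its essentials (Boost.Beast, with the stream type left generic):

#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
#include <cstddef>
#include <memory>

namespace beast = boost::beast;
namespace http  = beast::http;

template <typename Stream>
void send_response(Stream& stream, http::response<http::string_body>&& msg) {
   // the message must outlive the async operation; the handler's copy of the
   // shared_ptr keeps it alive until the write has finished with its buffers
   auto res = std::make_shared<http::response<http::string_body>>(std::move(msg));
   http::async_write(stream, *res,
                     [res](beast::error_code /*ec*/, std::size_t /*bytes_transferred*/) {
                        // res is released here, only after the write completed
                     });
}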
- res_ = std::make_shared>(std::move(msg)); - - // Write the response - http::async_write(socket_, *res_, - boost::asio::bind_executor(socket_.get_executor(), - [self = this->shared_from_this(), close = res_->need_eof()]( - beast::error_code ec, std::size_t bytes_transferred) { - self->on_write(ec, bytes_transferred, close); - })); - } - - void on_write(boost::system::error_code ec, std::size_t bytes_transferred, bool close) { - boost::ignore_unused(bytes_transferred); - - if (ec) - return server_->fail(ec, "write"); - - if (close) { - // This means we should close the connection, usually because - // the response indicated the "Connection: close" semantic. - return do_close(); - } - - // We're done with the response so delete it - res_ = nullptr; - - // Read another request - do_read(); - } - - void do_close() { - // Send a TCP shutdown - beast::error_code ec; - socket_.shutdown(tcp::socket::shutdown_send, ec); - - // At this point the connection is closed gracefully - } - }; - - //------------------------------------------------------------------------------ - - // Accepts incoming connections and launches the sessions - class listener : public std::enable_shared_from_this { - net::io_context& ioc_; - tcp::acceptor acceptor_; - tcp::socket socket_; - simple_server* server_; - - public: - listener(net::io_context& ioc, tcp::endpoint endpoint, simple_server* server) - : ioc_(ioc), acceptor_(ioc), socket_(ioc), server_(server) { - boost::system::error_code ec; - - // Open the acceptor - acceptor_.open(endpoint.protocol(), ec); - if (ec) { - server_->fail(ec, "open"); - return; - } - - // Allow address reuse - acceptor_.set_option(net::socket_base::reuse_address(true), ec); - if (ec) { - server_->fail(ec, "set_option"); - return; - } - - // Bind to the server address - acceptor_.bind(endpoint, ec); - if (ec) { - server_->fail(ec, "bind"); - return; - } - - // Start listening for connections - acceptor_.listen(net::socket_base::max_listen_connections, ec); - if (ec) { - server_->fail(ec, "listen"); - return; - } - } - - // Start accepting incoming connections - void run() { - if (!acceptor_.is_open()) - return; - do_accept(); - } - - private: - void do_accept() { - acceptor_.async_accept( - socket_, [self = this->shared_from_this()](boost::system::error_code ec) { self->on_accept(ec); }); - } - - void on_accept(boost::system::error_code ec) { - if (ec) { - server_->fail(ec, "accept"); - } else { - // Create the session and run it - std::make_shared(ioc_, std::move(socket_), server_)->run(); - } - - // Accept another connection - do_accept(); - } - }; - - public: - void run(net::io_context& ioc, tcp::endpoint endpoint) { std::make_shared(ioc, endpoint, this)->run(); } - }; -}} // namespace eosio::rest \ No newline at end of file diff --git a/plugins/prometheus_plugin/prometheus_plugin.cpp b/plugins/prometheus_plugin/prometheus_plugin.cpp index c02f2cfc4b..869df66c4d 100644 --- a/plugins/prometheus_plugin/prometheus_plugin.cpp +++ b/plugins/prometheus_plugin/prometheus_plugin.cpp @@ -1,5 +1,4 @@ #include -#include #include #include @@ -14,56 +13,20 @@ namespace eosio { - static const char* prometheus_api_name = "/v1/prometheus/metrics"; using namespace prometheus; - using namespace chain::plugin_interface; static auto _prometheus_plugin = application::register_plugin(); - namespace http = boost::beast::http; - struct prometheus_plugin_impl : rest::simple_server { - - std::string server_header() const { - return http_plugin::get_server_header(); - } - - void log_error(char const* what, const 
std::string& message) {
-         elog("${what}: ${message}", ("what", what)("message", message));
-      }
-
-      bool allow_method(http::verb method) const {
-         return method == http::verb::get;
-      }
-
-      std::optional<http::response<http::string_body>>
-      on_request(http::request<http::string_body>&& req) {
-         if(req.target() != prometheus_api_name)
-            return {};
-         http::response<http::string_body> res{ http::status::ok, req.version() };
-         // Respond to GET request
-         res.set(http::field::server, server_header());
-         res.set(http::field::content_type, "text/plain");
-         res.keep_alive(req.keep_alive());
-         res.body() = _catalog.report();
-         res.prepare_payload();
-         return res;
+   struct prometheus_plugin_impl {
       eosio::chain::named_thread_pool _prometheus_thread_pool;
-      boost::asio::io_context::strand _prometheus_strand;
-      metrics::catalog_type _catalog;
-
-      boost::asio::ip::tcp::endpoint _endpoint;
+      boost::asio::io_context::strand _prometheus_strand;
+      metrics::catalog_type           _catalog;
+      fc::microseconds                _max_response_time_us;

       prometheus_plugin_impl(): _prometheus_strand(_prometheus_thread_pool.get_executor()){
          _catalog.register_update_handlers(_prometheus_strand);
       }
-
-      void start() {
-         run(_prometheus_thread_pool.get_executor(), _endpoint);
-         _prometheus_thread_pool.start(
-            1, [](const fc::exception& e) { elog("Prometheus exception ${e}", ("e", e)); });
-      }
    };

    prometheus_plugin::prometheus_plugin()
@@ -78,29 +41,38 @@ namespace eosio {
          "The local IP and port to listen for incoming prometheus metrics http request.");
    }

-   void prometheus_plugin::plugin_initialize(const variables_map& options) {
+   struct prometheus_api_handle {
+      prometheus_plugin_impl* _impl;
+
+      fc::time_point start() const {
+         return fc::time_point::now() + _impl->_max_response_time_us;
+      }

-      string lipstr = options.at("prometheus-exporter-address").as<string>();
-      EOS_ASSERT(lipstr.size() > 0, chain::plugin_config_exception, "prometheus-exporter-address must have a value");
+      void metrics(const fc::variant_object&, chain::plugin_interface::next_function<std::string> results) {
+         _impl->_prometheus_strand.post([this, results=std::move(results)]() {
+            results(_impl->_catalog.report());
+         });
+      }
+   };
+   using metrics_params = fc::variant_object;

-      string host = lipstr.substr(0, lipstr.find(':'));
-      string port = lipstr.substr(host.size() + 1, lipstr.size());
-      boost::system::error_code ec;
-      using tcp = boost::asio::ip::tcp;
-      tcp::resolver resolver(app().get_io_service());
+   void prometheus_plugin::plugin_initialize(const variables_map& options) {

-      my->_endpoint = *resolver.resolve(tcp::v4(), host, port, ec);
-      if (!ec) {
-         fc_ilog(logger(), "configured prometheus metrics exporter to listen on ${h}", ("h", lipstr));
-      } else {
-         fc_elog(logger(), "failed to configure prometheus metrics exporter to listen on ${h} (${m})",
-                 ("h", lipstr)("m", ec.message()));
-      }
+      auto& _http_plugin = app().get_plugin<http_plugin>();
+      my->_max_response_time_us = _http_plugin.get_max_response_time();
+
+      prometheus_api_handle handle{my.get()};
+      app().get_plugin<http_plugin>().add_async_api({
+         CALL_ASYNC_WITH_400(prometheus, prometheus, handle, eosio, metrics, std::string, 200, http_params_types::no_params)}
+         , http_content_type::plaintext);
    }

    void prometheus_plugin::plugin_startup() {
-      my->start();
+      my->_prometheus_thread_pool.start(1, [](const fc::exception& e) {
+            elog("Prometheus exception ${e}", ("e", e)); }
+      );
+      ilog("Prometheus plugin started.");
    }
diff --git a/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp b/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp
index cafebdded6..15cf0dcbdc 100644
---
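The new prometheus_api_handle::metrics above no longer touches a socket at all: it posts the work onto the plugin's strand and hands the finished report back through the http_plugin callback. A stripped-down sketch of that hand-off pattern in plain Asio; catalog_report and next_function here are stand-ins for the plugin's own types, not the real API.

    #include <boost/asio.hpp>
    #include <functional>
    #include <iostream>
    #include <string>

    // Stand-ins for the metrics catalog and http_plugin's response callback.
    std::string catalog_report() { return "# HELP example_metric ...\n"; }
    using next_function = std::function<void(const std::string&)>;

    int main() {
       boost::asio::io_context ioc;
       auto strand = boost::asio::make_strand(ioc);

       // The handler runs on the HTTP thread; the report is produced on the
       // metrics strand, then delivered back through the callback.
       auto metrics = [&](next_function results) {
          boost::asio::post(strand, [results = std::move(results)]() {
             results(catalog_report());
          });
       };

       metrics([](const std::string& body) { std::cout << body; });
       ioc.run();
    }

The design benefit over the removed Beast server is that HTTP parsing, TLS, timeouts, and response framing all stay in http_plugin; the prometheus plugin only serializes catalog access on its strand.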
a/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp +++ b/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp @@ -128,7 +128,7 @@ class resource_monitor_plugin_impl { } monitor_thread = std::thread( [this] { - fc::set_os_thread_name( "resmon" ); // console_appender uses 9 chars for thread name reporting. + fc::set_thread_name( "resmon" ); // console_appender uses 9 chars for thread name reporting. space_handler.space_monitor_loop(); ctx.run(); diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp index 2a20f9f059..3d3c920af1 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/session.hpp @@ -289,7 +289,7 @@ class blocks_result_send_queue_entry : public send_queue_entry_base, public std: template struct session : session_base, std::enable_shared_from_this> { private: - Plugin plugin; + Plugin& plugin; session_manager& session_mgr; std::optional> socket_stream; // ship thread only after creation std::string description; @@ -306,16 +306,16 @@ struct session : session_base, std::enable_shared_from_thisdefault_frame_size) { + , default_frame_size(plugin.default_frame_size) { description = to_description_string(); } void start() { - fc_ilog(plugin->logger(), "incoming connection from ${a}", ("a", description)); + fc_ilog(plugin.get_logger(), "incoming connection from ${a}", ("a", description)); socket_stream->auto_fragment(false); socket_stream->binary(true); if constexpr (std::is_same_v) { @@ -373,7 +373,7 @@ struct session : session_base, std::enable_shared_from_this& buf) { if (result.traces.has_value()) { - auto& optional_log = plugin->get_trace_log(); + auto& optional_log = plugin.get_trace_log(); if( optional_log ) { buf.emplace( optional_log->create_locked_decompress_stream() ); return optional_log->get_unpacked_entry( result.this_block->block_num, *buf ); @@ -385,7 +385,7 @@ struct session : session_base, std::enable_shared_from_this& buf) { if (result.deltas.has_value()) { - auto& optional_log = plugin->get_chain_state_log(); + auto& optional_log = plugin.get_chain_state_log(); if( optional_log ) { buf.emplace( optional_log->create_locked_decompress_stream() ); return optional_log->get_unpacked_entry( result.this_block->block_num, *buf ); @@ -395,7 +395,7 @@ struct session : session_base, std::enable_shared_from_thislogger(), "received get_status_request_v0"); + fc_dlog(plugin.get_logger(), "received get_status_request_v0"); auto self = this->shared_from_this(); auto entry_ptr = std::make_unique>(self); @@ -403,7 +403,7 @@ struct session : session_base, std::enable_shared_from_thislogger(), "received get_blocks_request_v0 = ${req}", ("req", req)); + fc_dlog(plugin.get_logger(), "received get_blocks_request_v0 = ${req}", ("req", req)); auto self = this->shared_from_this(); auto entry_ptr = std::make_unique>(self, std::move(req)); @@ -411,9 +411,9 @@ struct session : session_base, std::enable_shared_from_thislogger(), "received get_blocks_ack_request_v0 = ${req}", ("req", req)); + fc_dlog(plugin.get_logger(), "received get_blocks_ack_request_v0 = ${req}", ("req", req)); if (!current_request) { - fc_dlog(plugin->logger(), " no current get_blocks_request_v0, discarding the get_blocks_ack_request_v0"); + fc_dlog(plugin.get_logger(), " no current get_blocks_request_v0, discarding the get_blocks_ack_request_v0"); return; } @@ -423,48 +423,48 @@ struct session 
: session_base, std::enable_shared_from_thislogger(), "replying get_status_request_v0"); + fc_dlog(plugin.get_logger(), "replying get_status_request_v0"); state_history::get_status_result_v0 result; - result.head = plugin->get_block_head(); - result.last_irreversible = plugin->get_last_irreversible(); - result.chain_id = plugin->get_chain_id(); - auto&& trace_log = plugin->get_trace_log(); + result.head = plugin.get_block_head(); + result.last_irreversible = plugin.get_last_irreversible(); + result.chain_id = plugin.get_chain_id(); + auto&& trace_log = plugin.get_trace_log(); if (trace_log) { auto r = trace_log->block_range(); result.trace_begin_block = r.first; result.trace_end_block = r.second; } - auto&& chain_state_log = plugin->get_chain_state_log(); + auto&& chain_state_log = plugin.get_chain_state_log(); if (chain_state_log) { auto r = chain_state_log->block_range(); result.chain_state_begin_block = r.first; result.chain_state_end_block = r.second; } - fc_dlog(plugin->logger(), "pushing get_status_result_v0 to send queue"); + fc_dlog(plugin.get_logger(), "pushing get_status_result_v0 to send queue"); return result; } void update_current_request(state_history::get_blocks_request_v0& req) { - fc_dlog(plugin->logger(), "replying get_blocks_request_v0 = ${req}", ("req", req)); - to_send_block_num = req.start_block_num; + fc_dlog(plugin.get_logger(), "replying get_blocks_request_v0 = ${req}", ("req", req)); + to_send_block_num = std::max(req.start_block_num, plugin.get_first_available_block_num()); for (auto& cp : req.have_positions) { if (req.start_block_num <= cp.block_num) continue; - auto id = plugin->get_block_id(cp.block_num); + auto id = plugin.get_block_id(cp.block_num); if (!id || *id != cp.block_id) req.start_block_num = std::min(req.start_block_num, cp.block_num); if (!id) { to_send_block_num = std::min(to_send_block_num, cp.block_num); - fc_dlog(plugin->logger(), "block ${block_num} is not available", ("block_num", cp.block_num)); + fc_dlog(plugin.get_logger(), "block ${block_num} is not available", ("block_num", cp.block_num)); } else if (*id != cp.block_id) { to_send_block_num = std::min(to_send_block_num, cp.block_num); - fc_dlog(plugin->logger(), "the id for block ${block_num} in block request have_positions does not match the existing", + fc_dlog(plugin.get_logger(), "the id for block ${block_num} in block request have_positions does not match the existing", ("block_num", cp.block_num)); } } - fc_dlog(plugin->logger(), " get_blocks_request_v0 start_block_num set to ${num}", ("num", to_send_block_num)); + fc_dlog(plugin.get_logger(), " get_blocks_request_v0 start_block_num set to ${num}", ("num", to_send_block_num)); if( !req.have_positions.empty() ) { position_it = req.have_positions.begin(); @@ -480,18 +480,21 @@ struct session : session_base, std::enable_shared_from_thisget_last_irreversible(); + result.last_irreversible = plugin.get_last_irreversible(); uint32_t current = current_request->irreversible_only ? 
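The update_current_request hunk above walks the client's have_positions and pulls to_send_block_num back to the first block where the client's view diverges from, or is unknown to, the node, after clamping the start to the first block the node can actually serve. The core of that computation isolated as a small pure function; block_id_for is a stand-in for plugin.get_block_id, and the types are simplified.

    #include <algorithm>
    #include <cstdint>
    #include <optional>
    #include <string>
    #include <vector>

    struct position { uint32_t block_num; std::string block_id; };

    // Stub standing in for plugin.get_block_id(): empty if the block is unknown.
    std::optional<std::string> block_id_for(uint32_t) { return std::nullopt; }

    uint32_t first_block_to_send(uint32_t start_block_num,
                                 uint32_t first_available_block,
                                 const std::vector<position>& have_positions) {
       // Never start below what the node can serve.
       uint32_t to_send = std::max(start_block_num, first_available_block);
       for (const auto& cp : have_positions) {
          if (start_block_num <= cp.block_num)
             continue; // only positions below the requested start matter
          auto id = block_id_for(cp.block_num);
          // Unknown block or mismatched id: the client is on a fork (or ahead
          // of our logs), so resend from that point.
          if (!id || *id != cp.block_id)
             to_send = std::min(to_send, cp.block_num);
       }
       return to_send;
    }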
result.last_irreversible.block_num : result.head.block_num;

       if (to_send_block_num > current || to_send_block_num >= current_request->end_block_num) {
-         fc_dlog( plugin->logger(), "Not sending, to_send_block_num: ${s}, current: ${c} current_request.end_block_num: ${b}",
+         fc_dlog( plugin.get_logger(), "Not sending, to_send_block_num: ${s}, current: ${c} current_request.end_block_num: ${b}",
                   ("s", to_send_block_num)("c", current)("b", current_request->end_block_num) );
          session_mgr.pop_entry(false);
          return;
       }
-      auto block_id = plugin->get_block_id(to_send_block_num);
+      // Not just an optimization: during the accepted_block signal the block may not yet be findable
+      // by block_num in the fork database, because it is only validated after the signal completes.
+      std::optional<chain::block_id_type> block_id =
+          (block_state && block_state->block_num == to_send_block_num) ? block_state->id : plugin.get_block_id(to_send_block_num);

       if (block_id && position_it && (*position_it)->block_num == to_send_block_num) {
          // This branch happens when the head block of nodeos is behind the head block of connecting client.
@@ -512,22 +515,22 @@ struct session : session_base, std::enable_shared_from_this
-         auto prev_block_id = plugin->get_block_id(to_send_block_num - 1);
+         auto prev_block_id = plugin.get_block_id(to_send_block_num - 1);
          if (prev_block_id)
             result.prev_block = state_history::block_position{to_send_block_num - 1, *prev_block_id};
          if (current_request->fetch_block)
-            plugin->get_block(to_send_block_num, block_state, result.block);
-         if (current_request->fetch_traces && plugin->get_trace_log())
+            plugin.get_block(to_send_block_num, block_state, result.block);
+         if (current_request->fetch_traces && plugin.get_trace_log())
             result.traces.emplace();
-         if (current_request->fetch_deltas && plugin->get_chain_state_log())
+         if (current_request->fetch_deltas && plugin.get_chain_state_log())
            result.deltas.emplace();
       }
       ++to_send_block_num;

       // during syncing if block is older than 5 min, log every 1000th block
-      bool fresh_block = fc::time_point::now() - plugin->get_head_block_timestamp() < fc::minutes(5);
+      bool fresh_block = fc::time_point::now() - plugin.get_head_block_timestamp() < fc::minutes(5);
       if (fresh_block || (result.this_block && result.this_block->block_num % 1000 == 0)) {
-         fc_ilog(plugin->logger(),
+         fc_ilog(plugin.get_logger(),
                  "pushing result "
                  "{\"head\":{\"block_num\":${head}},\"last_irreversible\":{\"block_num\":${last_irr}},\"this_block\":{"
                  "\"block_num\":${this_block}}} to send queue",
@@ -557,7 +560,7 @@ struct session : session_base, std::enable_shared_from_this
-            result.head = plugin->get_block_head();
+            result.head = plugin.get_block_head();
             send_update(std::move(result), {});
          } else {
             session_mgr.pop_entry(false);
          }
@@ -571,26 +574,26 @@ struct session : session_base, std::enable_shared_from_this
-         fc_elog( plugin->logger(), "${e}", ("e", e.to_detail_string()) );
+         fc_elog( plugin.get_logger(), "${e}", ("e", e.to_detail_string()) );
       } catch( const std::exception& e ) {
-         fc_elog( plugin->logger(), "${e}", ("e", e.what()) );
+         fc_elog( plugin.get_logger(), "${e}", ("e", e.what()) );
       } catch( ...
) { - fc_elog( plugin->logger(), "unknown exception" ); + fc_elog( plugin.get_logger(), "unknown exception" ); } } else { if (ec == boost::asio::error::operation_aborted || ec == boost::asio::error::connection_reset || ec == boost::asio::error::eof || ec == boost::beast::websocket::error::closed) { - fc_dlog(plugin->logger(), "${w}: ${m}", ("w", what)("m", ec.message())); + fc_dlog(plugin.get_logger(), "${w}: ${m}", ("w", what)("m", ec.message())); } else { - fc_elog(plugin->logger(), "${w}: ${m}", ("w", what)("m", ec.message())); + fc_elog(plugin.get_logger(), "${w}: ${m}", ("w", what)("m", ec.message())); } } // on exception allow session to be destroyed - fc_ilog(plugin->logger(), "Closing connection from ${a}", ("a", description)); + fc_ilog(plugin.get_logger(), "Closing connection from ${a}", ("a", description)); session_mgr.remove( this->shared_from_this(), active_entry ); } }; diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp index 7e0c632804..017dda0b12 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp @@ -3,6 +3,7 @@ #include #include +#include namespace fc { class variant; @@ -11,7 +12,6 @@ class variant; namespace eosio { using chain::bytes; using std::shared_ptr; - typedef shared_ptr state_history_ptr; class state_history_plugin : public plugin { @@ -29,6 +29,9 @@ class state_history_plugin : public plugin { void handle_sighup() override; + const state_history_log* trace_log() const; + const state_history_log* chain_state_log() const; + private: state_history_ptr my; }; diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index a6567fd8c5..2407fdb2a4 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -7,8 +7,8 @@ #include #include #include -#include #include +#include #include #include @@ -19,17 +19,17 @@ #include #include +#include namespace ws = boost::beast::websocket; - namespace eosio { using namespace chain; using namespace state_history; using boost::signals2::scoped_connection; namespace bio = boost::iostreams; - static auto _state_history_plugin = application::register_plugin(); +static auto _state_history_plugin = application::register_plugin(); const std::string logger_name("state_history"); fc::logger _log; @@ -48,44 +48,38 @@ auto catch_and_log(F f) { } struct state_history_plugin_impl : std::enable_shared_from_this { + constexpr static uint64_t default_frame_size = 1024 * 1024; + +private: chain_plugin* chain_plug = nullptr; std::optional trace_log; std::optional chain_state_log; + uint32_t first_available_block = 0; bool trace_debug_mode = false; std::optional applied_transaction_connection; std::optional block_start_connection; std::optional accepted_block_connection; string endpoint_address; - uint16_t endpoint_port = 8080; string unix_path; state_history::trace_converter trace_converter; session_manager session_mgr; mutable std::mutex mtx; - block_id_type head_id; - block_id_type lib_id; - time_point head_timestamp; + block_id_type head_id; + block_id_type lib_id; + time_point head_timestamp; - constexpr static uint64_t default_frame_size = 1024 * 1024; - - template - struct generic_acceptor { - using socket_type = typename 
ACCEPTOR::protocol_type::socket; - explicit generic_acceptor(boost::asio::io_context& ioc) : acceptor_(ioc), socket_(ioc), error_timer_(ioc) {} - ACCEPTOR acceptor_; - socket_type socket_; - boost::asio::deadline_timer error_timer_; - }; + named_thread_pool thread_pool; - using tcp_acceptor = generic_acceptor; - using unix_acceptor = generic_acceptor; + bool plugin_started = false; - using acceptor_type = std::variant, std::unique_ptr>; - std::set acceptors; +public: + void plugin_initialize(const variables_map& options); + void plugin_startup(); + void plugin_shutdown(); + session_manager& get_session_manager() { return session_mgr; } - named_thread_pool thread_pool; - - static fc::logger& logger() { return _log; } + static fc::logger& get_logger() { return _log; } std::optional& get_trace_log() { return trace_log; } std::optional& get_chain_state_log(){ return chain_state_log; } @@ -120,10 +114,17 @@ struct state_history_plugin_impl : std::enable_shared_from_this get_block_id(uint32_t block_num) { - if (trace_log) - return trace_log->get_block_id(block_num); - if (chain_state_log) - return chain_state_log->get_block_id(block_num); + std::optional id; + if( trace_log ) { + id = trace_log->get_block_id( block_num ); + if( id ) + return id; + } + if( chain_state_log ) { + id = chain_state_log->get_block_id( block_num ); + if( id ) + return id; + } try { return chain_plug->chain().get_block_id_for_num(block_num); } catch (...) { @@ -149,96 +150,38 @@ struct state_history_plugin_impl : std::enable_shared_from_this(thread_pool.get_executor())); }; - auto init_unix_acceptor = [&]() { - // take a sniff and see if anything is already listening at the given socket path, or if the socket path exists - // but nothing is listening - { - boost::system::error_code test_ec; - boost::asio::local::stream_protocol::socket test_socket(app().get_io_service()); - test_socket.connect(unix_path.c_str(), test_ec); - - // looks like a service is already running on that socket, don't touch it... 
fail out - if (test_ec == boost::system::errc::success) - ec = boost::system::errc::make_error_code(boost::system::errc::address_in_use); - // socket exists but no one home, go ahead and remove it and continue on - else if (test_ec == boost::system::errc::connection_refused) - ::unlink(unix_path.c_str()); - else if (test_ec != boost::system::errc::no_such_file_or_directory) - ec = test_ec; - } - check_ec("open"); - acceptors.insert(std::make_unique(thread_pool.get_executor())); - }; - - // create and configure acceptors, can be both - if (!endpoint_address.empty()) init_tcp_acceptor(); - if (!unix_path.empty()) init_unix_acceptor(); - - // start it - std::for_each(acceptors.begin(), acceptors.end(), [&](const acceptor_type& acc) { - std::visit(overloaded{[&](const std::unique_ptr& tcp_acc) { - auto address = boost::asio::ip::make_address(endpoint_address); - auto endpoint = boost::asio::ip::tcp::endpoint{address, endpoint_port}; - tcp_acc->acceptor_.open(endpoint.protocol(), ec); - check_ec("open"); - tcp_acc->acceptor_.set_option(boost::asio::socket_base::reuse_address(true)); - tcp_acc->acceptor_.bind(endpoint, ec); - check_ec("bind"); - tcp_acc->acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec); - check_ec("listen"); - do_accept(*tcp_acc); - }, - [&](const std::unique_ptr& unx_acc) { - unx_acc->acceptor_.open(boost::asio::local::stream_protocol::acceptor::protocol_type(), ec); - check_ec("open"); - unx_acc->acceptor_.bind(unix_path.c_str(), ec); - check_ec("bind"); - unx_acc->acceptor_.listen(boost::asio::socket_base::max_listen_connections, ec); - check_ec("listen"); - do_accept(*unx_acc); - }}, - acc); - }); + template + void create_listener(const std::string& address) { + const boost::posix_time::milliseconds accept_timeout(200); + using socket_type = typename Protocol::socket; + fc::create_listener( + thread_pool.get_executor(), _log, accept_timeout, address, "", [this](socket_type&& socket) { + // Create a session object and run it + catch_and_log([&, this] { + auto s = std::make_shared>(*this, std::move(socket), + session_mgr); + session_mgr.insert(s); + s->start(); + }); + }); } - template - void do_accept(Acceptor& acc) { - // &acceptor kept alive by self, reference into acceptors set - acc.acceptor_.async_accept(acc.socket_, [self = shared_from_this(), &acc](const boost::system::error_code& ec) { - if (ec == boost::system::errc::too_many_files_open) { - fc_elog(_log, "ship accept() error: too many files open - waiting 200ms"); - acc.error_timer_.expires_from_now(boost::posix_time::milliseconds(200)); - acc.error_timer_.async_wait([self = self->shared_from_this(), &acc](const boost::system::error_code& ec) { - if (!ec) - catch_and_log([&] { self->do_accept(acc); }); - }); - } else { - if (ec) - fc_elog(_log, "ship accept() error: ${m} - closing connection", ("m", ec.message())); - else { - // Create a session object and run it - catch_and_log([&] { - auto s = std::make_shared, typename Acceptor::socket_type>>(self, std::move(acc.socket_), self->session_mgr); - self->session_mgr.insert(s); - s->start(); - }); - } - - // Accept another connection - catch_and_log([&] { self->do_accept(acc); }); + void listen(){ + try { + if (!endpoint_address.empty()) { + create_listener(endpoint_address); } - }); + if (!unix_path.empty()) { + create_listener(unix_path); + } + } catch (std::exception&) { + FC_THROW_EXCEPTION(plugin_exception, "unable to open listen socket"); + } } // called from main thread @@ -247,15 +190,18 @@ struct state_history_plugin_impl : 
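The create_listener template above collapses the previous hand-rolled TCP and UNIX acceptor variants into one helper parameterized on the Asio protocol, with a session-factory callback. A bare-bones sketch of that shape in plain Asio follows; fc::create_listener additionally does address parsing, logging, and the too-many-files backoff, which this sketch omits, and accept_loop is an illustrative name.

    #include <boost/asio.hpp>
    #include <iostream>
    #include <memory>
    #include <utility>

    namespace asio = boost::asio;

    // Works for both asio::ip::tcp and asio::local::stream_protocol:
    // accept in a loop and hand each connected socket to the factory.
    template <typename Protocol, typename SessionFactory>
    void accept_loop(std::shared_ptr<typename Protocol::acceptor> acceptor,
                     SessionFactory factory) {
       acceptor->async_accept(
          [acceptor, factory = std::move(factory)](boost::system::error_code ec,
                                                   typename Protocol::socket socket) mutable {
             if (!ec)
                factory(std::move(socket));
             else
                std::cerr << "accept: " << ec.message() << "\n";
             accept_loop<Protocol>(acceptor, std::move(factory)); // keep accepting
          });
    }

    int main() {
       asio::io_context ioc;
       using tcp = asio::ip::tcp;
       auto acceptor = std::make_shared<tcp::acceptor>(
          ioc, tcp::endpoint{tcp::v4(), 0}); // port 0: pick any free port
       accept_loop<tcp>(acceptor, [](tcp::socket&&) {
          // the real code builds a session<Plugin, SocketType> here and starts it
       });
       ioc.poll(); // sketch only; a server would call ioc.run()
    }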
std::enable_shared_from_thischain(); + std::lock_guard g(mtx); + head_id = chain.head_block_id(); + lib_id = chain.last_irreversible_block_id(); + head_timestamp = chain.head_block_time(); + } + // called from main thread void on_accepted_block(const block_state_ptr& block_state) { - { - const auto& chain = chain_plug->chain(); - std::lock_guard g(mtx); - head_id = chain.head_block_id(); - lib_id = chain.last_irreversible_block_id(); - head_timestamp = chain.head_block_time(); - } + update_current(); try { store_traces(block_state); @@ -272,9 +218,15 @@ struct state_history_plugin_impl : std::enable_shared_from_thisshared_from_this(), block_state]() { - self->session_mgr.send_update(block_state); - }); + // avoid accumulating all these posts during replay before ship threads started + // that can lead to a large memory consumption and failures + // this is safe as there are no clients connected until after replay is complete + // this method is called from the main thread and "plugin_started" is set on the main thread as well when plugin is started + if (plugin_started) { + boost::asio::post(get_ship_executor(), [self = this->shared_from_this(), block_state]() { + self->get_session_manager().send_update(block_state); + }); + } } @@ -318,21 +270,9 @@ struct state_history_plugin_impl : std::enable_shared_from_this& a ) { - boost::system::error_code ec; - if( const auto ep = a->acceptor_.local_endpoint( ec ); !ec ) - ::unlink( ep.path().c_str() ); - }, - []( const std::unique_ptr& a) {} - }, acc); - }); } -}; // state_history_plugin_impl - - +}; // state_history_plugin_impl state_history_plugin::state_history_plugin() : my(std::make_shared()) {} @@ -372,24 +312,22 @@ void state_history_plugin::set_program_options(options_description& cli, options options("state-history-log-retain-blocks", bpo::value(), "if set, periodically prune the state history files to store only configured number of most recent blocks"); } -void state_history_plugin::plugin_initialize(const variables_map& options) { +void state_history_plugin_impl::plugin_initialize(const variables_map& options) { try { - handle_sighup(); // setup logging - EOS_ASSERT(options.at("disable-replay-opts").as(), plugin_exception, "state_history_plugin requires --disable-replay-opts"); - my->chain_plug = app().find_plugin(); - EOS_ASSERT(my->chain_plug, chain::missing_chain_plugin_exception, ""); - auto& chain = my->chain_plug->chain(); - my->applied_transaction_connection.emplace(chain.applied_transaction.connect( + chain_plug = app().find_plugin(); + EOS_ASSERT(chain_plug, chain::missing_chain_plugin_exception, ""); + auto& chain = chain_plug->chain(); + applied_transaction_connection.emplace(chain.applied_transaction.connect( [&](std::tuple t) { - my->on_applied_transaction(std::get<0>(t), std::get<1>(t)); + on_applied_transaction(std::get<0>(t), std::get<1>(t)); })); - my->accepted_block_connection.emplace( - chain.accepted_block.connect([&](const block_state_ptr& p) { my->on_accepted_block(p); })); - my->block_start_connection.emplace( - chain.block_start.connect([&](uint32_t block_num) { my->on_block_start(block_num); })); + accepted_block_connection.emplace( + chain.accepted_block.connect([&](const block_state_ptr& p) { on_accepted_block(p); })); + block_start_connection.emplace( + chain.block_start.connect([&](uint32_t block_num) { on_block_start(block_num); })); auto dir_option = options.at("state-history-dir").as(); std::filesystem::path state_history_dir; @@ -400,23 +338,13 @@ void state_history_plugin::plugin_initialize(const 
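The plugin_started flag introduced above is a main-thread-only gate: during replay no SHiP thread is running yet, so unconditionally posting an update for every accepted block would queue unbounded work. A tiny sketch of the gating pattern; ship_like, worker, and started are illustrative names, not the plugin's members.

    #include <boost/asio.hpp>
    #include <cstdint>
    #include <iostream>

    struct ship_like {
       boost::asio::io_context worker;          // stands in for the SHiP thread pool
       bool                    started = false; // only read/written on the main thread

       void on_accepted_block(uint32_t block_num) {
          // During replay `started` is false: drop the post entirely instead of
          // queueing millions of updates no client can consume yet.
          if (!started)
             return;
          boost::asio::post(worker, [block_num]() {
             std::cout << "send_update for block " << block_num << "\n";
          });
       }
    };

    int main() {
       ship_like s;
       s.on_accepted_block(1);  // replay: dropped
       s.started = true;        // plugin_startup() has completed
       s.on_accepted_block(2);  // queued for the worker thread
       s.worker.run();
    }

This is safe without atomics because, as the diff's comment notes, both the signal handler and the assignment of plugin_started run on the main thread, and no clients can connect until replay finishes.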
variables_map& options) { if (auto resmon_plugin = app().find_plugin()) resmon_plugin->monitor_directory(state_history_dir); - auto ip_port = options.at("state-history-endpoint").as(); - - if (!ip_port.empty()) { - auto port = ip_port.substr(ip_port.find(':') + 1, ip_port.size()); - auto host = ip_port.substr(0, ip_port.find(':')); - my->endpoint_address = host; - my->endpoint_port = std::stoi(port); - - fc_dlog(_log, "PLUGIN_INITIALIZE ${ip_port} ${host} ${port}", - ("ip_port", ip_port)("host", host)("port", port)); - } + endpoint_address = options.at("state-history-endpoint").as(); if (options.count("state-history-unix-socket-path")) { std::filesystem::path sock_path = options.at("state-history-unix-socket-path").as(); if (sock_path.is_relative()) sock_path = app().data_dir() / sock_path; - my->unix_path = sock_path.generic_string(); + unix_path = sock_path.generic_string(); } if (options.at("delete-state-history").as()) { @@ -426,7 +354,7 @@ void state_history_plugin::plugin_initialize(const variables_map& options) { std::filesystem::create_directories(state_history_dir); if (options.at("trace-history-debug-mode").as()) { - my->trace_debug_mode = true; + trace_debug_mode = true; } bool has_state_history_partition_options = @@ -435,7 +363,7 @@ void state_history_plugin::plugin_initialize(const variables_map& options) { state_history_log_config ship_log_conf; if (options.count("state-history-log-retain-blocks")) { - auto ship_log_prune_conf = ship_log_conf.emplace(); + auto& ship_log_prune_conf = ship_log_conf.emplace(); ship_log_prune_conf.prune_blocks = options.at("state-history-log-retain-blocks").as(); //the arbitrary limit of 1000 here is mainly so that there is enough buffer for newly applied forks to be delivered to clients // before getting pruned out. 
ideally pruning would have been smart enough to know not to prune reversible blocks @@ -455,41 +383,79 @@ void state_history_plugin::plugin_initialize(const variables_map& options) { } if (options.at("trace-history").as()) - my->trace_log.emplace("trace_history", state_history_dir , ship_log_conf); + trace_log.emplace("trace_history", state_history_dir , ship_log_conf); if (options.at("chain-state-history").as()) - my->chain_state_log.emplace("chain_state_history", state_history_dir, ship_log_conf); + chain_state_log.emplace("chain_state_history", state_history_dir, ship_log_conf); } FC_LOG_AND_RETHROW() } // state_history_plugin::plugin_initialize -void state_history_plugin::plugin_startup() { +void state_history_plugin::plugin_initialize(const variables_map& options) { + handle_sighup(); // setup logging + my->plugin_initialize(options); +} + +void state_history_plugin_impl::plugin_startup() { try { - auto bsp = my->chain_plug->chain().head_block_state(); - if( bsp && my->chain_state_log && my->chain_state_log->empty() ) { + const auto& chain = chain_plug->chain(); + update_current(); + auto bsp = chain.head_block_state(); + if( bsp && chain_state_log && chain_state_log->empty() ) { fc_ilog( _log, "Storing initial state on startup, this can take a considerable amount of time" ); - my->store_chain_state( bsp ); + store_chain_state( bsp ); fc_ilog( _log, "Done storing initial state on startup" ); } - my->listen(); + first_available_block = chain.earliest_available_block_num(); + if (trace_log) { + auto first_trace_block = trace_log->block_range().first; + if( first_trace_block > 0 ) + first_available_block = std::min( first_available_block, first_trace_block ); + } + if (chain_state_log) { + auto first_state_block = chain_state_log->block_range().first; + if( first_state_block > 0 ) + first_available_block = std::min( first_available_block, first_state_block ); + } + fc_ilog(_log, "First available block for SHiP ${b}", ("b", first_available_block)); + listen(); // use of executor assumes only one thread - my->thread_pool.start( 1, [](const fc::exception& e) { + thread_pool.start( 1, [](const fc::exception& e) { fc_elog( _log, "Exception in SHiP thread pool, exiting: ${e}", ("e", e.to_detail_string()) ); app().quit(); - } ); + }); + plugin_started = true; } catch (std::exception& ex) { appbase::app().quit(); } } +void state_history_plugin::plugin_startup() { + my->plugin_startup(); +} + +void state_history_plugin_impl::plugin_shutdown() { + applied_transaction_connection.reset(); + accepted_block_connection.reset(); + block_start_connection.reset(); + thread_pool.stop(); +} + void state_history_plugin::plugin_shutdown() { - my->applied_transaction_connection.reset(); - my->accepted_block_connection.reset(); - my->block_start_connection.reset(); - my->thread_pool.stop(); + my->plugin_shutdown(); } void state_history_plugin::handle_sighup() { fc::logger::update(logger_name, _log); } +const state_history_log* state_history_plugin::trace_log() const { + const auto& log = my->get_trace_log(); + return log ? std::addressof(*log) : nullptr; +} + +const state_history_log* state_history_plugin::chain_state_log() const { + const auto& log = my->get_chain_state_log(); + return log ? 
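plugin_startup above derives the first block SHiP can serve as the minimum of what the chain itself still retains and what the trace and chain-state logs retain, skipping empty logs whose range starts at 0. The arithmetic isolated as a sketch; the optional ranges stand in for the log objects and their block_range() results.

    #include <algorithm>
    #include <cstdint>
    #include <optional>
    #include <utility>

    using block_range = std::pair<uint32_t, uint32_t>; // first/last block numbers

    uint32_t first_available_block_num(uint32_t chain_earliest,
                                       std::optional<block_range> trace_log_range,
                                       std::optional<block_range> state_log_range) {
       uint32_t first = chain_earliest;
       // A SHiP log can reach further back than the chain's own block log;
       // a first block of 0 means the log is empty and carries no information.
       for (const auto& r : {trace_log_range, state_log_range})
          if (r && r->first > 0)
             first = std::min(first, r->first);
       return first;
    }

    // e.g. first_available_block_num(100, block_range{10, 200}, std::nullopt) == 10

This value is what update_current_request clamps against, so a client asking for blocks older than any retained log is started at the oldest block the node can actually deliver.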
std::addressof(*log) : nullptr; +} + } // namespace eosio diff --git a/plugins/state_history_plugin/tests/CMakeLists.txt b/plugins/state_history_plugin/tests/CMakeLists.txt index 6c5227cff1..c01c62df61 100644 --- a/plugins/state_history_plugin/tests/CMakeLists.txt +++ b/plugins/state_history_plugin/tests/CMakeLists.txt @@ -1,5 +1,5 @@ -add_executable( test_state_history_session session_test.cpp ) -target_link_libraries(test_state_history_session state_history Boost::unit_test_framework) -target_include_directories( test_state_history_session PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include" ) +add_executable( test_state_history main.cpp session_test.cpp plugin_config_test.cpp) +target_link_libraries(test_state_history state_history_plugin eosio_testing eosio_chain_wrap) +target_include_directories( test_state_history PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../include" ) -add_test(test_state_history_session test_state_history_session) \ No newline at end of file +add_test(test_state_history test_state_history) \ No newline at end of file diff --git a/plugins/state_history_plugin/tests/main.cpp b/plugins/state_history_plugin/tests/main.cpp new file mode 100644 index 0000000000..e618f36999 --- /dev/null +++ b/plugins/state_history_plugin/tests/main.cpp @@ -0,0 +1,2 @@ +#define BOOST_TEST_MODULE state_history_plugin +#include \ No newline at end of file diff --git a/plugins/state_history_plugin/tests/plugin_config_test.cpp b/plugins/state_history_plugin/tests/plugin_config_test.cpp new file mode 100644 index 0000000000..48bf085757 --- /dev/null +++ b/plugins/state_history_plugin/tests/plugin_config_test.cpp @@ -0,0 +1,40 @@ +#include +#include +#include +#include +#include +#include + +BOOST_AUTO_TEST_CASE(state_history_plugin_default_tests) { + fc::temp_directory tmp; + appbase::scoped_app app; + + auto tmp_path = tmp.path().string(); + std::array args = {"test_state_history", "--trace-history", "--state-history-stride", "10", + "--disable-replay-opts", "--data-dir", tmp_path.c_str()}; + + BOOST_CHECK(app->initialize(args.size(), const_cast(args.data()))); + auto& plugin = app->get_plugin(); + + BOOST_REQUIRE(plugin.trace_log()); + auto* config = std::get_if(&plugin.trace_log()->config()); + BOOST_REQUIRE(config); + BOOST_CHECK_EQUAL(config->max_retained_files, UINT32_MAX); +} + +BOOST_AUTO_TEST_CASE(state_history_plugin_retain_blocks_tests) { + fc::temp_directory tmp; + appbase::scoped_app app; + + auto tmp_path = tmp.path().string(); + std::array args = {"test_state_history", "--trace-history", "--state-history-log-retain-blocks", "4242", + "--disable-replay-opts", "--data-dir", tmp_path.c_str()}; + + BOOST_CHECK(app->initialize(args.size(), const_cast(args.data()))); + auto& plugin = app->get_plugin(); + + BOOST_REQUIRE(plugin.trace_log()); + auto* config = std::get_if(&plugin.trace_log()->config()); + BOOST_REQUIRE(config); + BOOST_CHECK_EQUAL(config->prune_blocks, 4242); +} \ No newline at end of file diff --git a/plugins/state_history_plugin/tests/session_test.cpp b/plugins/state_history_plugin/tests/session_test.cpp index abe08a7732..708809468f 100644 --- a/plugins/state_history_plugin/tests/session_test.cpp +++ b/plugins/state_history_plugin/tests/session_test.cpp @@ -1,5 +1,3 @@ - -#define BOOST_TEST_MODULE example #include #include @@ -23,6 +21,7 @@ #include #include #include +#include namespace beast = boost::beast; // from namespace http = beast::http; // from @@ -118,7 +117,9 @@ struct mock_state_history_plugin { log.emplace("ship", log_dir.path(), conf); } - fc::logger logger() { 
return fc::logger::get(DEFAULT_LOGGER); } + fc::logger logger = fc::logger::get(DEFAULT_LOGGER); + + fc::logger& get_logger() { return logger; } void get_block(uint32_t block_num, const eosio::chain::block_state_ptr& block_state, std::optional& result) const { @@ -134,88 +135,15 @@ struct mock_state_history_plugin { eosio::state_history::block_position get_block_head() { return block_head; } eosio::state_history::block_position get_last_irreversible() { return block_head; } + uint32_t get_first_available_block_num() const { return 0; } + void add_session(std::shared_ptr s) { session_mgr.insert(std::move(s)); } }; -using session_type = eosio::session; - -// Accepts incoming connections and launches the sessions -class listener : public std::enable_shared_from_this { - mock_state_history_plugin* server_; - tcp::acceptor acceptor_; - - public: - listener(mock_state_history_plugin* server, tcp::endpoint& endpoint) - : server_(server) - , acceptor_(server->ship_ioc) { - beast::error_code ec; - - // Open the acceptor - acceptor_.open(endpoint.protocol(), ec); - if (ec) { - fail(ec, "open"); - return; - } - - // Allow address reuse - acceptor_.set_option(net::socket_base::reuse_address(true), ec); - if (ec) { - fail(ec, "set_option"); - return; - } - - // Bind to the server address - acceptor_.bind(endpoint, ec); - if (ec) { - fail(ec, "bind"); - return; - } - - endpoint = acceptor_.local_endpoint(ec); - if (ec) { - fail(ec, "local_endpoint"); - return; - } - - // Start listening for connections - acceptor_.listen(net::socket_base::max_listen_connections, ec); - if (ec) { - fail(ec, "listen"); - return; - } - } - - // Start accepting incoming connections - void run() { do_accept(); } - - private: - void do_accept() { - // The new connection gets its own strand - acceptor_.async_accept(boost::asio::make_strand(server_->ship_ioc), - [self = shared_from_this()](beast::error_code ec, boost::asio::ip::tcp::socket&& socket) { - if( self->server_->stopping ) return; - if (ec) { - fail(ec, "async_accept"); - } else { - self->on_accept( ec, std::move( socket ) ); - } - }); - } - - void on_accept(beast::error_code ec, tcp::socket&& socket) { - if (ec) { - fail(ec, "accept"); - } else { - // Create the session and run it - auto s = std::make_shared(server_, std::move(socket), server_->session_mgr); - s->start(); - server_->add_session(s); - } - } -}; +using session_type = eosio::session; struct test_server : mock_state_history_plugin { std::vector threads; @@ -229,14 +157,24 @@ struct test_server : mock_state_history_plugin { threads.emplace_back([this]{ main_ioc.run(); }); threads.emplace_back([this]{ ship_ioc.run(); }); + auto create_session = [this](tcp::socket&& peer_socket) { + auto s = std::make_shared(*this, std::move(peer_socket), session_mgr); + s->start(); + add_session(s); + }; + // Create and launch a listening port - std::make_shared(this, local_address)->run(); + auto server = std::make_shared>( + ship_ioc, logger, boost::posix_time::milliseconds(100), "", local_address, "", create_session); + server->do_accept(); + local_address = server->acceptor().local_endpoint(); } ~test_server() { stopping = true; ship_ioc_work.reset(); main_ioc_work.reset(); + ship_ioc.stop(); for (auto& thr : threads) { thr.join(); @@ -664,4 +602,3 @@ BOOST_FIXTURE_TEST_CASE(test_session_fork, state_history_test_fixture) { } FC_LOG_AND_RETHROW() } - diff --git a/plugins/test_control_api_plugin/test_control_api_plugin.cpp b/plugins/test_control_api_plugin/test_control_api_plugin.cpp index 4f0e21c17e..ec5d7f3768 100644 
--- a/plugins/test_control_api_plugin/test_control_api_plugin.cpp +++ b/plugins/test_control_api_plugin/test_control_api_plugin.cpp @@ -11,33 +11,27 @@ using namespace eosio; class test_control_api_plugin_impl { public: - test_control_api_plugin_impl(controller& db) + explicit test_control_api_plugin_impl(controller& db) : db(db) {} controller& db; }; -test_control_api_plugin::test_control_api_plugin(){} -test_control_api_plugin::~test_control_api_plugin(){} +test_control_api_plugin::test_control_api_plugin() = default; +test_control_api_plugin::~test_control_api_plugin() = default; void test_control_api_plugin::set_program_options(options_description&, options_description&) {} void test_control_api_plugin::plugin_initialize(const variables_map&) {} -struct async_result_visitor : public fc::visitor { - template - std::string operator()(const T& v) const { - return fc::json::to_string(v); - } -}; - #define CALL_WITH_API_400(api_name, api_handle, api_namespace, call_name, http_response_code, params_type) \ {std::string("/v1/" #api_name "/" #call_name), \ + api_category::test_control, \ [api_handle](string&&, string&& body, url_response_callback&& cb) mutable { \ try { \ auto params = parse_params(body);\ fc::variant result( api_handle.call_name( std::move(params) ) ); \ - cb(http_response_code, fc::time_point::maximum(), std::move(result)); \ + cb(http_response_code, std::move(result)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/trace_api_plugin/abi_data_handler.cpp b/plugins/trace_api_plugin/abi_data_handler.cpp index b0c8b46c49..574eb9919b 100644 --- a/plugins/trace_api_plugin/abi_data_handler.cpp +++ b/plugins/trace_api_plugin/abi_data_handler.cpp @@ -9,7 +9,7 @@ namespace eosio::trace_api { std::make_shared(std::move(abi), chain::abi_serializer::create_yield_function(fc::microseconds::maximum()))); } - std::tuple> abi_data_handler::serialize_to_variant(const std::variant & action, const yield_function& yield ) { + std::tuple> abi_data_handler::serialize_to_variant(const std::variant& action) { auto account = std::visit([](auto &&action) -> auto { return action.account; }, action); if (abi_serializer_by_account.count(account) > 0) { @@ -20,8 +20,8 @@ namespace eosio::trace_api { if (!type_name.empty()) { try { // abi_serializer expects a yield function that takes a recursion depth - auto abi_yield = [yield](size_t recursion_depth) { - yield(); + // abis are user provided, do not use a deadline + auto abi_yield = [](size_t recursion_depth) { EOS_ASSERT( recursion_depth < chain::abi_serializer::max_recursion_depth, chain::abi_recursion_depth_exception, "exceeded max_recursion_depth ${r} ", ("r", chain::abi_serializer::max_recursion_depth) ); }; diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp index 7ff5729786..4676a0b784 100644 --- a/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp +++ b/plugins/trace_api_plugin/include/eosio/trace_api/abi_data_handler.hpp @@ -34,13 +34,12 @@ namespace eosio { * Given an action trace, produce a tuple representing the `data` and `return_value` fields in the trace * * @param action - trace of the action including metadata necessary for finding the ABI - * @param yield - a yield function to allow cooperation during long running tasks * @return tuple where the first element is a variant representing the `data` field of the action interpreted by known ABIs OR an empty 
variant, and the second element represents the `return_value` field of the trace. */ - std::tuple> serialize_to_variant(const std::variant & action, const yield_function& yield ); + std::tuple> serialize_to_variant(const std::variant & action); /** - * Utility class that allows mulitple request_handlers to share the same abi_data_handler + * Utility class that allows multiple request_handlers to share the same abi_data_handler */ class shared_provider { public: @@ -48,8 +47,8 @@ namespace eosio { :handler(handler) {} - std::tuple> serialize_to_variant( const std::variant & action, const yield_function& yield ) { - return handler->serialize_to_variant(action, yield); + std::tuple> serialize_to_variant( const std::variant & action ) { + return handler->serialize_to_variant(action); } std::shared_ptr handler; diff --git a/plugins/trace_api_plugin/include/eosio/trace_api/request_handler.hpp b/plugins/trace_api_plugin/include/eosio/trace_api/request_handler.hpp index cbc24e8c44..e31aa433e2 100644 --- a/plugins/trace_api_plugin/include/eosio/trace_api/request_handler.hpp +++ b/plugins/trace_api_plugin/include/eosio/trace_api/request_handler.hpp @@ -6,12 +6,12 @@ #include namespace eosio::trace_api { - using data_handler_function = std::function>( const std::variant & action_trace_t, const yield_function&)>; + using data_handler_function = std::function>( const std::variant & action_trace_t)>; namespace detail { class response_formatter { public: - static fc::variant process_block( const data_log_entry& trace, bool irreversible, const data_handler_function& data_handler, const yield_function& yield ); + static fc::variant process_block( const data_log_entry& trace, bool irreversible, const data_handler_function& data_handler ); }; } @@ -31,28 +31,24 @@ namespace eosio::trace_api { * (eg JSON) * * @param block_height - the height of the block whose trace is requested - * @param yield - a yield function to allow cooperation during long running tasks * @return a properly formatted variant representing the trace for the given block height if it exists, an * empty variant otherwise. - * @throws yield_exception if a call to `yield` throws. * @throws bad_data_exception when there are issues with the underlying data preventing processing. 
*/ - fc::variant get_block_trace( uint32_t block_height, const yield_function& yield = {}) { - auto data = logfile_provider.get_block(block_height, yield); + fc::variant get_block_trace( uint32_t block_height ) { + auto data = logfile_provider.get_block(block_height); if (!data) { _log("No block found at block height " + std::to_string(block_height) ); return {}; } - yield(); - - auto data_handler = [this](const auto& action, const yield_function& yield) -> std::tuple> { + auto data_handler = [this](const auto& action) -> std::tuple> { return std::visit([&](const auto& action_trace_t) { - return data_handler_provider.serialize_to_variant(action_trace_t, yield); + return data_handler_provider.serialize_to_variant(action_trace_t); }, action); }; - return detail::response_formatter::process_block(std::get<0>(*data), std::get<1>(*data), data_handler, yield); + return detail::response_formatter::process_block(std::get<0>(*data), std::get<1>(*data), data_handler); } /** @@ -61,17 +57,15 @@ namespace eosio::trace_api { * * @param trxid - the transaction id whose trace is requested * @param block_height - the height of the block whose trace contains requested transaction trace - * @param yield - a yield function to allow cooperation during long running tasks * @return a properly formatted variant representing the trace for the given transaction id if it exists, an * empty variant otherwise. - * @throws yield_exception if a call to `yield` throws. * @throws bad_data_exception when there are issues with the underlying data preventing processing. */ - fc::variant get_transaction_trace(chain::transaction_id_type trxid, uint32_t block_height, const yield_function& yield = {}){ + fc::variant get_transaction_trace(chain::transaction_id_type trxid, uint32_t block_height){ _log("get_transaction_trace called" ); fc::variant result = {}; // extract the transaction trace from the block trace - auto resp = get_block_trace(block_height, yield); + auto resp = get_block_trace(block_height); if (!resp.is_null()) { auto& b_mvo = resp.get_object(); if (b_mvo.contains("transactions")) { diff --git a/plugins/trace_api_plugin/request_handler.cpp b/plugins/trace_api_plugin/request_handler.cpp index c9f28dc43f..582c134413 100644 --- a/plugins/trace_api_plugin/request_handler.cpp +++ b/plugins/trace_api_plugin/request_handler.cpp @@ -11,12 +11,10 @@ namespace { return t.to_iso_string() + "Z"; } - fc::variants process_authorizations(const std::vector& authorizations, const yield_function& yield ) { + fc::variants process_authorizations(const std::vector& authorizations) { fc::variants result; result.reserve(authorizations.size()); for ( const auto& a: authorizations) { - yield(); - result.emplace_back(fc::mutable_variant_object() ("account", a.account.to_string()) ("permission", a.permission.to_string()) @@ -28,7 +26,7 @@ namespace { } template - fc::variants process_actions(const std::vector& actions, const data_handler_function & data_handler, const yield_function& yield ) { + fc::variants process_actions(const std::vector& actions, const data_handler_function & data_handler) { fc::variants result; result.reserve(actions.size()); // create a vector of indices to sort based on actions to avoid copies @@ -38,8 +36,6 @@ namespace { return actions.at(lhs).global_sequence < actions.at(rhs).global_sequence; }); for ( int index : indices) { - yield(); - const auto& a = actions.at(index); auto common_mvo = fc::mutable_variant_object(); @@ -47,13 +43,13 @@ namespace { ("receiver", a.receiver.to_string()) ("account", 
a.account.to_string()) ("action", a.action.to_string()) - ("authorization", process_authorizations(a.authorization, yield)) + ("authorization", process_authorizations(a.authorization)) ("data", fc::to_hex(a.data.data(), a.data.size())); auto action_variant = fc::mutable_variant_object(); if constexpr(std::is_same_v){ action_variant(std::move(common_mvo)); - auto [params, return_data] = data_handler(a, yield); + auto [params, return_data] = data_handler(a); if (!params.is_null()) { action_variant("params", params); } @@ -61,7 +57,7 @@ namespace { else if constexpr(std::is_same_v){ action_variant(std::move(common_mvo)); action_variant("return_value", fc::to_hex(a.return_value.data(),a.return_value.size())) ; - auto [params, return_data] = data_handler(a, yield); + auto [params, return_data] = data_handler(a); if (!params.is_null()) { action_variant("params", params); } @@ -75,17 +71,15 @@ namespace { } template - fc::variants process_transactions(const std::vector& transactions, const data_handler_function& data_handler, const yield_function& yield ) { + fc::variants process_transactions(const std::vector& transactions, const data_handler_function& data_handler) { fc::variants result; result.reserve(transactions.size()); for ( const auto& t: transactions) { - yield(); - if constexpr(std::is_same_v){ result.emplace_back( fc::mutable_variant_object() ("id", t.id.str()) - ("actions", process_actions(t.actions, data_handler, yield))); + ("actions", process_actions(t.actions, data_handler))); } else { auto common_mvo = fc::mutable_variant_object(); common_mvo("status", t.status) @@ -97,14 +91,14 @@ namespace { result.emplace_back( fc::mutable_variant_object() ("id", t.id.str()) - ("actions", process_actions(t.actions, data_handler, yield)) + ("actions", process_actions(t.actions, data_handler)) (std::move(common_mvo))); } else if constexpr(std::is_same_v){ result.emplace_back( fc::mutable_variant_object() ("id", t.id.str()) - ("actions", process_actions(std::get>(t.actions), data_handler, yield)) + ("actions", process_actions(std::get>(t.actions), data_handler)) (std::move(common_mvo))); } else if constexpr(std::is_same_v){ @@ -114,7 +108,7 @@ namespace { ("block_num", t.block_num) ("block_time", t.block_time) ("producer_block_id", t.producer_block_id) - ("actions", process_actions(std::get>(t.actions), data_handler, yield)) + ("actions", process_actions(std::get>(t.actions), data_handler)) (std::move(common_mvo)) ); } @@ -126,7 +120,7 @@ namespace { } namespace eosio::trace_api::detail { - fc::variant response_formatter::process_block( const data_log_entry& trace, bool irreversible, const data_handler_function& data_handler, const yield_function& yield ) { + fc::variant response_formatter::process_block( const data_log_entry& trace, bool irreversible, const data_handler_function& data_handler ) { auto common_mvo = std::visit([&](auto&& arg) -> fc::mutable_variant_object { return fc::mutable_variant_object() ("id", arg.id.str()) @@ -139,7 +133,7 @@ namespace eosio::trace_api::detail { auto& block_trace = std::get(trace); return fc::mutable_variant_object() (std::move(common_mvo)) - ("transactions", process_transactions(block_trace.transactions, data_handler, yield )); + ("transactions", process_transactions(block_trace.transactions, data_handler )); }else if(std::holds_alternative(trace)){ auto& block_trace = std::get(trace); return fc::mutable_variant_object() @@ -147,11 +141,11 @@ namespace eosio::trace_api::detail { ("transaction_mroot", block_trace.transaction_mroot) ("action_mroot", 
block_trace.action_mroot) ("schedule_version", block_trace.schedule_version) - ("transactions", process_transactions( block_trace.transactions_v1, data_handler, yield )) ; + ("transactions", process_transactions( block_trace.transactions_v1, data_handler )) ; }else if(std::holds_alternative(trace)){ auto& block_trace = std::get(trace); auto transactions = std::visit([&](auto&& arg){ - return process_transactions(arg, data_handler, yield); + return process_transactions(arg, data_handler); }, block_trace.transactions); return fc::mutable_variant_object() (std::move(common_mvo)) diff --git a/plugins/trace_api_plugin/store_provider.cpp b/plugins/trace_api_plugin/store_provider.cpp index 3677761a71..ae42e843da 100644 --- a/plugins/trace_api_plugin/store_provider.cpp +++ b/plugins/trace_api_plugin/store_provider.cpp @@ -297,7 +297,7 @@ namespace eosio::trace_api { void slice_directory::start_maintenance_thread(log_handler log) { _maintenance_thread = std::thread([this, log=std::move(log)](){ - fc::set_os_thread_name( "trace-mx" ); + fc::set_thread_name( "trace-mx" ); uint32_t last_lib = 0; while(true) { diff --git a/plugins/trace_api_plugin/test/test_data_handlers.cpp b/plugins/trace_api_plugin/test/test_data_handlers.cpp index 94efbb74b7..464b3a81c1 100644 --- a/plugins/trace_api_plugin/test/test_data_handlers.cpp +++ b/plugins/trace_api_plugin/test/test_data_handlers.cpp @@ -18,7 +18,7 @@ BOOST_AUTO_TEST_SUITE(abi_data_handler_tests) abi_data_handler handler(exception_handler{}); auto expected = fc::variant(); - auto actual = handler.serialize_to_variant(action_trace_t, [](){}); + auto actual = handler.serialize_to_variant(action_trace_t); BOOST_TEST(to_kv(expected) == to_kv(std::get<0>(actual)), boost::test_tools::per_element()); } @@ -33,7 +33,7 @@ BOOST_AUTO_TEST_SUITE(abi_data_handler_tests) abi_data_handler handler(exception_handler{}); auto expected = fc::variant(); - auto actual = handler.serialize_to_variant(action_trace_t, [](){}); + auto actual = handler.serialize_to_variant(action_trace_t); BOOST_TEST(to_kv(expected) == to_kv(std::get<0>(actual)), boost::test_tools::per_element()); } @@ -47,7 +47,7 @@ BOOST_AUTO_TEST_SUITE(abi_data_handler_tests) abi_data_handler handler(exception_handler{}); auto expected = fc::variant(); - auto actual = handler.serialize_to_variant(action_trace_t, [](){}); + auto actual = handler.serialize_to_variant(action_trace_t); BOOST_TEST(to_kv(expected) == to_kv(std::get<0>(actual)), boost::test_tools::per_element()); } @@ -62,7 +62,7 @@ BOOST_AUTO_TEST_SUITE(abi_data_handler_tests) abi_data_handler handler(exception_handler{}); auto expected = fc::variant(); - auto actual = handler.serialize_to_variant(action_trace_t, [](){}); + auto actual = handler.serialize_to_variant(action_trace_t); BOOST_TEST(to_kv(expected) == to_kv(std::get<0>(actual)), boost::test_tools::per_element()); } @@ -95,7 +95,7 @@ BOOST_AUTO_TEST_SUITE(abi_data_handler_tests) ("c", 2) ("d", 3); - auto actual = handler.serialize_to_variant(action_trace_t, [](){}); + auto actual = handler.serialize_to_variant(action_trace_t); BOOST_TEST(to_kv(expected) == to_kv(std::get<0>(actual)), boost::test_tools::per_element()); } @@ -129,7 +129,7 @@ BOOST_AUTO_TEST_SUITE(abi_data_handler_tests) ("c", 2) ("d", 3); - auto actual = handler.serialize_to_variant(action_trace_t, [](){}); + auto actual = handler.serialize_to_variant(action_trace_t); BOOST_TEST(to_kv(expected) == to_kv(std::get<0>(actual)), boost::test_tools::per_element()); } @@ -158,7 +158,7 @@ BOOST_AUTO_TEST_SUITE(abi_data_handler_tests) 
auto expected = fc::variant(); - auto actual = handler.serialize_to_variant(action_trace_t, [](){}); + auto actual = handler.serialize_to_variant(action_trace_t); BOOST_TEST(to_kv(expected) == to_kv(std::get<0>(actual)), boost::test_tools::per_element()); } @@ -187,7 +187,7 @@ BOOST_AUTO_TEST_SUITE(abi_data_handler_tests) auto expected = fc::variant(); - auto actual = handler.serialize_to_variant(action_trace_t, [](){}); + auto actual = handler.serialize_to_variant(action_trace_t); BOOST_TEST(to_kv(expected) == to_kv(std::get<0>(actual)), boost::test_tools::per_element()); } @@ -217,7 +217,7 @@ BOOST_AUTO_TEST_SUITE(abi_data_handler_tests) auto expected = fc::variant(); - auto actual = handler.serialize_to_variant(action_trace_t, [](){}); + auto actual = handler.serialize_to_variant(action_trace_t); BOOST_TEST(to_kv(expected) == to_kv(std::get<0>(actual)), boost::test_tools::per_element()); BOOST_TEST(log_called); diff --git a/plugins/trace_api_plugin/test/test_responses.cpp b/plugins/trace_api_plugin/test/test_responses.cpp index 6d99bcee32..42ca936332 100644 --- a/plugins/trace_api_plugin/test/test_responses.cpp +++ b/plugins/trace_api_plugin/test/test_responses.cpp @@ -25,17 +25,17 @@ struct response_test_fixture { * optional containing a 2-tuple of the block_trace and a flag indicating irreversibility * @throws bad_data_exception : if the data is corrupt in some way */ - get_block_t get_block(uint32_t height, const yield_function& yield= {}) { - return fixture.mock_get_block(height, yield); + get_block_t get_block(uint32_t height) { + return fixture.mock_get_block(height); } response_test_fixture& fixture; }; - constexpr static auto default_mock_data_handler_v0 = [](const action_trace_v0& a, const yield_function&) ->std::tuple> { + constexpr static auto default_mock_data_handler_v0 = [](const action_trace_v0& a) ->std::tuple> { return {fc::mutable_variant_object()("hex" , fc::to_hex(a.data.data(), a.data.size())),{}}; }; - constexpr static auto default_mock_data_handler_v1 = [](const action_trace_v1& a, const yield_function&) -> std::tuple>{ + constexpr static auto default_mock_data_handler_v1 = [](const action_trace_v1& a) -> std::tuple>{ return {fc::mutable_variant_object()("hex" , fc::to_hex(a.data.data(), a.data.size())), {fc::mutable_variant_object()("hex" , fc::to_hex(a.return_value.data(), a.return_value.size()))}}; }; @@ -45,12 +45,12 @@ struct response_test_fixture { {} template - std::tuple> serialize_to_variant(const ActionTrace & action, const yield_function& yield) { + std::tuple> serialize_to_variant(const ActionTrace & action) { if constexpr(std::is_same_v){ - return fixture.mock_data_handler_v0(action, yield); + return fixture.mock_data_handler_v0(action); } else if constexpr(std::is_same_v){ - return fixture.mock_data_handler_v1(action, yield); + return fixture.mock_data_handler_v1(action); } } @@ -68,14 +68,14 @@ struct response_test_fixture { } - fc::variant get_block_trace( uint32_t block_height, const yield_function& yield = {} ) { - return response_impl.get_block_trace( block_height, yield ); + fc::variant get_block_trace( uint32_t block_height ) { + return response_impl.get_block_trace( block_height ); } // fixture data and methods - std::function mock_get_block; - std::function>(const action_trace_v0&, const yield_function&)> mock_data_handler_v0 = default_mock_data_handler_v0; - std::function>(const action_trace_v1&, const yield_function&)> mock_data_handler_v1 = default_mock_data_handler_v1; + std::function mock_get_block; + std::function>(const 
action_trace_v0&)> mock_data_handler_v0 = default_mock_data_handler_v0; + std::function>(const action_trace_v1&)> mock_data_handler_v1 = default_mock_data_handler_v1; response_impl_type response_impl; @@ -111,7 +111,7 @@ BOOST_AUTO_TEST_SUITE(trace_responses) ("transactions", fc::variants() ) ; - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry{block_trace}, false); }; @@ -201,7 +201,7 @@ BOOST_AUTO_TEST_SUITE(trace_responses) })) ; - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry(block_trace), false); }; @@ -287,13 +287,13 @@ BOOST_AUTO_TEST_SUITE(trace_responses) })) ; - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry(block_trace), false); }; // simulate an inability to parse the parameters - mock_data_handler_v0 = [](const action_trace_v0&, const yield_function&) -> std::tuple> { + mock_data_handler_v0 = [](const action_trace_v0&) -> std::tuple> { return {}; }; @@ -414,13 +414,13 @@ BOOST_AUTO_TEST_SUITE(trace_responses) })) ; - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry(block_trace), false); }; // simulate an inability to parse the parameters - mock_data_handler_v0 = [](const action_trace_v0&, const yield_function&) -> std::tuple> { + mock_data_handler_v0 = [](const action_trace_v0&) -> std::tuple> { return {}; }; @@ -458,7 +458,7 @@ BOOST_AUTO_TEST_SUITE(trace_responses) ("transactions", fc::variants() ) ; - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry(block_trace), true); }; @@ -470,7 +470,7 @@ BOOST_AUTO_TEST_SUITE(trace_responses) BOOST_FIXTURE_TEST_CASE(corrupt_block_data, response_test_fixture) { - mock_get_block = []( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = []( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); throw bad_data_exception("mock exception"); }; @@ -480,7 +480,7 @@ BOOST_AUTO_TEST_SUITE(trace_responses) BOOST_FIXTURE_TEST_CASE(missing_block_data, response_test_fixture) { - mock_get_block = []( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = []( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return {}; }; @@ -490,71 +490,6 @@ BOOST_AUTO_TEST_SUITE(trace_responses) BOOST_TEST(null_response.is_null()); } - BOOST_FIXTURE_TEST_CASE(yield_throws, response_test_fixture) - { - auto block_trace = block_trace_v1 { - { - "b000000000000000000000000000000000000000000000000000000000000001"_h, - 1, - "0000000000000000000000000000000000000000000000000000000000000000"_h, - chain::block_timestamp_type(0), - "bp.one"_n - }, - "0000000000000000000000000000000000000000000000000000000000000000"_h, - "0000000000000000000000000000000000000000000000000000000000000000"_h, - 0, - { - { - { - 
"0000000000000000000000000000000000000000000000000000000000000001"_h, - { - { - 0, - "receiver"_n, "contract"_n, "action"_n, - {{ "alice"_n, "active"_n }}, - { 0x00, 0x01, 0x02, 0x03 } - } - } - }, - fc::enum_type{chain::transaction_receipt_header::status_enum::executed}, - 10, - 5, - std::vector{chain::signature_type()}, - {chain::time_point_sec(), 1, 0, 100, 50, 0} - } - } - }; - - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { - BOOST_TEST(height == 1); - return std::make_tuple(data_log_entry(block_trace), false); - }; - - int countdown = 3; - yield_function yield = [&]() { - if (countdown-- == 0) { - throw yield_exception("mock"); - } - }; - - BOOST_REQUIRE_THROW(get_block_trace( 1, yield ), yield_exception); - } - - BOOST_FIXTURE_TEST_CASE(yield_throws_from_get_block, response_test_fixture) - { - // no other yield calls will throw - yield_function yield = [&]() { - }; - - // simulate a yield throw inside get block - mock_get_block = []( uint32_t height, const yield_function& yield) -> get_block_t { - throw yield_exception("mock exception"); - }; - - - BOOST_REQUIRE_THROW(get_block_trace( 1, yield ), yield_exception); - } - BOOST_FIXTURE_TEST_CASE(old_version_block_response, response_test_fixture) { auto block_trace = block_trace_v0 { @@ -607,7 +542,7 @@ BOOST_AUTO_TEST_SUITE(trace_responses) })) ; - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry(block_trace), false); }; @@ -644,7 +579,7 @@ BOOST_AUTO_TEST_SUITE(trace_responses) ("transactions", fc::variants() ) ; - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry{block_trace}, false); }; @@ -738,7 +673,7 @@ BOOST_AUTO_TEST_SUITE(trace_responses) })) ; - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry(block_trace), false); }; @@ -826,13 +761,13 @@ BOOST_AUTO_TEST_SUITE(trace_responses) }) ); - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry(block_trace), false); }; // simulate an inability to parse the parameters and return_data - mock_data_handler_v1 = [](const action_trace_v1&, const yield_function&) -> std::tuple> { + mock_data_handler_v1 = [](const action_trace_v1&) -> std::tuple> { return {}; }; @@ -965,13 +900,13 @@ BOOST_AUTO_TEST_SUITE(trace_responses) })) ; - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry(block_trace), false); }; // simulate an inability to parse the parameters and return_data - mock_data_handler_v1 = [](const action_trace_v1&, const yield_function&) -> std::tuple> { + mock_data_handler_v1 = [](const action_trace_v1&) -> std::tuple> { return {}; }; @@ -1007,7 +942,7 @@ BOOST_AUTO_TEST_SUITE(trace_responses) ("transactions", fc::variants() ) ; - mock_get_block = [&block_trace]( uint32_t height, const 
yield_function& ) -> get_block_t { + mock_get_block = [&block_trace]( uint32_t height ) -> get_block_t { BOOST_TEST(height == 1); return std::make_tuple(data_log_entry(block_trace), true); }; @@ -1017,57 +952,4 @@ BOOST_AUTO_TEST_SUITE(trace_responses) } - BOOST_FIXTURE_TEST_CASE(yield_throws_v2, response_test_fixture) - { - auto action_trace = action_trace_v1 { - { - 0, - "receiver"_n, "contract"_n, "action"_n, - {{ "alice"_n, "active"_n }}, - { 0x00, 0x01, 0x02, 0x03 } - }, - { 0x04, 0x05, 0x06, 0x07 } - }; - - auto transaction_trace = transaction_trace_v2 { - "0000000000000000000000000000000000000000000000000000000000000001"_h, - std::vector { - action_trace - }, - fc::enum_type{chain::transaction_receipt_header::status_enum::executed}, - 10, - 5, - std::vector{chain::signature_type()}, - {chain::time_point_sec(), 1, 0, 100, 50, 0} - }; - - auto block_trace = block_trace_v2 { - "b000000000000000000000000000000000000000000000000000000000000001"_h, - 1, - "0000000000000000000000000000000000000000000000000000000000000000"_h, - chain::block_timestamp_type(0), - "bp.one"_n, - "0000000000000000000000000000000000000000000000000000000000000000"_h, - "0000000000000000000000000000000000000000000000000000000000000000"_h, - 0, - std::vector{ - transaction_trace - } - }; - - mock_get_block = [&block_trace]( uint32_t height, const yield_function& ) -> get_block_t { - BOOST_TEST(height == 1); - return std::make_tuple(data_log_entry(block_trace), false); - }; - - int countdown = 3; - yield_function yield = [&]() { - if (countdown-- == 0) { - throw yield_exception("mock"); - } - }; - - BOOST_REQUIRE_THROW(get_block_trace( 1, yield ), yield_exception); - } - BOOST_AUTO_TEST_SUITE_END() diff --git a/plugins/trace_api_plugin/trace_api_plugin.cpp b/plugins/trace_api_plugin/trace_api_plugin.cpp index 3f295a7b07..6eee7a93f4 100644 --- a/plugins/trace_api_plugin/trace_api_plugin.cpp +++ b/plugins/trace_api_plugin/trace_api_plugin.cpp @@ -70,7 +70,7 @@ namespace { template struct shared_store_provider { - shared_store_provider(const std::shared_ptr& store) + explicit shared_store_provider(const std::shared_ptr& store) :store(store) {} @@ -83,8 +83,8 @@ namespace { store->append_lib(new_lib); } - get_block_t get_block(uint32_t height, const yield_function& yield) { - return store->get_block(height, yield); + get_block_t get_block(uint32_t height) { + return store->get_block(height); } void append_trx_ids(block_trxs_entry tt){ @@ -178,7 +178,7 @@ struct trace_api_common_impl { */ struct trace_api_rpc_plugin_impl : public std::enable_shared_from_this { - trace_api_rpc_plugin_impl( const std::shared_ptr& common ) + explicit trace_api_rpc_plugin_impl( const std::shared_ptr& common ) :common(common) {} static void set_program_options(appbase::options_description& cli, appbase::options_description& cfg) { @@ -233,30 +233,18 @@ struct trace_api_rpc_plugin_impl : public std::enable_shared_from_this fc::microseconds::maximum() - deadline.time_since_epoch() ) { - deadline = fc::time_point::maximum(); - } else { - deadline += max_serialization_time; - } - return deadline; - } - void plugin_startup() { auto& http = app().get_plugin(); - fc::microseconds max_response_time = http.get_max_response_time(); - http.add_async_handler("/v1/trace_api/get_block", - [wthis=weak_from_this(), max_response_time](std::string, std::string body, url_response_callback cb) + http.add_async_handler({"/v1/trace_api/get_block", + api_category::trace_api, + [wthis=weak_from_this()](std::string, std::string body, url_response_callback cb) { auto 
that = wthis.lock(); if (!that) { return; } - const auto deadline = that->calc_deadline( max_response_time ); - auto block_number = ([&body]() -> std::optional { if (body.empty()) { return {}; @@ -276,35 +264,34 @@ struct trace_api_rpc_plugin_impl : public std::enable_shared_from_thisreq_handler->get_block_trace(*block_number, [deadline]() { FC_CHECK_DEADLINE(deadline); }); + auto resp = that->req_handler->get_block_trace(*block_number); if (resp.is_null()) { error_results results{404, "Trace API: block trace missing"}; - cb( 404, deadline, fc::variant( results )); + cb( 404, fc::variant( results )); } else { - cb( 200, deadline, std::move(resp) ); + cb( 200, std::move(resp) ); } } catch (...) { http_plugin::handle_exception("trace_api", "get_block", body, cb); } - }); + }}); - http.add_async_handler("/v1/trace_api/get_transaction_trace", - [wthis=weak_from_this(), max_response_time, this](std::string, std::string body, url_response_callback cb) + http.add_async_handler({"/v1/trace_api/get_transaction_trace", + api_category::trace_api, + [wthis=weak_from_this(), this](std::string, std::string body, url_response_callback cb) { auto that = wthis.lock(); if (!that) { return; } - const auto deadline = that->calc_deadline( max_response_time ); - auto trx_id = ([&body]() -> std::optional { if (body.empty()) { return {}; @@ -323,29 +310,29 @@ struct trace_api_rpc_plugin_impl : public std::enable_shared_from_thisstore->get_trx_block_number(*trx_id, common->minimum_irreversible_history_blocks, [deadline]() { FC_CHECK_DEADLINE(deadline); }); + get_block_n blk_num = common->store->get_trx_block_number(*trx_id, common->minimum_irreversible_history_blocks); if (!blk_num.has_value()){ error_results results{404, "Trace API: transaction id missing in the transaction id log files"}; - cb( 404, deadline, fc::variant( results )); + cb( 404, fc::variant( results )); } else { - auto resp = that->req_handler->get_transaction_trace(*trx_id, *blk_num, [deadline]() { FC_CHECK_DEADLINE(deadline); }); + auto resp = that->req_handler->get_transaction_trace(*trx_id, *blk_num); if (resp.is_null()) { error_results results{404, "Trace API: transaction trace missing"}; - cb( 404, deadline, fc::variant( results )); + cb( 404, fc::variant( results )); } else { - cb( 200, deadline, std::move(resp) ); + cb( 200, std::move(resp) ); } } } catch (...) 
{ http_plugin::handle_exception("trace_api", "get_transaction", body, cb); } - }); + }}); } void plugin_shutdown() { @@ -358,7 +345,7 @@ struct trace_api_rpc_plugin_impl : public std::enable_shared_from_this& common ) + explicit trace_api_plugin_impl( const std::shared_ptr& common ) :common(common) {} void plugin_initialize(const appbase::variables_map& options) { @@ -421,11 +408,9 @@ struct trace_api_plugin_impl { std::optional irreversible_block_connection; }; -trace_api_plugin::trace_api_plugin() -{} +trace_api_plugin::trace_api_plugin() = default; -trace_api_plugin::~trace_api_plugin() -{} +trace_api_plugin::~trace_api_plugin() = default; void trace_api_plugin::set_program_options(appbase::options_description& cli, appbase::options_description& cfg) { trace_api_common_impl::set_program_options(cli, cfg); @@ -460,11 +445,9 @@ void trace_api_plugin::handle_sighup() { fc::logger::update( logger_name, _log ); } -trace_api_rpc_plugin::trace_api_rpc_plugin() -{} +trace_api_rpc_plugin::trace_api_rpc_plugin() = default; -trace_api_rpc_plugin::~trace_api_rpc_plugin() -{} +trace_api_rpc_plugin::~trace_api_rpc_plugin() = default; void trace_api_rpc_plugin::set_program_options(appbase::options_description& cli, appbase::options_description& cfg) { trace_api_common_impl::set_program_options(cli, cfg); diff --git a/plugins/wallet_api_plugin/wallet_api_plugin.cpp b/plugins/wallet_api_plugin/wallet_api_plugin.cpp index af592592ea..e36d184099 100644 --- a/plugins/wallet_api_plugin/wallet_api_plugin.cpp +++ b/plugins/wallet_api_plugin/wallet_api_plugin.cpp @@ -22,10 +22,11 @@ using namespace eosio; #define CALL_WITH_400(api_name, api_handle, call_name, INVOKE, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ + api_category::node, \ [&api_handle](string&&, string&& body, url_response_callback&& cb) mutable { \ try { \ INVOKE \ - cb(http_response_code, fc::time_point::now() + fc::days(365), fc::variant(result)); \ + cb(http_response_code, fc::variant(result)); \ } catch (...) 
{ \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ @@ -121,8 +122,7 @@ void wallet_api_plugin::plugin_startup() { void wallet_api_plugin::plugin_initialize(const variables_map& options) { try { const auto& _http_plugin = app().get_plugin(); - if( !_http_plugin.is_on_loopback()) { - if( !_http_plugin.is_secure()) { + if( !_http_plugin.is_on_loopback(api_category::node)) { elog( "\n" "********!!!SECURITY ERROR!!!********\n" "* *\n" @@ -133,17 +133,6 @@ void wallet_api_plugin::plugin_initialize(const variables_map& options) { "* - are at HIGH risk of exposure - *\n" "* *\n" "************************************\n" ); - } else { - wlog( "\n" - "**********SECURITY WARNING**********\n" - "* *\n" - "* -- Wallet API -- *\n" - "* - EXPOSED to the LOCAL NETWORK - *\n" - "* - Password and/or Private Keys - *\n" - "* - are at risk of exposure - *\n" - "* *\n" - "************************************\n" ); - } } } FC_LOG_AND_RETHROW() } diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index bf184cf927..71b9d6c866 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -13,7 +13,7 @@ set(LOCALEDOMAIN ${CLI_CLIENT_EXECUTABLE_NAME}) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} - PRIVATE appbase version leap-cli11 chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE appbase version leap-cli11 chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} Boost::process Boost::dll ) if (CURL_FOUND) target_sources(${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE do_http_post_libcurl.cpp) diff --git a/programs/cleos/help_text.cpp.in b/programs/cleos/help_text.cpp.in index 8f8987d3f9..3802b34a37 100644 --- a/programs/cleos/help_text.cpp.in +++ b/programs/cleos/help_text.cpp.in @@ -290,7 +290,7 @@ bool print_recognized_errors(const fc::exception& e, const bool verbose_errors) if (!log.get_context().get_method().empty() && verbose_errors) { stack_trace += "\n" + log.get_context().get_file() + ":" + - fc::to_string(log.get_context().get_line_number()) + " " + + std::to_string(log.get_context().get_line_number()) + " " + log.get_context().get_method(); } } diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 35503113db..47c7044abc 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -347,7 +347,7 @@ eosio::chain_apis::read_only::get_info_results get_info() { } string generate_nonce_string() { - return fc::to_string(fc::time_point::now().time_since_epoch().count()); + return std::to_string(fc::time_point::now().time_since_epoch().count()); } chain::action generate_nonce_action() { @@ -514,7 +514,7 @@ fc::variant push_transaction( signed_transaction& trx, const std::vectoradd_flag("--json,-j", print_json, localized("Output in JSON format")); list_producers->add_option("-l,--limit", limit, localized("The maximum number of rows to return")); list_producers->add_option("-L,--lower", lower, localized("Lower bound value of key, defaults to first")); - list_producers->add_option("--time-limit", time_limit_ms, localized("Limit time of execution in milliseconds, defaults to 10ms")); + list_producers->add_option("--time-limit", time_limit_ms, localized("Limit time of execution in milliseconds")); list_producers->callback([this] { fc::mutable_variant_object mo; mo("json", 
true)("lower_bound", lower)("limit", limit); - if( time_limit_ms != 10 ) mo("time_limit_ms", time_limit_ms); + if( time_limit_ms != 0 ) mo("time_limit_ms", time_limit_ms); auto rawResult = call(get_producers_func, mo); if ( print_json ) { std::cout << fc::json::to_pretty_string(rawResult) << std::endl; @@ -1555,7 +1555,7 @@ struct get_transaction_id_subcommand { // if actions.data & actions.hex_data provided, use the hex_data since only currently support unexploded data if( vo.contains("actions") ) { if( vo["actions"].is_array() ) { - fc::mutable_variant_object mvo = vo; + fc::mutable_variant_object mvo{vo}; fc::variants& action_variants = mvo["actions"].get_array(); for( auto& action_v : action_variants ) { if( !action_v.is_object() ) { @@ -1564,7 +1564,7 @@ struct get_transaction_id_subcommand { } fc::variant_object& action_vo = action_v.get_object(); if( action_vo.contains( "data" ) && action_vo.contains( "hex_data" ) ) { - fc::mutable_variant_object maction_vo = action_vo; + fc::mutable_variant_object maction_vo{action_vo}; maction_vo["data"] = maction_vo["hex_data"]; action_vo = maction_vo; vo = mvo; @@ -2903,7 +2903,7 @@ int main( int argc, char** argv ) { signed_transaction strx = packed_trx.get_signed_transaction(); fc::variant trx_var; if( unpack_action_data_flag ) { - abi_serializer::to_variant( strx, trx_var, abi_serializer_resolver, abi_serializer::create_yield_function( abi_serializer_max_time ) ); + abi_serializer::to_variant( strx, trx_var, abi_serializer_resolver, abi_serializer_max_time ); } else { trx_var = strx; } @@ -3142,7 +3142,7 @@ int main( int argc, char** argv ) { string encode_type{"dec"}; bool binary = false; uint32_t limit = 10; - uint32_t time_limit_ms = 10; + uint32_t time_limit_ms = 0; string index_position; bool reverse = false; bool show_payer = false; @@ -3151,7 +3151,7 @@ int main( int argc, char** argv ) { getTable->add_option( "scope", scope, localized("The scope within the contract in which the table is found") )->required(); getTable->add_option( "table", table, localized("The name of the table as specified by the contract abi") )->required(); getTable->add_option( "-l,--limit", limit, localized("The maximum number of rows to return") ); - getTable->add_option( "--time-limit", time_limit_ms, localized("Limit time of execution in milliseconds, defaults to 10ms")); + getTable->add_option( "--time-limit", time_limit_ms, localized("Limit time of execution in milliseconds")); getTable->add_option( "-k,--key", table_key, localized("Deprecated") ); getTable->add_option( "-L,--lower", lower, localized("JSON representation of lower bound value of key, defaults to first") ); getTable->add_option( "-U,--upper", upper, localized("JSON representation of upper bound value of key, defaults to last") ); @@ -3184,7 +3184,7 @@ int main( int argc, char** argv ) { ( "encode_type", encode_type ) ( "reverse", reverse ) ( "show_payer", show_payer ); - if( time_limit_ms != 10 ) mo( "time_limit_ms", time_limit_ms ); + if( time_limit_ms != 0 ) mo( "time_limit_ms", time_limit_ms ); auto result = call( get_table_func, mo ); std::cout << fc::json::to_pretty_string(result) @@ -3195,7 +3195,7 @@ int main( int argc, char** argv ) { getScope->add_option( "contract", code, localized("The contract who owns the table") )->required(); getScope->add_option( "-t,--table", table, localized("The name of the table as filter") ); getScope->add_option( "-l,--limit", limit, localized("The maximum number of rows to return") ); - getScope->add_option( "--time-limit", time_limit_ms, 
localized("Limit time of execution in milliseconds, defaults to 10ms")); + getScope->add_option( "--time-limit", time_limit_ms, localized("Limit time of execution in milliseconds")); getScope->add_option( "-L,--lower", lower, localized("Lower bound of scope") ); getScope->add_option( "-U,--upper", upper, localized("Upper bound of scope") ); getScope->add_flag("-r,--reverse", reverse, localized("Iterate in reverse order")); @@ -3207,7 +3207,7 @@ int main( int argc, char** argv ) { ( "upper_bound", upper ) ( "limit", limit ) ( "reverse", reverse ); - if( time_limit_ms != 10 ) mo( "time_limit_ms", time_limit_ms ); + if( time_limit_ms != 0 ) mo( "time_limit_ms", time_limit_ms ); auto result = call( get_table_by_scope_func, mo ); std::cout << fc::json::to_pretty_string(result) diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp index d55d47b271..ceb5bc5903 100644 --- a/programs/keosd/main.cpp +++ b/programs/keosd/main.cpp @@ -95,7 +95,8 @@ int main(int argc, char** argv) http_plugin::set_defaults({ .default_unix_socket_path = keosd::config::key_store_executable_name + ".sock", .default_http_port = 0, - .server_header = keosd::config::key_store_executable_name + "/" + app->version_string() + .server_header = keosd::config::key_store_executable_name + "/" + app->version_string(), + .support_categories = false }); application::register_plugin(); if(!app->initialize(argc, argv, initialize_logging)) { @@ -107,11 +108,12 @@ int main(int argc, char** argv) return INITIALIZE_FAIL; } auto& http = app->get_plugin(); - http.add_handler("/v1/" + keosd::config::key_store_executable_name + "/stop", + http.add_handler({"/v1/" + keosd::config::key_store_executable_name + "/stop", + api_category::node, [&a=app](string, string, url_response_callback cb) { - cb(200, fc::time_point::maximum(), fc::variant(fc::variant_object())); + cb(200, fc::variant(fc::variant_object())); a->quit(); - }, appbase::exec_queue::read_write ); + }}, appbase::exec_queue::read_write ); app->startup(); app->exec(); } catch (const fc::exception& e) { diff --git a/programs/leap-util/actions/blocklog.cpp b/programs/leap-util/actions/blocklog.cpp index aa53ed288e..eee37e87df 100644 --- a/programs/leap-util/actions/blocklog.cpp +++ b/programs/leap-util/actions/blocklog.cpp @@ -85,19 +85,19 @@ void blocklog_actions::setup(CLI::App& app) { auto* extract_blocks = sub->add_subcommand("extract-blocks", "Extract range of blocks from blocks.log and write to output-dir. 
Must give 'first' and/or 'last'.")->callback([err_guard]() { err_guard(&blocklog_actions::extract_blocks); }); extract_blocks->add_option("--first,-f", opt->first_block, "The first block number to keep.")->required(); extract_blocks->add_option("--last,-l", opt->last_block, "The last block number to keep.")->required(); - extract_blocks->add_option("--output-dir", opt->output_dir, "The output directory for the block log extracted from blocks-dir."); + extract_blocks->add_option("--output-dir", opt->output_dir, "The output directory for the block log extracted from blocks-dir.")->required(); // subcommand - split blocks auto* split_blocks = sub->add_subcommand("split-blocks", "Split the blocks.log based on the stride and store the result in the specified 'output-dir'.")->callback([err_guard]() { err_guard(&blocklog_actions::split_blocks); }); split_blocks->add_option("--blocks-dir", opt->blocks_dir, "The location of the blocks directory (absolute path or relative to the current directory)."); - split_blocks->add_option("--output-dir", opt->output_dir, "The output directory for the split block log."); + split_blocks->add_option("--output-dir", opt->output_dir, "The output directory for the split block log.")->required(); split_blocks->add_option("--stride", opt->stride, "The number of blocks to split into each file.")->required(); // subcommand - merge blocks auto* merge_blocks = sub->add_subcommand("merge-blocks", "Merge block log files in 'blocks-dir' with the file pattern 'blocks-\\d+-\\d+.[log,index]' to 'output-dir' whenever possible." "The files in 'blocks-dir' will be kept without change.")->callback([err_guard]() { err_guard(&blocklog_actions::merge_blocks); }); merge_blocks->add_option("--blocks-dir", opt->blocks_dir, "The location of the blocks directory (absolute path or relative to the current directory)."); - merge_blocks->add_option("--output-dir", opt->output_dir, "The output directory for the merged block log."); + merge_blocks->add_option("--output-dir", opt->output_dir, "The output directory for the merged block log.")->required(); // subcommand - smoke test sub->add_subcommand("smoke-test", "Quick test that blocks.log and blocks.index are well formed and agree with each other.")->callback([err_guard]() { err_guard(&blocklog_actions::smoke_test); }); @@ -166,32 +166,33 @@ int blocklog_actions::extract_blocks() { } int blocklog_actions::do_genesis() { - std::optional gs; std::filesystem::path bld = opt->blocks_dir; - auto full_path = (bld / "blocks.log").generic_string(); - if(std::filesystem::exists(bld / "blocks.log")) { - gs = block_log::extract_genesis_state(opt->blocks_dir); - if(!gs) { - std::cerr << "Block log at '" << full_path - << "' does not contain a genesis state, it only has the chain-id." << std::endl; - return -1; - } - } else { - std::cerr << "No blocks.log found at '" << full_path << "'." << std::endl; + auto context = block_log::extract_chain_context(opt->blocks_dir,opt->blocks_dir); + + if (!context) { + std::cerr << "No blocks log found at '" << opt->blocks_dir.c_str() << "'." << std::endl; return -1; } + if(!std::holds_alternative(*context)) { + std::cerr << "Block log at '" << opt->blocks_dir.c_str() + << "' does not contain a genesis state, it only has the chain-id." 
<< std::endl; + return -1; + } + + const genesis_state& gs = std::get(*context); + // just print if output not set if(opt->output_file.empty()) { - std::cout << json::to_pretty_string(*gs) << std::endl; + std::cout << json::to_pretty_string(gs) << std::endl; } else { std::filesystem::path p = opt->output_file; if(p.is_relative()) { p = std::filesystem::current_path() / p; } - if(!fc::json::save_to_file(*gs, p, true)) { + if(!fc::json::save_to_file(gs, p, true)) { std::cerr << "Error occurred while writing genesis JSON to '" << p.generic_string() << "'" << std::endl; return -1; } @@ -306,13 +307,12 @@ int blocklog_actions::read_log() { uint32_t block_num = (opt->first_block < 1) ? 1 : opt->first_block; signed_block_ptr next; fc::variant pretty_output; - const fc::microseconds deadline = fc::seconds(10); auto print_block = [&](signed_block_ptr& next) { abi_serializer::to_variant( *next, pretty_output, [](account_name n) { return std::optional(); }, - abi_serializer::create_yield_function(deadline)); + fc::seconds(1)); const auto block_id = next->calculate_id(); const uint32_t ref_block_prefix = block_id._hash[1]; const auto enhanced_object = fc::mutable_variant_object("block_num", next->block_num())("id", block_id)("ref_block_prefix", ref_block_prefix)(pretty_output.get_object()); diff --git a/programs/leap-util/actions/snapshot.cpp b/programs/leap-util/actions/snapshot.cpp index cfa105a7ce..7e98040f7a 100644 --- a/programs/leap-util/actions/snapshot.cpp +++ b/programs/leap-util/actions/snapshot.cpp @@ -79,6 +79,7 @@ int snapshot_actions::run_subcommand() { cfg.state_dir = state_dir; cfg.state_size = opt->db_size * 1024 * 1024; cfg.state_guard_size = opt->guard_size * 1024 * 1024; + cfg.eosvmoc_tierup = wasm_interface::vm_oc_enable::oc_none; // wasm not used, no use to fire up oc protocol_feature_set pfs = initialize_protocol_features( std::filesystem::path("protocol_features"), false ); try { diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 23ac7269c2..493c9e3a43 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -40,7 +40,8 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} prometheus_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${build_id_flag} PRIVATE chain_plugin http_plugin producer_plugin http_client_plugin - PRIVATE eosio_chain_wrap fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ) + PRIVATE eosio_chain_wrap fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} + Boost::dll ) include(additionalPlugins) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 2deda685f6..6c99a2fdd4 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -11,10 +11,16 @@ #include #include #include +#include #include #include +#include +#include +#include +#include + #include "config.hpp" using namespace appbase; @@ -22,6 +28,40 @@ using namespace eosio; namespace detail { +void log_non_default_options(const std::vector>& options) { + using namespace std::string_literals; + string result; + for (const auto& op : options) { + bool mask = false; + if (op.string_key == "signature-provider"s + || op.string_key == "peer-private-key"s + || op.string_key == "p2p-auto-bp-peer"s) { + mask = true; + } + std::string v; + for (auto i = op.value.cbegin(), b = op.value.cbegin(), e = op.value.cend(); i != e; ++i) { + if (i != b) + v += ", "; + if (mask) + v += "***"; + else + v += *i; + } + + if (!result.empty()) + result += ", "; + + if (v.empty()) { + result += op.string_key; + } else { + result 
+= op.string_key; + result += " = "; + result += v; + } + } + ilog("Non-default options: ${v}", ("v", result)); +} + fc::logging_config& add_deep_mind_logger(fc::logging_config& config) { config.appenders.push_back( fc::appender_config( "deep-mind", "dmlog" ) @@ -109,6 +149,12 @@ int main(int argc, char** argv) { try { appbase::scoped_app app; + fc::scoped_exit> on_exit = [&]() { + ilog("${name} version ${ver} ${fv}", + ("name", nodeos::config::node_executable_name)("ver", app->version_string()) + ("fv", app->version_string() == app->full_version_string() ? "" : app->full_version_string()) ); + ::detail::log_non_default_options(app->get_parsed_options()); + }; uint32_t short_hash = 0; fc::from_hex(eosio::version::version_hash(), (char*)&short_hash, sizeof(short_hash)); @@ -137,11 +183,12 @@ int main(int argc, char** argv) elog("resource_monitor_plugin failed to initialize"); return INITIALIZE_FAIL; } - ilog( "${name} version ${ver} ${fv}", + ilog("${name} version ${ver} ${fv}", ("name", nodeos::config::node_executable_name)("ver", app->version_string()) ("fv", app->version_string() == app->full_version_string() ? "" : app->full_version_string()) ); ilog("${name} using configuration file ${c}", ("name", nodeos::config::node_executable_name)("c", app->full_config_file_path().string())); ilog("${name} data directory is ${d}", ("name", nodeos::config::node_executable_name)("d", app->data_dir().string())); + ::detail::log_non_default_options(app->get_parsed_options()); app->startup(); app->set_thread_priority_max(); app->exec(); diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index 3fb99184e0..0d3ecf04f4 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -1,8 +1,4 @@ -configure_file(eosio-tn_bounce.sh eosio-tn_bounce.sh COPYONLY) -configure_file(eosio-tn_down.sh eosio-tn_down.sh COPYONLY) -configure_file(eosio-tn_roll.sh eosio-tn_roll.sh COPYONLY) -configure_file(eosio-tn_up.sh eosio-tn_up.sh COPYONLY) configure_file(abi_is_json.py abi_is_json.py COPYONLY) configure_file(postinst . @ONLY) configure_file(prerm . @ONLY) diff --git a/scripts/eosio-tn_bounce.sh b/scripts/eosio-tn_bounce.sh deleted file mode 100755 index 55ef1d7815..0000000000 --- a/scripts/eosio-tn_bounce.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# -# eosio-tn_bounce is used to restart a node that is acting badly or is down. -# usage: eosio-tn_bounce.sh [arglist] -# arglist will be passed to the node's command line. First with no modifiers -# then with --hard-replay-blockchain and then a third time with --delete-all-blocks -# -# the data directory and log file are set by this script. Do not pass them on -# the command line. -# -# in most cases, simply running ./eosio-tn_bounce.sh is sufficient. -# - -pushd $EOSIO_HOME - -if [ ! -f programs/nodeos/nodeos ]; then - echo unable to locate binary for nodeos - exit 1 -fi - -config_base=etc/eosio/node_ -if [ -z "$EOSIO_NODE" ]; then - DD=`ls -d ${config_base}[012]?` - ddcount=`echo $DD | wc -w` - if [ $ddcount -ne 1 ]; then - echo $HOSTNAME has $ddcount config directories, bounce not possible. Set environment variable - echo EOSIO_NODE to the 2-digit node id number to specify which node to bounce. For example: - echo EOSIO_NODE=06 $0 \ - cd - - exit 1 - fi - OFS=$((${#DD}-2)) - export EOSIO_NODE=${DD:$OFS} -else - DD=${config_base}$EOSIO_NODE - if [ ! 
\( -d $DD \) ]; then - echo no directory named $PWD/$DD - cd - - exit 1 - fi -fi - -bash $EOSIO_HOME/scripts/eosio-tn_down.sh -bash $EOSIO_HOME/scripts/eosio-tn_up.sh "$*" diff --git a/scripts/eosio-tn_down.sh b/scripts/eosio-tn_down.sh deleted file mode 100755 index e13d1357b0..0000000000 --- a/scripts/eosio-tn_down.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# -# eosio-tn_down.sh is used by the eosio-tn_bounce.sh and eosio-tn_roll.sh scripts. -# It is intended to terminate specific EOS.IO daemon processes. -# - - -if [ "$PWD" != "$EOSIO_HOME" ]; then - echo $0 must only be run from $EOSIO_HOME - exit -1 -fi - -prog=nodeos - -DD=var/lib/node_$EOSIO_NODE -runtest=`cat $DD/$prog.pid` -echo runtest = $runtest -running=`ps -e | grep $runtest | grep -cv grep ` - -if [ $running -ne 0 ]; then - echo killing $prog - - kill -15 $runtest - - for (( a = 1;11-$a; a = $(($a + 1)) )); do - echo waiting for safe termination, pass $a - sleep 2 - running=`ps -e | grep $runtest | grep -cv grep` - echo running = $running - if [ $running -eq 0 ]; then - break; - fi - done -fi - -if [ $running -ne 0 ]; then - echo killing $prog with SIGTERM failed, trying with SIGKILL - pkill -9 $runtest -fi diff --git a/scripts/eosio-tn_roll.sh b/scripts/eosio-tn_roll.sh deleted file mode 100755 index 1b131edb0f..0000000000 --- a/scripts/eosio-tn_roll.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash -# -# eosio-tn_roll is used to have all of the instances of the EOS daemon on a host brought down -# so that the underlying executable image file (the "text file") can be replaced. Then -# all instances are restarted. -# usage: eosio-tn_roll.sh [arglist] -# arglist will be passed to the node's command line. First with no modifiers -# then with --hard-replay-blockchain and then a third time with --delete-all-blocks -# -# The data directory and log file are set by this script. Do not pass them on -# the command line. -# -# In most cases, simply running ./eosio-tn_roll.sh is sufficient. -# - -if [ -z "$EOSIO_HOME" ]; then - echo EOSIO_HOME not set - $0 unable to proceed. - exit -1 -fi - -cd $EOSIO_HOME - -if [ -z "$EOSIO_NODE" ]; then - DD=`ls -d var/lib/node_[012]?` - ddcount=`echo $DD | wc -w` - if [ $ddcount -gt 1 ]; then - DD="all" - fi - OFS=$((${#DD}-2)) - export EOSIO_NODE=${DD:$OFS} -else - DD=var/lib/node_$EOSIO_NODE - if [ ! \( -d $DD \) ]; then - echo no directory named $PWD/$DD - cd - - exit -1 - fi -fi - -prog="" -RD="" -for p in eosd eosiod nodeos; do - prog=$p - RD=bin - if [ -f $RD/$prog ]; then - break; - else - RD=programs/$prog - if [ -f $RD/$prog ]; then - break; - fi - fi - prog="" - RD="" -done - -if [ \( -z "$prog" \) -o \( -z "$RD" \) ]; then - echo unable to locate binary for eosd or eosiod or nodeos - exit 1 -fi - -SDIR=staging/eos -if [ ! 
-e $SDIR/$RD/$prog ]; then - echo $SDIR/$RD/$prog does not exist - exit 1 -fi - -if [ -e $RD/$prog ]; then - s1=`md5sum $RD/$prog | sed "s/ .*$//"` - s2=`md5sum $SDIR/$RD/$prog | sed "s/ .*$//"` - if [ "$s1" == "$s2" ]; then - echo $HOSTNAME no update $SDIR/$RD/$prog - exit 1; - fi -fi - -echo DD = $DD - -bash $EOSIO_HOME/scripts/eosio-tn_down.sh - -cp $SDIR/$RD/$prog $RD/$prog - -if [ $DD = "all" ]; then - for EOSIO_RESTART_DATA_DIR in `ls -d var/lib/node_??`; do - bash $EOSIO_HOME/scripts/eosio-tn_up.sh "$*" - done -else - bash $EOSIO_HOME/scripts/eosio-tn_up.sh "$*" -fi -unset EOSIO_RESTART_DATA_DIR - -cd - diff --git a/scripts/eosio-tn_up.sh b/scripts/eosio-tn_up.sh deleted file mode 100755 index 058ab16ed9..0000000000 --- a/scripts/eosio-tn_up.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash -# -# eosio-tn_up is a helper script used to start a node that was previously stopped. -# It is not intended to be run stand-alone; it is a companion to the -# eosio-tn_bounce.sh and eosio-tn_roll.sh scripts. - -connected="0" - -rundir=programs/nodeos -prog=nodeos - -# Quote any args that are "*", so they are not expanded -qargs=`echo "$*" | sed -e 's/ \* / "*" /' -e 's/ \*$/ "*"/'` - -if [ "$PWD" != "$EOSIO_HOME" ]; then - echo $0 must only be run from $EOSIO_HOME - exit -1 -fi - -if [ ! -e $rundir/$prog ]; then - echo unable to locate binary for nodeos - exit -1 -fi - -if [ -z "$EOSIO_NODE" ]; then - echo data directory not set - exit -1 -fi - -datadir=var/lib/node_$EOSIO_NODE -now=`date +'%Y_%m_%d_%H_%M_%S'` -log=stderr.$now.txt -touch $datadir/$log -rm $datadir/stderr.txt -ln -s $log $datadir/stderr.txt - -relaunch() { - echo "$rundir/$prog $qargs $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log " - nohup $rundir/$prog $qargs $* --data-dir $datadir --config-dir etc/eosio/node_$EOSIO_NODE > $datadir/stdout.txt 2>> $datadir/$log & - pid=$! - echo pid = $pid - echo $pid > $datadir/$prog.pid - - for (( a = 10; $a; a = $(($a - 1)) )); do - echo checking viability pass $((11 - $a)) - sleep 2 - running=$(pgrep $prog | grep -c $pid) - echo running = $running - if [ -z "$running" ]; then - break; - fi - connected=`grep -c "net_plugin.cpp:.*connection" $datadir/$log` - if [ "$connected" -ne 0 ]; then - break; - fi - done -} - -if [ -z "$EOSIO_LEVEL" ]; then - echo starting with no modifiers - relaunch - if [ "$connected" -eq 0 ]; then - EOSIO_LEVEL=replay - else - exit 0 - fi -fi - -if [ "$EOSIO_LEVEL" == replay ]; then - echo starting with replay - relaunch --hard-replay-blockchain - if [ "$connected" -eq 0 ]; then - EOSIO_LEVEL=resync - else - exit 0 - fi -fi -if [ "$EOSIO_LEVEL" == resync ]; then - echo starting with delete-all-blocks - relaunch --delete-all-blocks -fi diff --git a/scripts/pinned_build.sh b/scripts/pinned_build.sh index c57257e957..ebf37d29d7 100755 --- a/scripts/pinned_build.sh +++ b/scripts/pinned_build.sh @@ -30,7 +30,6 @@ DEP_DIR="$(realpath "$1")" LEAP_DIR="$2" JOBS="$3" CLANG_VER=11.0.1 -BOOST_VER=1.82.0 LLVM_VER=11.0.1 SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]:-$0}"; )" &> /dev/null && pwd 2> /dev/null; )"; START_DIR="$(pwd)" @@ -101,27 +100,10 @@ install_llvm() { export LLVM_DIR="${LLVM_DIR}" } -install_boost() { - BOOST_DIR="$1" - - if [ ! 
-d "${BOOST_DIR}" ]; then - echo "Installing Boost ${BOOST_VER} @ ${BOOST_DIR}" - try wget -O "boost_${BOOST_VER//\./_}.tar.gz" "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VER}/source/boost_${BOOST_VER//\./_}.tar.gz" - try tar -xvzf "boost_${BOOST_VER//\./_}.tar.gz" -C "${DEP_DIR}" - pushdir "${BOOST_DIR}" - try ./bootstrap.sh -with-toolset=clang --prefix="${BOOST_DIR}/bin" - ./b2 toolset=clang cxxflags="-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I\${CLANG_DIR}/include/c++/v1 -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fPIE" linkflags='-stdlib=libc++ -pie' link=static threading=multi --with-iostreams --with-date_time --with-system --with-program_options --with-chrono --with-test -q -j "${JOBS}" install - popdir "${DEP_DIR}" - rm "boost_${BOOST_VER//\./_}.tar.gz" - fi - export BOOST_DIR="${BOOST_DIR}" -} - pushdir "${DEP_DIR}" install_clang "${DEP_DIR}/clang-${CLANG_VER}" install_llvm "${DEP_DIR}/llvm-${LLVM_VER}" -install_boost "${DEP_DIR}/boost_${BOOST_VER//\./_}" # go back to the directory where the script starts popdir "${START_DIR}" @@ -130,7 +112,7 @@ pushdir "${LEAP_DIR}" # build Leap echo "Building Leap ${SCRIPT_DIR}" -try cmake -DCMAKE_TOOLCHAIN_FILE="${SCRIPT_DIR}/pinned_toolchain.cmake" -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="${LLVM_DIR}/lib/cmake" -DCMAKE_PREFIX_PATH="${BOOST_DIR}/bin" "${SCRIPT_DIR}/.." +try cmake -DCMAKE_TOOLCHAIN_FILE="${SCRIPT_DIR}/pinned_toolchain.cmake" -DCMAKE_INSTALL_PREFIX=${LEAP_PINNED_INSTALL_PREFIX:-/usr/local} -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="${LLVM_DIR}/lib/cmake" "${SCRIPT_DIR}/.." try make -j "${JOBS}" try cpack diff --git a/scripts/pinned_toolchain.cmake b/scripts/pinned_toolchain.cmake index 83757c2eff..ba791016ac 100644 --- a/scripts/pinned_toolchain.cmake +++ b/scripts/pinned_toolchain.cmake @@ -10,6 +10,9 @@ set(CMAKE_C_FLAGS_INIT "-D_FORTIFY_SOURCE=2 -fstack-protector-strong -fpie") set(CMAKE_CXX_FLAGS_INIT "-nostdinc++ -D_FORTIFY_SOURCE=2 -fstack-protector-strong -fpie") set(CMAKE_EXE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -pie") +if(NOT APPLE) + string(APPEND CMAKE_EXE_LINKER_FLAGS_INIT " -Wl,-z,relro,-z,now") +endif() set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 5cfb339b09..c9cf6bbc12 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -5,7 +5,7 @@ list(REMOVE_ITEM UNIT_TESTS ship_client.cpp) list(REMOVE_ITEM UNIT_TESTS ship_streamer.cpp) add_executable( plugin_test ${UNIT_TESTS} ) -target_link_libraries( plugin_test eosio_testing eosio_chain chainbase chain_plugin producer_plugin wallet_plugin fc state_history ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( plugin_test eosio_testing eosio_chain_wrap chainbase chain_plugin producer_plugin wallet_plugin fc state_history ${PLATFORM_SPECIFIC_LIBS} ) target_include_directories( plugin_test PUBLIC ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include @@ -16,8 +16,8 @@ target_include_directories( plugin_test PUBLIC configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_tests/dawn_515/test.sh ${CMAKE_CURRENT_BINARY_DIR}/p2p_tests/dawn_515/test.sh COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/block_log_util_test.py ${CMAKE_CURRENT_BINARY_DIR}/block_log_util_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/block_log_retain_blocks_test.py ${CMAKE_CURRENT_BINARY_DIR}/block_log_retain_blocks_test.py COPYONLY) 
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cluster_launcher.py ${CMAKE_CURRENT_BINARY_DIR}/cluster_launcher.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-test.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/terminate-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/terminate-scenarios-test.py COPYONLY) @@ -36,7 +36,6 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_protocol_feature_test.py ${CMA configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_multiple_version_protocol_feature_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_multiple_version_protocol_feature_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_extra_packed_data_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_extra_packed_data_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/keosd_auto_launch_test.py ${CMAKE_CURRENT_BINARY_DIR}/keosd_auto_launch_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/db_modes_test.sh ${CMAKE_CURRENT_BINARY_DIR}/db_modes_test.sh COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/prod_preactivation_test.py ${CMAKE_CURRENT_BINARY_DIR}/prod_preactivation_test.py COPYONLY) @@ -50,6 +49,8 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ship_streamer_test.py ${CMAKE_CURRENT configure_file(${CMAKE_CURRENT_SOURCE_DIR}/large-lib-test.py ${CMAKE_CURRENT_BINARY_DIR}/large-lib-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/http_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/http_plugin_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_high_latency_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_high_latency_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_multiple_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_multiple_listen_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/p2p_no_listen_test.py ${CMAKE_CURRENT_BINARY_DIR}/p2p_no_listen_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compute_transaction_test.py ${CMAKE_CURRENT_BINARY_DIR}/compute_transaction_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/subjective_billing_test.py ${CMAKE_CURRENT_BINARY_DIR}/subjective_billing_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/get_account_test.py ${CMAKE_CURRENT_BINARY_DIR}/get_account_test.py COPYONLY) @@ -65,9 +66,10 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/light_validation_sync_test.py ${CMAKE configure_file(${CMAKE_CURRENT_SOURCE_DIR}/trace_plugin_test.py ${CMAKE_CURRENT_BINARY_DIR}/trace_plugin_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nested_container_multi_index_test.py ${CMAKE_CURRENT_BINARY_DIR}/nested_container_multi_index_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/large-lib-test.py ${CMAKE_CURRENT_BINARY_DIR}/large-lib-test.py COPYONLY) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher.py 
${CMAKE_CURRENT_BINARY_DIR}/launcher.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/auto_bp_peering_test.py ${CMAKE_CURRENT_BINARY_DIR}/auto_bp_peering_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/auto_bp_peering_test_shape.json ${CMAKE_CURRENT_BINARY_DIR}/auto_bp_peering_test_shape.json COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/gelf_test.py ${CMAKE_CURRENT_BINARY_DIR}/gelf_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/split_blocklog_replay_test.py ${CMAKE_CURRENT_BINARY_DIR}/split_blocklog_replay_test.py COPYONLY) if(DEFINED ENV{GITHUB_ACTIONS}) set(UNSHARE "--unshared") @@ -76,15 +78,15 @@ else() endif() #To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. plugin_test -- --verbose -add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output) +add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output --catch_system_errors=no) -add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME block_log_util_test COMMAND tests/block_log_util_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME block_log_util_test COMMAND tests/block_log_util_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST block_log_util_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME block_log_retain_blocks_test COMMAND tests/block_log_retain_blocks_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME block_log_retain_blocks_test COMMAND tests/block_log_retain_blocks_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST block_log_retain_blocks_test PROPERTY LABELS nonparallelizable_tests) option(ABIEOS_ONLY_LIBRARY "define and build the ABIEOS library" ON) @@ -104,47 +106,52 @@ add_subdirectory( performance_tests ) find_package(Threads) add_executable(ship_client ship_client.cpp) -target_link_libraries(ship_client abieos Boost::program_options Boost::system Threads::Threads) +target_link_libraries(ship_client abieos Boost::program_options Boost::system Boost::algorithm Boost::asio Boost::beast Threads::Threads) add_executable(ship_streamer ship_streamer.cpp) -target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Threads::Threads) +target_link_libraries(ship_streamer abieos Boost::program_options Boost::system Boost::asio Boost::beast Threads::Threads) -add_test(NAME ship_test COMMAND tests/ship_test.py -v --num-clients 10 --num-requests 5000 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME cluster_launcher COMMAND tests/cluster_launcher.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST cluster_launcher PROPERTY LABELS nonparallelizable_tests) + +add_test(NAME ship_test COMMAND tests/ship_test.py -v --num-clients 10 --num-requests 5000 ${UNSHARE} 
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST ship_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME ship_test_unix COMMAND tests/ship_test.py -v --num-clients 10 --num-requests 5000 --clean-run ${UNSHARE} --unix-socket WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME ship_test_unix COMMAND tests/ship_test.py -v --num-clients 10 --num-requests 5000 ${UNSHARE} --unix-socket WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST ship_test_unix PROPERTY LABELS nonparallelizable_tests) -add_test(NAME ship_streamer_test COMMAND tests/ship_streamer_test.py -v --num-clients 10 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME ship_streamer_test COMMAND tests/ship_streamer_test.py -v --num-clients 10 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST ship_streamer_test PROPERTY LABELS long_running_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME producer-preactivate-feature-test COMMAND tests/prod_preactivation_test.py --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME producer-preactivate-feature-test COMMAND tests/prod_preactivation_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST producer-preactivate-feature-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_protocol_feature_test COMMAND tests/nodeos_protocol_feature_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_protocol_feature_test COMMAND tests/nodeos_protocol_feature_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_protocol_feature_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME compute_transaction_test COMMAND tests/compute_transaction_test.py -v -p 2 -n 3 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME compute_transaction_test COMMAND tests/compute_transaction_test.py -v -p 2 -n 3 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST compute_transaction_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME read-only-trx-basic-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --read-only-threads 0 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME read-only-trx-basic-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --read-only-threads 0 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST read-only-trx-basic-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME read-only-trx-parallel-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --read-only-threads 6 --num-test-runs 2 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME read-only-trx-parallel-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --read-only-threads 6 --num-test-runs 3 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST read-only-trx-parallel-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME read-only-trx-parallel-eos-vm-oc-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --eos-vm-oc-enable --read-only-threads 6 --num-test-runs 2 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME read-only-trx-parallel-eos-vm-oc-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --eos-vm-oc-enable all --read-only-threads 6 --num-test-runs 3 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) 
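
Two themes in the ctest registrations here: every Python harness invocation drops the `--clean-run` flag (the harness presumably now defaults to a clean start), and `--eos-vm-oc-enable` changes from a bare switch to a mode argument (`all` / `none`), with the new `read-only-trx-parallel-no-oc-test` exercising the no-tier-up path. That lines up with the `snapshot.cpp` hunk earlier in this diff, which sets `eosvmoc_tierup` to `wasm_interface::vm_oc_enable::oc_none` because no wasm runs there. A sketch of the tri-state this implies; only `oc_none` appears verbatim in the diff, and the other enumerator names are assumptions:

```cpp
#include <cstdint>

// Tri-state OC tier-up control implied by this diff. 'oc_none' is verbatim
// from the snapshot.cpp hunk; 'oc_auto'/'oc_all' are assumed spellings that
// mirror the '--eos-vm-oc-enable all|none' test flag.
enum class vm_oc_enable : std::uint8_t {
   oc_auto, // tier-up selected contracts automatically (assumed default)
   oc_all,  // tier-up every contract ('--eos-vm-oc-enable all')
   oc_none  // never start the OC compiler ('--eos-vm-oc-enable none')
};

struct controller_config_sketch {
   vm_oc_enable eosvmoc_tierup = vm_oc_enable::oc_auto;
};

int main() {
   controller_config_sketch cfg;
   cfg.eosvmoc_tierup = vm_oc_enable::oc_none; // wasm not executed: skip OC startup
}
```
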
set_property(TEST read-only-trx-parallel-eos-vm-oc-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME subjective_billing_test COMMAND tests/subjective_billing_test.py -v -p 2 -n 4 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME read-only-trx-parallel-no-oc-test COMMAND tests/read_only_trx_test.py -v -p 2 -n 3 --eos-vm-oc-enable none --read-only-threads 6 --num-test-runs 2 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST read-only-trx-parallel-no-oc-test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME subjective_billing_test COMMAND tests/subjective_billing_test.py -v -p 2 -n 4 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST subjective_billing_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME get_account_test COMMAND tests/get_account_test.py -v -p 2 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME get_account_test COMMAND tests/get_account_test.py -v -p 2 -n 3 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST get_account_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME distributed-transactions-speculative-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 --speculative -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME distributed-transactions-speculative-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 --speculative -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-speculative-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) -add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-hard_replay PROPERTY LABELS nonparallelizable_tests) -add_test(NAME restart-scenarios-test-none COMMAND tests/restart-scenarios-test.py -c none --kill-sig term -p4 -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME restart-scenarios-test-none COMMAND tests/restart-scenarios-test.py -c none --kill-sig term -p4 -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-none PROPERTY LABELS nonparallelizable_tests) add_test(NAME terminate-scenarios-test-resync COMMAND tests/terminate-scenarios-test.py -c resync --terminate-at-block 10 --kill-sig term ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST terminate-scenarios-test-resync PROPERTY LABELS 
nonparallelizable_tests) @@ -152,21 +159,19 @@ add_test(NAME terminate-scenarios-test-replay COMMAND tests/terminate-scenarios- set_property(TEST terminate-scenarios-test-replay PROPERTY LABELS nonparallelizable_tests) add_test(NAME terminate-scenarios-test-hard_replay COMMAND tests/terminate-scenarios-test.py -c hardReplay --terminate-at-block 10 --kill-sig term ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST terminate-scenarios-test-hard_replay PROPERTY LABELS nonparallelizable_tests) -add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST validate_dirty_db_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME launcher_test COMMAND tests/launcher_test.py -v --clean-run ${UNSHARE} ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME keosd_auto_launch_test COMMAND tests/keosd_auto_launch_test.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST keosd_auto_launch_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_snapshot_diff_test COMMAND tests/nodeos_snapshot_diff_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_snapshot_diff_test COMMAND tests/nodeos_snapshot_diff_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_snapshot_diff_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_snapshot_forked_test COMMAND tests/nodeos_snapshot_forked_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_snapshot_forked_test COMMAND tests/nodeos_snapshot_forked_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_snapshot_forked_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME trx_finality_status_test COMMAND tests/trx_finality_status_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME trx_finality_status_test COMMAND tests/trx_finality_status_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST trx_finality_status_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME trx_finality_status_forked_test COMMAND tests/trx_finality_status_forked_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME trx_finality_status_forked_test COMMAND tests/trx_finality_status_forked_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST trx_finality_status_forked_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -174,56 +179,61 @@ set_tests_properties(db_modes_test PROPERTIES COST 6000) add_test(NAME release-build-test COMMAND tests/release-build.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME version-label-test COMMAND tests/version-label.sh "v${VERSION_FULL}" WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) add_test(NAME full-version-label-test COMMAND tests/full-version-label.sh "v${VERSION_FULL}" ${CMAKE_SOURCE_DIR} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME nested_container_multi_index_test COMMAND tests/nested_container_multi_index_test.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nested_container_multi_index_test COMMAND 
tests/nested_container_multi_index_test.py -n 2 WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nested_container_multi_index_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_run_check_test COMMAND tests/nodeos_run_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_run_check_test COMMAND tests/nodeos_run_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME p2p_multiple_listen_test COMMAND tests/p2p_multiple_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_multiple_listen_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME p2p_no_listen_test COMMAND tests/p2p_no_listen_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST p2p_no_listen_test PROPERTY LABELS nonparallelizable_tests) + # needs iproute-tc or iproute2 depending on platform -#add_test(NAME p2p_high_latency_test COMMAND tests/p2p_high_latency_test.py -v --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +#add_test(NAME p2p_high_latency_test COMMAND tests/p2p_high_latency_test.py -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) #set_property(TEST p2p_high_latency_test PROPERTY LABELS nonparallelizable_tests) -#add_test(NAME distributed_transactions_lr_test COMMAND tests/distributed-transactions-test.py -d 2 -p 21 -n 21 -v --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +#add_test(NAME distributed_transactions_lr_test COMMAND tests/distributed-transactions-test.py -d 2 -p 21 -n 21 -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) #set_property(TEST distributed_transactions_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --wallet-port 9901 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --wallet-port 9901 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_contrl_c_test COMMAND tests/nodeos_contrl_c_test.py -v --wallet-port 9901 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_contrl_c_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY 
LABELS long_running_tests) -add_test(NAME nodeos_read_terminate_at_block_lr_test COMMAND tests/nodeos_read_terminate_at_block_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_read_terminate_at_block_lr_test COMMAND tests/nodeos_read_terminate_at_block_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_read_terminate_at_block_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_chainbase_allocation_test COMMAND tests/nodeos_chainbase_allocation_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_chainbase_allocation_test COMMAND tests/nodeos_chainbase_allocation_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_chainbase_allocation_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_short_fork_take_over_test COMMAND tests/nodeos_short_fork_take_over_test.py -v --wallet-port 9905 --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_short_fork_take_over_test COMMAND tests/nodeos_short_fork_take_over_test.py -v --wallet-port 9905 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_short_fork_take_over_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_extra_packed_data_test COMMAND tests/nodeos_extra_packed_data_test.py -v --clean-run -p 8 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_extra_packed_data_test COMMAND tests/nodeos_extra_packed_data_test.py -v -p 8 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_extra_packed_data_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_producer_watermark_lr_test COMMAND tests/nodeos_producer_watermark_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_producer_watermark_lr_test COMMAND tests/nodeos_producer_watermark_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_producer_watermark_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_high_transaction_lr_test COMMAND tests/nodeos_high_transaction_test.py -v --clean-run -p 4 -n 8 --num-transactions 10000 --max-transactions-per-second 500 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_high_transaction_lr_test COMMAND tests/nodeos_high_transaction_test.py -v -p 4 -n 8 --num-transactions 10000 --max-transactions-per-second 500 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_high_transaction_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_retry_transaction_lr_test COMMAND tests/nodeos_retry_transaction_test.py -v --clean-run --num-transactions 100 --max-transactions-per-second 10 --total-accounts 5 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_retry_transaction_lr_test COMMAND tests/nodeos_retry_transaction_test.py -v --num-transactions 100 --max-transactions-per-second 10 --total-accounts 5 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_retry_transaction_lr_test PROPERTY LABELS long_running_tests) @@ 
-241,6 +251,10 @@ add_test(NAME plugin_http_api_test COMMAND tests/plugin_http_api_test.py WORKING set_tests_properties(plugin_http_api_test PROPERTIES TIMEOUT 50) set_property(TEST plugin_http_api_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME plugin_http_category_api_test COMMAND tests/plugin_http_api_test.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_tests_properties(plugin_http_category_api_test PROPERTIES TIMEOUT 50 ENVIRONMENT "PLUGIN_HTTP_TEST_CATEGORY=ON") +set_property(TEST plugin_http_category_api_test PROPERTY LABELS nonparallelizable_tests) + add_test(NAME trace_plugin_test COMMAND tests/trace_plugin_test.py -v WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_tests_properties(trace_plugin_test PROPERTIES TIMEOUT 300) set_property(TEST trace_plugin_test PROPERTY LABELS nonparallelizable_tests) @@ -248,15 +262,18 @@ set_property(TEST trace_plugin_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME resource_monitor_plugin_test COMMAND tests/resource_monitor_plugin_test.py WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST resource_monitor_plugin_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_repeat_transaction_lr_test COMMAND tests/nodeos_high_transaction_test.py -v --clean-run -p 4 -n 8 --num-transactions 1000 --max-transactions-per-second 500 --send-duplicates ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME nodeos_repeat_transaction_lr_test COMMAND tests/nodeos_high_transaction_test.py -v -p 4 -n 8 --num-transactions 1000 --max-transactions-per-second 500 --send-duplicates ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_repeat_transaction_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME light_validation_sync_test COMMAND tests/light_validation_sync_test.py -v --clean-run ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME light_validation_sync_test COMMAND tests/light_validation_sync_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST light_validation_sync_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME auto_bp_peering_test COMMAND tests/auto_bp_peering_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME auto_bp_peering_test COMMAND tests/auto_bp_peering_test.py -v ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST auto_bp_peering_test PROPERTY LABELS long_running_tests) +add_test(NAME gelf_test COMMAND tests/gelf_test.py ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST gelf_test PROPERTY LABELS nonparallelizable_tests) + if(ENABLE_COVERAGE_TESTING) set(Coverage_NAME ${PROJECT_NAME}_coverage) diff --git a/tests/TestHarness/CMakeLists.txt b/tests/TestHarness/CMakeLists.txt index 20f24b5a61..e35782ead6 100644 --- a/tests/TestHarness/CMakeLists.txt +++ b/tests/TestHarness/CMakeLists.txt @@ -12,3 +12,5 @@ configure_file(interfaces.py . COPYONLY) configure_file(launch_transaction_generators.py . COPYONLY) configure_file(logging.py . COPYONLY) configure_file(depresolver.py . COPYONLY) +configure_file(launcher.py . COPYONLY) +configure_file(accounts.py . 
COPYONLY) diff --git a/tests/TestHarness/Cluster.py b/tests/TestHarness/Cluster.py index 4aa70ddec7..c028b7a632 100644 --- a/tests/TestHarness/Cluster.py +++ b/tests/TestHarness/Cluster.py @@ -1,3 +1,4 @@ +import atexit import copy import subprocess import time @@ -15,13 +16,14 @@ from pathlib import Path from .core_symbol import CORE_SYMBOL -from .testUtils import Account +from .accounts import Account, createAccountKeys from .testUtils import BlockLogAction from .testUtils import Utils from .Node import BlockType from .Node import Node from .WalletMgr import WalletMgr from .launch_transaction_generators import TransactionGeneratorsLauncher, TpsTrxGensConfig +from .launcher import cluster_generator try: from .libc import unshare, CLONE_NEWNET from .interfaces import getInterfaceFlags, setInterfaceUp, IFF_LOOPBACK @@ -43,38 +45,6 @@ def isValid(policy): policy == PFSetupPolicy.PREACTIVATE_FEATURE_ONLY or \ policy == PFSetupPolicy.FULL -# Class for generating distinct names for many accounts -class NamedAccounts: - - def __init__(self, cluster, numAccounts): - Utils.Print("NamedAccounts %d" % (numAccounts)) - self.numAccounts=numAccounts - self.accounts=cluster.createAccountKeys(numAccounts) - if self.accounts is None: - Utils.errorExit("FAILURE - create keys") - accountNum = 0 - for account in self.accounts: - Utils.Print("NamedAccounts Name for %d" % (accountNum)) - account.name=self.setName(accountNum) - accountNum+=1 - - def setName(self, num): - retStr="test" - digits=[] - maxDigitVal=5 - maxDigits=8 - temp=num - while len(digits) < maxDigits: - digit=(num % maxDigitVal)+1 - num=int(num/maxDigitVal) - digits.append(digit) - - digits.reverse() - retStr += "".join(map(str, digits)) - - Utils.Print("NamedAccounts Name for %d is %s" % (temp, retStr)) - return retStr - # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-public-methods class Cluster(object): @@ -88,11 +58,9 @@ class Cluster(object): __bootlog="leap-ignition-wd/bootlog.txt" # pylint: disable=too-many-arguments - # walletd [True|False] Is keosd running. If not load the wallet plugin - def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888, walletHost="localhost", walletPort=9899 - , defproduceraPrvtKey=None, defproducerbPrvtKey=None, staging=False, loggingLevel="debug", loggingLevelDict={}, nodeosVers="", unshared=False): + def __init__(self, localCluster=True, host="localhost", port=8888, walletHost="localhost", walletPort=9899 + , defproduceraPrvtKey=None, defproducerbPrvtKey=None, staging=False, loggingLevel="debug", loggingLevelDict={}, nodeosVers="", unshared=False, keepRunning=False, keepLogs=False): """Cluster container. - walletd [True|False] Is wallet keosd running. If not load the wallet plugin localCluster [True|False] Is cluster local to host. 
host: eos server host port: eos server port @@ -100,13 +68,20 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 walletPort: wos wallet port defproduceraPrvtKey: Defproducera account private key defproducerbPrvtKey: Defproducerb account private key + staging: [True|False] If true, don't generate new node configurations + loggingLevel: Logging level to apply to all nodeos loggers in all nodes + loggingLevelDict: Dictionary of node indices and logging level to apply to all nodeos loggers in that node + nodeosVers: Nodeos version string for compatibility + unshared: [True|False] If true, launch all processes in Linux namespace + keepRunning: [True|False] If true, leave nodes running when Cluster is destroyed. Implies keepLogs. + keepLogs: [True|False] If true, retain log files after cluster shuts down. """ + atexit.register(self.shutdown) self.accounts=[] self.nodes=[] self.unstartedNodes=[] self.localCluster=localCluster self.wallet=None - self.walletd=walletd self.walletMgr=None self.host=host self.port=port @@ -116,6 +91,8 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.staging=staging self.loggingLevel=loggingLevel self.loggingLevelDict=loggingLevelDict + self.keepRunning=keepRunning + self.keepLogs=keepLogs or keepRunning # init accounts self.defProducerAccounts={} self.defproduceraAccount=self.defProducerAccounts["defproducera"]= Account("defproducera") @@ -131,12 +108,12 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.preExistingFirstTrxFiles=[] self.filesToCleanup=[] + self.testFailed=False self.alternateVersionLabels=Cluster.__defaultAlternateVersionLabels() self.biosNode = None self.nodeosVers=nodeosVers self.nodeosLogPath=Path(Utils.TestLogRoot) / Path(f'{Path(sys.argv[0]).stem}{os.getpid()}') - self.launcherPath = Path(__file__).resolve().parents[1] / "launcher.py" self.libTestingContractsPath = Path(__file__).resolve().parents[2] / "libraries" / "testing" / "contracts" self.unittestsContractsPath = Path(__file__).resolve().parents[2] / "unittests" / "contracts" @@ -188,7 +165,7 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", delay=1, onlyBios=False, dontBootstrap=False, + def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=21, topo="mesh", delay=2, onlyBios=False, dontBootstrap=False, totalProducers=None, sharedProducers=0, extraNodeosArgs="", specificExtraNodeosArgs=None, specificNodeosInstances=None, onlySetProds=False, pfSetupPolicy=PFSetupPolicy.FULL, alternateVersionLabelsFile=None, associatedNodeLabels=None, loadSystemContract=True, nodeosLogPath=Path(Utils.TestLogRoot) / Path(f'{Path(sys.argv[0]).stem}{os.getpid()}'), genesisPath=None, maximumP2pPerHost=0, maximumClients=25, prodsEnableTraceApi=True): @@ -265,12 +242,17 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me tries = tries - 1 time.sleep(2) loggingLevelDictString = json.dumps(self.loggingLevelDict, separators=(',', ':')) - cmd="%s %s -p %s -n %s -d %s -i %s -f %s --unstarted-nodes %s --logging-level %s --logging-level-map %s" % ( - sys.executable, str(self.launcherPath), pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], - producerFlag, unstartedNodes, self.loggingLevel, loggingLevelDictString) - 
cmdArr=cmd.split() + args=(f'-p {pnodes} -n {totalNodes} -d {delay} ' + f'-i {datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]} -f {producerFlag} ' + f'--unstarted-nodes {unstartedNodes} --logging-level {self.loggingLevel} ' + f'--logging-level-map {loggingLevelDictString}') + argsArr=args.split() + argsArr.append("--config-dir") + argsArr.append(str(nodeosLogPath)) + argsArr.append("--data-dir") + argsArr.append(str(nodeosLogPath)) if self.staging: - cmdArr.append("--nogen") + argsArr.append("--nogen") nodeosArgs="" if "--max-transaction-time" not in extraNodeosArgs: nodeosArgs += " --max-transaction-time -1" @@ -280,8 +262,6 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me nodeosArgs += f" --p2p-max-nodes-per-host {maximumP2pPerHost}" if "--max-clients" not in extraNodeosArgs: nodeosArgs += f" --max-clients {maximumClients}" - if not self.walletd: - nodeosArgs += " --plugin eosio::wallet_api_plugin" if Utils.Debug and "--contracts-console" not in extraNodeosArgs: nodeosArgs += " --contracts-console" if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): @@ -298,24 +278,24 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me assert(isinstance(arg, str)) if not len(arg): continue - cmdArr.append("--specific-num") - cmdArr.append(str(nodeNum)) - cmdArr.append("--specific-nodeos") + argsArr.append("--specific-num") + argsArr.append(str(nodeNum)) + argsArr.append("--specific-nodeos") if arg.find("--http-max-response-time-ms") != -1: httpMaxResponseTimeSet = True if arg[0] != "'" and arg[-1] != "'": - cmdArr.append("'" + arg + "'") + argsArr.append("'" + arg + "'") else: - cmdArr.append(arg) + argsArr.append(arg) if specificNodeosInstances is not None: assert(isinstance(specificNodeosInstances, dict)) for nodeNum,arg in specificNodeosInstances.items(): assert(isinstance(nodeNum, (str,int))) assert(isinstance(arg, str)) - cmdArr.append("--spcfc-inst-num") - cmdArr.append(str(nodeNum)) - cmdArr.append("--spcfc-inst-nodeos") - cmdArr.append(arg) + argsArr.append("--spcfc-inst-num") + argsArr.append(str(nodeNum)) + argsArr.append("--spcfc-inst-nodeos") + argsArr.append(arg) if not httpMaxResponseTimeSet and extraNodeosArgs.find("--http-max-response-time-ms") == -1: extraNodeosArgs+=" --http-max-response-time-ms 990000 " @@ -325,19 +305,17 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me nodeosArgs += extraNodeosArgs if nodeosArgs: - cmdArr.append("--nodeos") - cmdArr.append(nodeosArgs) + argsArr.append("--nodeos") + argsArr.append(nodeosArgs) if genesisPath is None: - cmdArr.append("--max-block-cpu-usage") - cmdArr.append(str(500000)) - cmdArr.append("--max-transaction-cpu-usage") - cmdArr.append(str(475000)) + argsArr.append("--max-block-cpu-usage") + argsArr.append(str(500000)) + argsArr.append("--max-transaction-cpu-usage") + argsArr.append(str(475000)) else: - cmdArr.append("--genesis") - cmdArr.append(str(genesisPath)) - cmdArr.append("--nodeos-log-path") - cmdArr.append(str(nodeosLogPath)) + argsArr.append("--genesis") + argsArr.append(str(genesisPath)) if associatedNodeLabels is not None: for nodeNum,label in associatedNodeLabels.items(): @@ -346,26 +324,25 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me path=self.alternateVersionLabels.get(label) if path is None: Utils.errorExit("associatedNodeLabels passed in indicates label %s for node num %s, but it was not identified in %s" % (label, nodeNum, alternateVersionLabelsFile)) - 
cmdArr.append("--spcfc-inst-num") - cmdArr.append(str(nodeNum)) - cmdArr.append("--spcfc-inst-nodeos") - cmdArr.append(path) + argsArr.append("--spcfc-inst-num") + argsArr.append(str(nodeNum)) + argsArr.append("--spcfc-inst-nodeos") + argsArr.append(path) # must be last cmdArr.append before subprocess.call, so that everything is on the command line # before constructing the shape.json file for "bridge" if topo=="bridge": shapeFilePrefix="shape_bridge" shapeFile=shapeFilePrefix+".json" - cmdArrForOutput=copy.deepcopy(cmdArr) + cmdArrForOutput=copy.deepcopy(argsArr) cmdArrForOutput.append("--output") cmdArrForOutput.append(str(nodeosLogPath / shapeFile)) cmdArrForOutput.append("--shape") cmdArrForOutput.append("line") s=" ".join(cmdArrForOutput) - if Utils.Debug: Utils.Print("cmd: %s" % (s)) - if 0 != subprocess.call(cmdArrForOutput): - Utils.Print("ERROR: Launcher failed to create shape file \"%s\"." % (shapeFile)) - return False + bridgeLauncher = cluster_generator(cmdArrForOutput) + bridgeLauncher.define_network() + bridgeLauncher.generate() Utils.Print(f"opening {topo} shape file: {nodeosLogPath / shapeFile}") f = open(nodeosLogPath / shapeFile, "r") @@ -473,45 +450,54 @@ def connectGroup(group, producerNodes, bridgeNodes) : f.write(json.dumps(shapeFileObject, indent=4, sort_keys=True)) f.close() - cmdArr.append("--shape") - cmdArr.append(shapeFile) + argsArr.append("--shape") + argsArr.append(shapeFile) else: - cmdArr.append("--shape") - cmdArr.append(topo) + argsArr.append("--shape") + argsArr.append(topo) if type(specificExtraNodeosArgs) is dict: for args in specificExtraNodeosArgs.values(): if "--plugin eosio::history_api_plugin" in args: - cmdArr.append("--is-nodeos-v2") + argsArr.append("--is-nodeos-v2") break - Cluster.__LauncherCmdArr = cmdArr.copy() - - s=" ".join([("'{0}'".format(element) if (' ' in element) else element) for element in cmdArr.copy()]) - if Utils.Debug: Utils.Print("cmd: %s" % (s)) - if 0 != subprocess.call(cmdArr): - Utils.Print("ERROR: Launcher failed to launch. 
failed cmd: %s" % (s)) - return False + Cluster.__LauncherCmdArr = argsArr.copy() + + launcher = cluster_generator(argsArr) + launcher.define_network() + launcher.generate() + self.nodes = [] + for instance in launcher.network.nodes.values(): + eosdcmd = launcher.construct_command_line(instance) + + nodeNum = instance.index + node = Node(self.host, self.port + nodeNum, nodeNum, Path(instance.data_dir_name), + Path(instance.config_dir_name), eosdcmd, unstarted=instance.dont_start, + launch_time=launcher.launch_time, walletMgr=self.walletMgr, nodeosVers=self.nodeosVers) + if nodeNum == Node.biosNodeId: + self.biosNode = node + else: + if node.popenProc: + self.nodes.append(node) + else: + self.unstartedNodes.append(node) + time.sleep(delay) - startedNodes=totalNodes-unstartedNodes + self.startedNodesCount = totalNodes - unstartedNodes + self.productionNodesCount = pnodes + self.totalNodesCount = totalNodes - nodes=self.discoverLocalNodes(startedNodes, timeout=Utils.systemWaitTimeout) - if nodes is None or startedNodes != len(nodes): + if self.nodes is None or self.startedNodesCount != len(self.nodes): Utils.Print("ERROR: Unable to validate %s instances, expected: %d, actual: %d" % - (Utils.EosServerName, startedNodes, len(nodes))) + (Utils.EosServerName, self.startedNodesCount, len(self.nodes))) return False - self.nodes=nodes - - if unstartedNodes > 0: - self.unstartedNodes=self.discoverUnstartedLocalNodes(unstartedNodes, totalNodes) - - biosNode=self.discoverBiosNode(timeout=Utils.systemWaitTimeout) - if not biosNode or not Utils.waitForBool(biosNode.checkPulse, Utils.systemWaitTimeout): + if not self.biosNode or not Utils.waitForBool(self.biosNode.checkPulse, Utils.systemWaitTimeout): Utils.Print("ERROR: Bios node doesn't appear to be running...") return False if onlyBios: - self.nodes=[biosNode] + self.nodes=[self.biosNode] # ensure cluster node are inter-connected by ensuring everyone has block 1 Utils.Print("Cluster viability smoke test. Validate every cluster node has block 1. ") @@ -521,17 +507,14 @@ def connectGroup(group, producerNodes, bridgeNodes) : if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): Utils.Print("Activate Preactivate Feature.") - biosNode.activatePreactivateFeature() + self.biosNode.activatePreactivateFeature() if dontBootstrap: Utils.Print("Skipping bootstrap.") - self.biosNode=biosNode return True Utils.Print("Bootstrap cluster.") - self.biosNode=self.bootstrap(biosNode, startedNodes, prodCount + sharedProducers, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract) - - if self.biosNode is None: + if not self.bootstrap(self.biosNode, self.startedNodesCount, prodCount + sharedProducers, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract): Utils.Print("ERROR: Bootstrap failed.") return False @@ -685,54 +668,6 @@ def getClientVersion(fullVersion=False): Utils.Print("ERROR: Exception during client version query. 
%s" % (msg)) raise - @staticmethod - def createAccountKeys(count): - accounts=[] - p = re.compile('Private key: (.+)\nPublic key: (.+)\n', re.MULTILINE) - for _ in range(0, count): - try: - cmd="%s create key --to-console" % (Utils.EosClientPath) - if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - keyStr=Utils.checkOutput(cmd.split()) - m=p.search(keyStr) - if m is None: - Utils.Print("ERROR: Owner key creation regex mismatch") - break - - ownerPrivate=m.group(1) - ownerPublic=m.group(2) - - cmd="%s create key --to-console" % (Utils.EosClientPath) - if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - keyStr=Utils.checkOutput(cmd.split()) - m=p.match(keyStr) - if m is None: - Utils.Print("ERROR: Active key creation regex mismatch") - break - - activePrivate=m.group(1) - activePublic=m.group(2) - - name=''.join(random.choice(string.ascii_lowercase) for _ in range(12)) - account=Account(name) - account.ownerPrivateKey=ownerPrivate - account.ownerPublicKey=ownerPublic - account.activePrivateKey=activePrivate - account.activePublicKey=activePublic - accounts.append(account) - if Utils.Debug: Utils.Print("name: %s, key(owner): ['%s', '%s], key(active): ['%s', '%s']" % (name, ownerPublic, ownerPrivate, activePublic, activePrivate)) - - except subprocess.CalledProcessError as ex: - msg=ex.stderr.decode("utf-8") - Utils.Print("ERROR: Exception during key creation. %s" % (msg)) - break - - if count != len(accounts): - Utils.Print("Account keys creation failed. Expected %d, actual: %d" % (count, len(accounts))) - return None - - return accounts - # create account keys and import into wallet. Wallet initialization will be user responsibility # also imports defproducera and defproducerb accounts def populateWallet(self, accountsCount, wallet, accountNames: list=None, createProducerAccounts: bool=True): @@ -748,7 +683,7 @@ def populateWallet(self, accountsCount, wallet, accountNames: list=None, createP accounts=None if accountsCount > 0: Utils.Print ("Create account keys.") - accounts = self.createAccountKeys(accountsCount) + accounts = createAccountKeys(accountsCount) if accounts is None: Utils.Print("Account keys creation failed.") return False @@ -799,14 +734,14 @@ def getAllNodes(self): nodes += self.getNodes() return nodes - def launchUnstarted(self, numToLaunch=1, cachePopen=False): + def launchUnstarted(self, numToLaunch=1): assert(isinstance(numToLaunch, int)) assert(numToLaunch>0) launchList=self.unstartedNodes[:numToLaunch] del self.unstartedNodes[:numToLaunch] for node in launchList: # the node number is indexed off of the started nodes list - node.launchUnstarted(cachePopen=cachePopen) + node.launchUnstarted() self.nodes.append(node) # Spread funds across accounts with transactions spread through cluster nodes. 
@@ -939,11 +874,13 @@ def validateAccounts(self, accounts, testSysAccounts=True): node.validateAccounts(myAccounts) - def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet=100, stakeCPU=100, buyRAM=10000, validationNodeIndex=0): + def createAccountAndVerify(self, account, creator, stakedDeposit=1000, stakeNet=100, stakeCPU=100, buyRAM=10000, validationNodeIndex=-1): """create account, verify account and return transaction id""" - assert(len(self.nodes) > validationNodeIndex) node=self.nodes[validationNodeIndex] - trans=node.createInitializeAccount(account, creator, stakedDeposit, waitForTransBlock=True, stakeNet=stakeNet, stakeCPU=stakeCPU, buyRAM=buyRAM, exitOnError=True) + waitViaRetry = self.totalNodesCount > self.productionNodesCount + trans=node.createInitializeAccount(account, creator, stakedDeposit, waitForTransBlock=waitViaRetry, stakeNet=stakeNet, stakeCPU=stakeCPU, buyRAM=buyRAM, exitOnError=True) + if not waitViaRetry: + node.waitForTransBlockIfNeeded(trans, True, exitOnError=True) assert(node.verifyAccount(account)) return trans @@ -976,25 +913,25 @@ def nodeNameToId(name): return int(m.group(1)) @staticmethod - def parseProducerKeys(configFile, nodeName): - """Parse node config file for producer keys. Returns dictionary. (Keys: account name; Values: dictionary objects (Keys: ["name", "node", "private","public"]; Values: account name, node id returned by nodeNameToId(nodeName), private key(string)and public key(string))).""" + def parseProducerKeys(startFile, nodeName): + """Parse node start file for producer keys. Returns dictionary. (Keys: account name; Values: dictionary objects (Keys: ["name", "node", "private","public"]; Values: account name, node id returned by nodeNameToId(nodeName), private key(string)and public key(string))).""" - configStr=None - with open(configFile, 'r') as f: - configStr=f.read() + startStr=None + with open(startFile, 'r') as f: + startStr=f.read() - pattern=r"^\s*signature-provider\s*=\s*(\w+)=KEY:(\w+)$" - m=re.search(pattern, configStr, re.MULTILINE) + pattern=r"\s*--signature-provider\s*(\w+)=KEY:(\w+)" + m=re.search(pattern, startStr) regMsg="None" if m is None else "NOT None" if m is None: - if Utils.Debug: Utils.Print("Failed to find producer keys") + if Utils.Debug: Utils.Print(f'No producer keys found in node {nodeName}') return None pubKey=m.group(1) privateKey=m.group(2) - pattern=r"^\s*producer-name\s*=\W*(\w+)\W*$" - matches=re.findall(pattern, configStr, re.MULTILINE) + pattern=r"\s*--producer-name\s*\W*(\w+)" + matches=re.findall(pattern, startStr) if matches is None: if Utils.Debug: Utils.Print("Failed to find producers.") return None @@ -1010,51 +947,50 @@ def parseProducerKeys(configFile, nodeName): @staticmethod def parseProducers(nodeNum): - """Parse node config file for producers.""" + """Parse node start file for producers.""" - configFile=Utils.getNodeConfigDir(nodeNum, "config.ini") - if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + startCmd=Utils.getNodeDataDir(nodeNum, "start.cmd") + if Utils.Debug: Utils.Print(f'Parsing file {startCmd}') configStr=None - with open(configFile, 'r') as f: + with open(startCmd, 'r') as f: configStr=f.read() - pattern=r"^\s*producer-name\s*=\W*(\w+)\W*$" - producerMatches=re.findall(pattern, configStr, re.MULTILINE) + pattern=r"\s*--producer-name\s*\W*(\w+)" + producerMatches=re.findall(pattern, configStr) if producerMatches is None: - if Utils.Debug: Utils.Print("Failed to find producers.") + if Utils.Debug: Utils.Print(f'No producers found in 
node_{nodeNum}.') return None - + if Utils.Debug: Utils.Print(f'Found producers {producerMatches}') return producerMatches @staticmethod def parseClusterKeys(totalNodes): - """Parse cluster config file. Updates producer keys data members.""" + """Parse cluster start files. Updates producer keys data members.""" - configFile=Utils.getNodeConfigDir("bios", "config.ini") - if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + startFile=Utils.getNodeDataDir("bios", "start.cmd") + if Utils.Debug: Utils.Print("Parsing file %s" % startFile) nodeName=Utils.nodeExtensionToName("bios") - producerKeys=Cluster.parseProducerKeys(configFile, nodeName) + producerKeys=Cluster.parseProducerKeys(startFile, nodeName) if producerKeys is None: - Utils.Print("ERROR: Failed to parse eosio private keys from cluster config files.") + Utils.Print("ERROR: Failed to parse eosio private keys from cluster start files.") return None for i in range(0, totalNodes): - configFile=Utils.getNodeConfigDir(i, "config.ini") - if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + startFile=Utils.getNodeDataDir(i, "start.cmd") + if Utils.Debug: Utils.Print("Parsing file %s" % startFile) nodeName=Utils.nodeExtensionToName(i) - keys=Cluster.parseProducerKeys(configFile, nodeName) + keys=Cluster.parseProducerKeys(startFile, nodeName) if keys is not None: producerKeys.update(keys) - keyMsg="None" if keys is None else len(keys) - + Utils.Print(f'Found {len(producerKeys)} producer keys') return producerKeys def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios=False, onlySetProds=False, loadSystemContract=True): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" - Utils.Print("Starting cluster bootstrap.") + Utils.Print(f'Starting cluster bootstrap of {prodCount} producers.') assert PFSetupPolicy.isValid(pfSetupPolicy) if totalProducers is None: totalProducers=totalNodes @@ -1062,15 +998,12 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli producerKeys=Cluster.parseClusterKeys(totalNodes) # should have totalNodes node plus bios node if producerKeys is None: - Utils.Print("ERROR: Failed to parse any producer keys from config files.") + Utils.Print("ERROR: Failed to parse any producer keys from start files.") return None elif len(producerKeys) < (totalProducers+1): - Utils.Print("ERROR: Failed to parse %d producer keys from cluster config files, only found %d." % (totalProducers+1,len(producerKeys))) + Utils.Print("ERROR: Failed to parse %d producer keys from cluster start files, only found %d." 
% (totalProducers+1,len(producerKeys))) return None - self.walletMgr.killall() - self.walletMgr.cleanup() - if not self.walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None @@ -1147,22 +1080,18 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli return None else: counts=dict.fromkeys(range(totalNodes), 0) #initialize node prods count to 0 - setProdsStr='{"schedule": [' - firstTime=True + setProdsStr='{"schedule": ' + prodStanzas=[] prodNames=[] - for name, keys in producerKeys.items(): + for name, keys in list(producerKeys.items())[:21]: if counts[keys["node"]] >= prodCount: + Utils.Print(f'Count for this node exceeded: {counts[keys["node"]]}') continue - if firstTime: - firstTime = False - else: - setProdsStr += ',' - - setProdsStr += ' { "producer_name": "%s", "block_signing_key": "%s" }' % (keys["name"], keys["public"]) + prodStanzas.append({ 'producer_name': keys['name'], 'block_signing_key': keys['public'] }) prodNames.append(keys["name"]) counts[keys["node"]] += 1 - - setProdsStr += ' ] }' + setProdsStr += json.dumps(prodStanzas) + setProdsStr += ' }' if Utils.Debug: Utils.Print("setprods: %s" % (setProdsStr)) Utils.Print("Setting producers: %s." % (", ".join(prodNames))) opts="--permission eosio@active" @@ -1247,14 +1176,9 @@ def createSystemAccount(accountName): return None Node.validateTransaction(trans[1]) - Utils.Print("Wait for issue action transaction to become finalized.") + Utils.Print("Wait for issue action transaction to appear in a block.") transId=Node.getTransId(trans[1]) - # biosNode.waitForTransactionInBlock(transId) - # guesstimating block finalization timeout. Two production rounds of 12 blocks per node, plus 60 seconds buffer - timeout = .5 * 12 * 2 * len(producerKeys) + 60 - if not biosNode.waitForTransFinalization(transId, timeout=timeout): - Utils.Print("ERROR: Failed to validate transaction %s got rolled into a finalized block on server port %d." % (transId, biosNode.port)) - return None + biosNode.waitForTransactionInBlock(transId) expectedAmount="1000000000.0000 {0}".format(CORE_SYMBOL) Utils.Print("Verify eosio issue, Expected: %s" % (expectedAmount)) @@ -1292,7 +1216,7 @@ def createSystemAccount(accountName): Node.validateTransaction(trans[1]) - Utils.Print("Wait for last transfer transaction to become finalized.") + Utils.Print("Wait for last transfer transaction to appear in a block.") transId=Node.getTransId(trans[1]) if not biosNode.waitForTransactionInBlock(transId): Utils.Print("ERROR: Failed to validate transaction %s got rolled into a block on server port %d." % (transId, biosNode.port)) @@ -1312,7 +1236,7 @@ def createSystemAccount(accountName): Utils.Print("Cluster bootstrap done.") - return biosNode + return True @staticmethod def pgrepEosServers(timeout=None): @@ -1332,61 +1256,6 @@ def myFunc(): return Utils.waitForObj(myFunc, timeout) - @staticmethod - def pgrepEosServerPattern(nodeInstance): - dataLocation=Utils.getNodeDataDir(nodeInstance) - return r"[\n]?(\d+) (.* --data-dir %s .*)\n" % (dataLocation) - - # Populates list of EosInstanceInfo objects, matched to actual running instances - def discoverLocalNodes(self, totalNodes, timeout=None): - nodes=[] - - psOut=Cluster.pgrepEosServers(timeout) - if psOut is None: - Utils.Print("ERROR: No nodes discovered.") - return nodes - - if len(psOut) < 6660: - psOutDisplay=psOut - else: - psOutDisplay=psOut[:6660]+"..." 
- if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay) - for i in range(0, totalNodes): - instance=self.discoverLocalNode(i, psOut, timeout) - if instance is None: - break - nodes.append(instance) - - if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) - return nodes - - # Populate a node matched to actual running instance - def discoverLocalNode(self, nodeNum, psOut=None, timeout=None): - if psOut is None: - psOut=Cluster.pgrepEosServers(timeout) - if psOut is None: - Utils.Print("ERROR: No nodes discovered.") - return None - pattern=Cluster.pgrepEosServerPattern(nodeNum) - m=re.search(pattern, psOut, re.MULTILINE) - if m is None: - Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) - return None - instance=Node(self.host, self.port + nodeNum, nodeNum, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, nodeosVers=self.nodeosVers) - if Utils.Debug: Utils.Print("Node>", instance) - return instance - - def discoverBiosNode(self, timeout=None): - psOut=Cluster.pgrepEosServers(timeout=timeout) - pattern=Cluster.pgrepEosServerPattern("bios") - Utils.Print("pattern={\n%s\n}, psOut=\n%s\n" % (pattern,psOut)) - m=re.search(pattern, psOut, re.MULTILINE) - if m is None: - Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) - return None - else: - return Node(Cluster.__BiosHost, Cluster.__BiosPort, "bios", pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, nodeosVers=self.nodeosVers) - # Kills a percentange of Eos instances starting from the tail and update eosInstanceInfos state def killSomeEosInstances(self, killCount, killSignalStr=Utils.SigKillTag): killSignal=signal.SIGKILL @@ -1406,14 +1275,14 @@ def killSomeEosInstances(self, killCount, killSignalStr=Utils.SigKillTag): time.sleep(1) # Give processes time to stand down return True - def relaunchEosInstances(self, cachePopen=False, nodeArgs="", waitForTerm=False): + def relaunchEosInstances(self, nodeArgs="", waitForTerm=False): chainArg=self.__chainSyncStrategy.arg + " " + nodeArgs newChain= False if self.__chainSyncStrategy.name in [Utils.SyncHardReplayTag, Utils.SyncNoneTag] else True for i in range(0, len(self.nodes)): node=self.nodes[i] - if node.killed and not node.relaunch(chainArg, newChain=newChain, cachePopen=cachePopen, waitForTerm=waitForTerm): + if node.killed and not node.relaunch(chainArg, newChain=newChain, waitForTerm=waitForTerm): return False return True @@ -1447,33 +1316,23 @@ def dumpErrorDetails(self): for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) - def killall(self, kill=True, silent=True, allInstances=False): - """Kill cluster nodeos instances. 
allInstances will kill all nodeos instances running on the system.""" - signalNum=9 if kill else 15 - cmd="%s -k %d --nogen -p 1 -n 1 --nodeos-log-path %s" % (f"{sys.executable} {str(self.launcherPath)}", signalNum, self.nodeosLogPath) - if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull): - if not silent: Utils.Print("Launcher failed to shut down eos cluster.") - - if allInstances: - # ocassionally the launcher cannot kill the eos server - cmd="pkill -9 %s" % (Utils.EosServerName) - if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull): - if not silent: Utils.Print("Failed to shut down eos cluster.") - - # another explicit nodes shutdown - for node in self.nodes: - try: - if node.pid is not None: - os.kill(node.pid, signal.SIGKILL) - except OSError as _: - pass + def shutdown(self): + """Shut down all nodeos instances launched by this Cluster.""" + if not self.keepRunning: + Utils.Print('Cluster shutting down.') + for node in self.nodes: + node.kill(signal.SIGTERM) + if len(self.nodes) and self.biosNode != self.nodes[0]: + self.biosNode.kill(signal.SIGTERM) + else: + Utils.Print('Cluster left running.') # Make sure to cleanup all trx generators that may have been started and still generating trxs if self.trxGenLauncher is not None: self.trxGenLauncher.killAll() + self.cleanup() + def bounce(self, nodes, silent=True): """Bounces nodeos instances as indicated by parameter nodes. nodes should take the form of a comma-separated list as accepted by the launcher --bounce command (e.g. '00' or '00,01')""" @@ -1507,6 +1366,8 @@ def waitForNextBlock(self, timeout=None): return node.waitForNextBlock(timeout) def cleanup(self): + if self.keepRunning or self.keepLogs or self.testFailed: + return for f in glob.glob(Utils.DataPath): shutil.rmtree(f, ignore_errors=True) for f in glob.glob(Utils.ConfigDir + "node_*"): @@ -1522,7 +1383,7 @@ def cleanup(self): os.remove(f) # Create accounts, if account does not already exist, and validates that the last transaction is received on root node - def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=0): + def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000, validationNodeIndex=-1): if self.accounts is None: return True transId=None @@ -1705,11 +1566,12 @@ def launchTrxGenerators(self, contractOwnerAcctName: str, acctNamesList: list, a self.preExistingFirstTrxFiles = glob.glob(f"{Utils.DataDir}/first_trx_*.txt") connectionPairList = [f"{self.host}:{self.getNodeP2pPort(nodeId)}"] - tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator, connectionPairList=connectionPairList, endpointApi="p2p") + tpsTrxGensConfig = TpsTrxGensConfig(targetTps=targetTps, tpsLimitPerGenerator=tpsLimitPerGenerator, connectionPairList=connectionPairList) self.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=contractOwnerAcctName, accts=','.join(map(str, acctNamesList)), privateKeys=','.join(map(str, acctPrivKeysList)), trxGenDurationSec=durationSec, logDir=Utils.DataDir, - abiFile=abiFile, actionsData=actionsData, actionsAuths=actionsAuths, tpsTrxGensConfig=tpsTrxGensConfig) + abiFile=abiFile, actionsData=actionsData, actionsAuths=actionsAuths, tpsTrxGensConfig=tpsTrxGensConfig, + endpointMode="p2p") Utils.Print("Launch txn generators and start generating/sending transactions") 
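Producer discovery in the parseProducerKeys/parseProducers hunks above switches from each node's config.ini to the start.cmd file that Node.py (below) now writes with the full nodeos argv, so the patterns lose their ini-style `key = value` anchors. A self-contained sketch of the new extraction against a representative command line (keys shortened for readability):

    import re

    # representative start.cmd contents
    startStr = ('nodeos --signature-provider EOS6MRyAj=KEY:5KQwrPbwdL6 '
                '--producer-name defproducera --producer-name defproducerb')

    m = re.search(r"\s*--signature-provider\s*(\w+)=KEY:(\w+)", startStr)
    pubKey, privateKey = m.group(1), m.group(2)
    producers = re.findall(r"\s*--producer-name\s*\W*(\w+)", startStr)
    print(pubKey, privateKey, producers)  # EOS6MRyAj 5KQwrPbwdL6 ['defproducera', 'defproducerb']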
self.trxGenLauncher.launch(waitToComplete=waitToComplete) @@ -1731,6 +1593,6 @@ def waitForTrxGeneratorsSpinup(self, nodeId: int, numGenerators: int, timeout: i firstTrxs.append(line.rstrip('\n')) Utils.Print(f"first transactions: {firstTrxs}") status = node.waitForTransactionsInBlock(firstTrxs) - if status is None: + if not status: Utils.Print('ERROR: Failed to spin up transaction generators: never received first transactions') return status diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index 16365daeea..888048d6dd 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -5,44 +5,67 @@ import os import re import json +import shlex import signal import sys +from pathlib import Path +from typing import List from datetime import datetime from datetime import timedelta from .core_symbol import CORE_SYMBOL from .queries import NodeosQueries, BlockType from .transactions import Transactions +from .accounts import Account from .testUtils import Utils -from .testUtils import Account from .testUtils import unhandledEnumType from .testUtils import ReturnType # pylint: disable=too-many-public-methods class Node(Transactions): + # Node number is used as an addend to determine the node listen ports. + # This value extends that pattern to all nodes, not just the numbered nodes. + biosNodeId = -100 # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-arguments - def __init__(self, host, port, nodeId, pid=None, cmd=None, walletMgr=None, nodeosVers=""): + def __init__(self, host, port, nodeId: int, data_dir: Path, config_dir: Path, cmd: List[str], unstarted=False, launch_time=None, walletMgr=None, nodeosVers=""): super().__init__(host, port, walletMgr) + assert isinstance(data_dir, Path), 'data_dir must be a Path instance' + assert isinstance(config_dir, Path), 'config_dir must be a Path instance' + assert isinstance(cmd, list), 'cmd must be a list' self.host=host self.port=port - self.pid=pid self.cmd=cmd - if nodeId != "bios": - assert isinstance(nodeId, int) - self.nodeId=nodeId - if Utils.Debug: Utils.Print("new Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) - self.killed=False # marks node as killed + if nodeId == Node.biosNodeId: + self.nodeId='bios' + self.name='node_bios' + else: + self.nodeId=nodeId + self.name=f'node_{str(nodeId).zfill(2)}' + if not unstarted: + self.popenProc=self.launchCmd(self.cmd, data_dir, launch_time) + self.pid=self.popenProc.pid + else: + self.popenProc=None + self.pid=None + if Utils.Debug: Utils.Print(f'unstarted node command: {" ".join(self.cmd)}') + start = data_dir / 'start.cmd' + with start.open('w') as f: + f.write(' '.join(cmd)) + self.killed=False self.infoValid=None self.lastRetrievedHeadBlockNum=None self.lastRetrievedLIB=None self.lastRetrievedHeadBlockProducer="" self.transCache={} self.missingTransaction=False - self.popenProc=None # initial process is started by launcher, this will only be set on relaunch self.lastTrackedTransactionId=None self.nodeosVers=nodeosVers + self.data_dir=data_dir + self.config_dir=config_dir + self.launch_time=launch_time + self.isProducer=False self.configureVersion() def configureVersion(self): @@ -52,17 +75,20 @@ def configureVersion(self): self.fetchBlock = lambda blockNum: self.processUrllibRequest("chain", "get_block", {"block_num_or_id":blockNum}, silentErrors=False, exitOnError=True) self.fetchKeyCommand = lambda: "[trx][trx][ref_block_num]" self.fetchRefBlock = lambda trans: trans["trx"]["trx"]["ref_block_num"] - 
self.cleosLimit = "" self.fetchHeadBlock = lambda node, headBlock: node.processUrllibRequest("chain", "get_block", {"block_num_or_id":headBlock}, silentErrors=False, exitOnError=True) + self.cleosLimit = "" else: self.fetchTransactionCommand = lambda: "get transaction_trace" self.fetchTransactionFromTrace = lambda trx: trx['id'] self.fetchBlock = lambda blockNum: self.processUrllibRequest("trace_api", "get_block", {"block_num":blockNum}, silentErrors=False, exitOnError=True) self.fetchKeyCommand = lambda: "[transaction][transaction_header][ref_block_num]" - self.fetchRefBlock = lambda trans: trans["transaction_header"]["ref_block_num"] - self.cleosLimit = "--time-limit 999" + self.fetchRefBlock = lambda trans: trans["block_num"] self.fetchHeadBlock = lambda node, headBlock: node.processUrllibRequest("chain", "get_block_info", {"block_num":headBlock}, silentErrors=False, exitOnError=True) + if 'v3.1' in self.nodeosVers: + self.cleosLimit = "" + else: + self.cleosLimit = "--time-limit 999" def __str__(self): return "Host: %s, Port:%d, NodeNum:%s, Pid:%s" % (self.host, self.port, self.nodeId, self.pid) @@ -226,6 +252,7 @@ def waitForProducer(self, producer, timeout=None, exitOnError=False): # default to the typical configuration of 21 producers, each producing 12 blocks in a row (every 1/2 second) timeout = 21 * 6; start=time.perf_counter() + Utils.Print(self.getInfo()) initialProducer=self.getInfo()["head_block_producer"] def isProducer(): return self.getInfo()["head_block_producer"] == producer; @@ -245,29 +272,30 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head def kill(self, killSignal): if Utils.Debug: Utils.Print("Killing node: %s" % (self.cmd)) - assert(self.pid is not None) try: if self.popenProc is not None: - self.popenProc.send_signal(killSignal) - self.popenProc.wait() + self.popenProc.send_signal(killSignal) + self.popenProc.wait() + elif self.pid is not None: + os.kill(self.pid, killSignal) + + # wait for kill validation + def myFunc(): + try: + os.kill(self.pid, 0) #check if process with pid is running + except OSError as _: + return True + return False + + if not Utils.waitForBool(myFunc): + Utils.Print("ERROR: Failed to validate node shutdown.") + return False else: - os.kill(self.pid, killSignal) + if Utils.Debug: Utils.Print(f"Called kill on node {self.nodeId} but it has already exited.") except OSError as ex: Utils.Print("ERROR: Failed to kill node (%s)." % (self.cmd), ex) return False - # wait for kill validation - def myFunc(): - try: - os.kill(self.pid, 0) #check if process with pid is running - except OSError as _: - return True - return False - - if not Utils.waitForBool(myFunc): - Utils.Print("ERROR: Failed to validate node shutdown.") - return False - # mark node as killed self.pid=None self.killed=True @@ -275,7 +303,7 @@ def myFunc(): def interruptAndVerifyExitStatus(self, timeout=60): if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd)) - assert self.popenProc is not None, f"node: '{self.cmd}' does not have a popenProc, this may be because it is only set after a relaunch." + assert self.popenProc is not None, f"node: '{self.cmd}' does not have a popenProc." 
self.popenProc.send_signal(signal.SIGINT) try: outs, _ = self.popenProc.communicate(timeout=timeout) @@ -290,69 +318,72 @@ def interruptAndVerifyExitStatus(self, timeout=60): def verifyAlive(self, silent=False): logStatus=not silent and Utils.Debug pid=self.pid - if logStatus: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd)) - if self.killed or self.pid is None: + if logStatus: Utils.Print(f'Checking if node id {self.nodeId} (pid={self.pid}) is alive (killed={self.killed}): {self.cmd}') + if self.killed or self.pid is None or self.popenProc is None: self.killed=True self.pid=None return False - try: - os.kill(self.pid, 0) - except ProcessLookupError as ex: - # mark node as killed + if self.popenProc.poll() is not None: self.pid=None self.killed=True - if logStatus: Utils.Print("Determined node(formerly pid=%s) is killed" % (pid)) + if logStatus: Utils.Print(f'Determined node id {self.nodeId} (formerly pid={pid}) is killed') return False - except PermissionError as ex: - if logStatus: Utils.Print("Determined node(formerly pid=%s) is alive" % (pid)) + else: + if logStatus: Utils.Print(f'Determined node id {self.nodeId} (pid={pid}) is alive') return True - if logStatus: Utils.Print("Determined node(pid=%s) is alive" % (self.pid)) - return True + def rmFromCmd(self, matchValue: str): + '''Removes all instances of matchValue from cmd array and succeeding value if it's an option value string.''' + if not self.cmd: + return + + while True: + try: + i = self.cmd.index(matchValue) + self.cmd.pop(i) + if len(self.cmd) > i: + if self.cmd[i][0] != '-': + self.cmd.pop(i) + except ValueError: + break # pylint: disable=too-many-locals # If nodeosPath is equal to None, it will use the existing nodeos path - def relaunch(self, chainArg=None, newChain=False, skipGenesis=True, timeout=Utils.systemWaitTimeout, addSwapFlags=None, cachePopen=False, nodeosPath=None, waitForTerm=False): + def relaunch(self, chainArg=None, newChain=False, skipGenesis=True, timeout=Utils.systemWaitTimeout, addSwapFlags=None, nodeosPath=None, waitForTerm=False): assert(self.pid is None) assert(self.killed) - if Utils.Debug: Utils.Print("Launching node process, Id: {}".format(self.nodeId)) + if Utils.Debug: Utils.Print(f"Launching node process, Id: {self.nodeId}") - cmdArr=[] - splittedCmd=self.cmd.split() - if nodeosPath: splittedCmd[0] = nodeosPath - myCmd=" ".join(splittedCmd) + cmdArr=self.cmd[:] + if nodeosPath: cmdArr[0] = nodeosPath toAddOrSwap=copy.deepcopy(addSwapFlags) if addSwapFlags is not None else {} if not newChain: - skip=False - swapValue=None - for i in splittedCmd: - Utils.Print("\"%s\"" % (i)) - if skip: - skip=False - continue - if skipGenesis and ("--genesis-json" == i or "--genesis-timestamp" == i): - skip=True - continue - - if swapValue is None: - cmdArr.append(i) - else: - cmdArr.append(swapValue) - swapValue=None - - if i in toAddOrSwap: - swapValue=toAddOrSwap[i] - del toAddOrSwap[i] + if skipGenesis: + try: + i = cmdArr.index('--genesis-json') + cmdArr.pop(i) + cmdArr.pop(i) + i = cmdArr.index('--genesis-timestamp') + cmdArr.pop(i) + cmdArr.pop(i) + except ValueError: + pass for k,v in toAddOrSwap.items(): - cmdArr.append(k) - cmdArr.append(v) - myCmd=" ".join(cmdArr) - - cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) - self.launchCmd(cmd, cachePopen) + try: + i = cmdArr.index(k) + cmdArr[i+1] = v + except ValueError: + cmdArr.append(k) + if v: + cmdArr.append(v) + + if chainArg: + cmdArr.extend(shlex.split(chainArg)) + 
self.popenProc=self.launchCmd(cmdArr, self.data_dir, launch_time=datetime.now().strftime('%Y_%m_%d_%H_%M_%S')) + self.pid=self.popenProc.pid def isNodeAlive(): """wait for node to be responsive.""" @@ -389,7 +420,7 @@ def didNodeExitGracefully(popen, timeout): self.pid=None return False - self.cmd=cmd + self.cmd=cmdArr self.killed=False return True @@ -401,26 +432,34 @@ def unstartedFile(nodeId): Utils.errorExit("Cannot find unstarted node since %s file does not exist" % startFile) return startFile - def launchUnstarted(self, cachePopen=False): + def launchUnstarted(self): Utils.Print("launchUnstarted cmd: %s" % (self.cmd)) - self.launchCmd(self.cmd, cachePopen) - - def launchCmd(self, cmd, cachePopen=False): - dataDir=Utils.getNodeDataDir(self.nodeId) - dt = datetime.now() - dateStr=Utils.getDateString(dt) - stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) - stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) - with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: - Utils.Print("cmd: %s" % (cmd)) - popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) - if cachePopen: - popen.outfile=sout - popen.errfile=serr - self.popenProc=popen - self.pid=popen.pid + self.popenProc = self.launchCmd(self.cmd, self.data_dir, self.launch_time) + + def launchCmd(self, cmd: List[str], data_dir: Path, launch_time: str): + dd = data_dir + out = dd / 'stdout.txt' + err_sl = dd / 'stderr.txt' + err = dd / Path(f'stderr.{launch_time}.txt') + pidf = dd / Path(f'{Utils.EosServerName}.pid') + + Utils.Print(f'spawning child: {" ".join(cmd)}') + dd.mkdir(parents=True, exist_ok=True) + with out.open('w') as sout, err.open('w') as serr: + popen = subprocess.Popen(cmd, stdout=sout, stderr=serr) + popen.outfile = sout + popen.errfile = serr + self.pid = popen.pid self.cmd = cmd - if Utils.Debug: Utils.Print("start Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + self.isProducer = '--producer-name' in self.cmd + with pidf.open('w') as pidout: + pidout.write(str(popen.pid)) + try: + err_sl.unlink() + except FileNotFoundError: + pass + err_sl.symlink_to(err.name) + return popen def trackCmdTransaction(self, trans, ignoreNonTrans=False, reportStatus=True): if trans is None: @@ -468,7 +507,7 @@ def scheduleProtocolFeatureActivations(self, featureDigests=[]): self.processUrllibRequest("producer", "schedule_protocol_feature_activations", param) def modifyBuiltinPFSubjRestrictions(self, featureCodename, subjectiveRestriction={}): - jsonPath = os.path.join(Utils.getNodeConfigDir(self.nodeId), + jsonPath = os.path.join(self.config_dir, "protocol_features", "BUILTIN-{}.json".format(featureCodename)) protocolFeatureJson = [] @@ -489,14 +528,6 @@ def scheduleSnapshotAt(self, sbn): param = { "start_block_num": sbn, "end_block_num": sbn } return self.processUrllibRequest("producer", "schedule_snapshot", param) - # kill all existing nodeos in case lingering from previous test - @staticmethod - def killAllNodeos(): - # kill the eos server - cmd="pkill -9 %s" % (Utils.EosServerName) - ret_code = subprocess.call(cmd.split(), stdout=Utils.FNull) - Utils.Print("cmd: %s, ret:%d" % (cmd, ret_code)) - @staticmethod def findStderrFiles(path): files=[] @@ -509,6 +540,16 @@ def findStderrFiles(path): files.sort() return files + def findInLog(self, searchStr): + dataDir=Utils.getNodeDataDir(self.nodeId) + files=Node.findStderrFiles(dataDir) + for file in files: + with open(file, 'r') as f: + for line in f: + if searchStr in line: + return True + return False + def analyzeProduction(self, 
specificBlockNum=None, thresholdMs=500): dataDir=Utils.getNodeDataDir(self.nodeId) files=Node.findStderrFiles(dataDir) diff --git a/tests/TestHarness/TestHelper.py b/tests/TestHarness/TestHelper.py index ad1184adfc..6f3a1244ae 100644 --- a/tests/TestHarness/TestHelper.py +++ b/tests/TestHarness/TestHelper.py @@ -19,15 +19,15 @@ def __init__(self, flag, help, type=None, default=None, choices=None, action=Non self.choices=choices self.action=action - def add(self, flag, type, help, default, choices=None): - arg=self.AppArg(flag, help, type=type, default=default, choices=choices) + def add(self, flag, type, help, default=None, action=None, choices=None): + arg=self.AppArg(flag, help, action=action, type=type, default=default, choices=choices) self.args.append(arg) - def add_bool(self, flag, help, action='store_true'): arg=self.AppArg(flag, help, action=action) self.args.append(arg) + # pylint: disable=too-many-instance-attributes class TestHelper(object): LOCAL_HOST="localhost" @@ -85,7 +85,7 @@ def createArgumentParser(includeArgs, applicationSpecificArgs=AppArgs(), suppres thGrp.add_argument("--wallet-port", type=int, help=argparse.SUPPRESS if suppressHelp else "%s port" % Utils.EosWalletName, default=TestHelper.DEFAULT_WALLET_PORT) if "--prod-count" in includeArgs: - thGrp.add_argument("-c", "--prod-count", type=int, help=argparse.SUPPRESS if suppressHelp else "Per node producer count", default=1) + thGrp.add_argument("-c", "--prod-count", type=int, help=argparse.SUPPRESS if suppressHelp else "Per node producer count", default=21) if "--defproducera_prvt_key" in includeArgs: thGrp.add_argument("--defproducera_prvt_key", type=str, help=argparse.SUPPRESS if suppressHelp else "defproducera private key.") if "--defproducerb_prvt_key" in includeArgs: @@ -106,8 +106,6 @@ def createArgumentParser(includeArgs, applicationSpecificArgs=AppArgs(), suppres thGrp.add_argument("--leave-running", help=argparse.SUPPRESS if suppressHelp else "Leave cluster running after test finishes", action='store_true') if "--only-bios" in includeArgs: thGrp.add_argument("--only-bios", help=argparse.SUPPRESS if suppressHelp else "Limit testing to bios node.", action='store_true') - if "--clean-run" in includeArgs: - thGrp.add_argument("--clean-run", help=argparse.SUPPRESS if suppressHelp else "Kill all nodeos and keosd instances", action='store_true') if "--sanity-test" in includeArgs: thGrp.add_argument("--sanity-test", help=argparse.SUPPRESS if suppressHelp else "Validates nodeos and keosd are in path and can be started up.", action='store_true') if "--alternate-version-labels-file" in includeArgs: @@ -123,7 +121,7 @@ def createArgumentParser(includeArgs, applicationSpecificArgs=AppArgs(), suppres appArgsGrp = thParser.add_argument_group(title=None if suppressHelp else appArgsGrpTitle, description=None if suppressHelp else appArgsGrpdescription) for arg in applicationSpecificArgs.args: if arg.type is not None: - appArgsGrp.add_argument(arg.flag, type=arg.type, help=argparse.SUPPRESS if suppressHelp else arg.help, choices=arg.choices, default=arg.default) + appArgsGrp.add_argument(arg.flag, action=arg.action, type=arg.type, help=argparse.SUPPRESS if suppressHelp else arg.help, choices=arg.choices, default=arg.default) else: appArgsGrp.add_argument(arg.flag, help=argparse.SUPPRESS if suppressHelp else arg.help, action=arg.action) @@ -149,17 +147,13 @@ def printSystemInfo(prefix): Utils.Print("OS name: %s" % (platform.platform())) @staticmethod - # pylint: disable=too-many-arguments - def shutdown(cluster, walletMgr, 
testSuccessful=True, killEosInstances=True, killWallet=True, keepLogs=False, cleanRun=True, dumpErrorDetails=False): + def shutdown(cluster, walletMgr, testSuccessful=True, dumpErrorDetails=False): """Cluster and WalletMgr shutdown and cleanup.""" assert(cluster) assert(isinstance(cluster, Cluster)) if walletMgr: assert(isinstance(walletMgr, WalletMgr)) assert(isinstance(testSuccessful, bool)) - assert(isinstance(killEosInstances, bool)) - assert(isinstance(killWallet, bool)) - assert(isinstance(cleanRun, bool)) assert(isinstance(dumpErrorDetails, bool)) Utils.ShuttingDown=True @@ -187,17 +181,6 @@ def reportProductionAnalysis(thresholdMs): # for now report these to know how many blocks we are missing production windows for reportProductionAnalysis(thresholdMs=200) - if killEosInstances: - Utils.Print("Shut down the cluster.") - cluster.killall(allInstances=cleanRun, kill=testSuccessful) - if testSuccessful and not keepLogs: - Utils.Print("Cleanup cluster data.") - cluster.cleanup() - - if walletMgr and killWallet: - Utils.Print("Shut down the wallet.") - walletMgr.killall(allInstances=cleanRun) - if testSuccessful and not keepLogs: - Utils.Print("Cleanup wallet data.") - walletMgr.cleanup() - + cluster.testFailed = not testSuccessful + if walletMgr: + walletMgr.testFailed = not testSuccessful diff --git a/tests/TestHarness/WalletMgr.py b/tests/TestHarness/WalletMgr.py index 06ba3b9061..3fc05c2f31 100644 --- a/tests/TestHarness/WalletMgr.py +++ b/tests/TestHarness/WalletMgr.py @@ -1,3 +1,4 @@ +import atexit import subprocess import time import shutil @@ -12,21 +13,26 @@ Wallet=namedtuple("Wallet", "name password host port") # pylint: disable=too-many-instance-attributes class WalletMgr(object): - __walletDataDir=f"{Utils.TestLogRoot}/test_wallet_0" + __walletDataDir=f"{Utils.DataPath}/test_wallet_0" __walletLogOutFile=f"{__walletDataDir}/test_keosd_out.log" __walletLogErrFile=f"{__walletDataDir}/test_keosd_err.log" __MaxPort=9999 # pylint: disable=too-many-arguments # walletd [True|False] True=Launch wallet(keosd) process; False=Manage launch process externally. 
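With those parameters gone, TestHelper.shutdown() only records the test outcome; actual teardown is deferred to the cluster and wallet manager, which (as the WalletMgr changes below show) register atexit hooks and keep their logs when the test failed. A compressed sketch of that lifecycle contract, using a toy class and assuming SIGTERM stops the service cleanly:

```python
import atexit
import shutil
import signal
import subprocess

class ManagedProcess:
    """Toy model of the keepRunning/keepLogs/testFailed contract."""
    def __init__(self, cmd, data_dir, keepRunning=False, keepLogs=False):
        self.data_dir = data_dir
        self.keepRunning = keepRunning
        self.keepLogs = keepLogs or keepRunning   # keepRunning implies keepLogs
        self.testFailed = False
        self.proc = subprocess.Popen(cmd)
        atexit.register(self.shutdown)            # runs even if the test raises

    def shutdown(self):
        if self.keepRunning:
            return                                # leave it up for a postmortem
        if self.proc.poll() is None:
            self.proc.send_signal(signal.SIGTERM)
            self.proc.wait()
        if not (self.keepLogs or self.testFailed):
            shutil.rmtree(self.data_dir, ignore_errors=True)
```

A test then only needs to flag the outcome in its finally block, which is exactly what the new shutdown() does via cluster.testFailed and walletMgr.testFailed.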
- def __init__(self, walletd, nodeosPort=8888, nodeosHost="localhost", port=9899, host="localhost"): + def __init__(self, walletd, nodeosPort=8888, nodeosHost="localhost", port=9899, host="localhost", keepRunning=False, keepLogs=False): + atexit.register(self.shutdown) self.walletd=walletd self.nodeosPort=nodeosPort self.nodeosHost=nodeosHost self.port=port self.host=host + self.keepRunning=keepRunning + self.keepLogs=keepLogs or keepRunning + self.testFailed=False self.wallets={} - self.__walletPid=None + self.popenProc=None + self.walletPid=None def getWalletEndpointArgs(self): if not self.walletd or not self.isLaunched(): @@ -38,7 +44,7 @@ def getArgs(self): return " --url http://%s:%d%s %s" % (self.nodeosHost, self.nodeosPort, self.getWalletEndpointArgs(), Utils.MiscEosClientArgs) def isLaunched(self): - return self.__walletPid is not None + return self.walletPid is not None def isLocal(self): return self.host=="localhost" or self.host=="127.0.0.1" @@ -85,10 +91,10 @@ def launch(self): if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) if not os.path.isdir(WalletMgr.__walletDataDir): if Utils.Debug: Utils.Print(f"Creating dir {WalletMgr.__walletDataDir} in dir: {os.getcwd()}") - os.mkdir(WalletMgr.__walletDataDir) + os.makedirs(WalletMgr.__walletDataDir) with open(WalletMgr.__walletLogOutFile, 'w') as sout, open(WalletMgr.__walletLogErrFile, 'w') as serr: - popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) - self.__walletPid=popen.pid + self.popenProc=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) + self.walletPid=self.popenProc.pid # Give keosd time to warm up time.sleep(2) @@ -278,7 +284,7 @@ def getKeys(self, wallet): def dumpErrorDetails(self): Utils.Print("=================================================================") - if self.__walletPid is not None: + if self.walletPid is not None: Utils.Print("Contents of %s:" % (WalletMgr.__walletLogOutFile)) Utils.Print("=================================================================") with open(WalletMgr.__walletLogOutFile, "r") as f: @@ -288,19 +294,21 @@ def dumpErrorDetails(self): with open(WalletMgr.__walletLogErrFile, "r") as f: shutil.copyfileobj(f, sys.stdout) - def killall(self, allInstances=False): - """Kill keos instances. 
allInstances will kill all keos instances running on the system.""" - if self.__walletPid: - Utils.Print("Killing wallet manager process %d" % (self.__walletPid)) - os.kill(self.__walletPid, signal.SIGKILL) - - if allInstances: - cmd="pkill -9 %s" % (Utils.EosWalletName) - if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - subprocess.call(cmd.split()) - - - @staticmethod - def cleanup(): + def shutdown(self): + '''Shutdown the managed keosd instance unless keepRunning was set.''' + if self.keepRunning: + return + if self.popenProc: + Utils.Print(f"Shutting down wallet manager process {self.walletPid}") + self.popenProc.send_signal(signal.SIGTERM) + self.popenProc.wait() + elif self.walletPid: + Utils.Print("Killing wallet manager process %d" % (self.walletPid)) + os.kill(self.walletPid, signal.SIGKILL) + self.cleanup() + + def cleanup(self): + if self.keepLogs or self.keepRunning or self.testFailed: + return if os.path.isdir(WalletMgr.__walletDataDir) and os.path.exists(WalletMgr.__walletDataDir): shutil.rmtree(WalletMgr.__walletDataDir) diff --git a/tests/TestHarness/__init__.py b/tests/TestHarness/__init__.py index ed13cba533..0341b64a05 100644 --- a/tests/TestHarness/__init__.py +++ b/tests/TestHarness/__init__.py @@ -1,10 +1,11 @@ -__all__ = ['Node', 'Cluster', 'WalletMgr', 'logging', 'depresolver', 'testUtils', 'TestHelper', 'queries', 'transactions', 'launch_transaction_generators', 'TransactionGeneratorsLauncher', 'TpsTrxGensConfig', 'core_symbol'] +__all__ = ['Node', 'Cluster', 'WalletMgr', 'launcher', 'logging', 'depresolver', 'testUtils', 'TestHelper', 'queries', 'transactions', 'accounts', 'launch_transaction_generators', 'TransactionGeneratorsLauncher', 'TpsTrxGensConfig', 'core_symbol'] from .Cluster import Cluster from .Node import Node from .WalletMgr import WalletMgr +from .launcher import testnetDefinition, nodeDefinition from .logging import fc_log_level -from .testUtils import Account +from .accounts import Account, createAccountKeys from .testUtils import Utils from .Node import ReturnType from .TestHelper import TestHelper diff --git a/tests/TestHarness/accounts.py b/tests/TestHarness/accounts.py new file mode 100644 index 0000000000..e81932e40e --- /dev/null +++ b/tests/TestHarness/accounts.py @@ -0,0 +1,105 @@ +import random +import re +import string +import subprocess +from typing import List + +from .testUtils import Utils + +# Class for generating distinct names for many accounts +class NamedAccounts: + + def __init__(self, cluster, numAccounts): + Utils.Print("NamedAccounts %d" % (numAccounts)) + self.numAccounts=numAccounts + self.accounts=createAccountKeys(numAccounts) + if self.accounts is None: + Utils.errorExit("FAILURE - create keys") + accountNum = 0 + for account in self.accounts: + Utils.Print("NamedAccounts Name for %d" % (accountNum)) + account.name=self.setName(accountNum) + accountNum+=1 + + def setName(self, num): + retStr="test" + digits=[] + maxDigitVal=5 + maxDigits=8 + temp=num + while len(digits) < maxDigits: + digit=(num % maxDigitVal)+1 + num=int(num/maxDigitVal) + digits.append(digit) + + digits.reverse() + retStr += "".join(map(str, digits)) + + Utils.Print("NamedAccounts Name for %d is %s" % (temp, retStr)) + return retStr + +########################################################################################### +class Account(object): + # pylint: disable=too-few-public-methods + + def __init__(self, name): + self.name=name + + self.ownerPrivateKey=None + self.ownerPublicKey=None + self.activePrivateKey=None + self.activePublicKey=None + 
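NamedAccounts.setName() above encodes the account number as eight base-5 digits over the characters '1' through '5' (the only digits legal in on-chain names, alongside a-z), least-significant digit first and then reversed. A standalone mirror of the scheme:

```python
def account_name(num: int) -> str:
    """Mirror of NamedAccounts.setName: eight base-5 digits rendered
    with the characters '1'-'5', most-significant digit first."""
    digits = []
    for _ in range(8):
        digits.append((num % 5) + 1)
        num //= 5
    return 'test' + ''.join(str(d) for d in reversed(digits))

assert account_name(0) == 'test11111111'
assert account_name(1) == 'test11111112'
assert account_name(5) == 'test11111121'
```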
+ + def __str__(self): return "Name: %s" % (self.name) + + def __repr__(self): return "Name: %s" % (self.name) + +def createAccountKeys(count: int) -> List[Account]: + accounts=[] + p = re.compile('Private key: (.+)\nPublic key: (.+)\n', re.MULTILINE) + for _ in range(0, count): + try: + cmd="%s create key --to-console" % (Utils.EosClientPath) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + keyStr=Utils.checkOutput(cmd.split()) + m=p.search(keyStr) + if m is None: + Utils.Print("ERROR: Owner key creation regex mismatch") + break + + ownerPrivate=m.group(1) + ownerPublic=m.group(2) + + cmd="%s create key --to-console" % (Utils.EosClientPath) + if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + keyStr=Utils.checkOutput(cmd.split()) + m=p.match(keyStr) + if m is None: + Utils.Print("ERROR: Active key creation regex mismatch") + break + + activePrivate=m.group(1) + activePublic=m.group(2) + + name=''.join(random.choice(string.ascii_lowercase) for _ in range(12)) + account=Account(name) + account.ownerPrivateKey=ownerPrivate + account.ownerPublicKey=ownerPublic + account.activePrivateKey=activePrivate + account.activePublicKey=activePublic + accounts.append(account) + if Utils.Debug: Utils.Print("name: %s, key(owner): ['%s', '%s'], key(active): ['%s', '%s']" % (name, ownerPublic, ownerPrivate, activePublic, activePrivate)) + + except subprocess.CalledProcessError as ex: + msg=ex.stderr.decode("utf-8") + Utils.Print("ERROR: Exception during key creation. %s" % (msg)) + break + + if count != len(accounts): + Utils.Print("Account keys creation failed. Expected %d, actual: %d" % (count, len(accounts))) + return None + + return accounts diff --git a/tests/TestHarness/launch_transaction_generators.py b/tests/TestHarness/launch_transaction_generators.py index 3f10e35f92..353e361333 100644 --- a/tests/TestHarness/launch_transaction_generators.py +++ b/tests/TestHarness/launch_transaction_generators.py @@ -16,7 +16,7 @@ class TpsTrxGensConfig: - def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList: list, endpointApi: str): + def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList: list): self.targetTps: int = targetTps self.tpsLimitPerGenerator: int = tpsLimitPerGenerator self.connectionPairList = connectionPairList @@ -27,7 +27,6 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList self.modTps = self.targetTps % self.numGenerators self.cleanlyDivisible = self.modTps == 0 self.incrementPoint = self.numGenerators + 1 - self.modTps - self.endpointApi = endpointApi self.targetTpsPerGenList = [] curTps = self.initialTpsPerGenerator @@ -39,7 +38,7 @@ def __init__(self, targetTps: int, tpsLimitPerGenerator: int, connectionPairList class TransactionGeneratorsLauncher: def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAccount: str, accts: str, privateKeys: str, trxGenDurationSec: int, logDir: str, - abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig): + abiFile: Path, actionsData, actionsAuths, tpsTrxGensConfig: TpsTrxGensConfig, endpointMode: str, apiEndpoint: str=None): self.chainId = chainId self.lastIrreversibleBlockId = lastIrreversibleBlockId self.contractOwnerAccount = contractOwnerAccount @@ -51,6 +50,8 @@ def __init__(self, chainId: int, lastIrreversibleBlockId: int, contractOwnerAcco self.abiFile = abiFile self.actionsData = actionsData self.actionsAuths = actionsAuths + self.endpointMode = endpointMode + self.apiEndpoint = apiEndpoint def launch(self,
waitToComplete=True): self.subprocess_ret_codes = [] @@ -68,13 +69,16 @@ def launch(self, waitToComplete=True): '--trx-gen-duration', f'{self.trxGenDurationSec}', '--target-tps', f'{targetTps}', '--log-dir', f'{self.logDir}', - '--peer-endpoint-type', f'{self.tpsTrxGensConfig.endpointApi}', + '--peer-endpoint-type', f'{self.endpointMode}', '--peer-endpoint', f'{connectionPair[0]}', '--port', f'{connectionPair[1]}'] if self.abiFile is not None and self.actionsData is not None and self.actionsAuths is not None: popenStringList.extend(['--abi-file', f'{self.abiFile}', '--actions-data', f'{self.actionsData}', '--actions-auths', f'{self.actionsAuths}']) + if self.apiEndpoint is not None: + popenStringList.extend(['--api-endpoint', f'{self.apiEndpoint}']) + if Utils.Debug: Print(f"Running trx_generator: {' '.join(popenStringList)}") self.subprocess_ret_codes.append(subprocess.Popen(popenStringList)) @@ -106,10 +110,13 @@ def parseArgs(): parser.add_argument("actions_data", type=str, help="The json actions data file or json actions data description string to use") parser.add_argument("actions_auths", type=str, help="The json actions auth file or json actions auths description string to use, containing authAcctName to activePrivateKey pairs.") parser.add_argument("connection_pair_list", type=str, help="Comma separated list of endpoint:port combinations to send transactions to", default="localhost:9876") - parser.add_argument("endpoint_api", type=str, help="Endpoint API mode (\"p2p\", \"http\"). \ In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \ In \"http\" mode transactions will be directed to the http endpoint on an api node.", choices=["p2p", "http"], default="p2p") + parser.add_argument("endpoint_mode", type=str, help="Endpoint mode (\"p2p\", \"http\"). \ In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. \ In \"http\" mode transactions will be directed to the http endpoint on an api node.", choices=["p2p", "http"], default="p2p") + parser.add_argument("api_endpoint", type=str, help="The api endpoint to use to submit transactions.
(Only used with http api nodes currently as p2p transactions are streamed)", + default="/v1/chain/send_transaction2") + args = parser.parse_args() return args @@ -123,7 +130,8 @@ def main(): privateKeys=args.priv_keys, trxGenDurationSec=args.trx_gen_duration, logDir=args.log_dir, abiFile=args.abi_file, actionsData=args.actions_data, actionsAuths=args.actions_auths, tpsTrxGensConfig=TpsTrxGensConfig(targetTps=args.target_tps, tpsLimitPerGenerator=args.tps_limit_per_generator, - connectionPairList=connectionPairList, endpointApi=args.endpoint_api)) + connectionPairList=connectionPairList), + endpointMode=args.endpoint_mode, apiEndpoint=args.api_endpoint) exit_codes = trxGenLauncher.launch() diff --git a/tests/launcher.py b/tests/TestHarness/launcher.py old mode 100755 new mode 100644 similarity index 75% rename from tests/launcher.py rename to tests/TestHarness/launcher.py index 0d9cd9e66d..9fca7c85c4 --- a/tests/launcher.py +++ b/tests/TestHarness/launcher.py @@ -1,29 +1,18 @@ -#!/usr/bin/env python3 - import argparse import datetime from dataclasses import InitVar, dataclass, field, is_dataclass, asdict from enum import Enum -import errno -import glob import json -from pathlib import Path -import os import math -import platform +from pathlib import Path import shlex -import shutil -import select -import signal import string import subprocess -import sys -import time from typing import ClassVar, Dict, List -from TestHarness import Cluster -from TestHarness import Utils -from TestHarness import fc_log_level +from .testUtils import Utils +from .logging import fc_log_level +from .accounts import createAccountKeys block_dir = 'blocks' @@ -50,7 +39,7 @@ class nodeDefinition: base_dir: InitVar[str] cfg_name: InitVar[str] data_name: InitVar[str] - keys: List[str] = field(default_factory=list) + keys: List[KeyStrings] = field(default_factory=list) peers: List[str] = field(default_factory=list) producers: List[str] = field(default_factory=list) dont_start: bool = field(init=False, default=False) @@ -132,6 +121,11 @@ def mk_host_dot_label(self): class testnetDefinition: name: str nodes: Dict[str, nodeDefinition] = field(init=False, default_factory=dict) + def __post_init__(self): + nodeDefinition.p2p_count = 0 + nodeDefinition.http_count = 0 + nodeDefinition.p2p_port_generator = None + nodeDefinition.http_port_generator = None def producer_name(producer_number: int, shared_producer: bool = False): '''For first 26 return "defproducera" ... "defproducerz". @@ -147,17 +141,13 @@ def alpha_str_base(number: int, base: str): else: return ('shr' if shared_producer else 'def') + 'producer' + string.ascii_lowercase[producer_number] -class launcher(object): +class cluster_generator: def __init__(self, args): - self.args = self.parseArgs(args) - self.network = testnetDefinition(self.args.network_name) - self.aliases: List[str] = [] - self.next_node = 0 - - self.launch_time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') - - self.define_network() - self.generate() + self.args = self.parseArgs(args) + self.next_node = 0 + self.network = testnetDefinition(self.args.network_name) + self.aliases: List[str] = [] + self.launch_time = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') def parseArgs(self, args): '''Configure argument parser and use it on the passed in list of strings. @@ -174,7 +164,7 @@ def comma_separated(string): parser.add_argument('-i', '--timestamp', help='set the timestamp for the first block. 
Use "now" to indicate the current time') parser.add_argument('-l', '--launch', choices=['all', 'none', 'local'], help='select a subset of nodes to launch. If not set, the default is to launch all unless an output file is named, in which case none are started.', default='all') parser.add_argument('-o', '--output', help='save a copy of the generated topology in this file and exit without launching', dest='topology_filename') - parser.add_argument('-k', '--kill', type=int, help='kill a network as specified in arguments with given signal') + parser.add_argument('-k', '--kill', help='retrieve the list of previously started process ids and issue a kill to each') parser.add_argument('--down', type=comma_separated, help='comma-separated list of node numbers that will be shut down', default=[]) parser.add_argument('--bounce', type=comma_separated, help='comma-separated list of node numbers that will be restarted', default=[]) parser.add_argument('--roll', type=comma_separated, help='comma-separated list of host names where the nodes will be rolled to a new version') @@ -210,9 +200,8 @@ def comma_separated(string): cfg.add_argument('--enable-gelf-logging', action='store_true', help='enable gelf logging appender in logging configuration file', default=False) cfg.add_argument('--gelf-endpoint', help='hostname:port or ip:port of GELF endpoint', default='128.0.0.1:12201') cfg.add_argument('--template', help='the startup script template', default='testnet.template') - cfg.add_argument('--max-block-cpu-usage', type=int, help='the "max-block-cpu-usage" value to use in the genesis.json file', default=200000) - cfg.add_argument('--max-transaction-cpu-usage', type=int, help='the "max-transaction-cpu-usage" value to use in the genesis.json file', default=150000) - cfg.add_argument('--nodeos-log-path', type=Path, help='path to nodeos log directory') + cfg.add_argument('--max-block-cpu-usage', type=int, help='the "max-block-cpu-usage" value to use in the genesis.json file', default=None) + cfg.add_argument('--max-transaction-cpu-usage', type=int, help='the "max-transaction-cpu-usage" value to use in the genesis.json file', default=None) cfg.add_argument('--logging-level', type=fc_log_level, help='Provide the "level" value to use in the logging.json file') cfg.add_argument('--logging-level-map', type=json.loads, help='JSON string of a logging level dictionary to use in the logging.json file for specific nodes, matching based on node number. 
Ex: {"bios":"off","00":"info"}') cfg.add_argument('--is-nodeos-v2', action='store_true', help='Toggles old nodeos compatibility', default=False) @@ -238,7 +227,7 @@ def comma_separated(string): def assign_name(self, is_bios): if is_bios: - return -1, 'bios', 'node_bios' + return -100, 'bios', 'node_bios' else: index = self.next_node indexStr = str(self.next_node) @@ -249,7 +238,7 @@ def define_network(self): if self.args.per_host == 0: for i in range(self.args.total_nodes): index, node_name, cfg_name = self.assign_name(i == 0) - node = nodeDefinition(index, node_name, cfg_name, self.args.base_dir, self.args.config_dir, self.args.nodeos_log_path) + node = nodeDefinition(index, node_name, cfg_name, self.args.base_dir, self.args.config_dir, self.args.data_dir) node.set_host(i == 0) self.aliases.append(node.name) self.network.nodes[node.name] = node @@ -261,7 +250,7 @@ def define_network(self): for i in range(self.args.total_nodes, 0, -1): do_bios = False index, node_name, cfg_name = self.assign_name(i == 0) - lhost = nodeDefinition(index, node_name, cfg_name, self.args.base_dir, self.args.config_dir, self.args.nodeos_log_path) + lhost = nodeDefinition(index, node_name, cfg_name, self.args.base_dir, self.args.config_dir, self.args.data_dir) lhost.set_host(i == 0) if ph_count == 0: if host_ndx < num_prod_addr: @@ -294,7 +283,7 @@ def bind_nodes(self): i = 0 producer_number = 0 to_not_start_node = self.args.total_nodes - self.args.unstarted_nodes - 1 - accounts = Cluster.createAccountKeys(len(self.network.nodes.values())) + accounts = createAccountKeys(len(self.network.nodes.values())) for account, node in zip(accounts, self.network.nodes.values()): is_bios = node.name == 'bios' if is_bios: @@ -320,7 +309,6 @@ def bind_nodes(self): if not is_bios: i += 1 - def generate(self): { 'ring': self.make_line, @@ -333,7 +321,6 @@ def generate(self): genesis = self.init_genesis() for node_name, node in self.network.nodes.items(): node.config_dir_name.mkdir(parents=True, exist_ok=True) - self.write_config_file(node) self.write_logging_config_file(node) self.write_genesis_file(node, genesis) node.data_dir_name.mkdir(parents=True, exist_ok=True) @@ -344,35 +331,6 @@ def generate(self): with open(self.args.topology_filename, 'w') as topo: json.dump(self.network, topo, cls=EnhancedEncoder, indent=2, separators=[', ', ': ']) - def write_config_file(self, node): - with open(node.config_dir_name / 'config.ini', 'w') as cfg: - is_bios = node.name == 'bios' - peers = '\n'.join([f'p2p-peer-address = {self.network.nodes[p].p2p_endpoint}' for p in node.peers]) - if len(node.producers) > 0: - producer_keys = f'signature-provider = {node.keys[0].pubkey}=KEY:{node.keys[0].privkey}\n' - producer_names = '\n'.join([f'producer-name = {p}' for p in node.producers]) - producer_plugin = 'plugin = eosio::producer_plugin\n' - else: - producer_keys = '' - producer_names = '' - producer_plugin = '' - config = \ -f'''blocks-dir = {block_dir} -http-server-address = {node.host_name}:{node.http_port} -http-validate-host = false -p2p-listen-endpoint = {node.listen_addr}:{node.p2p_port} -p2p-server-address = {node.p2p_endpoint} -{"enable-stale-production = true" if is_bios else ""} -{"p2p-peer-address = %s" % self.network.nodes['bios'].p2p_endpoint if not is_bios else ""} -{peers} -{producer_keys} -{producer_names} -{producer_plugin} -plugin = eosio::net_plugin -plugin = eosio::chain_api_plugin -''' - cfg.write(config) - def write_logging_config_file(self, node): ll = fc_log_level.debug if self.args.logging_level: @@ -380,7 +338,7 @@ 
def write_logging_config_file(self, node): dex = str(node.index).zfill(2) if dex in self.args.logging_level_map: ll = self.args.logging_level_map[dex] - with open(Path(__file__).resolve().parents[0] / 'TestHarness' / 'logging-template.json', 'r') as default: + with open(Path(__file__).resolve().parents[0] / 'logging-template.json', 'r') as default: cfg = json.load(default) for logger in cfg['loggers']: logger['level'] = ll @@ -401,9 +359,9 @@ def init_genesis(self): 'net_usage_leeway': 500, 'context_free_discount_net_usage_num': 20, 'context_free_discount_net_usage_den': 100, - 'max_block_cpu_usage': self.args.max_block_cpu_usage, + 'max_block_cpu_usage': 500000 if self.args.max_block_cpu_usage is None else self.args.max_block_cpu_usage, 'target_block_cpu_usage_pct': 1000, - 'max_transaction_cpu_usage': self.args.max_transaction_cpu_usage, + 'max_transaction_cpu_usage': 475000 if self.args.max_transaction_cpu_usage is None else self.args.max_transaction_cpu_usage, 'min_transaction_cpu_usage': 100, 'max_transaction_lifetime': 3600, 'deferred_trx_expiration_window': 600, @@ -417,8 +375,8 @@ def init_genesis(self): with open(genesis_path, 'r') as f: genesis = json.load(f) genesis['initial_key'] = self.network.nodes['bios'].keys[0].pubkey - genesis['max_block_cpu_usage'] = self.args.max_block_cpu_usage - genesis['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage + if self.args.max_block_cpu_usage is not None: genesis['initial_configuration']['max_block_cpu_usage'] = self.args.max_block_cpu_usage + if self.args.max_transaction_cpu_usage is not None: genesis['initial_configuration']['max_transaction_cpu_usage'] = self.args.max_transaction_cpu_usage return genesis def write_genesis_file(self, node, genesis): @@ -527,48 +485,91 @@ def make_custom(self): for producer in node['producers']: self.network.nodes[nodeName].producers.append(producer) - def launch(self, instance: nodeDefinition): - dd = Path(instance.data_dir_name) - out = dd / 'stdout.txt' - err_sl = dd / 'stderr.txt' - err = dd / Path(f'stderr.{self.launch_time}.txt') - pidf = dd / Path(f'{Utils.EosServerName}.pid') + def construct_command_line(self, instance: nodeDefinition): + is_bios = instance.name == 'bios' if instance.index in self.args.spcfc_inst_nums: eosdcmd = [f"{getattr(self.args, f'spcfc_inst_{Utils.EosServerName}es')[self.args.spcfc_inst_nums.index(instance.index)]}"] else: eosdcmd = [Utils.EosServerPath] + + a = lambda l, e: l.append(e) or l + + a(a(eosdcmd, '--blocks-dir'), block_dir) + a(a(eosdcmd, '--p2p-listen-endpoint'), f'{instance.listen_addr}:{instance.p2p_port}') + a(a(eosdcmd, '--p2p-server-address'), f'{instance.p2p_endpoint}') + if is_bios: + a(eosdcmd, '--enable-stale-production') + else: + a(a(eosdcmd, '--p2p-peer-address'), f'{self.network.nodes["bios"].p2p_endpoint}') + peers = list(sum([('--p2p-peer-address', self.network.nodes[p].p2p_endpoint) for p in instance.peers], ())) + eosdcmd.extend(peers) + if len(instance.producers) > 0: + a(a(eosdcmd, '--plugin'), 'eosio::producer_plugin') + producer_keys = list(sum([('--signature-provider', f'{key.pubkey}=KEY:{key.privkey}') for key in instance.keys], ())) + eosdcmd.extend(producer_keys) + producer_names = list(sum([('--producer-name', p) for p in instance.producers], ())) + eosdcmd.extend(producer_names) + else: + a(a(eosdcmd, '--transaction-retry-max-storage-size-gb'), '100') + a(a(eosdcmd, '--plugin'), 'eosio::net_plugin') + a(a(eosdcmd, '--plugin'), 'eosio::chain_api_plugin') + if self.args.skip_signature: - 
eosdcmd.append('--skip-transaction-signatures') + a(eosdcmd, '--skip-transaction-signatures') if getattr(self.args, Utils.EosServerName): eosdcmd.extend(shlex.split(getattr(self.args, Utils.EosServerName))) if instance.index in self.args.specific_nums: i = self.args.specific_nums.index(instance.index) specifics = getattr(self.args, f'specific_{Utils.EosServerName}es')[i] if specifics[0] == "'" and specifics[-1] == "'": - eosdcmd.extend(shlex.split(specifics[1:-1])) + specificList = shlex.split(specifics[1:-1]) else: - eosdcmd.extend(shlex.split(specifics)) - eosdcmd.append('--config-dir') - eosdcmd.append(str(instance.config_dir_name)) - eosdcmd.append('--data-dir') - eosdcmd.append(str(instance.data_dir_name)) - eosdcmd.append('--genesis-json') - eosdcmd.append(f'{instance.config_dir_name}/genesis.json') + specificList = shlex.split(specifics) + # Allow specific nodeos args to override existing args up to this point. + # Consider moving specific arg handling to the end to allow overriding all args. + repeatable = [ + # appbase + '--plugin', + # chain_plugin + '--checkpoint', '--profile-account', '--actor-whitelist', '--actor-blacklist', + '--contract-whitelist', '--contract-blacklist', '--action-blacklist', '--key-blacklist', + '--sender-bypass-whiteblacklist', '--trusted-producer', + # http_plugin + '--http-alias', + # net_plugin + '--p2p-peer-address', '--p2p-auto-bp-peer', '--peer-key', '--peer-private-key', + # producer_plugin + '--producer-name', '--signature-provider', '--greylist-account', '--disable-subjective-account-billing', + # trace_api_plugin + '--trace-rpc-abi'] + for arg in specificList: + if '-' in arg and arg not in repeatable: + if arg in eosdcmd: + i = eosdcmd.index(arg) + if eosdcmd[i+1][0] != '-': + eosdcmd.pop(i+1) + eosdcmd.pop(i) + eosdcmd.extend(specificList) + a(a(eosdcmd, '--config-dir'), str(instance.config_dir_name)) + a(a(eosdcmd, '--data-dir'), str(instance.data_dir_name)) + a(a(eosdcmd, '--genesis-json'), f'{instance.config_dir_name}/genesis.json') if self.args.timestamp: - eosdcmd.append('--genesis-timestamp') - eosdcmd.append(self.args.timestamp) + a(a(eosdcmd, '--genesis-timestamp'), self.args.timestamp) + + if '--http-validate-host' not in eosdcmd: + a(a(eosdcmd, '--http-validate-host'), 'false') + + if '--http-server-address' not in eosdcmd: + a(a(eosdcmd, '--http-server-address'), f'{instance.host_name}:{instance.http_port}') # Always enable a history query plugin on the bios node - if instance.name == 'bios': + if is_bios: if self.args.is_nodeos_v2: - eosdcmd.append('--plugin') - eosdcmd.append('eosio::history_api_plugin') - eosdcmd.append('--filter-on') - eosdcmd.append('"*"') + a(a(eosdcmd, '--plugin'), 'eosio::history_api_plugin') + a(a(eosdcmd, '--filter-on'), '"*"') else: - eosdcmd.append('--plugin') - eosdcmd.append('eosio::trace_api_plugin') if 'eosio::history_api_plugin' in eosdcmd and 'eosio::trace_api_plugin' in eosdcmd: eosdcmd.remove('--trace-no-abis') @@ -578,126 +579,17 @@ def launch(self, instance: nodeDefinition): i -= 1 eosdcmd.pop(i) - if not instance.dont_start: - Utils.Print(f'spawning child: {" ".join(eosdcmd)}') - - dd.mkdir(parents=True, exist_ok=True) - - stdout = open(out, 'w') - stderr = open(err, 'w') - c = subprocess.Popen(eosdcmd, stdout=stdout, stderr=stderr) - with pidf.open('w') as pidout: - pidout.write(str(c.pid)) - try: - err_sl.unlink() - except FileNotFoundError: - pass - err_sl.symlink_to(err.name) - else: - Utils.Print(f'unstarted node command: {"
".join(eosdcmd)}') - - with open(instance.data_dir_name / 'start.cmd', 'w') as f: - f.write(' '.join(eosdcmd)) - - def bounce(self, nodeNumbers): - self.down(nodeNumbers, True) - - def down(self, nodeNumbers, relaunch=False): - for num in nodeNumbers: - for node in self.network.nodes.values(): - if self.network.name + num == node.name: - Utils.Print(f'{"Restarting" if relaunch else "Shutting down"} node {node.name}') - with open(node.data_dir_name / f'{Utils.EosServerName}.pid', 'r') as f: - pid = int(f.readline()) - self.terminate_wait_pid(pid, raise_if_missing=not relaunch) - if relaunch: - self.launch(node) - - def kill(self, signum): - errorCode = 0 - for node in self.network.nodes.values(): - try: - with open(node.data_dir_name / f'{Utils.EosServerName}.pid', 'r') as f: - pid = int(f.readline()) - self.terminate_wait_pid(pid, signum, raise_if_missing=False) - except FileNotFoundError as err: - errorCode = 1 - return errorCode - - def start_all(self): - if self.args.launch.lower() != 'none': - for instance in self.network.nodes.values(): - self.launch(instance) - time.sleep(self.args.delay) + return eosdcmd def write_dot_file(self): - with open('testnet.dot', 'w') as f: + with open(Utils.DataDir + 'testnet.dot', 'w') as f: f.write('digraph G\n{\nlayout="circo";\n') for node in self.network.nodes.values(): for p in node.peers: pname = self.network.nodes[p].dot_label f.write(f'"{node.dot_label}"->"{pname}" [dir="forward"];\n') f.write('}') - - def terminate_wait_pid(self, pid, signum = signal.SIGTERM, raise_if_missing=True): - '''Terminate a non-child process with given signal number or with SIGTERM if not - provided and wait for it to exit.''' - if sys.version_info >= (3, 9) and platform.system() == 'Linux': # on our supported platforms, Python 3.9 accompanies a kernel > 5.3 - try: - fd = os.pidfd_open(pid) - except: - if raise_if_missing: - raise - else: - po = select.poll() - po.register(fd, select.POLLIN) - try: - os.kill(pid, signum) - except ProcessLookupError: - if raise_if_missing: - raise - po.poll(None) - else: - if platform.system() in {'Linux', 'Darwin'}: - def pid_exists(pid): - try: - os.kill(pid, 0) - except OSError as err: - if err.errno == errno.ESRCH: - return False - elif err.errno == errno.EPERM: - return True - else: - raise err - return True - def backoff_timer(delay): - time.sleep(delay) - return min(delay * 2, 0.04) - delay = 0.0001 - try: - os.kill(pid, signum) - except ProcessLookupError: - if raise_if_missing: - raise - else: - return - while True: - if pid_exists(pid): - delay = backoff_timer(delay) - else: - return - -if __name__ == '__main__': - errorCode = 0 - l = launcher(sys.argv[1:]) - if len(l.args.down): - l.down(l.args.down) - elif len(l.args.bounce): - l.bounce(l.args.bounce) - elif l.args.kill: - errorCode = l.kill(l.args.kill) - elif l.args.launch == 'all' or l.args.launch == 'local': - l.start_all() - for f in glob.glob(Utils.DataPath): - shutil.rmtree(f) - sys.exit(errorCode) + try: + subprocess.run(['dot', '-Tpng', f'-o{Utils.DataDir}testnet.png', Utils.DataDir + 'testnet.dot']) + except FileNotFoundError: + pass diff --git a/tests/TestHarness/queries.py b/tests/TestHarness/queries.py index 5521be8870..aa28521476 100644 --- a/tests/TestHarness/queries.py +++ b/tests/TestHarness/queries.py @@ -11,7 +11,7 @@ import urllib.error from .core_symbol import CORE_SYMBOL -from .testUtils import Account +from .accounts import Account from .testUtils import EnumType from .testUtils import addEnum from .testUtils import ReturnType @@ -297,7 +297,7 @@ def 
getBlockNumByTransId(self, transId, exitOnError=True, delayedRetry=True, blo try: key = self.fetchKeyCommand() refBlockNum = self.fetchRefBlock(trans) - refBlockNum=int(refBlockNum)+1 + refBlockNum=int(refBlockNum) except (TypeError, ValueError, KeyError) as _: Utils.Print("transaction%s not found. Transaction: %s" % (key, trans)) return None @@ -596,7 +596,7 @@ def processCleosCmd(self, cmd, cmdDesc, silentErrors=True, exitOnError=False, ex return trans - def processUrllibRequest(self, resource, command, payload={}, silentErrors=False, exitOnError=False, exitMsg=None, returnType=ReturnType.json, method="POST", endpoint=None): + def processUrllibRequest(self, resource, command, payload={}, silentErrors=False, exitOnError=False, exitMsg=None, returnType=ReturnType.json, method="POST", endpoint=None, prettyPrint=False, printReturnLimit=1024): if not endpoint: endpoint = self.endpointHttp cmd = f"{endpoint}/v1/{resource}/{command}" @@ -625,8 +625,10 @@ def processUrllibRequest(self, resource, command, payload={}, silentErrors=False if Utils.Debug: end=time.perf_counter() Utils.Print("cmd Duration: %.3f sec" % (end-start)) - printReturn=json.dumps(rtn) if returnType==ReturnType.json else rtn - Utils.Print("cmd returned: %s" % (printReturn[:1024])) + indent = 2 if prettyPrint else None + separators = (', ',': ') if prettyPrint else (',',':') + printReturn=json.dumps(rtn, indent=indent, separators=separators) if returnType==ReturnType.json else rtn + Utils.Print("cmd returned: %s" % (printReturn[:printReturnLimit])) except urllib.error.HTTPError as ex: if not silentErrors: end=time.perf_counter() diff --git a/tests/TestHarness/testUtils.py b/tests/TestHarness/testUtils.py index 9a08c68ad1..b232a93eb3 100644 --- a/tests/TestHarness/testUtils.py +++ b/tests/TestHarness/testUtils.py @@ -76,8 +76,8 @@ class Utils: DataRoot=os.path.basename(sys.argv[0]).rsplit('.',maxsplit=1)[0] PID = os.getpid() DataPath= f"{TestLogRoot}/{DataRoot}{PID}" - DataDir= f"{DataPath}/" - ConfigDir=f"{str(Path.cwd().resolve())}/etc/eosio/" + DataDir=f"{DataPath}/" + ConfigDir=f"{DataPath}/" TimeFmt='%Y-%m-%dT%H:%M:%S.%f' @@ -88,16 +88,18 @@ def timestamp(): @staticmethod def checkOutputFileWrite(time, cmd, output, error): stop=Utils.timestamp() + if not os.path.isdir(Utils.TestLogRoot): + if Utils.Debug: Utils.Print("TestLogRoot creating dir %s in dir: %s" % (Utils.TestLogRoot, os.getcwd())) + os.mkdir(Utils.TestLogRoot) + if not os.path.isdir(Utils.DataPath): + if Utils.Debug: Utils.Print("DataPath creating dir %s in dir: %s" % (Utils.DataPath, os.getcwd())) + os.mkdir(Utils.DataPath) if not hasattr(Utils, "checkOutputFile"): - if not os.path.isdir(Utils.TestLogRoot): - if Utils.Debug: Utils.Print("TestLogRoot creating dir %s in dir: %s" % (Utils.TestLogRoot, os.getcwd())) - os.mkdir(Utils.TestLogRoot) - if not os.path.isdir(Utils.DataPath): - if Utils.Debug: Utils.Print("DataPath creating dir %s in dir: %s" % (Utils.DataPath, os.getcwd())) - os.mkdir(Utils.DataPath) - filename=f"{Utils.DataPath}/subprocess_results.log" - if Utils.Debug: Utils.Print("opening %s in dir: %s" % (filename, os.getcwd())) - Utils.checkOutputFile=open(filename,"w") + Utils.checkOutputFilename=f"{Utils.DataPath}/subprocess_results.log" + if Utils.Debug: Utils.Print("opening %s in dir: %s" % (Utils.checkOutputFilename, os.getcwd())) + Utils.checkOutputFile=open(Utils.checkOutputFilename,"w") + else: + Utils.checkOutputFile=open(Utils.checkOutputFilename,"a") Utils.checkOutputFile.write(Utils.FileDivider + "\n") 
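The new prettyPrint and printReturnLimit parameters on processUrllibRequest() above only affect how the response is echoed: they switch json.dumps between compact and indented separators and bound how many characters get printed. For example:

```python
import json

rtn = {"head_block_num": 1234, "head_block_producer": "defproducera"}

compact = json.dumps(rtn, indent=None, separators=(',', ':'))
pretty = json.dumps(rtn, indent=2, separators=(', ', ': '))

print(compact[:1024])   # {"head_block_num":1234,"head_block_producer":"defproducera"}
print(pretty[:1024])    # same payload, two-space indented across multiple lines
```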
Utils.checkOutputFile.write("start={%s}\n" % (time)) @@ -159,11 +161,13 @@ def getNodeDataDir(ext, relativeDir=None, trailingSlash=False): return path @staticmethod - def rmNodeDataDir(ext, rmState=True, rmBlocks=True): + def rmNodeDataDir(ext, rmState=True, rmBlocks=True, rmStateHist=True): if rmState: shutil.rmtree(Utils.getNodeDataDir(ext, "state")) if rmBlocks: shutil.rmtree(Utils.getNodeDataDir(ext, "blocks")) + if rmStateHist: + shutil.rmtree(Utils.getNodeDataDir(ext, "state-history"), ignore_errors=True) @staticmethod def getNodeConfigDir(ext, relativeDir=None, trailingSlash=False): @@ -231,7 +235,7 @@ def cmdError(name, cmdCode=0): Utils.Print(msg) @staticmethod - def waitForObj(lam, timeout=None, sleepTime=3, reporter=None): + def waitForObj(lam, timeout=None, sleepTime=1, reporter=None): if timeout is None: timeout=60 @@ -259,13 +263,13 @@ def waitForObj(lam, timeout=None, sleepTime=3, reporter=None): return None @staticmethod - def waitForBool(lam, timeout=None, sleepTime=3, reporter=None): + def waitForBool(lam, timeout=None, sleepTime=1, reporter=None): myLam = lambda: True if lam() else None ret=Utils.waitForObj(myLam, timeout, sleepTime, reporter=reporter) return False if ret is None else ret @staticmethod - def waitForBoolWithArg(lam, arg, timeout=None, sleepTime=3, reporter=None): + def waitForBoolWithArg(lam, arg, timeout=None, sleepTime=1, reporter=None): myLam = lambda: True if lam(arg, timeout) else None ret=Utils.waitForObj(myLam, timeout, sleepTime, reporter=reporter) return False if ret is None else ret @@ -536,23 +540,6 @@ def compareFiles(file1: str, file2: str): f2.close() return same - @staticmethod - def rmFromFile(file: str, matchValue: str): - """Rm lines from file that match *matchValue*""" - - lines = [] - with open(file, "r") as f: - lines = f.readlines() - - c = 0 - with open(file, "w") as f: - for line in lines: - if matchValue not in line: - f.write(line) - c += 1 - - return c - @staticmethod def addAmount(assetStr: str, deltaStr: str) -> str: asset = assetStr.split() @@ -626,23 +613,3 @@ def readSocketDataStr(sock : socket.socket, maxMsgSize : int, enc : str) -> str: @staticmethod def getNodeosVersion(): return os.popen(f"{Utils.EosServerPath} --version").read().replace("\n", "") - - -########################################################################################### -class Account(object): - # pylint: disable=too-few-public-methods - - def __init__(self, name): - self.name=name - - self.ownerPrivateKey=None - self.ownerPublicKey=None - self.activePrivateKey=None - self.activePublicKey=None - - - def __str__(self): - return "Name: %s" % (self.name) - - def __repr__(self): - return "Name: %s" % (self.name) diff --git a/tests/TestHarness/transactions.py b/tests/TestHarness/transactions.py index 4c16a17438..c00c7ad4ea 100644 --- a/tests/TestHarness/transactions.py +++ b/tests/TestHarness/transactions.py @@ -7,51 +7,59 @@ from .core_symbol import CORE_SYMBOL from .depresolver import dep from .queries import NodeosQueries -from .testUtils import Account +from .accounts import Account from .testUtils import Utils class Transactions(NodeosQueries): + retry_num_blocks_default = 1 + def __init__(self, host, port, walletMgr=None): super().__init__(host, port, walletMgr) # Create & initialize account and return creation transactions. 
Return transaction json object - def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False, sign=False, additionalArgs=''): + def createInitializeAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, silentErrors=False, stakeNet=100, stakeCPU=100, buyRAM=10000, exitOnError=False, sign=False, additionalArgs='', retry_num_blocks=None): signStr = NodeosQueries.sign_str(sign, [ creatorAccount.activePublicKey ]) cmdDesc="system newaccount" - cmd='%s -j %s %s %s \'%s\' \'%s\' --stake-net "%s %s" --stake-cpu "%s %s" --buy-ram "%s %s" %s' % ( - cmdDesc, signStr, creatorAccount.name, account.name, account.ownerPublicKey, - account.activePublicKey, stakeNet, CORE_SYMBOL, stakeCPU, CORE_SYMBOL, buyRAM, CORE_SYMBOL, additionalArgs) + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr = f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd=(f'{cmdDesc} -j {signStr} {creatorAccount.name} {account.name} \'{account.ownerPublicKey}\' ' + f'\'{account.activePublicKey}\' --stake-net "{stakeNet} {CORE_SYMBOL}" --stake-cpu ' + f'"{stakeCPU} {CORE_SYMBOL}" --buy-ram "{buyRAM} {CORE_SYMBOL}" {additionalArgs} {retryStr}') msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); - trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) transId=NodeosQueries.getTransId(trans) if stakedDeposit > 0: - self.waitForTransactionInBlock(transId) # seems like account creation needs to be finalized before transfer can happen - trans = self.transferFunds(creatorAccount, account, NodeosQueries.currencyIntToStr(stakedDeposit, CORE_SYMBOL), "init") + if not waitForTransBlock: # Wait for account creation to be finalized if we haven't already + self.waitForTransactionInBlock(transId) + trans = self.transferFunds(creatorAccount, account, NodeosQueries.currencyIntToStr(stakedDeposit, CORE_SYMBOL), "init", waitForTransBlock=waitForTransBlock) transId=NodeosQueries.getTransId(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans - def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, exitOnError=False, sign=False): + def createAccount(self, account, creatorAccount, stakedDeposit=1000, waitForTransBlock=False, silentErrors=False,exitOnError=False, sign=False, retry_num_blocks=None): """Create account and return creation transactions. Return transaction json object. 
waitForTransBlock: wait on creation transaction id to appear in a block.""" signStr = NodeosQueries.sign_str(sign, [ creatorAccount.activePublicKey ]) cmdDesc="create account" - cmd="%s -j %s %s %s %s %s" % ( - cmdDesc, signStr, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr = f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd=(f"{cmdDesc} -j {signStr} {creatorAccount.name} {account.name} {account.ownerPublicKey} " + f"{account.activePublicKey} {retryStr}") msg="(creator account=%s, account=%s)" % (creatorAccount.name, account.name); - trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) transId=NodeosQueries.getTransId(trans) if stakedDeposit > 0: - self.waitForTransactionInBlock(transId) # seems like account creation needs to be finlized before transfer can happen + if not waitForTransBlock: # account creation needs to be finalized before transfer can happen so wait if we haven't already + self.waitForTransactionInBlock(transId) trans = self.transferFunds(creatorAccount, account, "%0.04f %s" % (stakedDeposit/10000, CORE_SYMBOL), "init") self.trackCmdTransaction(trans) transId=NodeosQueries.getTransId(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans def transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry, sign, dontSend, expiration, skipSign): assert isinstance(amountStr, str) @@ -96,6 +104,9 @@ def transferFundsCmdArr(self, source, destination, amountStr, memo, force, retry # Transfer funds. Returns "transfer" json return object def transferFunds(self, source, destination, amountStr, memo="memo", force=False, waitForTransBlock=False, exitOnError=True, reportStatus=True, retry=None, sign=False, dontSend=False, expiration=90, skipSign=False): cmdArr = self.transferFundsCmdArr(source, destination, amountStr, memo, force, retry, sign, dontSend, expiration, skipSign) + if waitForTransBlock: + cmdArr.append('--retry-num-blocks') + cmdArr.append('1') trans=None start=time.perf_counter() try: @@ -118,7 +129,7 @@ def transferFunds(self, source, destination, amountStr, memo="memo", force=False Utils.cmdError("could not transfer \"%s\" from %s to %s" % (amountStr, source, destination)) Utils.errorExit("Failed to transfer \"%s\" from %s to %s" % (amountStr, source, destination)) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans # Transfer funds.
Returns (popen, cmdArr) for checkDelayedOutput def transferFundsAsync(self, source, destination, amountStr, memo="memo", force=False, exitOnError=True, retry=None, sign=False, dontSend=False, expiration=90, skipSign=False): @@ -261,56 +272,62 @@ def setPermission(self, account, code, pType, requirement, waitForTransBlock=Fal return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) - def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, transferTo=False, waitForTransBlock=False, exitOnError=False, reportStatus=True, sign=False): + def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, transferTo=False, waitForTransBlock=False, silentErrors=True, exitOnError=False, reportStatus=True, sign=False, retry_num_blocks=None): if toAccount is None: toAccount=fromAccount signStr = NodeosQueries.sign_str(sign, [ fromAccount.activePublicKey ]) cmdDesc="system delegatebw" transferStr="--transfer" if transferTo else "" - cmd="%s -j %s %s %s \"%s %s\" \"%s %s\" %s" % ( - cmdDesc, signStr, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL, transferStr) + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr=f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd=(f'{cmdDesc} -j {signStr} {fromAccount.name} {toAccount.name} "{netQuantity} {CORE_SYMBOL}" ' + f'"{cpuQuantity} {CORE_SYMBOL}" {transferStr} {retryStr}') msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); - trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans, reportStatus=reportStatus) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans - def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, exitOnError=False, sign=False): + def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False, retry_num_blocks=None): if toAccount is None: toAccount=fromAccount signStr = NodeosQueries.sign_str(sign, [ fromAccount.activePublicKey ]) cmdDesc="system undelegatebw" - cmd="%s -j %s %s %s \"%s %s\" \"%s %s\"" % ( - cmdDesc, signStr, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL) + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr=f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd=(f'{cmdDesc} -j {signStr} {fromAccount.name} {toAccount.name} "{netQuantity} {CORE_SYMBOL}" ' + f'"{cpuQuantity} {CORE_SYMBOL}" {retryStr}') msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); - trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + trans=self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans - def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnError=False, sign=False): + def regproducer(self, producer, url, location, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False, retry_num_blocks=None): signStr = 
NodeosQueries.sign_str(sign, [ producer.activePublicKey ]) - cmdDesc="system regproducer" - cmd="%s -j %s %s %s %s %s" % ( - cmdDesc, signStr, producer.name, producer.activePublicKey, url, location) - msg="producer=%s" % (producer.name); - trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + cmdDesc = "system regproducer" + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr = f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd = f'{cmdDesc} -j {signStr} {producer.name} {producer.activePublicKey} {url} {location} {retryStr}' + msg = f"producer={producer.name}" + trans = self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans - def vote(self, account, producers, waitForTransBlock=False, exitOnError=False, sign=False): + def vote(self, account, producers, waitForTransBlock=False, silentErrors=True, exitOnError=False, sign=False, retry_num_blocks=None): signStr = NodeosQueries.sign_str(sign, [ account.activePublicKey ]) cmdDesc = "system voteproducer prods" - cmd="%s -j %s %s %s" % ( - cmdDesc, signStr, account.name, " ".join(producers)) - msg="account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); - trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + retry_num_blocks = self.retry_num_blocks_default if retry_num_blocks is None else retry_num_blocks + retryStr = f"--retry-num-blocks {retry_num_blocks}" if waitForTransBlock else "" + cmd = f'{cmdDesc} -j {signStr} {account.name} {" ".join(producers)} {retryStr}' + msg = "account=%s, producers=[ %s ]" % (account.name, ", ".join(producers)); + trans = self.processCleosCmd(cmd, cmdDesc, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=msg) self.trackCmdTransaction(trans) - return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + return trans # Require producer_api_plugin def activatePreactivateFeature(self): diff --git a/tests/auto_bp_peering_test.py b/tests/auto_bp_peering_test.py index a2d9f6d08c..a55bdd8807 100755 --- a/tests/auto_bp_peering_test.py +++ b/tests/auto_bp_peering_test.py @@ -1,10 +1,8 @@ #!/usr/bin/env python3 -import re -import signal -import time +import socket -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, ReturnType +from TestHarness import Cluster, TestHelper, Utils, WalletMgr ############################################################### # auto_bp_peering_test @@ -25,7 +23,6 @@ # Parse command line arguments args = TestHelper.parse_args({ "-v", - "--clean-run", "--dump-error-details", "--leave-running", "--keep-logs", @@ -33,16 +30,12 @@ }) Utils.Debug = args.v -killAll = args.clean_run dumpErrorDetails = args.dump_error_details -dontKill = args.leave_running -killEosInstances = not dontKill -killWallet = not dontKill keepLogs = args.keep_logs -# Setup cluster and it's wallet manager +# Setup cluster and its wallet manager walletMgr = WalletMgr(True) -cluster = Cluster(walletd=True) +cluster = Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) @@ -52,12 +45,17 @@ for nodeId in range(0, producerNodes): producer_name = "defproducer" + chr(ord('a') + nodeId) port = cluster.p2pBasePort + nodeId - hostname = "localhost:" + str(port) + if producer_name == 'defproducerf': 
+ hostname = 'ext-ip0:9999' + elif producer_name == 'defproducerk': + hostname = socket.gethostname() + ':9886' + else: + hostname = "localhost:" + str(port) peer_names[hostname] = producer_name auto_bp_peer_args += (" --p2p-auto-bp-peer " + producer_name + "," + hostname) -def neigbors_in_schedule(name, schedule): +def neighbors_in_schedule(name, schedule): index = schedule.index(name) result = [] num = len(schedule) @@ -76,10 +74,10 @@ def neigbors_in_schedule(name, schedule): for nodeId in range(0, producerNodes): specificNodeosArgs[nodeId] = auto_bp_peer_args - # Kill any existing instances and launch cluster + specificNodeosArgs[5] = specificNodeosArgs[5] + ' --p2p-server-address ext-ip0:9999' + specificNodeosArgs[10] = specificNodeosArgs[10] + ' --p2p-server-address ""' + TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() cluster.launch( prodCount=producerCountInEachNode, totalNodes=totalNodes, @@ -121,23 +119,19 @@ def neigbors_in_schedule(name, schedule): peers = peers.sort() name = "defproducer" + chr(ord('a') + nodeId) - expected_peers = neigbors_in_schedule(name, scheduled_producers) + expected_peers = neighbors_in_schedule(name, scheduled_producers) if peers != expected_peers: Utils.Print("ERROR: expect {} has connections to {}, got connections to {}".format( name, expected_peers, peers)) connection_check_failures = connection_check_failures+1 - testSuccessful = (connection_check_failures == 0) + testSuccessful = connection_check_failures == 0 finally: TestHelper.shutdown( cluster, walletMgr, testSuccessful, - killEosInstances, - killWallet, - keepLogs, - killAll, dumpErrorDetails ) diff --git a/tests/block_log.cpp b/tests/block_log.cpp index 9d55ada915..837bf04f37 100644 --- a/tests/block_log.cpp +++ b/tests/block_log.cpp @@ -67,7 +67,8 @@ struct block_log_fixture { void check_range_present(uint32_t first, uint32_t last) { BOOST_REQUIRE_EQUAL(log->first_block_num(), first); - BOOST_REQUIRE_EQUAL(eosio::chain::block_header::num_from_id(log->head_id()), last); + BOOST_REQUIRE(log->head_id()); + BOOST_REQUIRE_EQUAL(eosio::chain::block_header::num_from_id(*log->head_id()), last); if(enable_read) { for(auto i = first; i <= last; i++) { std::vector buff; diff --git a/tests/block_log_retain_blocks_test.py b/tests/block_log_retain_blocks_test.py index 23e1e145cd..f9d592c46f 100755 --- a/tests/block_log_retain_blocks_test.py +++ b/tests/block_log_retain_blocks_test.py @@ -19,19 +19,16 @@ Print=Utils.Print errorExit=Utils.errorExit -args=TestHelper.parse_args({"--keep-logs" ,"--dump-error-details","-v","--leave-running","--clean-run","--unshared" }) +args=TestHelper.parse_args({"--keep-logs" ,"--dump-error-details","-v","--leave-running","--unshared"}) debug=args.v -killEosInstances= not args.leave_running dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -killAll=args.clean_run seed=1 Utils.Debug=debug testSuccessful=False random.seed(seed) # Use a fixed seed for repeatability. 
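The hunk that follows repeats a pattern applied throughout this patch: the walletd/killAll-era Cluster constructor and the manual killall()/cleanup() preamble give way to lifecycle flags on the constructor, with teardown decided inside TestHelper.shutdown. A minimal sketch of the new shape, assuming only the TestHarness API visible in these diffs:

# Minimal sketch of a test skeleton without --clean-run, assuming the
# TestHarness API as used throughout this patch (Cluster, TestHelper, WalletMgr).
from TestHarness import Cluster, TestHelper, Utils, WalletMgr

args = TestHelper.parse_args({"-v", "--keep-logs", "--leave-running", "--dump-error-details", "--unshared"})
Utils.Debug = args.v
walletMgr = WalletMgr(True)
# keepRunning/keepLogs replace the old killAll/killEosInstances/keepLogs bookkeeping variables.
cluster = Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs)
cluster.setWalletMgr(walletMgr)
testSuccessful = False
try:
    # No cluster.killall()/cluster.cleanup() preamble anymore; the constructor
    # flags tell the harness how to tear the cluster down.
    if cluster.launch(pnodes=1, totalNodes=1) is False:
        Utils.errorExit("Failed to stand up eos cluster.")
    testSuccessful = True
finally:
    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful,
                        dumpErrorDetails=args.dump_error_details)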
-cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) # the first node for --block-log-retain-blocks 0, @@ -45,11 +42,6 @@ cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - specificExtraNodeosArgs={} specificExtraNodeosArgs[0]=f' --block-log-retain-blocks 0 ' specificExtraNodeosArgs[1]=f' --block-log-retain-blocks 10 ' @@ -87,7 +79,7 @@ testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killEosInstances, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) diff --git a/tests/block_log_util_test.py b/tests/block_log_util_test.py index f910943cbd..bd7bff144e 100755 --- a/tests/block_log_util_test.py +++ b/tests/block_log_util_test.py @@ -29,22 +29,17 @@ def verifyBlockLog(expected_block_num, trimmedBlockLog): appArgs=AppArgs() -args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--unshared"}) +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--unshared"}) Utils.Debug=args.v pnodes=2 -cluster=Cluster(walletd=True,unshared=args.unshared) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) prodCount=2 -killAll=args.clean_run walletPort=TestHelper.DEFAULT_WALLET_PORT totalNodes=pnodes+1 walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -53,8 +48,6 @@ def verifyBlockLog(expected_block_num, trimmedBlockLog): TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount) is False: Utils.errorExit("Failed to stand up eos cluster.") @@ -152,7 +145,7 @@ def checkBlockLog(blockLog, blockNumsToFind, firstBlockNum=1): # relaunch the node with the truncated block log and ensure it catches back up with the producers current_head_block_num = node1.getInfo()["head_block_num"] - cluster.getNode(2).relaunch(cachePopen=True) + cluster.getNode(2).relaunch() assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15) # ensure it continues to advance @@ -182,7 +175,7 @@ def checkBlockLog(blockLog, blockNumsToFind, firstBlockNum=1): # relaunch the node with the truncated block log and ensure it catches back up with the producers current_head_block_num = node1.getInfo()["head_block_num"] assert current_head_block_num >= info["head_block_num"] - cluster.getNode(2).relaunch(cachePopen=True) + cluster.getNode(2).relaunch() assert cluster.getNode(2).waitForBlock(current_head_block_num, timeout=60, reportInterval=15) # ensure it continues to advance @@ -202,7 +195,7 @@ def checkBlockLog(blockLog, blockNumsToFind, firstBlockNum=1): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, 
testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/chain_plugin_tests.cpp b/tests/chain_plugin_tests.cpp index 05c73db5e7..e31d96bc70 100644 --- a/tests/chain_plugin_tests.cpp +++ b/tests/chain_plugin_tests.cpp @@ -94,7 +94,7 @@ BOOST_FIXTURE_TEST_CASE( get_block_with_invalid_abi, validating_tester ) try { // block should be decoded successfully auto block = plugin.get_raw_block(param, fc::time_point::maximum()); auto abi_cache = plugin.get_block_serializers(block, fc::microseconds::maximum()); - std::string block_str = json::to_pretty_string(plugin.convert_block(block, abi_cache, fc::microseconds::maximum())); + std::string block_str = json::to_pretty_string(plugin.convert_block(block, abi_cache)); BOOST_TEST(block_str.find("procassert") != std::string::npos); BOOST_TEST(block_str.find("condition") != std::string::npos); BOOST_TEST(block_str.find("Should Not Assert!") != std::string::npos); @@ -114,7 +114,7 @@ BOOST_FIXTURE_TEST_CASE( get_block_with_invalid_abi, validating_tester ) try { // get the same block as string, results in decode failed(invalid abi) but not exception auto block2 = plugin.get_raw_block(param, fc::time_point::maximum()); auto abi_cache2 = plugin.get_block_serializers(block2, fc::microseconds::maximum()); - std::string block_str2 = json::to_pretty_string(plugin.convert_block(block2, abi_cache2, fc::microseconds::maximum())); + std::string block_str2 = json::to_pretty_string(plugin.convert_block(block2, abi_cache2)); BOOST_TEST(block_str2.find("procassert") != std::string::npos); BOOST_TEST(block_str2.find("condition") == std::string::npos); // decode failed BOOST_TEST(block_str2.find("Should Not Assert!") == std::string::npos); // decode failed diff --git a/tests/cli_test.py b/tests/cli_test.py index eb0ed9efe0..74e730e60c 100755 --- a/tests/cli_test.py +++ b/tests/cli_test.py @@ -4,12 +4,15 @@ # response to the `--help` option. It also contains a couple of additional # CLI-related checks as well as test cases for CLI bugfixes. 
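In block_log_util_test.py above, relaunch() drops its cachePopen argument; the harness now tracks the child process itself. A hedged sketch of the relaunch-and-catch-up check those hunks perform, assuming a cluster that is already launched:

# Sketch: restart a stopped node and prove it syncs back to the tip.
def relaunch_and_catch_up(cluster, stoppedNodeId, referenceNode, timeout=60):
    # Head seen by a healthy node before the restart.
    head = referenceNode.getInfo()["head_block_num"]
    node = cluster.getNode(stoppedNodeId)
    assert node.relaunch(), "Fail to relaunch"
    # The relaunched node must reach the pre-restart head to prove it caught up.
    return node.waitForBlock(head, timeout=timeout, reportInterval=15)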
+import datetime import subprocess import re import os import time +import shlex import shutil import signal +from pathlib import Path from TestHarness import Account, Node, ReturnType, Utils, WalletMgr @@ -352,9 +355,9 @@ def abi_file_with_nodeos_test(): os.makedirs(data_dir, exist_ok=True) walletMgr = WalletMgr(True) walletMgr.launch() - node = Node('localhost', 8888, nodeId, cmd="./programs/nodeos/nodeos -e -p eosio --plugin eosio::trace_api_plugin --trace-no-abis --plugin eosio::producer_plugin --plugin eosio::producer_api_plugin --plugin eosio::chain_api_plugin --plugin eosio::chain_plugin --plugin eosio::http_plugin --access-control-allow-origin=* --http-validate-host=false --max-transaction-time=-1 --resource-monitor-not-shutdown-on-threshold-exceeded " + "--data-dir " + data_dir + " --config-dir " + data_dir, walletMgr=walletMgr) - node.verifyAlive() # Setting node state to not alive - node.relaunch(newChain=True, cachePopen=True) + cmd = "./programs/nodeos/nodeos -e -p eosio --plugin eosio::trace_api_plugin --trace-no-abis --plugin eosio::producer_plugin --plugin eosio::producer_api_plugin --plugin eosio::chain_api_plugin --plugin eosio::chain_plugin --plugin eosio::http_plugin --access-control-allow-origin=* --http-validate-host=false --max-transaction-time=-1 --resource-monitor-not-shutdown-on-threshold-exceeded " + "--data-dir " + data_dir + " --config-dir " + data_dir + node = Node('localhost', 8888, nodeId, data_dir=Path(data_dir), config_dir=Path(data_dir), cmd=shlex.split(cmd), launch_time=datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S'), walletMgr=walletMgr) + time.sleep(5) node.waitForBlock(1) accountNames = ["eosio", "eosio.token", "alice", "bob"] accounts = [] @@ -399,8 +402,7 @@ def abi_file_with_nodeos_test(): Utils.Print("Test failed.") if node: if not node.killed: - if node.pid: - os.kill(node.pid, signal.SIGKILL) + node.kill(signal.SIGKILL) if testSuccessful: Utils.Print("Cleanup nodeos data.") shutil.rmtree(Utils.DataPath) @@ -409,10 +411,7 @@ def abi_file_with_nodeos_test(): if os.path.exists(malicious_token_abi_path): os.remove(malicious_token_abi_path) - walletMgr.killall() - if testSuccessful: - Utils.Print("Cleanup wallet data.") - walletMgr.cleanup() + walletMgr.testFailed = not testSuccessful nodeos_help_test() diff --git a/tests/cluster_launcher.py b/tests/cluster_launcher.py new file mode 100755 index 0000000000..d4db91c72e --- /dev/null +++ b/tests/cluster_launcher.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 + +from TestHarness import Cluster, TestHelper, Utils, WalletMgr +from TestHarness.TestHelper import AppArgs + +############################################################### +# cluster_launcher +# +# Smoke test for TestHarness launching and bootstrapping a cluster. 
+# +############################################################### + + +Print=Utils.Print +errorExit=Utils.errorExit + +appArgs = AppArgs() +appArgs.add(flag="--plugin",action='append',type=str,help="Run nodes with additional plugins") + +args=TestHelper.parse_args({"-p","-n","-d","-s","--keep-logs" + ,"--dump-error-details","-v" + ,"--leave-running","--unshared"}, + applicationSpecificArgs=appArgs) +pnodes=args.p +delay=args.d +topo=args.s +debug=args.v +total_nodes=pnodes +dumpErrorDetails=args.dump_error_details + +Utils.Debug=debug +testSuccessful=False + +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) +walletMgr=WalletMgr(True) + +try: + TestHelper.printSystemInfo("BEGIN") + + cluster.setWalletMgr(walletMgr) + + Print(f'producing nodes: {pnodes}, topology: {topo}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}') + + Print("Stand up cluster") + if args.plugin: + extraNodeosArgs = ''.join([i+j for i,j in zip([' --plugin '] * len(args.plugin), args.plugin)]) + else: + extraNodeosArgs = '' + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, + extraNodeosArgs=extraNodeosArgs) is False: + errorExit("Failed to stand up eos cluster.") + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/compute_transaction_test.py b/tests/compute_transaction_test.py index 2c8b22c6c3..e257c3fb37 100755 --- a/tests/compute_transaction_test.py +++ b/tests/compute_transaction_test.py @@ -16,7 +16,7 @@ args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed" ,"--dump-error-details","-v","--leave-running" - ,"--clean-run","--keep-logs","--unshared"}) + ,"--keep-logs","--unshared"}) pnodes=args.p topo=args.s @@ -26,21 +26,13 @@ nodesFile=args.nodes_file dontLaunch=nodesFile is not None seed=args.seed -dontKill=args.leave_running dumpErrorDetails=args.dump_error_details -killAll=args.clean_run -keepLogs=args.keep_logs - -killWallet=not dontKill -killEosInstances=not dontKill -if nodesFile is not None: - killEosInstances=False Utils.Debug=debug testSuccessful=False random.seed(seed) # Use a fixed seed for repeatability. 
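The extraNodeosArgs assembly in cluster_launcher.py above interleaves a ' --plugin ' prefix with each value collected by the append-action --plugin flag. A worked example of what the zip/join produces, with hypothetical plugin names:

# Worked example of the extraNodeosArgs assembly in cluster_launcher.py.
plugins = ["eosio::trace_api_plugin", "eosio::producer_api_plugin"]  # hypothetical --plugin values
extraNodeosArgs = ''.join([i + j for i, j in zip([' --plugin '] * len(plugins), plugins)])
assert extraNodeosArgs == ' --plugin eosio::trace_api_plugin --plugin eosio::producer_api_plugin'
# An equivalent, arguably clearer spelling:
assert extraNodeosArgs == ''.join(f' --plugin {p}' for p in plugins)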
-cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" @@ -55,14 +47,9 @@ errorExit("Failed to initilize nodes from Json string.") total_nodes=len(cluster.getNodes()) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() print("Stand up walletd") if walletMgr.launch() is False: errorExit("Failed to stand up keosd.") - else: - cluster.killall(allInstances=killAll) - cluster.cleanup() Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) @@ -99,7 +86,7 @@ transferAmount="1000.0000 {0}".format(CORE_SYMBOL) - node.transferFunds(cluster.eosioAccount, account1, transferAmount, "fund account", waitForTransBlock=True) + npnode.transferFunds(cluster.eosioAccount, account1, transferAmount, "fund account", waitForTransBlock=True) preBalances = node.getEosBalances([account1, account2]) Print("Starting balances:") Print(preBalances) @@ -176,7 +163,7 @@ testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) errorCode = 0 if testSuccessful else 1 exit(errorCode) diff --git a/tests/distributed-transactions-remote-test.py b/tests/distributed-transactions-remote-test.py deleted file mode 100755 index 8dcd50a6cd..0000000000 --- a/tests/distributed-transactions-remote-test.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python3 - -import subprocess -import tempfile -import os - -from TestHarness import Cluster, TestHelper, Utils - -############################################################### -# distributed-transactions-remote-test -# -# Tests remote capability of the distributed-transactions-test. Test will setup cluster and pass nodes info to distributed-transactions-test. E.g. 
-# distributed-transactions-remote-test.py -v --clean-run --dump-error-detail -# -############################################################### - -Print=Utils.Print -errorExit=Utils.errorExit - -args = TestHelper.parse_args({"-p","--dump-error-details","-v","--leave-running","--clean-run","--unshared"}) -pnodes=args.p -debug=args.v -dontKill=args.leave_running -dumpErrorDetails=args.dump_error_details -killAll=args.clean_run - -Utils.Debug=debug - -killEosInstances=not dontKill -topo="mesh" -delay=1 -prodCount=1 # producers per producer node -total_nodes=pnodes+3 -actualTest="tests/distributed-transactions-test.py" -testSuccessful=False - -clusterMapJsonTemplate="""{ - "keys": { - "defproduceraPrivateKey": "%s", - "defproducerbPrivateKey": "%s" - }, - "nodes": [ - {"port": 8888, "host": "localhost"}, - {"port": 8889, "host": "localhost"}, - {"port": 8890, "host": "localhost"} - ] -} -""" - -cluster=Cluster(walletd=True,unshared=args.unshared) - -(fd, nodesFile) = tempfile.mkstemp() -try: - TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() - - Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % - (pnodes, total_nodes-pnodes, topo, delay)) - Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, prodCount=prodCount, topo=topo, delay=delay) is False: - errorExit("Failed to stand up eos cluster.") - - Print ("Wait for Cluster stabilization") - # wait for cluster to start producing blocks - if not cluster.waitOnClusterBlockNumSync(3): - errorExit("Cluster never stabilized") - - producerKeys=Cluster.parseClusterKeys(total_nodes) - defproduceraPrvtKey=producerKeys["defproducera"]["private"] - defproducerbPrvtKey=producerKeys["defproducerb"]["private"] - - clusterMapJson = clusterMapJsonTemplate % (defproduceraPrvtKey, defproducerbPrvtKey) - - tfile = os.fdopen(fd, "w") - tfile.write(clusterMapJson) - tfile.close() - - cmd="%s --nodes-file %s %s %s" % (actualTest, nodesFile, "-v" if debug else "", "--leave-running" if dontKill else "") - Print("Starting up distributed transactions test: %s" % (actualTest)) - Print("cmd: %s\n" % (cmd)) - if 0 != subprocess.call(cmd, shell=True): - errorExit("failed to run cmd.") - - testSuccessful=True - Print("\nEND") -finally: - os.remove(nodesFile) - TestHelper.shutdown(cluster, None, testSuccessful, killEosInstances, False, False, killAll, dumpErrorDetails) - -exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 6a0aa2c959..d95dbf64df 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import random +import signal from TestHarness import Cluster, TestHelper, Utils, WalletMgr from TestHarness.TestHelper import AppArgs @@ -23,7 +24,7 @@ appArgs = AppArgs() extraArgs = appArgs.add_bool(flag="--speculative", help="Run nodes in read-mode=speculative") args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed", "--speculative" - ,"--dump-error-details","-v","--leave-running","--clean-run","--keep-logs","--unshared"}, applicationSpecificArgs=appArgs) + ,"--dump-error-details","-v","--leave-running","--keep-logs","--unshared"}, applicationSpecificArgs=appArgs) pnodes=args.p topo=args.s @@ -33,22 +34,14 @@ nodesFile=args.nodes_file dontLaunch=nodesFile is not None seed=args.seed -dontKill=args.leave_running 
dumpErrorDetails=args.dump_error_details -killAll=args.clean_run -keepLogs=args.keep_logs speculative=args.speculative -killWallet=not dontKill -killEosInstances=not dontKill -if nodesFile is not None: - killEosInstances=False - Utils.Debug=debug testSuccessful=False random.seed(seed) # Use a fixed seed for repeatability. -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=True if nodesFile is not None else args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) try: @@ -62,15 +55,10 @@ errorExit("Failed to initilize nodes from Json string.") total_nodes=len(cluster.getNodes()) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() print("Stand up walletd") if walletMgr.launch() is False: errorExit("Failed to stand up keosd.") else: - cluster.killall(allInstances=killAll) - cluster.cleanup() - Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) @@ -115,8 +103,9 @@ print("Funds spread validated") - if not dontKill: - cluster.killall(allInstances=killAll) + if not args.leave_running: + for node in cluster.getAllNodes(): + node.kill(signal.SIGTERM) else: print("NOTE: Skip killing nodes, block log verification will be limited") @@ -124,7 +113,7 @@ testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/gelf_test.py b/tests/gelf_test.py new file mode 100755 index 0000000000..ebad30fd39 --- /dev/null +++ b/tests/gelf_test.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +import atexit, os, signal, shlex, shutil, time +import socket, threading +import zlib, json, glob +from pathlib import Path +from TestHarness import Node, TestHelper, Utils + +############################################################################################### +# This test starts a nodeos process which is configured with a GELF logging endpoint on localhost, +# receives data from the GELF logging UDP port, and checks if the received log entries match +# those in the stderr log file.
+########################################################################################### + + +GELF_PORT = 24081 + +# We need debug level to get more information about nodeos process +logging="""{ + "includes": [], + "appenders": [{ + "name": "stderr", + "type": "console", + "args": { + "stream": "std_error", + "level_colors": [{ + "level": "debug", + "color": "green" + },{ + "level": "warn", + "color": "brown" + },{ + "level": "error", + "color": "red" + } + ], + "flush": true + }, + "enabled": true + },{ + "name": "net", + "type": "gelf", + "args": { + "endpoint": "localhost:GELF_PORT", + "host": "localhost", + "_network": "testnet" + }, + "enabled": true + } + ], + "loggers": [{ + "name": "default", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] + } + ] +}""" + +logging = logging.replace("GELF_PORT", str(GELF_PORT)) + +nodeos_run_time_in_sec = 5 + +node_id = 1 +received_logs = [] +BUFFER_SIZE = 1024 + +def gelfServer(stop): + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.settimeout(1) + s.bind((TestHelper.LOCAL_HOST, GELF_PORT)) + while not stop(): + try: + data, _ = s.recvfrom(BUFFER_SIZE) + message = zlib.decompress(data, zlib.MAX_WBITS|32) + entry = json.loads(message.decode()) + global num_received_logs, last_received_log + received_logs.append(entry["short_message"]) + except socket.timeout: + pass + s.close() + + +data_dir = Path(Utils.getNodeDataDir(node_id)) +config_dir = Path(Utils.getNodeConfigDir(node_id)) +start_nodeos_cmd = shlex.split(f"{Utils.EosServerPath} -e -p eosio --data-dir={data_dir} --config-dir={config_dir}") +if os.path.exists(data_dir): + shutil.rmtree(data_dir) +os.makedirs(data_dir) +if not os.path.exists(config_dir): + os.makedirs(config_dir) +nodeos = Node(TestHelper.LOCAL_HOST, TestHelper.DEFAULT_PORT, node_id, config_dir, data_dir, start_nodeos_cmd, unstarted=True) + +with open(config_dir / 'logging.json', 'w') as textFile: + print(logging,file=textFile) + +stop_threads = False +t1 = threading.Thread(target = gelfServer, args =(lambda : stop_threads, )) + +try: + @atexit.register + def cleanup(): + nodeos.kill(signal.SIGINT) + global stop_threads + stop_threads = True + t1.join() + + t1.start() + + nodeos.launchUnstarted() + time.sleep(nodeos_run_time_in_sec) +finally: + cleanup() + +stderr_file = data_dir / 'stderr.txt' +with open(stderr_file, "r") as f: + stderr_txt = f.read().rstrip() + +assert len(received_logs) > 10, "Not enough gelf logs are received" +for received_log in received_logs: + assert received_log in stderr_txt, "received GELF log entry does not match that of stderr" + +if os.path.exists(Utils.DataPath): + shutil.rmtree(Utils.DataPath) diff --git a/tests/get_account_test.py b/tests/get_account_test.py index b1edf744f1..c691c9a3a2 100755 --- a/tests/get_account_test.py +++ b/tests/get_account_test.py @@ -16,7 +16,7 @@ args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed" ,"--dump-error-details","-v","--leave-running" - ,"--clean-run","--keep-logs","--unshared"}) + ,"--keep-logs","--unshared"}) pnodes=args.p topo=args.s @@ -26,21 +26,13 @@ nodesFile=args.nodes_file dontLaunch=nodesFile is not None seed=args.seed -dontKill=args.leave_running dumpErrorDetails=args.dump_error_details -killAll=args.clean_run -keepLogs=args.keep_logs - -killWallet=not dontKill -killEosInstances=not dontKill -if nodesFile is not None: - killEosInstances=False Utils.Debug=debug testSuccessful=False random.seed(seed) # Use a fixed seed for repeatability. 
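The gelfServer thread in gelf_test.py above decompresses each UDP datagram with zlib.MAX_WBITS|32, a wbits value that lets zlib auto-detect zlib or gzip framing. A minimal round-trip of that decode path, with a hypothetical payload:

import json, zlib

# Hypothetical GELF payload; a real appender sends more fields.
entry = {"short_message": "example log line"}
datagram = zlib.compress(json.dumps(entry).encode())  # zlib-framed, as an appender might send it
# MAX_WBITS|32 auto-detects zlib or gzip headers, matching gelfServer's decode.
decoded = json.loads(zlib.decompress(datagram, zlib.MAX_WBITS | 32).decode())
assert decoded["short_message"] == "example log line"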
-cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=True if nodesFile is not None else args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" @@ -55,14 +47,9 @@ errorExit("Failed to initilize nodes from Json string.") total_nodes=len(cluster.getNodes()) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() print("Stand up walletd") if walletMgr.launch() is False: errorExit("Failed to stand up keosd.") - else: - cluster.killall(allInstances=killAll) - cluster.cleanup() Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) @@ -97,7 +84,7 @@ testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) errorCode = 0 if testSuccessful else 1 -exit(errorCode) \ No newline at end of file +exit(errorCode) diff --git a/tests/http_plugin_test.py b/tests/http_plugin_test.py index cfb3d17606..26d8abe6c4 100755 --- a/tests/http_plugin_test.py +++ b/tests/http_plugin_test.py @@ -15,16 +15,13 @@ errorExit=Utils.errorExit cmdError=Utils.cmdError -args = TestHelper.parse_args({"-v","--clean-run", "--dump-error-details","--keep-logs","--unshared"}) +args = TestHelper.parse_args({"--leave-running","-v", "--dump-error-details","--keep-logs","--unshared"}) debug=args.v -killAll=args.clean_run -killEosInstances = True -keepLogs = args.keep_logs dumpErrorDetails = dumpErrorDetails=args.dump_error_details Utils.Debug=debug -cluster=Cluster(host="127.0.0.1",walletd=True,unshared=args.unshared) +cluster=Cluster(host="127.0.0.1", unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) testSuccessful=False @@ -63,7 +60,8 @@ def get_info_status(url): testSuccessful = True finally: - TestHelper.shutdown(cluster, None, testSuccessful, killEosInstances, True, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, None, testSuccessful, dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) + diff --git a/tests/large-lib-test.py b/tests/large-lib-test.py index 1a3045bdcc..6c5138b4b1 100755 --- a/tests/large-lib-test.py +++ b/tests/large-lib-test.py @@ -20,15 +20,12 @@ errorExit=Utils.errorExit args=TestHelper.parse_args({"--kill-sig","--kill-count","--keep-logs" - ,"--dump-error-details","-v","--leave-running","--clean-run","--unshared" + ,"--dump-error-details","-v","--leave-running","--unshared" }) pnodes=1 total_nodes=3 # first one is producer, and last two are speculative nodes debug=args.v -killEosInstances=not args.leave_running dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -killAll=args.clean_run relaunchTimeout=10 # Don't want to set too big, trying to reduce test time, but needs to be large enough for test to finish before # restart re-creates this many blocks. @@ -40,12 +37,12 @@ seed=1 random.seed(seed) # Use a fixed seed for repeatability. 
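Tests that accept --nodes-file (distributed-transactions-test.py and get_account_test.py above) pass keepRunning=True whenever that file is supplied, since nodes the harness did not launch must never be torn down. With boolean operands the conditional collapses to a plain or:

# Equivalent spelling of the keepRunning conditional used with --nodes-file;
# externally launched nodes must never be torn down by the harness.
keepRunning = (nodesFile is not None) or args.leave_running
cluster = Cluster(unshared=args.unshared, keepRunning=keepRunning, keepLogs=args.keep_logs)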
-cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) cluster.setWalletMgr(walletMgr) def relaunchNode(node: Node, chainArg="", skipGenesis=True, relaunchAssertMessage="Fail to relaunch"): - isRelaunchSuccess=node.relaunch(chainArg=chainArg, timeout=relaunchTimeout, skipGenesis=skipGenesis, cachePopen=True) + isRelaunchSuccess=node.relaunch(chainArg=chainArg, timeout=relaunchTimeout, skipGenesis=skipGenesis) time.sleep(1) # Give a second to replay or resync if needed assert isRelaunchSuccess, relaunchAssertMessage return isRelaunchSuccess @@ -53,11 +50,6 @@ def relaunchNode(node: Node, chainArg="", skipGenesis=True, relaunchAssertMessag try: TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - Print("Stand up cluster") if cluster.launch( pnodes=pnodes, @@ -77,6 +69,7 @@ def relaunchNode(node: Node, chainArg="", skipGenesis=True, relaunchAssertMessag Print("Wait for producing {} blocks".format(numBlocksToProduceBeforeRelaunch)) producingNode.waitForBlock(numBlocksToProduceBeforeRelaunch, blockType=BlockType.lib) + producingNode.waitForProducer("defproducera") Print("Kill all node instances.") for clusterNode in cluster.nodes: @@ -91,8 +84,9 @@ def relaunchNode(node: Node, chainArg="", skipGenesis=True, relaunchAssertMessag Utils.rmNodeDataDir(2) Print ("Relaunch all cluster nodes instances.") - # -e -p eosio for resuming production, skipGenesis=False for launch the same chain as before - relaunchNode(producingNode, chainArg="-e -p eosio --sync-fetch-span 5 ", skipGenesis=False) + # -e for resuming production, defproducera only producer at this point + # skipGenesis=False for launch the same chain as before + relaunchNode(producingNode, chainArg="-e --sync-fetch-span 5 ", skipGenesis=False) relaunchNode(speculativeNode1, chainArg="--sync-fetch-span 5 ") relaunchNode(speculativeNode2, chainArg="--sync-fetch-span 5 ", skipGenesis=False) @@ -116,7 +110,7 @@ def relaunchNode(node: Node, chainArg="", skipGenesis=True, relaunchAssertMessag testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killEosInstances, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) diff --git a/tests/launcher_test.py b/tests/launcher_test.py deleted file mode 100755 index f5471c8a80..0000000000 --- a/tests/launcher_test.py +++ /dev/null @@ -1,241 +0,0 @@ -#!/usr/bin/env python3 - -import decimal -import re -import os - -from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL -from pathlib import Path - -############################################################### -# launcher-test -# -# Specifically tests using the bios bootstrap script that is created by eosio-launcher -# -############################################################### - -Print=Utils.Print -errorExit=Utils.errorExit -cmdError=Utils.cmdError - -args = TestHelper.parse_args({"--defproducera_prvt_key","--dump-error-details","--dont-launch","--keep-logs", - "-v","--leave-running","--clean-run","--unshared"}) -debug=args.v -defproduceraPrvtKey=args.defproducera_prvt_key -dumpErrorDetails=args.dump_error_details 
-keepLogs=args.keep_logs -dontLaunch=args.dont_launch -dontKill=args.leave_running -killAll=args.clean_run - -Utils.Debug=debug -cluster=Cluster(walletd=True, defproduceraPrvtKey=defproduceraPrvtKey,unshared=args.unshared) -walletMgr=WalletMgr(True) -testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill - -WalletdName=Utils.EosWalletName -ClientName="cleos" -timeout = .5 * 12 * 2 + 60 # time for finalization with 1 producer + 60 seconds padding -Utils.setIrreversibleTimeout(timeout) - -try: - TestHelper.printSystemInfo("BEGIN") - - cluster.setWalletMgr(walletMgr) - - if not dontLaunch: - cluster.killall(allInstances=killAll) - cluster.cleanup() - Print("Stand up cluster") - pnodes=4 - abs_path = os.path.abspath(os.getcwd() + '/unittests/contracts/eosio.token/eosio.token.abi') - traceNodeosArgs=" --trace-rpc-abi eosio.token=" + abs_path - if cluster.launch(pnodes=pnodes, totalNodes=pnodes, extraNodeosArgs=traceNodeosArgs) is False: - cmdError("launcher") - errorExit("Failed to stand up eos cluster.") - else: - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - cluster.initializeNodes(defproduceraPrvtKey=defproduceraPrvtKey) - killEosInstances=False - - print("Stand up walletd") - if walletMgr.launch() is False: - cmdError("%s" % (WalletdName)) - errorExit("Failed to stand up eos walletd.") - - Print("Validating system accounts after bootstrap") - cluster.validateAccounts(None) - - accounts=Cluster.createAccountKeys(3) - if accounts is None: - errorExit("FAILURE - create keys") - testeraAccount=accounts[0] - testeraAccount.name="testera11111" - currencyAccount=accounts[1] - currencyAccount.name="currency1111" - exchangeAccount=accounts[2] - exchangeAccount.name="exchange1111" - - PRV_KEY1=testeraAccount.ownerPrivateKey - PUB_KEY1=testeraAccount.ownerPublicKey - PRV_KEY2=currencyAccount.ownerPrivateKey - PUB_KEY2=currencyAccount.ownerPublicKey - PRV_KEY3=exchangeAccount.activePrivateKey - PUB_KEY3=exchangeAccount.activePublicKey - - testeraAccount.activePrivateKey=currencyAccount.activePrivateKey=PRV_KEY3 - testeraAccount.activePublicKey=currencyAccount.activePublicKey=PUB_KEY3 - - exchangeAccount.ownerPrivateKey=PRV_KEY2 - exchangeAccount.ownerPublicKey=PUB_KEY2 - - testWalletName="test" - Print("Creating wallet \"%s\"." % (testWalletName)) - testWallet=walletMgr.create(testWalletName, [cluster.eosioAccount,cluster.defproduceraAccount]) - - Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8"))) - - for account in accounts: - Print("Importing keys for account %s into wallet %s." % (account.name, testWallet.name)) - if not walletMgr.importKey(account, testWallet): - cmdError("%s wallet import" % (ClientName)) - errorExit("Failed to import key for account %s" % (account.name)) - - defproduceraWalletName="defproducera" - Print("Creating wallet \"%s\"." % (defproduceraWalletName)) - defproduceraWallet=walletMgr.create(defproduceraWalletName) - - Print("Wallet \"%s\" password=%s." % (defproduceraWalletName, defproduceraWallet.password.encode("utf-8"))) - - defproduceraAccount=cluster.defproduceraAccount - - Print("Importing keys for account %s into wallet %s." 
% (defproduceraAccount.name, defproduceraWallet.name)) - if not walletMgr.importKey(defproduceraAccount, defproduceraWallet): - cmdError("%s wallet import" % (ClientName)) - errorExit("Failed to import key for account %s" % (defproduceraAccount.name)) - - node=cluster.getNode(0) - - Print("Validating accounts before user accounts creation") - cluster.validateAccounts(None) - - # create accounts via eosio as otherwise a bid is needed - Print("Create new account %s via %s" % (testeraAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(testeraAccount, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) - - Print("Create new account %s via %s" % (currencyAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(currencyAccount, cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000, exitOnError=True) - - Print("Create new account %s via %s" % (exchangeAccount.name, cluster.eosioAccount.name)) - transId=node.createInitializeAccount(exchangeAccount, cluster.eosioAccount, buyRAM=1000000, waitForTransBlock=True, exitOnError=True) - - Print("Validating accounts after user accounts creation") - accounts=[testeraAccount, currencyAccount, exchangeAccount] - cluster.validateAccounts(accounts) - - Print("Verify account %s" % (testeraAccount)) - if not node.verifyAccount(testeraAccount): - errorExit("FAILURE - account creation failed.", raw=True) - - transferAmount="97.5321 {0}".format(CORE_SYMBOL) - Print("Transfer funds %s from account %s to %s" % (transferAmount, defproduceraAccount.name, testeraAccount.name)) - node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", waitForTransBlock=True) - - expectedAmount=transferAmount - Print("Verify transfer, Expected: %s" % (expectedAmount)) - actualAmount=node.getAccountEosBalanceStr(testeraAccount.name) - if expectedAmount != actualAmount: - cmdError("FAILURE - transfer failed") - errorExit("Transfer verification failed. Excepted %s, actual: %s" % (expectedAmount, actualAmount)) - - transferAmount="0.0100 {0}".format(CORE_SYMBOL) - Print("Force transfer funds %s from account %s to %s" % ( - transferAmount, defproduceraAccount.name, testeraAccount.name)) - node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", force=True, waitForTransBlock=True) - - expectedAmount="97.5421 {0}".format(CORE_SYMBOL) - Print("Verify transfer, Expected: %s" % (expectedAmount)) - actualAmount=node.getAccountEosBalanceStr(testeraAccount.name) - if expectedAmount != actualAmount: - cmdError("FAILURE - transfer failed") - errorExit("Transfer verification failed. Excepted %s, actual: %s" % (expectedAmount, actualAmount)) - - Print("Validating accounts after some user transactions") - accounts=[testeraAccount, currencyAccount, exchangeAccount] - cluster.validateAccounts(accounts) - - transferAmount="97.5311 {0}".format(CORE_SYMBOL) - Print("Transfer funds %s from account %s to %s" % ( - transferAmount, testeraAccount.name, currencyAccount.name)) - trans=node.transferFunds(testeraAccount, currencyAccount, transferAmount, "test transfer a->b", waitForTransBlock=True) - transId=Node.getTransId(trans) - - expectedAmount="98.0311 {0}".format(CORE_SYMBOL) # 5000 initial deposit - Print("Verify transfer, Expected: %s" % (expectedAmount)) - actualAmount=node.getAccountEosBalanceStr(currencyAccount.name) - if expectedAmount != actualAmount: - cmdError("FAILURE - transfer failed") - errorExit("Transfer verification failed. 
Excepted %s, actual: %s" % (expectedAmount, actualAmount)) - - node.waitForTransactionInBlock(transId) - - transaction=node.getTransaction(transId, exitOnError=True, delayedRetry=False) - - typeVal=None - amountVal=None - key="" - try: - key = "[actions][0][action]" - typeVal = transaction["actions"][0]["action"] - key = "[actions][0][params][quantity]" - amountVal = transaction["actions"][0]["params"]["quantity"] - amountVal = int(decimal.Decimal(amountVal.split()[0]) * 10000) - except (TypeError, KeyError) as e: - Print("transaction%s not found. Transaction: %s" % (key, transaction)) - raise - - if typeVal != "transfer" or amountVal != 975311: - errorExit("FAILURE - get transaction trans_id failed: %s %s %s" % (transId, typeVal, amountVal), raw=True) - - Print("Bouncing nodes #00 and #01") - if cluster.bounce("00,01") is False: - cmdError("launcher bounce") - errorExit("Failed to bounce eos node.") - - Print("Taking down node #02") - if cluster.down("02") is False: - cmdError("launcher down command") - errorExit("Failed to take down eos node.") - - Print("Using bounce option to re-launch node #02") - if cluster.bounce("02") is False: - cmdError("launcher bounce") - errorExit("Failed to bounce eos node.") - - p = re.compile('Assert') - errFileName=f"{cluster.nodeosLogPath}/node_00/stderr.txt" - assertionsFound = False - with open(errFileName) as errFile: - for line in errFile: - if p.search(line): - assertionsFound=True - - if assertionsFound: - # Too many assertion logs, hard to validate how many are genuine. Make this a warning - # for now, hopefully the logs will get cleaned up in future. - Print(f"WARNING: Asserts in {cluster.nodeosLogPath}/node_00/stderr.txt") - #errorExit("FAILURE - Assert in launcher_test.py/node_00/stderr.txt") - - Print("Validating accounts at end of test") - accounts=[testeraAccount, currencyAccount, exchangeAccount] - cluster.validateAccounts(accounts) - - testSuccessful=True -finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) - -exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file diff --git a/tests/light_validation_sync_test.py b/tests/light_validation_sync_test.py index 10ddec5946..df6e2e95d2 100755 --- a/tests/light_validation_sync_test.py +++ b/tests/light_validation_sync_test.py @@ -18,24 +18,19 @@ ############################################################### # Parse command line arguments -args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs","--unshared"}) +args = TestHelper.parse_args({"-v","--dump-error-details","--leave-running","--keep-logs","--unshared"}) Utils.Debug = args.v -killAll=args.clean_run dumpErrorDetails=args.dump_error_details dontKill=args.leave_running -killEosInstances=not dontKill -killWallet=not dontKill keepLogs=args.keep_logs walletMgr=WalletMgr(True) -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) testSuccessful = False try: TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() assert cluster.launch( pnodes=1, prodCount=1, @@ -93,7 +88,7 @@ def isBlockNumIrr(): testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) 
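light_validation_sync_test.py above gates part of the test on a block becoming irreversible (the isBlockNumIrr predicate visible in its hunk header). A hedged sketch of such a predicate, assuming the standard get_info fields; the waitForBool helper in the trailing comment is likewise an assumption, not confirmed by this patch:

# Sketch of an irreversibility predicate like isBlockNumIrr in
# light_validation_sync_test.py; field name per the standard get_info response.
def isBlockNumIrr(node, blockNum):
    # A block is irreversible once LIB has advanced to or past it.
    return node.getInfo()["last_irreversible_block_num"] >= blockNum

# Typical polling use (Utils.waitForBool is assumed to exist in the harness):
# Utils.waitForBool(lambda: isBlockNumIrr(irrNode, targetBlockNum), timeout=60)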
exitCode = 0 if testSuccessful else 1 exit(exitCode) diff --git a/tests/nested_container_multi_index_test.py b/tests/nested_container_multi_index_test.py index e6086e45d0..442fadb14a 100755 --- a/tests/nested_container_multi_index_test.py +++ b/tests/nested_container_multi_index_test.py @@ -32,7 +32,7 @@ args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed" ,"--dump-error-details","-v","--leave-running" - ,"--clean-run","--keep-logs","--unshared"}) + ,"--keep-logs","--unshared"}) pnodes=args.p topo=args.s @@ -42,20 +42,12 @@ nodesFile=args.nodes_file dontLaunch=nodesFile is not None seed=args.seed -dontKill=args.leave_running dumpErrorDetails=args.dump_error_details -killAll=args.clean_run -keepLogs=args.keep_logs - -killWallet=not dontKill -killEosInstances=not dontKill -if nodesFile is not None: - killEosInstances=False Utils.Debug=debug testSuccessful=False -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=True if nodesFile is not None else args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" @@ -73,9 +65,6 @@ def create_action(action, data, contract_account, usr): Print("result= ", result) try: - cluster.killall(allInstances=False) - cluster.cleanup() - Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) @@ -384,9 +373,9 @@ def create_action(action, data, contract_account, usr): assert testSuccessful finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet) + TestHelper.shutdown(cluster, walletMgr, testSuccessful) if testSuccessful: exit(0) else: - exit(-1) \ No newline at end of file + exit(-1) diff --git a/tests/nodeos_chainbase_allocation_test.py b/tests/nodeos_chainbase_allocation_test.py index 9de53d0a4f..5771428b80 100755 --- a/tests/nodeos_chainbase_allocation_test.py +++ b/tests/nodeos_chainbase_allocation_test.py @@ -16,24 +16,17 @@ ############################################################### # Parse command line arguments -args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs","--unshared"}) +args = TestHelper.parse_args({"-v","--dump-error-details","--leave-running","--keep-logs","--unshared"}) Utils.Debug = args.v -killAll=args.clean_run dumpErrorDetails=args.dump_error_details -dontKill=args.leave_running -killEosInstances=not dontKill -killWallet=not dontKill -keepLogs=args.keep_logs walletMgr=WalletMgr(True) -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) testSuccessful = False try: TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() # The following is the list of chainbase objects that need to be verified: # - account_object (bootstrap) @@ -50,15 +43,17 @@ pnodes=1, prodCount=1, totalProducers=1, - totalNodes=2, + totalNodes=3, loadSystemContract=False, specificExtraNodeosArgs={ 1:"--read-mode irreversible --plugin eosio::producer_api_plugin"}) producerNodeId = 0 irrNodeId = 1 + nonProdNodeId = 2 producerNode = cluster.getNode(producerNodeId) irrNode = cluster.getNode(irrNodeId) + nonProdNode = cluster.getNode(nonProdNodeId) # Create delayed transaction to create "generated_transaction_object" cmd = "create account -j eosio 
sample EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV\ @@ -70,7 +65,7 @@ newProducerAcc = Account("newprod") newProducerAcc.ownerPublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" newProducerAcc.activePublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" - producerNode.createAccount(newProducerAcc, cluster.eosioAccount) + nonProdNode.createAccount(newProducerAcc, cluster.eosioAccount, waitForTransBlock=True) setProdsStr = '{"schedule": [' setProdsStr += '{"producer_name":' + newProducerAcc.name + ',"block_signing_key":' + newProducerAcc.activePublicKey + '}' @@ -97,7 +92,7 @@ def isSetProdsBlockNumIrr(): # Restart irr node and ensure the snapshot is still identical irrNode.kill(signal.SIGTERM) - isRelaunchSuccess = irrNode.relaunch(timeout=5, cachePopen=True) + isRelaunchSuccess = irrNode.relaunch(timeout=5) assert isRelaunchSuccess, "Fail to relaunch" res = irrNode.createSnapshot() afterShutdownSnapshotPath = res["payload"]["snapshot_name"] @@ -105,7 +100,7 @@ def isSetProdsBlockNumIrr(): testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) diff --git a/tests/nodeos_contrl_c_test.py b/tests/nodeos_contrl_c_test.py index 3ba58b6561..e379a6bf0f 100755 --- a/tests/nodeos_contrl_c_test.py +++ b/tests/nodeos_contrl_c_test.py @@ -3,7 +3,7 @@ import signal import time -from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL +from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys ############################################################### # nodeos_contrl_c_lr_test @@ -20,8 +20,7 @@ args = TestHelper.parse_args({"--wallet-port", "-v","--unshared"}) -cluster=Cluster(walletd=True,unshared=args.unshared) -killAll=True +cluster=Cluster(unshared=args.unshared) totalProducerNodes=2 totalNonProducerNodes=1 totalNodes=totalProducerNodes+totalNonProducerNodes @@ -38,8 +37,6 @@ try: TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() specificExtraNodeosArgs = {} # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node @@ -62,9 +59,9 @@ cluster.validateAccounts(None) prodNode = cluster.getNode(0) - nonProdNode = cluster.getNode(1) + nonProdNode = cluster.getNode(2) - accounts=cluster.createAccountKeys(2) + accounts=createAccountKeys(2) if accounts is None: Utils.errorExit("FAILURE - create keys") @@ -97,7 +94,7 @@ testSuccessful=False Print("Configure and launch txn generators") - targetTpsPerGenerator = 100 + targetTpsPerGenerator = 10 testTrxGenDurationSec=60 trxGeneratorCnt=1 cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name,accounts[1].name], @@ -111,11 +108,11 @@ testSuccessful = nonProdNode.kill(signal.SIGTERM) if not testSuccessful: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, keepLogs=True, cleanRun=True, dumpErrorDetails=True) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=True) errorExit("Failed to kill the seed node") finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=True, killWallet=True, 
keepLogs=False, cleanRun=True, dumpErrorDetails=True) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=True) errorCode = 0 if testSuccessful else 1 -exit(errorCode) \ No newline at end of file +exit(errorCode) diff --git a/tests/nodeos_extra_packed_data_test.py b/tests/nodeos_extra_packed_data_test.py index 1af8b0c1a3..3d38aa0691 100755 --- a/tests/nodeos_extra_packed_data_test.py +++ b/tests/nodeos_extra_packed_data_test.py @@ -3,7 +3,7 @@ import json import copy -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL +from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys from TestHarness.Cluster import PFSetupPolicy from TestHarness.TestHelper import AppArgs @@ -19,7 +19,7 @@ cmdError=Utils.cmdError args = TestHelper.parse_args({"--host","--port","-p","--defproducera_prvt_key","--defproducerb_prvt_key" - ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--clean-run" + ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running" ,"--sanity-test","--wallet-port","--unshared"}) server=args.host port=args.port @@ -27,11 +27,9 @@ defproduceraPrvtKey=args.defproducera_prvt_key defproducerbPrvtKey=args.defproducerb_prvt_key dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs dontLaunch=args.dont_launch -dontKill=args.leave_running pnodes=args.p -killAll=args.clean_run +totalNodes=pnodes+1 sanityTest=args.sanity_test walletPort=args.wallet_port @@ -39,14 +37,13 @@ localTest=True if server == TestHelper.LOCAL_HOST else False cluster=Cluster(host=server, port=port, - walletd=True, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey, - unshared=args.unshared) + unshared=args.unshared, + keepRunning=args.leave_running, + keepLogs=args.keep_logs) walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill dontBootstrap=sanityTest # intent is to limit the scope of the sanity test to just verifying that nodes can be started WalletdName=Utils.EosWalletName @@ -61,8 +58,6 @@ Print("PORT: %d" % (port)) if localTest and not dontLaunch: - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") specificExtraNodeosArgs = {} associatedNodeLabels = {} @@ -71,7 +66,7 @@ if pnodes > 3: specificExtraNodeosArgs[pnodes - 2] = "" - if cluster.launch(totalNodes=pnodes, + if cluster.launch(totalNodes=totalNodes, pnodes=pnodes, dontBootstrap=dontBootstrap, pfSetupPolicy=PFSetupPolicy.PREACTIVATE_FEATURE_ONLY, @@ -82,10 +77,7 @@ else: Print("Collecting cluster info.") cluster.initializeNodes(defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey) - killEosInstances=False Print("Stand up %s" % (WalletdName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() print("Stand up walletd") if walletMgr.launch() is False: cmdError("%s" % (WalletdName)) @@ -98,7 +90,7 @@ Print("Validating system accounts after bootstrap") cluster.validateAccounts(None) - accounts=Cluster.createAccountKeys(2) + accounts=createAccountKeys(2) if accounts is None: errorExit("FAILURE - create keys") testeraAccount=accounts[0] @@ -123,12 +115,13 @@ errorExit("Failed to import key for account %s" % (account.name)) node=cluster.getNode(0) + nonProdNode=cluster.getAllNodes()[-1] Print("Create new account %s via %s" % (testeraAccount.name, cluster.defproduceraAccount.name)) transId=node.createInitializeAccount(testeraAccount, 
cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) Print("Create new account %s via %s" % (testerbAccount.name, cluster.defproduceraAccount.name)) - transId=node.createInitializeAccount(testerbAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=True, exitOnError=True) + transId=nonProdNode.createInitializeAccount(testerbAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=True, exitOnError=True) Print("Validating accounts after user accounts creation") accounts=[testeraAccount, testerbAccount] @@ -182,4 +175,7 @@ testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) + +errorCode = 0 if testSuccessful else 1 +exit(errorCode) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 8803403f19..b998d15a59 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -5,8 +5,10 @@ import time import json import signal +import os -from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL +from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys +from TestHarness.TestHelper import AppArgs ############################################################### # nodeos_forked_chain_test @@ -27,6 +29,8 @@ # Time is allowed to progress so that the "bridge" node can catchup and both producer nodes to come to consensus # The block log is then checked for both producer nodes to verify that the 10 producer fork is selected and that # both nodes are in agreement on the block log. +# This test also runs a state_history_plugin (SHiP) on node 0 and uses ship_streamer to verify all blocks are received +# across the fork. 
# ############################################################### @@ -121,27 +125,24 @@ def getMinHeadAndLib(prodNodes): return (headBlockNum, libNum) - -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", - "--wallet-port","--unshared"}) +appArgs = AppArgs() +extraArgs = appArgs.add(flag="--num-ship-clients", type=int, help="How many ship_streamers should be started", default=2) +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running", + "--wallet-port","--unshared"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v totalProducerNodes=2 totalNonProducerNodes=1 totalNodes=totalProducerNodes+totalNonProducerNodes maxActiveProducers=21 totalProducers=maxActiveProducers -cluster=Cluster(walletd=True,unshared=args.unshared) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running prodCount=args.prod_count -killAll=args.clean_run walletPort=args.wallet_port +num_clients=args.num_ship_clients +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -150,10 +151,11 @@ def getMinHeadAndLib(prodNodes): TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") specificExtraNodeosArgs={} + shipNodeNum = 0 + specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts" + # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin" @@ -174,7 +176,7 @@ def getMinHeadAndLib(prodNodes): # *** create accounts to vote in desired producers *** - accounts=cluster.createAccountKeys(5) + accounts=createAccountKeys(5) if accounts is None: Utils.errorExit("FAILURE - create keys") accounts[0].name="tester111111" @@ -220,7 +222,7 @@ def getMinHeadAndLib(prodNodes): # *** delegate bandwidth to accounts *** - node=prodNodes[0] + node=nonProdNode # create accounts via eosio as otherwise a bid is needed for account in accounts: Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) @@ -286,6 +288,31 @@ def getBlock(self, blockNum): timestampStr=Node.getBlockAttribute(block, "timestamp", blockNum) timestamp=datetime.strptime(timestampStr, Utils.TimeFmt) + shipNode = cluster.getNode(0) + block_range = 1000 + start_block_num = blockNum + end_block_num = start_block_num + block_range + + shipClient = "tests/ship_streamer" + cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas" + if Utils.Debug: Utils.Print(f"cmd: {cmd}") + clients = [] + files = [] + shipTempDir = os.path.join(Utils.DataDir, "ship") + os.makedirs(shipTempDir, exist_ok = True) + shipClientFilePrefix = os.path.join(shipTempDir, "client") + + starts = [] + for i in range(0, num_clients): + start = time.perf_counter() + outFile = open(f"{shipClientFilePrefix}{i}.out", "w") + errFile = open(f"{shipClientFilePrefix}{i}.err", "w") + Print(f"Start client {i}") + popen=Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile) + starts.append(time.perf_counter()) + clients.append((popen, cmd)) + 
files.append((outFile, errFile)) + Utils.Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") # *** Identify what the production cycle is *** @@ -559,21 +586,41 @@ def getBlock(self, blockNum): Utils.errorExit("Did not find find block %s (the original divergent block) in blockProducers0, test setup is wrong. blockProducers0: %s" % (killBlockNum, ", ".join(blockProducers0))) Print("Fork resolved and determined producer %s for block %s" % (resolvedKillBlockProducer, killBlockNum)) + Print(f"Stopping all {num_clients} clients") + for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts): + popen.wait() + Print(f"Stopped client {index}. Ran for {time.perf_counter() - start:.3f} seconds.") + out.close() + err.close() + outFile = open(f"{shipClientFilePrefix}{index}.out", "r") + data = json.load(outFile) + block_num = start_block_num + for i in data: + # fork can cause block numbers to be repeated + this_block_num = i['get_blocks_result_v0']['this_block']['block_num'] + if this_block_num < block_num: + block_num = this_block_num + assert block_num == this_block_num, f"{block_num} != {this_block_num}" + assert isinstance(i['get_blocks_result_v0']['block'], str) # verify block in result + block_num += 1 + assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}" + blockProducers0=[] blockProducers1=[] testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) - - if not testSuccessful: - Print(Utils.FileDivider) - Print("Compare Blocklog") - cluster.compareBlockLogs() - Print(Utils.FileDivider) - Print("Print Blocklog") - cluster.printBlockLog() - Print(Utils.FileDivider) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) + +# Too much output for ci/cd +# if not testSuccessful: +# Print(Utils.FileDivider) +# Print("Compare Blocklog") +# cluster.compareBlockLogs() +# Print(Utils.FileDivider) +# Print("Print Blocklog") +# cluster.printBlockLog() +# Print(Utils.FileDivider) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/nodeos_high_transaction_test.py b/tests/nodeos_high_transaction_test.py index 32691bc25c..0b0550408a 100755 --- a/tests/nodeos_high_transaction_test.py +++ b/tests/nodeos_high_transaction_test.py @@ -5,7 +5,7 @@ import json from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL -from TestHarness.Cluster import NamedAccounts +from TestHarness.accounts import NamedAccounts from TestHarness.TestHelper import AppArgs ############################################################### @@ -27,7 +27,7 @@ extraArgs = appArgs.add(flag="--max-transactions-per-second", type=int, help="How many transactions per second should be sent", default=500) extraArgs = appArgs.add(flag="--total-accounts", type=int, help="How many accounts should be involved in sending transfers. 
Must be greater than %d" % (minTotalAccounts), default=100) extraArgs = appArgs.add_bool(flag="--send-duplicates", help="If identical transactions should be sent to all nodes") -args = TestHelper.parse_args({"-p", "-n","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--unshared"}, applicationSpecificArgs=appArgs) +args = TestHelper.parse_args({"-p", "-n","--dump-error-details","--keep-logs","-v","--leave-running","--unshared"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v totalProducerNodes=args.p @@ -37,11 +37,8 @@ totalNonProducerNodes=totalNodes-totalProducerNodes maxActiveProducers=totalProducerNodes totalProducers=totalProducerNodes -cluster=Cluster(walletd=True,unshared=args.unshared) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -killAll=args.clean_run +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) walletPort=TestHelper.DEFAULT_WALLET_PORT blocksPerSec=2 transBlocksBehind=args.transaction_time_delta * blocksPerSec @@ -58,8 +55,6 @@ walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -71,8 +66,6 @@ TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") if cluster.launch(pnodes=totalProducerNodes, @@ -381,15 +374,17 @@ def findTransInBlock(transId, transToBlock, node): testSuccessful = not delayedReportError finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) - if not testSuccessful: - Print(Utils.FileDivider) - Print("Compare Blocklog") - cluster.compareBlockLogs() - Print(Utils.FileDivider) - Print("Print Blocklog") - cluster.printBlockLog() - Print(Utils.FileDivider) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) + +# Too much output for ci/cd +# if not testSuccessful: +# Print(Utils.FileDivider) +# Print("Compare Blocklog") +# cluster.compareBlockLogs() +# Print(Utils.FileDivider) +# Print("Print Blocklog") +# cluster.printBlockLog() +# Print(Utils.FileDivider) errorCode = 0 if testSuccessful else 1 exit(errorCode) diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py index cfe0b1c92e..812e338695 100755 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -21,22 +21,18 @@ cmdError = Utils.cmdError relaunchTimeout = 30 numOfProducers = 4 -totalNodes = 15 +totalNodes = 20 # Parse command line arguments -args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs","--unshared"}) +args = TestHelper.parse_args({"-v","--dump-error-details","--leave-running","--keep-logs","--unshared"}) Utils.Debug = args.v -killAll=args.clean_run dumpErrorDetails=args.dump_error_details -dontKill=args.leave_running -killEosInstances=not dontKill -killWallet=not dontKill -keepLogs=args.keep_logs speculativeReadMode="head" +blockLogRetainBlocks="10000" # Setup cluster and it's wallet manager walletMgr=WalletMgr(True) -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) def 
backupBlksDir(nodeId): @@ -152,7 +148,7 @@ def confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBefore "Fork db head ({}) should be equal to fork db head before switch mode ({}) ".format(forkDbHead, forkDbHeadBeforeSwitchMode) def relaunchNode(node: Node, chainArg="", addSwapFlags=None, relaunchAssertMessage="Fail to relaunch"): - isRelaunchSuccess = node.relaunch(chainArg=chainArg, addSwapFlags=addSwapFlags, timeout=relaunchTimeout, cachePopen=True) + isRelaunchSuccess = node.relaunch(chainArg=chainArg, addSwapFlags=addSwapFlags, timeout=relaunchTimeout) time.sleep(1) # Give a second to replay or resync if needed assert isRelaunchSuccess, relaunchAssertMessage return isRelaunchSuccess @@ -163,8 +159,6 @@ def relaunchNode(node: Node, chainArg="", addSwapFlags=None, relaunchAssertMessa try: # Kill any existing instances and launch cluster TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() cluster.launch( prodCount=numOfProducers, totalProducers=numOfProducers, @@ -180,7 +174,13 @@ def relaunchNode(node: Node, chainArg="", addSwapFlags=None, relaunchAssertMessa 11:"--read-mode irreversible", 12:"--read-mode speculative", 13:"--read-mode irreversible", - 14:"--read-mode speculative --plugin eosio::producer_api_plugin"}) + 14:"--read-mode speculative --plugin eosio::producer_api_plugin", + 15:"--read-mode speculative", + 16:"--read-mode irreversible", + 17:"--read-mode speculative", + 18:"--read-mode irreversible", + 19:"--read-mode speculative --plugin eosio::producer_api_plugin" + }) producingNodeId = 0 producingNode = cluster.getNode(producingNodeId) @@ -260,7 +260,7 @@ def switchSpecToIrrMode(nodeIdOfNodeToTest, nodeToTest): # Kill and relaunch in irreversible mode nodeToTest.kill(signal.SIGTERM) - relaunchNode(nodeToTest, addSwapFlags={"--read-mode": "irreversible"}) + relaunchNode(nodeToTest, addSwapFlags={"--read-mode": "irreversible", "--block-log-retain-blocks":blockLogRetainBlocks}) # Ensure the node condition is as expected after relaunch confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) @@ -273,7 +273,7 @@ def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): # Kill and relaunch in speculative mode nodeToTest.kill(signal.SIGTERM) - relaunchNode(nodeToTest, addSwapFlags={"--read-mode": speculativeReadMode}) + relaunchNode(nodeToTest, addSwapFlags={"--read-mode": speculativeReadMode, "--block-log-retain-blocks":blockLogRetainBlocks}) # Ensure the node condition is as expected after relaunch confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) @@ -289,7 +289,7 @@ def switchSpecToIrrModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): # Kill and relaunch in irreversible mode nodeToTest.kill(signal.SIGTERM) waitForBlksProducedAndLibAdvanced() # Wait for some blks to be produced and lib advance - relaunchNode(nodeToTest, addSwapFlags={"--read-mode": "irreversible"}) + relaunchNode(nodeToTest, addSwapFlags={"--read-mode": "irreversible", "--block-log-retain-blocks":blockLogRetainBlocks}) # Ensure the node condition is as expected after relaunch ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) @@ -308,7 +308,7 @@ def switchIrrToSpecModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): # Kill and relaunch in irreversible mode nodeToTest.kill(signal.SIGTERM) waitForBlksProducedAndLibAdvanced() # Wait for some blks to be produced and lib advance) - relaunchNode(nodeToTest, addSwapFlags={"--read-mode": speculativeReadMode}) + 
relaunchNode(nodeToTest, addSwapFlags={"--read-mode": speculativeReadMode, "--block-log-retain-blocks":blockLogRetainBlocks}) # Ensure the node condition is as expected after relaunch ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) @@ -400,6 +400,50 @@ def switchToSpecModeWithIrrModeSnapshot(nodeIdOfNodeToTest, nodeToTest): finally: stopProdNode() + # 10th test case: Load an irreversible snapshot into a node running without a block log + # Expectation: Node launches successfully + # and the head and lib should be advancing after some blocks are produced + def switchToNoBlockLogWithIrrModeSnapshot(nodeIdOfNodeToTest, nodeToTest): + try: + # Record the head, lib, and fork db head of the node in speculative mode, then kill it + headLibAndForkDbHeadBeforeShutdown = getHeadLibAndForkDbHead(nodeToTest) + nodeToTest.kill(signal.SIGTERM) + + # Relaunch in irreversible mode and create the snapshot + relaunchNode(nodeToTest, addSwapFlags={"--read-mode": "irreversible", "--block-log-retain-blocks":"0"}) + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) + nodeToTest.createSnapshot() + nodeToTest.kill(signal.SIGTERM) + + # Start from clean data dir and then relaunch with irreversible snapshot, no block log means that fork_db will be reset + removeState(nodeIdOfNodeToTest) + relaunchNode(nodeToTest, chainArg=" --snapshot {}".format(getLatestSnapshot(nodeIdOfNodeToTest)), addSwapFlags={"--read-mode": speculativeReadMode, "--block-log-retain-blocks":"0"}) + confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest) + # Ensure it does not replay "reversible blocks", i.e. head and lib should be different + headLibAndForkDbHeadAfterRelaunch = getHeadLibAndForkDbHead(nodeToTest) + assert headLibAndForkDbHeadBeforeShutdown != headLibAndForkDbHeadAfterRelaunch, \ + "1: Head, Lib, and Fork Db same after relaunch {} vs {}".format(headLibAndForkDbHeadBeforeShutdown, headLibAndForkDbHeadAfterRelaunch) + + # Start production and wait until lib advances, ensuring everything is alright + startProdNode() + ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) + + # Note the head, lib and fork db head + stopProdNode() + headLibAndForkDbHeadBeforeShutdown = getHeadLibAndForkDbHead(nodeToTest) + nodeToTest.kill(signal.SIGTERM) + + # Relaunch the node again (using the same snapshot) + # The end result should be the same as before shutdown + removeState(nodeIdOfNodeToTest) + relaunchNode(nodeToTest) + headLibAndForkDbHeadAfterRelaunch2 = getHeadLibAndForkDbHead(nodeToTest) + assert headLibAndForkDbHeadAfterRelaunch == headLibAndForkDbHeadAfterRelaunch2, \ + "2: Head, Lib, and Fork Db after relaunch is different {} vs {}".format(headLibAndForkDbHeadAfterRelaunch, headLibAndForkDbHeadAfterRelaunch2) + finally: + stopProdNode() + + + # Start executing test cases here testSuccessful = executeTest(1, replayInIrrModeWithRevBlks) testSuccessful = testSuccessful and executeTest(2, replayInIrrModeWithoutRevBlks) @@ -419,8 +463,17 @@ def switchToSpecModeWithIrrModeSnapshot(nodeIdOfNodeToTest, nodeToTest): testSuccessful = testSuccessful and executeTest(13, switchIrrToSpecModeWithConnectedToProdNode) testSuccessful = testSuccessful and executeTest(14, switchToSpecModeWithIrrModeSnapshot) + # retest with read-mode head and no block log + speculativeReadMode="head" + blockLogRetainBlocks="0" + testSuccessful = testSuccessful and executeTest(15, switchSpecToIrrMode) + testSuccessful = testSuccessful and executeTest(16, switchIrrToSpecMode) + testSuccessful = testSuccessful and executeTest(17, switchSpecToIrrModeWithConnectedToProdNode) + testSuccessful = testSuccessful and
executeTest(18, switchIrrToSpecModeWithConnectedToProdNode) + testSuccessful = testSuccessful and executeTest(19, switchToNoBlockLogWithIrrModeSnapshot) + finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) # Print test result for msg in testResultMsgs: Print(msg) diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index ac190287af..b355c4983b 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -6,6 +6,7 @@ import os from os.path import join, exists from datetime import datetime +from typing import List from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr from TestHarness.Cluster import PFSetupPolicy @@ -18,26 +19,21 @@ ############################################################### # Parse command line arguments -args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running", +args = TestHelper.parse_args({"-v","--dump-error-details","--leave-running", "--keep-logs","--alternate-version-labels-file","--unshared"}) Utils.Debug=args.v -killAll=args.clean_run dumpErrorDetails=args.dump_error_details -dontKill=args.leave_running -killEosInstances=not dontKill -killWallet=not dontKill -keepLogs=args.keep_logs alternateVersionLabelsFile=args.alternate_version_labels_file walletMgr=WalletMgr(True) -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) def restartNode(node: Node, chainArg=None, addSwapFlags=None, nodeosPath=None): if not node.killed: node.kill(signal.SIGTERM) isRelaunchSuccess = node.relaunch(chainArg, addSwapFlags=addSwapFlags, - timeout=5, cachePopen=True, nodeosPath=nodeosPath) + timeout=5, nodeosPath=nodeosPath) assert isRelaunchSuccess, "Fail to relaunch" def shouldNodeContainPreactivateFeature(node): @@ -78,8 +74,6 @@ def hasBlockBecomeIrr(): testSuccessful = False try: TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() # Create a cluster of 4 nodes, each node has 1 producer. The first 3 nodes use the latest vesion, # While the 4th node use the version that doesn't support protocol feature activation (i.e. 
1.7.0) @@ -116,7 +110,7 @@ def resumeBlockProductions(): for node in allNodes: if not node.killed: node.processUrllibRequest("producer", "resume") - def areNodesInSync(nodes:[Node]): + def areNodesInSync(nodes: List[Node]): # Pause all block production to ensure the head is not moving pauseBlockProductions() time.sleep(2) # Wait for some time to ensure all blocks are propagated @@ -199,7 +193,7 @@ def areNodesInSync(nodes:[Node]): testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) diff --git a/tests/nodeos_producer_watermark_test.py b/tests/nodeos_producer_watermark_test.py index 6f0c815878..178891b9a2 100755 --- a/tests/nodeos_producer_watermark_test.py +++ b/tests/nodeos_producer_watermark_test.py @@ -5,7 +5,7 @@ import math import re -from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr +from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, createAccountKeys ############################################################### # nodeos_producer_watermark_test @@ -147,22 +147,17 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): Print=Utils.Print errorExit=Utils.errorExit -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running", "--wallet-port","--unshared"}) Utils.Debug=args.v totalNodes=3 -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running prodCount=args.prod_count -killAll=args.clean_run walletPort=args.wallet_port walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -173,8 +168,6 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, onlySetProds=True, sharedProducers=1) is False: Utils.cmdError("launcher") @@ -220,7 +213,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): verifyProductionRounds(trans, node0, prodsActive, 1) # change signing key of shrproducera that no one can sign - accounts = cluster.createAccountKeys(1) + accounts = createAccountKeys(1) Print("change producer signing key of shrproducera that none of the node has") shracc_node1.activePublicKey = accounts[0].activePublicKey del prodsActive["shrproducera"] @@ -247,7 +240,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git 
a/tests/nodeos_protocol_feature_test.py b/tests/nodeos_protocol_feature_test.py index e8db7f2a76..4ed776eb7d 100755 --- a/tests/nodeos_protocol_feature_test.py +++ b/tests/nodeos_protocol_feature_test.py @@ -16,32 +16,25 @@ ############################################################### # Parse command line arguments -args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs","--unshared"}) +args = TestHelper.parse_args({"-v","--dump-error-details","--leave-running","--keep-logs","--unshared"}) Utils.Debug = args.v -killAll=args.clean_run dumpErrorDetails=args.dump_error_details -dontKill=args.leave_running -killEosInstances=not dontKill -killWallet=not dontKill -keepLogs=args.keep_logs # The following test case will test the Protocol Feature JSON reader of the blockchain def restartNode(node: Node, chainArg=None, addSwapFlags=None): if not node.killed: node.kill(signal.SIGTERM) - isRelaunchSuccess = node.relaunch(chainArg, addSwapFlags=addSwapFlags, timeout=5, cachePopen=True) + isRelaunchSuccess = node.relaunch(chainArg, addSwapFlags=addSwapFlags, timeout=5) assert isRelaunchSuccess, "Fail to relaunch" walletMgr=WalletMgr(True) -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) testSuccessful = False try: TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() cluster.launch(extraNodeosArgs=" --plugin eosio::producer_api_plugin --http-max-response-time-ms 990000 ", dontBootstrap=True, pfSetupPolicy=PFSetupPolicy.NONE) @@ -65,7 +58,7 @@ def restartNode(node: Node, chainArg=None, addSwapFlags=None): testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) diff --git a/tests/nodeos_read_terminate_at_block_test.py b/tests/nodeos_read_terminate_at_block_test.py index 222d39b2e0..aace1a5645 100755 --- a/tests/nodeos_read_terminate_at_block_test.py +++ b/tests/nodeos_read_terminate_at_block_test.py @@ -24,7 +24,6 @@ # Parse command line arguments args = TestHelper.parse_args({ "-v", - "--clean-run", "--dump-error-details", "--leave-running", "--keep-logs", @@ -32,12 +31,7 @@ }) Utils.Debug = args.v -killAll = args.clean_run dumpErrorDetails = args.dump_error_details -dontKill = args.leave_running -killEosInstances = not dontKill -killWallet = not dontKill -keepLogs = args.keep_logs # Wrapper function to execute test # This wrapper function will resurrect the node to be tested, and shut @@ -60,7 +54,7 @@ def executeTest(cluster, testNodeId, testNodeArgs, resultMsgs): testNode = cluster.getNode(testNodeId) assert not testNode.verifyAlive() # resets pid so reluanch works - testNode.relaunch(addSwapFlags={"--terminate-at-block": "9999999"}, cachePopen=True) + testNode.relaunch(addSwapFlags={"--terminate-at-block": "9999999"}) # Wait for node to start up. time.sleep(3) @@ -131,7 +125,7 @@ def checkReplay(testNode, testNodeArgs): ])) assert not testNode.verifyAlive() - testNode.relaunch(chainArg="--replay-blockchain", addSwapFlags={"--terminate-at-block": "9999999"}, cachePopen=True) + testNode.relaunch(chainArg="--replay-blockchain", addSwapFlags={"--terminate-at-block": "9999999"}) # Wait for node to finish up. 
time.sleep(3) @@ -180,7 +174,7 @@ def checkHeadOrSpeculative(head, lib): # Setup cluster and it's wallet manager walletMgr = WalletMgr(True) -cluster = Cluster(walletd=True,unshared=args.unshared) +cluster = Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) cluster.setWalletMgr(walletMgr) # List to contain the test result message @@ -194,10 +188,7 @@ def checkHeadOrSpeculative(head, lib): 3 : "--read-mode speculative --terminate-at-block 125" } - # Kill any existing instances and launch cluster TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() cluster.launch( prodCount=numOfProducers, totalProducers=numOfProducers, @@ -242,10 +233,6 @@ def checkHeadOrSpeculative(head, lib): cluster, walletMgr, testSuccessful, - killEosInstances, - killWallet, - keepLogs, - killAll, dumpErrorDetails ) diff --git a/tests/nodeos_retry_transaction_test.py b/tests/nodeos_retry_transaction_test.py index 855d2c9bd3..d1b82388da 100755 --- a/tests/nodeos_retry_transaction_test.py +++ b/tests/nodeos_retry_transaction_test.py @@ -5,7 +5,7 @@ import json from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL -from TestHarness.Cluster import NamedAccounts +from TestHarness.accounts import NamedAccounts from TestHarness.TestHelper import AppArgs ############################################################### @@ -31,7 +31,7 @@ extraArgs = appArgs.add(flag="--num-transactions", type=int, help="How many total transactions should be sent", default=1000) extraArgs = appArgs.add(flag="--max-transactions-per-second", type=int, help="How many transactions per second should be sent", default=50) extraArgs = appArgs.add(flag="--total-accounts", type=int, help="How many accounts should be involved in sending transfers. 
Must be greater than %d" % (minTotalAccounts), default=10) -args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--unshared"}, applicationSpecificArgs=appArgs) +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--unshared"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v totalProducerNodes=3 @@ -39,11 +39,8 @@ totalNonProducerNodes=totalNodes-totalProducerNodes maxActiveProducers=totalProducerNodes totalProducers=totalProducerNodes -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -killAll=args.clean_run walletPort=TestHelper.DEFAULT_WALLET_PORT blocksPerSec=2 transBlocksBehind=args.transaction_time_delta * blocksPerSec @@ -61,8 +58,6 @@ walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -71,8 +66,6 @@ TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") specificExtraNodeosArgs={ @@ -95,7 +88,8 @@ Utils.Print("Bios node killed") # need bios to pass along blocks so api node can continue without its other peer, but drop trx which is the point of this test Utils.Print("Restart bios in drop transactions mode") - cluster.biosNode.relaunch("bios", cachePopen=True, addSwapFlags={"--p2p-accept-transactions": "false"}) + if not cluster.biosNode.relaunch(addSwapFlags={"--p2p-accept-transactions": "false"}): + Utils.errorExit("Failed to relaunch bios node") # *** create accounts to vote in desired producers *** @@ -277,8 +271,9 @@ def findTransInBlock(transId, transToBlock, node): if round % 3 == 0: relaunchTime = time.perf_counter() - cluster.getNode(4).relaunch(cachePopen=True) - cluster.getNode(6).relaunch(cachePopen=True) + time.sleep(1) # give time for transactions to be sent + cluster.getNode(4).relaunch() + cluster.getNode(6).relaunch() startRound = startRound - ( time.perf_counter() - relaunchTime ) startTime = startTime - ( time.perf_counter() - relaunchTime ) @@ -379,7 +374,7 @@ def findTransInBlock(transId, transToBlock, node): testSuccessful = not missingReportError and not delayedReportError finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) errorCode = 0 if testSuccessful else 1 exit(errorCode) diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index 1b87863966..48e72a202b 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -from TestHarness import Account, Cluster, Node, ReturnType, TestHelper, Utils, WalletMgr, CORE_SYMBOL +from TestHarness import Account, Cluster, Node, ReturnType, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys from pathlib import Path import decimal @@ -21,7 +21,7 @@ cmdError=Utils.cmdError args = TestHelper.parse_args({"--host","--port","--prod-count","--defproducera_prvt_key","--defproducerb_prvt_key" - ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run" + 
,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios" ,"--sanity-test","--wallet-port", "--error-log-path", "--unshared"}) server=args.host port=args.port @@ -29,25 +29,20 @@ defproduceraPrvtKey=args.defproducera_prvt_key defproducerbPrvtKey=args.defproducerb_prvt_key dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs dontLaunch=args.dont_launch -dontKill=args.leave_running prodCount=args.prod_count onlyBios=args.only_bios -killAll=args.clean_run sanityTest=args.sanity_test walletPort=args.wallet_port Utils.Debug=debug localTest=True if server == TestHelper.LOCAL_HOST else False -cluster=Cluster(host=server, port=port, walletd=True, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey,unshared=args.unshared) +cluster=Cluster(host=server, port=port, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey,unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) errFileName=f"{cluster.nodeosLogPath}/node_00/stderr.txt" if args.error_log_path: errFileName=args.error_log_path walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill dontBootstrap=sanityTest # intent is to limit the scope of the sanity test to just verifying that nodes can be started WalletdName=Utils.EosWalletName @@ -62,24 +57,18 @@ Print("PORT: %d" % (port)) if localTest and not dontLaunch: - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") abs_path = os.path.abspath(os.getcwd() + '/unittests/contracts/eosio.token/eosio.token.abi') traceNodeosArgs=" --http-max-response-time-ms 990000 --trace-rpc-abi eosio.token=" + abs_path specificNodeosInstances={0: "bin/nodeos"} - if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, extraNodeosArgs=traceNodeosArgs, specificNodeosInstances=specificNodeosInstances) is False: + if cluster.launch(totalNodes=2, prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, extraNodeosArgs=traceNodeosArgs, specificNodeosInstances=specificNodeosInstances) is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") else: Print("Collecting cluster info.") cluster.initializeNodes(defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey) - killEosInstances=False Print("Stand up %s" % (WalletdName)) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - print("Stand up walletd") if walletMgr.launch() is False: cmdError("%s" % (WalletdName)) errorExit("Failed to stand up eos walletd.") @@ -91,7 +80,7 @@ Print("Validating system accounts after bootstrap") cluster.validateAccounts(None) - accounts=Cluster.createAccountKeys(4) + accounts=createAccountKeys(4) if accounts is None: errorExit("FAILURE - create keys") testeraAccount=accounts[0] @@ -206,13 +195,13 @@ if len(noMatch) > 0: errorExit("FAILURE - wallet keys did not include %s" % (noMatch), raw=True) - node=cluster.getNode(0) + node=cluster.getNode(1) Print("Validating accounts before user accounts creation") cluster.validateAccounts(None) Print("Create new account %s via %s" % (testeraAccount.name, cluster.defproduceraAccount.name)) - transId=node.createInitializeAccount(testeraAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) + transId=node.createInitializeAccount(testeraAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=True, exitOnError=True) Print("Create new 
account %s via %s" % (testerbAccount.name, cluster.defproduceraAccount.name)) transId=node.createInitializeAccount(testerbAccount, cluster.defproduceraAccount, stakedDeposit=0, waitForTransBlock=False, exitOnError=True) @@ -233,7 +222,7 @@ transferAmount="97.5321 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, defproduceraAccount.name, testeraAccount.name)) - node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer") + node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", waitForTransBlock=True) expectedAmount=transferAmount Print("Verify transfer, Expected: %s" % (expectedAmount)) @@ -245,7 +234,7 @@ transferAmount="0.0100 {0}".format(CORE_SYMBOL) Print("Force transfer funds %s from account %s to %s" % ( transferAmount, defproduceraAccount.name, testeraAccount.name)) - node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", force=True) + node.transferFunds(defproduceraAccount, testeraAccount, transferAmount, "test transfer", force=True, waitForTransBlock=True) expectedAmount="97.5421 {0}".format(CORE_SYMBOL) Print("Verify transfer, Expected: %s" % (expectedAmount)) @@ -271,7 +260,7 @@ transferAmount="97.5311 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % ( transferAmount, testeraAccount.name, currencyAccount.name)) - trans=node.transferFunds(testeraAccount, currencyAccount, transferAmount, "test transfer a->b") + trans=node.transferFunds(testeraAccount, currencyAccount, transferAmount, "test transfer a->b", waitForTransBlock=True) transId=Node.getTransId(trans) expectedAmount="98.0311 {0}".format(CORE_SYMBOL) # 5000 initial deposit @@ -304,6 +293,7 @@ Print("Currency Contract Tests") Print("verify no contract in place") Print("Get code hash for account %s" % (currencyAccount.name)) + node=cluster.getNode(0) codeHash=node.getAccountCodeHash(currencyAccount.name) if codeHash is None: cmdError("%s get code currency1111" % (ClientName)) @@ -758,7 +748,7 @@ testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) errorCode = 0 if testSuccessful else 1 -exit(errorCode) \ No newline at end of file +exit(errorCode) diff --git a/tests/nodeos_short_fork_take_over_test.py b/tests/nodeos_short_fork_take_over_test.py index 87e8f66fef..f468cae52c 100755 --- a/tests/nodeos_short_fork_take_over_test.py +++ b/tests/nodeos_short_fork_take_over_test.py @@ -105,7 +105,7 @@ def getMinHeadAndLib(prodNodes): -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running", "--wallet-port","--unshared"}) Utils.Debug=args.v totalProducerNodes=2 @@ -113,17 +113,12 @@ def getMinHeadAndLib(prodNodes): totalNodes=totalProducerNodes+totalNonProducerNodes maxActiveProducers=3 totalProducers=maxActiveProducers -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -killAll=args.clean_run walletPort=args.wallet_port walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill 
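Every test touched by this diff undergoes the same mechanical lifecycle migration visible in the hunk above: the killAll/dontKill/killEosInstances/killWallet/keepLogs bookkeeping is deleted, cleanup policy moves into the Cluster constructor via keepRunning and keepLogs, the killall()/cleanup() preamble disappears, and TestHelper.shutdown() shrinks to four arguments. The following minimal skeleton of the new pattern is assembled only from calls that appear verbatim elsewhere in this diff, and assumes the repository's TestHarness package is importable:

```python
#!/usr/bin/env python3

from TestHarness import Cluster, TestHelper, Utils, WalletMgr

args = TestHelper.parse_args({"-v","--dump-error-details","--leave-running","--keep-logs","--unshared"})
Utils.Debug = args.v
dumpErrorDetails = args.dump_error_details

# Cleanup policy now lives on the cluster itself instead of in per-test flag variables.
cluster = Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs)
walletMgr = WalletMgr(True)
cluster.setWalletMgr(walletMgr)

testSuccessful = False
try:
    TestHelper.printSystemInfo("BEGIN")
    # ... launch the cluster and run test assertions here ...
    testSuccessful = True
finally:
    # shutdown() now derives kill/cleanup behavior from the cluster's own settings.
    TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails)

exit(0 if testSuccessful else 1)
```

Centralizing the policy in the constructor removes the easy-to-miss killall/cleanup preamble and keeps the --leave-running and --keep-logs semantics consistent across all of these tests.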
WalletdName=Utils.EosWalletName ClientName="cleos" @@ -132,8 +127,6 @@ def getMinHeadAndLib(prodNodes): TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") specificExtraNodeosArgs={} # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node @@ -414,16 +407,17 @@ def getMinHeadAndLib(prodNodes): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) - - if not testSuccessful: - Print(Utils.FileDivider) - Print("Compare Blocklog") - cluster.compareBlockLogs() - Print(Utils.FileDivider) - Print("Compare Blocklog") - cluster.printBlockLog() - Print(Utils.FileDivider) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) + +# Too much output for ci/cd +# if not testSuccessful: +# Print(Utils.FileDivider) +# Print("Compare Blocklog") +# cluster.compareBlockLogs() +# Print(Utils.FileDivider) +# Print("Print Blocklog") +# cluster.printBlockLog() +# Print(Utils.FileDivider) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/nodeos_snapshot_diff_test.py b/tests/nodeos_snapshot_diff_test.py index 72b8d57e1d..4e0624030c 100755 --- a/tests/nodeos_snapshot_diff_test.py +++ b/tests/nodeos_snapshot_diff_test.py @@ -32,7 +32,7 @@ errorExit=Utils.errorExit appArgs=AppArgs() -args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--wallet-port","--unshared"}, +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--wallet-port","--unshared"}, applicationSpecificArgs=appArgs) relaunchTimeout = 30 @@ -41,19 +41,14 @@ testAccounts = 2 trxGeneratorCnt=2 startedNonProdNodes = 3 -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running prodCount=2 -killAll=args.clean_run walletPort=args.wallet_port totalNodes=startedNonProdNodes+pnodes walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -80,12 +75,9 @@ def removeState(nodeId): TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() - specificExtraNodeosArgs={} Print("Stand up cluster") if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, - specificExtraNodeosArgs=specificExtraNodeosArgs, loadSystemContract=True, maximumP2pPerHost=totalNodes+trxGeneratorCnt) is False: + loadSystemContract=True, maximumP2pPerHost=totalNodes+trxGeneratorCnt) is False: Utils.errorExit("Failed to stand up eos cluster.") Print("Create test wallet") @@ -93,7 +85,7 @@ def removeState(nodeId): cluster.populateWallet(2, wallet) Print("Create test accounts for transactions.") - cluster.createAccounts(cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=0) + cluster.createAccounts(cluster.eosioAccount, stakedDeposit=0) account1Name = cluster.accounts[0].name account2Name = 
cluster.accounts[1].name @@ -180,8 +172,8 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI nodeProg.kill(signal.SIGTERM) output=cluster.getBlockLog(progNodeId, blockLogAction=BlockLogAction.trim, first=0, last=ret_head_block_num, throwException=True) removeState(progNodeId) - Utils.rmFromFile(Utils.getNodeConfigDir(progNodeId, "config.ini"), "p2p-peer-address") - isRelaunchSuccess = nodeProg.relaunch(chainArg="--replay", addSwapFlags={}, timeout=relaunchTimeout, cachePopen=True) + nodeProg.rmFromCmd('--p2p-peer-address') + isRelaunchSuccess = nodeProg.relaunch(chainArg="--replay", addSwapFlags={}, timeout=relaunchTimeout) assert isRelaunchSuccess, "Failed to relaunch programmable node" Print("Schedule snapshot (node 2)") @@ -205,9 +197,9 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI Print("Relaunch irreversible node in irreversible mode") removeState(irrNodeId) - Utils.rmFromFile(Utils.getNodeConfigDir(irrNodeId, "config.ini"), "p2p-peer-address") + nodeIrr.rmFromCmd('--p2p-peer-address') swapFlags = {"--read-mode":"irreversible", "--p2p-max-nodes-per-host":"0", "--max-clients":"0", "--allowed-connection":"none"} - isRelaunchSuccess = nodeIrr.relaunch(chainArg="--replay", addSwapFlags=swapFlags, timeout=relaunchTimeout, cachePopen=True) + isRelaunchSuccess = nodeIrr.relaunch(chainArg="--replay", addSwapFlags=swapFlags, timeout=relaunchTimeout) assert isRelaunchSuccess, "Failed to relaunch snapshot node" Print("Create snapshot from irreversible") @@ -231,7 +223,7 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/nodeos_snapshot_forked_test.py b/tests/nodeos_snapshot_forked_test.py index e9453e0e87..6afe803385 100755 --- a/tests/nodeos_snapshot_forked_test.py +++ b/tests/nodeos_snapshot_forked_test.py @@ -20,7 +20,7 @@ Print=Utils.Print errorExit=Utils.errorExit -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running", "--wallet-port","--unshared"}) Utils.Debug=args.v totalProducerNodes=2 @@ -28,17 +28,12 @@ totalNodes=totalProducerNodes+totalNonProducerNodes maxActiveProducers=3 totalProducers=maxActiveProducers -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -killAll=args.clean_run walletPort=args.wallet_port walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -67,8 +62,6 @@ def getSnapshotsCount(nodeId): TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") specificExtraNodeosArgs={} # producer nodes will be mapped to 0 through totalProducerNodes-1, so the 
number totalProducerNodes will be the non-producing node @@ -191,7 +184,7 @@ def getState(status): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) errorCode = 0 if testSuccessful else 1 -exit(errorCode) \ No newline at end of file +exit(errorCode) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index 364d4948a5..deeae78193 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -31,26 +31,21 @@ appArgs=AppArgs() extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10) extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=2) -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running", "-p","--wallet-port","--unshared"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v pnodes=args.p if args.p > 0 else 1 startedNonProdNodes = args.txn_gen_nodes if args.txn_gen_nodes >= 2 else 2 trxGeneratorCnt=startedNonProdNodes -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -prodCount=args.prod_count if args.prod_count > 1 else 2 -killAll=args.clean_run +prodCount=2 walletPort=args.wallet_port catchupCount=args.catchup_count if args.catchup_count > 0 else 1 totalNodes=startedNonProdNodes+pnodes+catchupCount walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -60,12 +55,9 @@ TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() - specificExtraNodeosArgs={} Print("Stand up cluster") if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, - specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=True, + unstartedNodes=catchupCount, loadSystemContract=True, maximumP2pPerHost=totalNodes+trxGeneratorCnt) is False: Utils.errorExit("Failed to stand up eos cluster.") @@ -74,7 +66,7 @@ cluster.populateWallet(2, wallet) Print("Create test accounts for transactions.") - cluster.createAccounts(cluster.eosioAccount, stakedDeposit=0, validationNodeIndex=0) + cluster.createAccounts(cluster.eosioAccount, stakedDeposit=0) account1Name = cluster.accounts[0].name account2Name = cluster.accounts[1].name @@ -111,7 +103,7 @@ def waitForNodeStarted(node): waitForBlock(node0, blockNum, blockType=BlockType.lib) Print("Configure and launch txn generators") - targetTpsPerGenerator = 100 + targetTpsPerGenerator = 10 testTrxGenDurationSec=60*60 cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[account1Name, account2Name], acctPrivKeysList=[account1PrivKey,account2PrivKey], nodeId=node0.nodeId, @@ -149,7 +141,7 @@ def waitForNodeStarted(node): twoRoundsTimeout=(twoRounds/2 + 10) #2 rounds in 
seconds + some leeway for catchup_num in range(0, catchupCount): Print("Start catchup node") - cluster.launchUnstarted(cachePopen=True) + cluster.launchUnstarted() lastLibNum=lib(node0) # verify producer lib is still advancing waitForBlock(node0, lastLibNum+1, timeout=twoRoundsTimeout, blockType=BlockType.lib) @@ -178,7 +170,7 @@ def waitForNodeStarted(node): waitForBlock(node0, catchupHead+5, timeout=twoRoundsTimeout*2, blockType=BlockType.lib) Print("Restart catchup node") - catchupNode.relaunch(cachePopen=True) + catchupNode.relaunch() waitForNodeStarted(catchupNode) lastCatchupLibNum=lib(catchupNode) @@ -200,7 +192,7 @@ def waitForNodeStarted(node): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py index 7e57a9dc99..a16a776560 100755 --- a/tests/nodeos_under_min_avail_ram.py +++ b/tests/nodeos_under_min_avail_ram.py @@ -2,8 +2,8 @@ import time -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL -from TestHarness.Cluster import NamedAccounts +from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys +from TestHarness.accounts import NamedAccounts ############################################################### # nodeos_under_min_avail_ram @@ -18,20 +18,16 @@ Print=Utils.Print errorExit=Utils.errorExit -args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--wallet-port","--unshared"}) +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--wallet-port","--unshared"}) Utils.Debug=args.v -totalNodes=4 -cluster=Cluster(walletd=True,unshared=args.unshared) +pNodes=4 +totalNodes=5 +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -killAll=args.clean_run walletPort=args.wallet_port walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -40,15 +36,13 @@ TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") minRAMFlag="--chain-state-db-guard-size-mb" minRAMValue=1002 maxRAMFlag="--chain-state-db-size-mb" maxRAMValue=1010 extraNodeosArgs=" %s %d %s %d --http-max-response-time-ms 990000 " % (minRAMFlag, minRAMValue, maxRAMFlag, maxRAMValue) - if cluster.launch(onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs) is False: + if cluster.launch(onlyBios=False, pnodes=pNodes, totalNodes=totalNodes, totalProducers=totalNodes, extraNodeosArgs=extraNodeosArgs) is False: Utils.cmdError("launcher") errorExit("Failed to stand up eos cluster.") @@ -75,7 +69,7 @@ nodes.append(cluster.getNode(2)) nodes.append(cluster.getNode(3)) numNodes=len(nodes) - + nonProdNode = cluster.getNode(4) for account in accounts: walletMgr.importKey(account, testWallet) @@ -83,21 +77,21 @@ # create 
accounts via eosio as otherwise a bid is needed for account in accounts: Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) - trans=nodes[0].createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=True, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True) + trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=True, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True) transferAmount="70000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) - nodes[0].transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True) - trans=nodes[0].delegatebw(account, 1000000.0000, 68000000.0000, waitForTransBlock=True, exitOnError=True) + nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True) + trans=nonProdNode.delegatebw(account, 1000000.0000, 68000000.0000, waitForTransBlock=True, exitOnError=True) - contractAccount=cluster.createAccountKeys(1)[0] + contractAccount=createAccountKeys(1)[0] contractAccount.name="contracttest" walletMgr.importKey(contractAccount, testWallet) Print("Create new account %s via %s" % (contractAccount.name, cluster.eosioAccount.name)) - trans=nodes[0].createInitializeAccount(contractAccount, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=True, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True) + trans=nonProdNode.createInitializeAccount(contractAccount, cluster.eosioAccount, stakedDeposit=500000, waitForTransBlock=True, stakeNet=50000, stakeCPU=50000, buyRAM=50000, exitOnError=True) transferAmount="90000000.0000 {0}".format(CORE_SYMBOL) Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, contractAccount.name)) - nodes[0].transferFunds(cluster.eosioAccount, contractAccount, transferAmount, "test transfer", waitForTransBlock=True) - trans=nodes[0].delegatebw(contractAccount, 1000000.0000, 88000000.0000, waitForTransBlock=True, exitOnError=True) + nonProdNode.transferFunds(cluster.eosioAccount, contractAccount, transferAmount, "test transfer", waitForTransBlock=True) + trans=nonProdNode.delegatebw(contractAccount, 1000000.0000, 88000000.0000, waitForTransBlock=True, exitOnError=True) contractDir="unittests/test-contracts/integration_test" wasmFile="integration_test.wasm" @@ -191,7 +185,7 @@ addSwapFlags["--enable-stale-production"]="" # just enable stale production for the first node enabledStaleProduction=True if not nodes[nodeIndex].relaunch("", newChain=False, addSwapFlags=addSwapFlags): - Utils.cmdError("Failed to restart node0 with new capacity %s" % (maxRAMValue)) + Utils.cmdError(f'Failed to restart {nodes[nodeIndex].name} with new capacity {maxRAMValue}') errorExit("Failure - Node should have restarted") addSwapFlags={} maxRAMValue=currentMinimumMaxRAM+30 @@ -302,7 +296,7 @@ testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/nodeos_voting_test.py 
b/tests/nodeos_voting_test.py index 5672c1a1bb..b9aabaa288 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL +from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys ############################################################### # nodeos_voting_test @@ -136,22 +136,18 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): Print=Utils.Print errorExit=Utils.errorExit -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running", "--wallet-port","--unshared"}) Utils.Debug=args.v -totalNodes=4 -cluster=Cluster(walletd=True,unshared=args.unshared) +prodNodes=4 +totalNodes=5 +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running prodCount=args.prod_count -killAll=args.clean_run walletPort=args.wallet_port walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -160,17 +156,15 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21) is False: + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=prodNodes, totalNodes=totalNodes, totalProducers=prodNodes*21) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") Print("Validating system accounts after bootstrap") cluster.validateAccounts(None) - accounts=cluster.createAccountKeys(5) + accounts=createAccountKeys(5) if accounts is None: Utils.errorExit("FAILURE - create keys") accounts[0].name="tester111111" @@ -189,11 +183,14 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): Print("Wallet \"%s\" password=%s." 
% (testWalletName, testWallet.password.encode("utf-8"))) + nonProdNode=cluster.getNode(4) for i in range(0, totalNodes): node=cluster.getNode(i) node.producers=Cluster.parseProducers(i) for prod in node.producers: - trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True) + trans=nonProdNode.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, + waitForTransBlock=True if prod == node.producers[-1] else False, + silentErrors=False if prod == node.producers[-1] else True, exitOnError=True) node0=cluster.getNode(0) node1=cluster.getNode(1) @@ -205,20 +202,18 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) for account in accounts: Print("Create new account %s via %s" % (account.name, cluster.eosioAccount.name)) - trans=node.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) - - node.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, + waitForTransBlock=True if account == accounts[-1] else False, + stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True) for account in accounts: Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.eosioAccount.name, account.name)) - node.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False) - - node.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", + waitForTransBlock=True if account == accounts[-1] else False) for account in accounts: - trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True) - - node.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, + waitForTransBlock=True if account == accounts[-1] else False, exitOnError=True) # containers for tracking producers prodsActive={} @@ -231,9 +226,11 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): #first account will vote for node0 producers, all others will vote for node1 producers node=node0 for account in accounts: - trans=node.vote(account, node.producers, waitForTransBlock=True) + trans=nonProdNode.vote(account, node.producers, waitForTransBlock=True if account == accounts[-1] else False) node=node1 + nonProdNode.undelegatebw(account, 1.0000, 1.0000, waitForTransBlock=True, silentErrors=False, exitOnError=True) + setActiveProducers(prodsActive, node1.producers) verifyProductionRounds(trans, node2, prodsActive, 2) @@ -242,7 +239,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): # first account will vote for node2 producers, all others will vote for node3 producers node1 for account in accounts: - trans=node.vote(account, node.producers, waitForTransBlock=True) + trans=nonProdNode.vote(account, node.producers, waitForTransBlock=True if account == accounts[-1] else False) node=node2 setActiveProducers(prodsActive, node2.producers) @@ -251,7 +248,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, 
dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/p2p_high_latency_test.py b/tests/p2p_high_latency_test.py index 905e75e050..861fade6a0 100644 --- a/tests/p2p_high_latency_test.py +++ b/tests/p2p_high_latency_test.py @@ -48,20 +48,16 @@ def exec(cmd): Print=Utils.Print -args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--unshared"}) +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--unshared"}) Utils.Debug=args.v producers=1 syncingNodes=1 totalNodes=producers+syncingNodes -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -killAll=args.clean_run testSuccessful=False -killEosInstances=not dontKill specificExtraNodeosArgs={} producerNodeId=0 @@ -72,8 +68,6 @@ def exec(cmd): try: TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=killAll) - cluster.cleanup() traceNodeosArgs=" --plugin eosio::producer_plugin --produce-time-offset-us 0 --last-block-time-offset-us 0 --cpu-effort-percent 100 \ --last-block-cpu-effort-percent 100 --producer-threads 1 --plugin eosio::net_plugin --net-threads 1" if cluster.launch(pnodes=1, totalNodes=totalNodes, totalProducers=1, specificExtraNodeosArgs=specificExtraNodeosArgs, extraNodeosArgs=traceNodeosArgs) is False: @@ -109,7 +103,7 @@ def exec(cmd): print(err.decode("utf-8")) # print error details of network slowdown termination commands Utils.errorExit("failed to remove network latency, exited with error code {}".format(ReturnCode)) finally: - TestHelper.shutdown(cluster, None, testSuccessful, killEosInstances, False, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, None, testSuccessful, dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/p2p_multiple_listen_test.py b/tests/p2p_multiple_listen_test.py new file mode 100755 index 0000000000..62f1534c63 --- /dev/null +++ b/tests/p2p_multiple_listen_test.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 + +import signal + +from TestHarness import Cluster, TestHelper, Utils, WalletMgr + +############################################################### +# p2p_multiple_listen_test +# +# Test nodeos ability to listen on multiple ports for p2p +# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +args=TestHelper.parse_args({"-p","-n","-d","--keep-logs" + ,"--dump-error-details","-v" + ,"--leave-running","--unshared"}) +pnodes=args.p +delay=args.d +debug=args.v +total_nodes=5 +dumpErrorDetails=args.dump_error_details + +Utils.Debug=debug +testSuccessful=False + +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) +walletMgr=WalletMgr(True) + +try: + TestHelper.printSystemInfo("BEGIN") + + cluster.setWalletMgr(walletMgr) + + Print(f'producing nodes: {pnodes}, delay between nodes launch: {delay} second{"s" if delay != 1 else ""}') + + Print("Stand up cluster") + specificArgs = { + '0': '--agent-name node-00 --p2p-listen-endpoint 0.0.0.0:9876 --p2p-listen-endpoint 0.0.0.0:9779 --p2p-server-address 
ext-ip0:20000 --p2p-server-address ext-ip1:20001 --plugin eosio::net_api_plugin', + '2': '--agent-name node-02 --p2p-peer-address localhost:9779 --plugin eosio::net_api_plugin', + '4': '--agent-name node-04 --p2p-peer-address localhost:9876 --plugin eosio::net_api_plugin', + } + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo='line', delay=delay, + specificExtraNodeosArgs=specificArgs) is False: + errorExit("Failed to stand up eos cluster.") + + # Be sure all nodes start out connected (bios node omitted from diagram for brevity) + # node00 node01 node02 node03 node04 + # localhost:9876 -> localhost:9877 -> localhost:9878 -> localhost:9879 -> localhost:9880 + # localhost:9779 ^ | | + # ^ +---------------------------+ | + # +------------------------------------------------------------------------+ + cluster.waitOnClusterSync(blockAdvancing=5) + # Shut down bios node, which is connected to all other nodes in all topologies + cluster.biosNode.kill(signal.SIGTERM) + # Shut down second node, interrupting the default connections between it and nodes 00 and 02 + cluster.getNode(1).kill(signal.SIGTERM) + # Shut down the fourth node, interrupting the default connections between it and nodes 02 and 04 + cluster.getNode(3).kill(signal.SIGTERM) + # Be sure all remaining nodes continue to sync via the two listen ports on node 00 + # node00 node01 node02 node03 node04 + # localhost:9876 offline localhost:9878 offline localhost:9880 + # localhost:9779 ^ | | + # ^ +---------------------------+ | + # +------------------------------------------------------------------------+ + cluster.waitOnClusterSync(blockAdvancing=5) + connections = cluster.nodes[0].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + if conn['last_handshake']['agent'] == 'node-02': + assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9878', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 9878" + elif conn['last_handshake']['agent'] == 'node-04': + assert conn['last_handshake']['p2p_address'].split()[0] == 'localhost:9880', f"Connected node is listening on '{conn['last_handshake']['p2p_address'].split()[0]}' instead of port 9880" + assert open_socket_count == 2, 'Node 0 is expected to have only two open sockets' + + connections = cluster.nodes[2].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identified as '{conn['last_handshake']['agent']}' instead of node-00" + assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip0:20000', f"Connected node is advertising '{conn['last_handshake']['p2p_address'].split()[0]}' instead of ext-ip0:20000" + assert open_socket_count == 1, 'Node 2 is expected to have only one open socket' + + connections = cluster.nodes[4].processUrllibRequest('net', 'connections') + open_socket_count = 0 + for conn in connections['payload']: + if conn['is_socket_open']: + open_socket_count += 1 + assert conn['last_handshake']['agent'] == 'node-00', f"Connected node identified as '{conn['last_handshake']['agent']}' instead of node-00" + assert conn['last_handshake']['p2p_address'].split()[0] == 'ext-ip1:20001', f"Connected node is advertising '{conn['last_handshake']['p2p_address'].split()[0]}' instead of ext-ip1:20001" + assert open_socket_count
== 1, 'Node 4 is expected to have only one open socket' + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/p2p_no_listen_test.py b/tests/p2p_no_listen_test.py new file mode 100755 index 0000000000..76b3c76886 --- /dev/null +++ b/tests/p2p_no_listen_test.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 + +import errno +import pathlib +import shutil +import signal +import socket +import time + +from TestHarness import Node, TestHelper, Utils + +############################################################### +# p2p_no_listen_test +# +# Test nodeos disabling p2p +# +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +args=TestHelper.parse_args({"--keep-logs","-v","--leave-running","--unshared"}) +debug=args.v + +Utils.Debug=debug +testSuccessful=False + +try: + TestHelper.printSystemInfo("BEGIN") + + cmd = [ + Utils.EosServerPath, + '-e', + '-p', + 'eosio', + '--p2p-listen-endpoint', + '', + '--plugin', + 'eosio::chain_api_plugin', + '--config-dir', + Utils.ConfigDir, + '--data-dir', + Utils.DataDir, + '--http-server-address', + 'localhost:8888' + ] + node = Node('localhost', '8888', '00', data_dir=pathlib.Path(Utils.DataDir), + config_dir=pathlib.Path(Utils.ConfigDir), cmd=cmd) + + time.sleep(1) + if not node.verifyAlive(): + raise RuntimeError + time.sleep(10) + node.waitForBlock(5) + + s = socket.socket() + err = s.connect_ex(('localhost',9876)) + assert err == errno.ECONNREFUSED, 'Connection to port 9876 must be refused' + + testSuccessful=True +finally: + Utils.ShuttingDown=True + + if not args.leave_running: + node.kill(signal.SIGTERM) + + if not (args.leave_running or args.keep_logs or not testSuccessful): + shutil.rmtree(Utils.DataPath, ignore_errors=True) + + if testSuccessful: + Utils.Print("Test succeeded.") + else: + Utils.Print("Test failed.") + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/performance_tests/CMakeLists.txt b/tests/performance_tests/CMakeLists.txt index e4edbc8805..c770cb2dfc 100644 --- a/tests/performance_tests/CMakeLists.txt +++ b/tests/performance_tests/CMakeLists.txt @@ -4,6 +4,7 @@ configure_file(log_reader.py . COPYONLY) configure_file(genesis.json . COPYONLY) configure_file(cpuTrxData.json . COPYONLY) configure_file(ramTrxData.json . COPYONLY) +configure_file(readOnlyTrxData.json . COPYONLY) configure_file(userTrxDataTransfer.json . COPYONLY) configure_file(userTrxDataNewAccount.json . 
COPYONLY) @@ -13,23 +14,27 @@ else() set(UNSHARE "") endif() -add_test(NAME performance_test COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --clean-run --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_api COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --clean-run --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --clean-run --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-api http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --clean-run --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -add_test(NAME performance_test_basic_ex_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 
--clean-run --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST performance_test PROPERTY LABELS long_running_tests) +add_test(NAME performance_test_bp COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_api COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 --calc-chain-threads lmax overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_read_only_trxs COMMAND tests/performance_tests/performance_test.py testApiOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --api-nodes-read-only-threads 2 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_cpu_trx_spec COMMAND tests/performance_tests/performance_test.py testBpOpMode --max-tps-to-test 50 --test-iteration-min-step 10 --test-iteration-duration-sec 10 --final-iterations-duration-sec 10 overrideBasicTestConfig -v --tps-limit-per-generator 25 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_p2p COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_http COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --target-tps 10 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_transfer_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file tests/performance_tests/userTrxDataTransfer.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_new_acct_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --user-trx-data-file 
tests/performance_tests/userTrxDataNewAccount.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_cpu_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "c" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/cpuTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_ram_trx_spec COMMAND tests/performance_tests/performance_test_basic.py -v --producer-nodes 1 --validation-nodes 1 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "r" --abi-file eosmechanics.abi --wasm-file eosmechanics.wasm --contract-dir unittests/contracts/eosio.mechanics --user-trx-data-file tests/performance_tests/ramTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_test(NAME performance_test_basic_read_only_trxs COMMAND tests/performance_tests/performance_test_basic.py -v --endpoint-mode http --producer-nodes 1 --validation-nodes 1 --api-nodes 1 --api-nodes-read-only-threads 2 --target-tps 20 --tps-limit-per-generator 10 --test-duration-sec 5 --chain-state-db-size-mb 200 --account-name "payloadless" --abi-file payloadless.abi --wasm-file payloadless.wasm --contract-dir unittests/test-contracts/payloadless --user-trx-data-file tests/performance_tests/readOnlyTrxData.json ${UNSHARE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST performance_test_bp PROPERTY LABELS long_running_tests) set_property(TEST performance_test_api PROPERTY LABELS long_running_tests) -set_property(TEST performance_test_ex_cpu_trx_spec PROPERTY LABELS long_running_tests) -set_property(TEST performance_test_basic PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_read_only_trxs PROPERTY LABELS long_running_tests) +set_property(TEST performance_test_cpu_trx_spec PROPERTY LABELS long_running_tests) +set_property(TEST performance_test_basic_p2p PROPERTY LABELS nonparallelizable_tests) set_property(TEST performance_test_basic_http PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests) -set_property(TEST performance_test_basic_ex_ram_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_transfer_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_new_acct_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_cpu_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_ram_trx_spec PROPERTY LABELS nonparallelizable_tests) +set_property(TEST performance_test_basic_read_only_trxs PROPERTY LABELS nonparallelizable_tests) add_subdirectory( NodeosPluginArgs ) diff --git a/tests/performance_tests/README.md b/tests/performance_tests/README.md index c7eefe9053..68de0e5ba9 100644 --- a/tests/performance_tests/README.md +++ b/tests/performance_tests/README.md @@ -342,6 +342,7 @@ usage: performance_test.py testBpOpMode [--skip-tps-test] [--calc-net-threads 
{none,lmax,full}] [--del-test-report] [--max-tps-to-test MAX_TPS_TO_TEST] + [--min-tps-to-test MIN_TPS_TO_TEST] [--test-iteration-duration-sec TEST_ITERATION_DURATION_SEC] [--test-iteration-min-step TEST_ITERATION_MIN_STEP] [--final-iterations-duration-sec FINAL_ITERATIONS_DURATION_SEC] @@ -418,6 +419,8 @@ Performance Harness - TPS Test Config: --max-tps-to-test MAX_TPS_TO_TEST The max target transfers realistic as ceiling of test range + --min-tps-to-test MIN_TPS_TO_TEST + The min target transfers to use as floor of test range --test-iteration-duration-sec TEST_ITERATION_DURATION_SEC The duration of transfer trx generation for each iteration of the test during the initial search @@ -455,9 +458,10 @@ Advanced Configuration Options: ``` usage: performance_test.py testBpOpMode overrideBasicTestConfig - [-h] [-d D] [--dump-error-details] [-v] [--leave-running] [--clean-run] [--unshared] - [--endpoint-api {p2p,http}] + [-h] [-d D] [--dump-error-details] [-v] [--leave-running] [--unshared] + [--endpoint-mode {p2p,http}] [--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES] + [--api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS] [--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR] [--genesis GENESIS] [--num-blocks-to-prune NUM_BLOCKS_TO_PRUNE] [--signature-cpu-billable-pct SIGNATURE_CPU_BILLABLE_PCT] @@ -500,20 +504,21 @@ Test Helper Arguments: --dump-error-details Upon error print etc/eosio/node_*/config.ini and /node_*/stderr.log to stdout -v verbose logging --leave-running Leave cluster running after test finishes - --clean-run Kill all nodeos and keosd instances --unshared Run test in isolated network namespace Performance Test Basic Base: Performance Test Basic base configuration items. - --endpoint-api {p2p,http} - Endpointt API mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. + --endpoint-mode {p2p,http} + Endpoint Mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. --producer-nodes PRODUCER_NODES Producing nodes count --validation-nodes VALIDATION_NODES Validation nodes count --api-nodes API_NODES API nodes count + --api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS + API nodes read only threads count for use with read-only transactions --tps-limit-per-generator TPS_LIMIT_PER_GENERATOR Maximum amount of transactions per second a single generator can have. 
--genesis GENESIS Path to genesis.json @@ -601,11 +606,12 @@ The following scripts are typically used by the Performance Harness main script ``` usage: performance_test_basic.py [-h] [-d D] [--dump-error-details] [-v] [--leave-running] - [--clean-run] [--unshared] - [--endpoint-api {p2p,http}] + [--unshared] + [--endpoint-mode {p2p,http}] [--producer-nodes PRODUCER_NODES] [--validation-nodes VALIDATION_NODES] [--api-nodes API_NODES] + [--api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS] [--tps-limit-per-generator TPS_LIMIT_PER_GENERATOR] [--genesis GENESIS] [--num-blocks-to-prune NUM_BLOCKS_TO_PRUNE] @@ -655,14 +661,13 @@ Test Helper Arguments: --dump-error-details Upon error print etc/eosio/node_*/config.ini and /node_*/stderr.log to stdout (default: False) -v verbose logging (default: False) --leave-running Leave cluster running after test finishes (default: False) - --clean-run Kill all nodeos and keosd instances (default: False) --unshared Run test in isolated network namespace (default: False) Performance Test Basic Base: Performance Test Basic base configuration items. - --endpoint-api {p2p,http} - Endpointt API mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. + --endpoint-mode {p2p,http} + Endpoint Mode ("p2p", "http"). In "p2p" mode transactions will be directed to the p2p endpoint on a producer node. In "http" mode transactions will be directed to the http endpoint on an api node. (default: p2p) --producer-nodes PRODUCER_NODES Producing nodes count (default: 1) @@ -670,6 +675,8 @@ Performance Test Basic Base: Validation nodes count (default: 1) --api-nodes API_NODES API nodes count (default: 0) + --api-nodes-read-only-threads API_NODES_READ_ONLY_THREADS + API nodes read only threads count for use with read-only transactions (default: 0) --tps-limit-per-generator TPS_LIMIT_PER_GENERATOR Maximum amount of transactions per second a single generator can have. (default: 4000) --genesis GENESIS Path to genesis.json (default: tests/performance_tests/genesis.json) @@ -803,6 +810,12 @@ Transaction Generator command line options.: actions auths description string to use, containting authAcctName to activePrivateKey pairs. + --api-endpoint arg The api endpoint to direct transactions to. + Defaults to: '/v1/chain/send_transaction2' + --peer-endpoint-type arg (=p2p) Identify the peer endpoint api type to + determine how to send transactions. + Allowable 'p2p' and 'http'. Default: + 'p2p' --peer-endpoint arg (=127.0.0.1) set the peer endpoint to send transactions to --port arg (=9876) set the peer endpoint port to send @@ -821,7 +834,7 @@ The Performance Harness generates a report to summarize results of test scenario Command used to run test and generate report: ``` bash -.build/tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax +./build/tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax ``` ### Report Breakdown @@ -830,28 +843,52 @@ The report begins by delivering the max TPS results of the performance run. 
* `InitialMaxTpsAchieved` - the max TPS throughput achieved during initial, short duration test scenarios to narrow search window * `LongRunningMaxTpsAchieved` - the max TPS throughput achieved during final, longer duration test scenarios to zero in on sustainable max TPS -Next, a summary of the search scenario conducted and respective results is included. Each summary includes information on the current state of the overarching search as well as basic results of the individual test that are used to determine whether the basic test was considered successful. The list of summary results are included in `InitialSearchResults` and `LongRunningSearchResults`. The number of entries in each list will vary depending on the TPS range tested (`--max-tps-to-test`) and the configured `--test-iteration-min-step`. +Next, a high-level summary of the search scenario targets and results is included. Each line item shows a target TPS search scenario and whether that scenario passed or failed. +
+ Expand Search Scenario Results Summary Example + +``` json + "InitialSearchScenariosSummary": { + "50000": "FAIL", + "25001": "FAIL", + "12501": "PASS", + "19001": "FAIL", + "16001": "FAIL", + "14501": "FAIL", + "13501": "FAIL", + "13001": "PASS" + }, + "LongRunningSearchScenariosSummary": { + "13001": "PASS" + }, +``` +
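For readers scripting against these reports, the sketch below pulls the pass/fail summaries back out of a saved report. This is a minimal illustration, not part of the harness: the `report.json` path is a placeholder for an actual run's output, and the key name follows the excerpt above.

``` python
import json

# Minimal sketch: list which target-TPS scenarios passed in a saved report.
# "report.json" stands in for the JSON report written by a performance_test run.
with open("report.json") as f:
    report = json.load(f)

for target_tps, outcome in report["InitialSearchScenariosSummary"].items():
    print(f"initial search @ {target_tps} TPS: {outcome}")
```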
+ +Next, a summary of the search scenario conducted and respective results is included. Each summary includes information on the current state of the overarching search as well as basic results of the individual test that are used to determine whether the basic test was considered successful. The lists of summary results are included in `InitialSearchResults` and `LongRunningSearchResults`. The number of entries in each list will vary depending on the TPS range tested (`--min-tps-to-test` & `--max-tps-to-test`) and the configured `--test-iteration-min-step`.
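As a rough illustration of how those entries come about (a sketch only, not the harness's actual implementation; `run_scenario` is a hypothetical stand-in for one basic test run), the search behaves like a floor/ceiling binary search bounded by the configured TPS range and minimum step, producing one `searchTarget`/`searchFloor`/`searchCeiling` entry per scenario:

``` python
# Illustrative sketch of the TPS search; not the harness's actual code.
# run_scenario(target_tps) -> bool stands in for a single basic test run,
# and each call corresponds to one entry in InitialSearchResults.
def search_max_tps(run_scenario, min_tps=1, max_tps=50000, min_step=500):
    floor, ceiling = min_tps, max_tps
    target = max_tps                     # first scenario probes the ceiling
    max_passing = 0
    while floor <= ceiling:
        if run_scenario(target):         # scenario passed: search higher
            max_passing = max(max_passing, target)
            floor = target + min_step
        else:                            # scenario failed: search lower
            ceiling = target - min_step
        target = (floor + ceiling) // 2  # next searchTarget
    return max_passing
```

The interval shrinks by at least `min_step` each iteration, so the loop terminates once the floor passes the ceiling, which is why the number of summary entries depends on the configured range and step.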
Expand Search Scenario Summary Example ``` json - "0": { + "2": { "success": true, - "searchTarget": 13501, + "searchTarget": 12501, "searchFloor": 1, - "searchCeiling": 13501, + "searchCeiling": 24501, "basicTestResult": { - "targetTPS": 13501, - "resultAvgTps": 13555.25, - "expectedTxns": 135010, - "resultTxns": 135010, + "testStart": "2023-06-05T19:13:42.528121", + "testEnd": "2023-06-05T19:15:00.441933", + "testDuration": "0:01:17.913812", + "testPassed": true, + "testRunSuccessful": true, + "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, - "basicTestSuccess": true, + "targetTPS": 12501, + "resultAvgTps": 12523.6875, + "expectedTxns": 125010, + "resultTxns": 125010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-25-39-13501", - "testStart": "2023-04-05T16:25:39.588359", - "testEnd": "2023-04-05T16:26:58.326668" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-13-42-12501" } } ``` @@ -865,6 +902,9 @@ Finally, the full detail test report for each of the determined max TPS throughp ``` json "InitialMaxTpsReport": { + "Result": { + + }, "Analysis": { }, @@ -884,12 +924,28 @@ Finally, the full detail test report for each of the determined max TPS throughp ``` json { - "perfTestsBegin": "2023-04-05T14:35:59.067345", - "perfTestsFinish": "2023-04-05T16:26:58.466421", - "InitialMaxTpsAchieved": 13501, - "LongRunningMaxTpsAchieved": 13501, - "tpsTestStart": "2023-04-05T16:14:31.272616", - "tpsTestFinish": "2023-04-05T16:26:58.466404", + "perfTestsBegin": "2023-06-05T17:59:49.175441", + "perfTestsFinish": "2023-06-05T19:23:03.723738", + "perfTestsDuration": "1:23:14.548297", + "operationalMode": "Block Producer Operational Mode", + "InitialMaxTpsAchieved": 13001, + "LongRunningMaxTpsAchieved": 13001, + "tpsTestStart": "2023-06-05T19:10:32.123231", + "tpsTestFinish": "2023-06-05T19:23:03.723722", + "tpsTestDuration": "0:12:31.600491", + "InitialSearchScenariosSummary": { + "50000": "FAIL", + "25001": "FAIL", + "12501": "PASS", + "19001": "FAIL", + "16001": "FAIL", + "14501": "FAIL", + "13501": "FAIL", + "13001": "PASS" + }, + "LongRunningSearchScenariosSummary": { + "13001": "PASS" + }, "InitialSearchResults": { "0": { "success": false, @@ -897,17 +953,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 1, "searchCeiling": 50000, "basicTestResult": { - "targetTPS": 50000, - "resultAvgTps": 10022.914285714285, - "expectedTxns": 500000, - "resultTxns": 196920, + "testStart": "2023-06-05T19:10:32.123282", + "testEnd": "2023-06-05T19:12:12.746349", + "testDuration": "0:01:40.623067", + "testPassed": false, + "testRunSuccessful": false, + "testRunCompleted": true, "tpsExpectMet": false, "trxExpectMet": false, - "basicTestSuccess": false, - "testAnalysisBlockCnt": 36, - "logsDir": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-14-31-50000", - "testStart": "2023-04-05T16:14:31.272670", - "testEnd": "2023-04-05T16:16:03.534429" + "targetTPS": 50000, + "resultAvgTps": 14015.564102564103, + "expectedTxns": 500000, + "resultTxns": 309515, + "testAnalysisBlockCnt": 40, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-10-32-50000" } }, "1": { @@ -916,17 +975,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 1, "searchCeiling": 49500, "basicTestResult": { + "testStart": 
"2023-06-05T19:12:12.749120", + "testEnd": "2023-06-05T19:13:42.524984", + "testDuration": "0:01:29.775864", + "testPassed": false, + "testRunSuccessful": false, + "testRunCompleted": true, + "tpsExpectMet": false, + "trxExpectMet": false, "targetTPS": 25001, - "resultAvgTps": 13099.29411764706, + "resultAvgTps": 13971.5, "expectedTxns": 250010, - "resultTxns": 250010, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 35, - "logsDir": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-16-03-25001", - "testStart": "2023-04-05T16:16:03.661754", - "testEnd": "2023-04-05T16:17:34.071307" + "resultTxns": 249981, + "testAnalysisBlockCnt": 33, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-12-12-25001" } }, "2": { @@ -935,17 +997,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 1, "searchCeiling": 24501, "basicTestResult": { + "testStart": "2023-06-05T19:13:42.528121", + "testEnd": "2023-06-05T19:15:00.441933", + "testDuration": "0:01:17.913812", + "testPassed": true, + "testRunSuccessful": true, + "testRunCompleted": true, + "tpsExpectMet": true, + "trxExpectMet": true, "targetTPS": 12501, - "resultAvgTps": 12541.1875, + "resultAvgTps": 12523.6875, "expectedTxns": 125010, "resultTxns": 125010, - "tpsExpectMet": true, - "trxExpectMet": true, - "basicTestSuccess": true, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-17-34-12501", - "testStart": "2023-04-05T16:17:34.183148", - "testEnd": "2023-04-05T16:18:52.175133" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-13-42-12501" } }, "3": { @@ -954,17 +1019,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 13001, "searchCeiling": 24501, "basicTestResult": { + "testStart": "2023-06-05T19:15:00.444109", + "testEnd": "2023-06-05T19:16:25.749654", + "testDuration": "0:01:25.305545", + "testPassed": false, + "testRunSuccessful": false, + "testRunCompleted": true, + "tpsExpectMet": false, + "trxExpectMet": false, "targetTPS": 19001, - "resultAvgTps": 13541.0, + "resultAvgTps": 14858.095238095239, "expectedTxns": 190010, - "resultTxns": 190010, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 25, - "logsDir": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-18-52-19001", - "testStart": "2023-04-05T16:18:52.284361", - "testEnd": "2023-04-05T16:20:17.939051" + "resultTxns": 189891, + "testAnalysisBlockCnt": 22, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-15-00-19001" } }, "4": { @@ -973,17 +1041,20 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 13001, "searchCeiling": 18501, "basicTestResult": { + "testStart": "2023-06-05T19:16:25.751860", + "testEnd": "2023-06-05T19:17:48.336896", + "testDuration": "0:01:22.585036", + "testPassed": false, + "testRunSuccessful": false, + "testRunCompleted": true, + "tpsExpectMet": false, + "trxExpectMet": false, "targetTPS": 16001, - "resultAvgTps": 13426.8, + "resultAvgTps": 14846.0, "expectedTxns": 160010, - "resultTxns": 160010, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 21, - "logsDir": 
"performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-20-18-16001", - "testStart": "2023-04-05T16:20:18.060543", - "testEnd": "2023-04-05T16:21:40.127977" + "resultTxns": 159988, + "testAnalysisBlockCnt": 19, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-16-25-16001" } }, "5": { @@ -992,60 +1063,72 @@ Finally, the full detail test report for each of the determined max TPS throughp "searchFloor": 13001, "searchCeiling": 15501, "basicTestResult": { + "testStart": "2023-06-05T19:17:48.339990", + "testEnd": "2023-06-05T19:19:07.843311", + "testDuration": "0:01:19.503321", + "testPassed": false, + "testRunSuccessful": false, + "testRunCompleted": true, + "tpsExpectMet": false, + "trxExpectMet": false, "targetTPS": 14501, - "resultAvgTps": 13151.166666666666, + "resultAvgTps": 13829.588235294117, "expectedTxns": 145010, - "resultTxns": 145010, - "tpsExpectMet": false, - "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 19, - "logsDir": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-21-40-14501", - "testStart": "2023-04-05T16:21:40.237483", - "testEnd": "2023-04-05T16:23:00.432464" + "resultTxns": 144964, + "testAnalysisBlockCnt": 18, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-17-48-14501" } }, "6": { - "success": true, + "success": false, "searchTarget": 13501, "searchFloor": 13001, "searchCeiling": 14001, "basicTestResult": { + "testStart": "2023-06-05T19:19:07.845657", + "testEnd": "2023-06-05T19:20:27.815030", + "testDuration": "0:01:19.969373", + "testPassed": false, + "testRunSuccessful": false, + "testRunCompleted": true, + "tpsExpectMet": true, + "trxExpectMet": false, "targetTPS": 13501, - "resultAvgTps": 13555.0625, + "resultAvgTps": 13470.375, "expectedTxns": 135010, - "resultTxns": 135010, - "tpsExpectMet": true, - "trxExpectMet": true, - "basicTestSuccess": true, + "resultTxns": 135000, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-23-00-13501", - "testStart": "2023-04-05T16:23:00.540367", - "testEnd": "2023-04-05T16:24:19.418664" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-19-07-13501" } }, "7": { - "success": false, - "searchTarget": 14001, - "searchFloor": 14001, - "searchCeiling": 14001, + "success": true, + "searchTarget": 13001, + "searchFloor": 13001, + "searchCeiling": 13001, "basicTestResult": { - "targetTPS": 14001, - "resultAvgTps": 13482.823529411764, - "expectedTxns": 140010, - "resultTxns": 140010, - "tpsExpectMet": false, + "testStart": "2023-06-05T19:20:27.817483", + "testEnd": "2023-06-05T19:21:44.846130", + "testDuration": "0:01:17.028647", + "testPassed": true, + "testRunSuccessful": true, + "testRunCompleted": true, + "tpsExpectMet": true, "trxExpectMet": true, - "basicTestSuccess": true, - "testAnalysisBlockCnt": 18, - "logsDir": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-24-19-14001", - "testStart": "2023-04-05T16:24:19.526361", - "testEnd": "2023-04-05T16:25:39.445419" + "targetTPS": 13001, + "resultAvgTps": 13032.5625, + "expectedTxns": 130010, + "resultTxns": 130010, + "testAnalysisBlockCnt": 17, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-20-27-13001" } } }, "InitialMaxTpsReport": { + + "Result": { + }, "Analysis": { }, @@ -1060,26 +1143,32 @@ Finally, the full 
detail test report for each of the determined max TPS throughp "LongRunningSearchResults": { "0": { "success": true, - "searchTarget": 13501, + "searchTarget": 13001, "searchFloor": 1, - "searchCeiling": 13501, + "searchCeiling": 13001, "basicTestResult": { - "targetTPS": 13501, - "resultAvgTps": 13555.25, - "expectedTxns": 135010, - "resultTxns": 135010, + "testStart": "2023-06-05T19:21:44.879637", + "testEnd": "2023-06-05T19:23:03.697671", + "testDuration": "0:01:18.818034", + "testPassed": true, + "testRunSuccessful": true, + "testRunCompleted": true, "tpsExpectMet": true, "trxExpectMet": true, - "basicTestSuccess": true, + "targetTPS": 13001, + "resultAvgTps": 13027.0, + "expectedTxns": 130010, + "resultTxns": 130010, "testAnalysisBlockCnt": 17, - "logsDir": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-25-39-13501", - "testStart": "2023-04-05T16:25:39.588359", - "testEnd": "2023-04-05T16:26:58.326668" + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001" } } }, "LongRunningMaxTpsReport": { + + "Result": { + }, "Analysis": { }, @@ -1092,50 +1181,44 @@ Finally, the full detail test report for each of the determined max TPS throughp }, "ProducerThreadAnalysis": { - "recommendedThreadCount": 5, + "recommendedThreadCount": 2, "threadToMaxTpsDict": { - "2": 14501, - "3": 15501, - "4": 18001, - "5": 19001, - "6": 18501 + "2": 12001, + "3": 12001 }, - "analysisStart": "2023-04-05T14:35:59.130340", - "analysisFinish": "2023-04-05T15:30:15.071101" + "analysisStart": "2023-06-05T17:59:49.197967", + "analysisFinish": "2023-06-05T18:18:33.449126" }, "ChainThreadAnalysis": { - "recommendedThreadCount": 2, + "recommendedThreadCount": 3, "threadToMaxTpsDict": { - "2": 15001, - "3": 14501 + "2": 4001, + "3": 13001, + "4": 5501 }, - "analysisStart": "2023-04-05T15:30:15.072156", - "analysisFinish": "2023-04-05T15:52:23.942065" + "analysisStart": "2023-06-05T18:18:33.449689", + "analysisFinish": "2023-06-05T18:48:02.262053" }, "NetThreadAnalysis": { "recommendedThreadCount": 4, "threadToMaxTpsDict": { - "4": 13501, - "5": 13001 + "4": 14501, + "5": 13501 }, - "analysisStart": "2023-04-05T15:52:23.943254", - "analysisFinish": "2023-04-05T16:14:31.271855" + "analysisStart": "2023-06-05T18:48:02.262594", + "analysisFinish": "2023-06-05T19:10:32.123003" }, "args": { "rawCmdLine ": "./tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax", - "killAll": false, - "dontKill": false, - "keepLogs": true, "dumpErrorDetails": false, "delay": 1, "nodesFile": null, "verbose": false, "unshared": false, - "_killEosInstances": true, - "_killWallet": true, - "pnodes": 1, - "totalNodes": 0, - "topo": "mesh", + "producerNodeCount": 1, + "validationNodeCount": 1, + "apiNodeCount": 0, + "dontKill": false, "extraNodeosArgs": { "chainPluginArgs": { "_pluginNamespace": "eosio", @@ -1327,6 +1410,9 @@ Finally, the full detail test report for each of the determined max TPS throughp "httpServerAddress": null, "_httpServerAddressNodeosDefault": "127.0.0.1:8888", "_httpServerAddressNodeosArg": "--http-server-address", + "httpCategoryAddress": null, + "_httpCategoryAddressNodeosDefault": null, + "_httpCategoryAddressNodeosArg": "--http-category-address", "accessControlAllowOrigin": null, "_accessControlAllowOriginNodeosDefault": null, "_accessControlAllowOriginNodeosArg": 
"--access-control-allow-origin", @@ -1345,11 +1431,11 @@ Finally, the full detail test report for each of the determined max TPS throughp "httpMaxBytesInFlightMb": -1, "_httpMaxBytesInFlightMbNodeosDefault": 500, "_httpMaxBytesInFlightMbNodeosArg": "--http-max-bytes-in-flight-mb", - "httpMaxInFlightRequests": null, + "httpMaxInFlightRequests": -1, "_httpMaxInFlightRequestsNodeosDefault": -1, "_httpMaxInFlightRequestsNodeosArg": "--http-max-in-flight-requests", "httpMaxResponseTimeMs": -1, - "_httpMaxResponseTimeMsNodeosDefault": 30, + "_httpMaxResponseTimeMsNodeosDefault": 15, "_httpMaxResponseTimeMsNodeosArg": "--http-max-response-time-ms", "verboseHttpErrors": null, "_verboseHttpErrorsNodeosDefault": false, @@ -1416,8 +1502,11 @@ Finally, the full detail test report for each of the determined max TPS throughp "_netThreadsNodeosDefault": 4, "_netThreadsNodeosArg": "--net-threads", "syncFetchSpan": null, - "_syncFetchSpanNodeosDefault": 100, + "_syncFetchSpanNodeosDefault": 1000, "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "syncPeerLimit": null, + "_syncPeerLimitNodeosDefault": 3, + "_syncPeerLimitNodeosArg": "--sync-peer-limit", "useSocketReadWatermark": null, "_useSocketReadWatermarkNodeosDefault": 0, "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", @@ -1456,7 +1545,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "_greylistLimitNodeosDefault": 1000, "_greylistLimitNodeosArg": "--greylist-limit", "cpuEffortPercent": 100, - "_cpuEffortPercentNodeosDefault": 80, + "_cpuEffortPercentNodeosDefault": 90, "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", "maxBlockCpuUsageThresholdUs": null, "_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, @@ -1485,16 +1574,13 @@ Finally, the full detail test report for each of the determined max TPS throughp "incomingTransactionQueueSizeMb": null, "_incomingTransactionQueueSizeMbNodeosDefault": 1024, "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", - "disableSubjectiveBilling": true, - "_disableSubjectiveBillingNodeosDefault": 1, - "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", "disableSubjectiveAccountBilling": null, "_disableSubjectiveAccountBillingNodeosDefault": false, "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", - "disableSubjectiveP2pBilling": null, + "disableSubjectiveP2pBilling": true, "_disableSubjectiveP2pBillingNodeosDefault": 1, "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", - "disableSubjectiveApiBilling": null, + "disableSubjectiveApiBilling": true, "_disableSubjectiveApiBillingNodeosDefault": 1, "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", "producerThreads": 2, @@ -1611,6 +1697,7 @@ Finally, the full detail test report for each of the determined max TPS throughp "genesisPath": "tests/performance_tests/genesis.json", "maximumP2pPerHost": 5000, "maximumClients": 0, + "keepLogs": true, "loggingLevel": "info", "loggingDict": { "bios": "off" @@ -1618,14 +1705,27 @@ Finally, the full detail test report for each of the determined max TPS throughp "prodsEnableTraceApi": false, "nodeosVers": "v4", "specificExtraNodeosArgs": { - "1": "--plugin eosio::trace_api_plugin " + "1": "--plugin eosio::trace_api_plugin ", + "2": "--plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin --read-only-threads 0 " }, "_totalNodes": 2, + "_pNodes": 1, + "_producerNodeIds": [ + 0 + ], + "_validationNodeIds": [ + 1 + ], + "_apiNodeIds": [ + 2 
+ ], "nonProdsEosVmOcEnable": false, + "apiNodesReadOnlyThreadCount": 0, "testDurationSec": 10, "finalDurationSec": 30, "delPerfLogs": false, "maxTpsToTest": 50000, + "minTpsToTest": 1, "testIterationMinStep": 500, "tpsLimitPerGenerator": 4000, "delReport": false, @@ -1638,11 +1738,13 @@ Finally, the full detail test report for each of the determined max TPS throughp "calcChainThreads": "lmax", "calcNetThreads": "lmax", "userTrxDataFile": null, + "endpointMode": "p2p", + "opModeCmd": "testBpOpMode", "logDirBase": "performance_test", - "logDirTimestamp": "2023-04-05_14-35-59", - "logDirPath": "performance_test/2023-04-05_14-35-59", - "ptbLogsDirPath": "performance_test/2023-04-05_14-35-59/testRunLogs", - "pluginThreadOptLogsDirPath": "performance_test/2023-04-05_14-35-59/pluginThreadOptRunLogs" + "logDirTimestamp": "2023-06-05_17-59-49", + "logDirPath": "performance_test/2023-06-05_17-59-49", + "ptbLogsDirPath": "performance_test/2023-06-05_17-59-49/testRunLogs", + "pluginThreadOptLogsDirPath": "performance_test/2023-06-05_17-59-49/pluginThreadOptRunLogs" }, "env": { "system": "Linux", @@ -1665,72 +1767,98 @@ The Performance Test Basic generates, by default, a report that details results ``` json { - "completedRun": true, - "testStart": "2023-04-05T16:25:39.588359", - "testFinish": "2023-04-05T16:26:58.326668", + "targetApiEndpointType": "p2p", + "targetApiEndpoint": "NA for P2P", + "Result": { + "testStart": "2023-06-05T19:21:44.879637", + "testEnd": "2023-06-05T19:23:03.697671", + "testDuration": "0:01:18.818034", + "testPassed": true, + "testRunSuccessful": true, + "testRunCompleted": true, + "tpsExpectMet": true, + "trxExpectMet": true, + "targetTPS": 13001, + "resultAvgTps": 13027.0, + "expectedTxns": 130010, + "resultTxns": 130010, + "testAnalysisBlockCnt": 17, + "logsDir": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001" + }, "Analysis": { "BlockSize": { - "min": 139000, - "max": 182475, - "avg": 168440.0588235294, - "sigma": 11024.182010314407, + "min": 153503, + "max": 169275, + "avg": 162269.76470588235, + "sigma": 3152.279353278714, "emptyBlocks": 0, "numBlocks": 17 }, "BlocksGuide": { - "firstBlockNum": 113, - "lastBlockNum": 145, - "totalBlocks": 33, - "testStartBlockNum": 113, - "testEndBlockNum": 145, + "firstBlockNum": 110, + "lastBlockNum": 140, + "totalBlocks": 31, + "testStartBlockNum": 110, + "testEndBlockNum": 140, "setupBlocksCnt": 0, "tearDownBlocksCnt": 0, "leadingEmptyBlocksCnt": 1, - "trailingEmptyBlocksCnt": 11, + "trailingEmptyBlocksCnt": 9, "configAddlDropCnt": 2, "testAnalysisBlockCnt": 17 }, "TPS": { - "min": 12390, - "max": 14454, - "avg": 13555.25, - "sigma": 551.2613377518869, + "min": 12775, + "max": 13285, + "avg": 13027.0, + "sigma": 92.70854868888844, "emptyBlocks": 0, "numBlocks": 17, - "configTps": 13501, + "configTps": 13001, "configTestDuration": 10, "tpsPerGenerator": [ - 3375, - 3375, - 3375, - 3376 + 3250, + 3250, + 3250, + 3251 ], "generatorCount": 4 }, "TrxCPU": { "min": 8.0, - "max": 1629.0, - "avg": 27.252588697133547, - "sigma": 23.105861916437814, - "samples": 135010 + "max": 1180.0, + "avg": 25.89257749403892, + "sigma": 12.604252354938811, + "samples": 130010 }, "TrxLatency": { "min": 0.0009999275207519531, - "max": 0.5899999141693115, - "avg": 0.2691181839364483, - "sigma": 0.1464077116092879, - "samples": 135010 + "max": 0.5399999618530273, + "avg": 0.2522121298066488, + "sigma": 0.14457374598663084, + "samples": 130010, + "units": "seconds" }, "TrxNet": { "min": 24.0, "max": 25.0, - "avg": 
24.851892452410933, - "sigma": 0.35520656234959913, - "samples": 135010 + "avg": 24.846196446427196, + "sigma": 0.3607603366241642, + "samples": 130010 }, + "TrxAckResponseTime": { + "min": -1.0, + "max": -1.0, + "avg": -1.0, + "sigma": 0.0, + "samples": 130010, + "measurementApplicable": "NOT APPLICABLE", + "units": "microseconds" + }, + "ExpectedTransactions": 130010, "DroppedTransactions": 0, - "ProductionWindowsTotal": 0, - "ProductionWindowsAverageSize": 0, + "ProductionWindowsTotal": 2, + "ProductionWindowsAverageSize": 12.0, "ProductionWindowsMissed": 0, "ForkedBlocks": { "00": [], @@ -1751,19 +1879,15 @@ The Performance Test Basic generates, by default, a report that details results }, "args": { "rawCmdLine ": "./tests/performance_tests/performance_test.py testBpOpMode --test-iteration-duration-sec 10 --final-iterations-duration-sec 30 --calc-producer-threads lmax --calc-chain-threads lmax --calc-net-threads lmax", - "killAll": false, - "dontKill": false, - "keepLogs": true, "dumpErrorDetails": false, "delay": 1, "nodesFile": null, "verbose": false, "unshared": false, - "_killEosInstances": true, - "_killWallet": true, - "pnodes": 1, - "totalNodes": 0, - "topo": "mesh", + "producerNodeCount": 1, + "validationNodeCount": 1, + "apiNodeCount": 0, + "dontKill": false, "extraNodeosArgs": { "chainPluginArgs": { "_pluginNamespace": "eosio", @@ -1955,6 +2079,9 @@ The Performance Test Basic generates, by default, a report that details results "httpServerAddress": null, "_httpServerAddressNodeosDefault": "127.0.0.1:8888", "_httpServerAddressNodeosArg": "--http-server-address", + "httpCategoryAddress": null, + "_httpCategoryAddressNodeosDefault": null, + "_httpCategoryAddressNodeosArg": "--http-category-address", "accessControlAllowOrigin": null, "_accessControlAllowOriginNodeosDefault": null, "_accessControlAllowOriginNodeosArg": "--access-control-allow-origin", @@ -1973,11 +2100,11 @@ The Performance Test Basic generates, by default, a report that details results "httpMaxBytesInFlightMb": -1, "_httpMaxBytesInFlightMbNodeosDefault": 500, "_httpMaxBytesInFlightMbNodeosArg": "--http-max-bytes-in-flight-mb", - "httpMaxInFlightRequests": null, + "httpMaxInFlightRequests": -1, "_httpMaxInFlightRequestsNodeosDefault": -1, "_httpMaxInFlightRequestsNodeosArg": "--http-max-in-flight-requests", "httpMaxResponseTimeMs": -1, - "_httpMaxResponseTimeMsNodeosDefault": 30, + "_httpMaxResponseTimeMsNodeosDefault": 15, "_httpMaxResponseTimeMsNodeosArg": "--http-max-response-time-ms", "verboseHttpErrors": null, "_verboseHttpErrorsNodeosDefault": false, @@ -2044,8 +2171,11 @@ The Performance Test Basic generates, by default, a report that details results "_netThreadsNodeosDefault": 4, "_netThreadsNodeosArg": "--net-threads", "syncFetchSpan": null, - "_syncFetchSpanNodeosDefault": 100, + "_syncFetchSpanNodeosDefault": 1000, "_syncFetchSpanNodeosArg": "--sync-fetch-span", + "syncPeerLimit": null, + "_syncPeerLimitNodeosDefault": 3, + "_syncPeerLimitNodeosArg": "--sync-peer-limit", "useSocketReadWatermark": null, "_useSocketReadWatermarkNodeosDefault": 0, "_useSocketReadWatermarkNodeosArg": "--use-socket-read-watermark", @@ -2084,7 +2214,7 @@ The Performance Test Basic generates, by default, a report that details results "_greylistLimitNodeosDefault": 1000, "_greylistLimitNodeosArg": "--greylist-limit", "cpuEffortPercent": 100, - "_cpuEffortPercentNodeosDefault": 80, + "_cpuEffortPercentNodeosDefault": 90, "_cpuEffortPercentNodeosArg": "--cpu-effort-percent", "maxBlockCpuUsageThresholdUs": null, 
"_maxBlockCpuUsageThresholdUsNodeosDefault": 5000, @@ -2113,16 +2243,13 @@ The Performance Test Basic generates, by default, a report that details results "incomingTransactionQueueSizeMb": null, "_incomingTransactionQueueSizeMbNodeosDefault": 1024, "_incomingTransactionQueueSizeMbNodeosArg": "--incoming-transaction-queue-size-mb", - "disableSubjectiveBilling": true, - "_disableSubjectiveBillingNodeosDefault": 1, - "_disableSubjectiveBillingNodeosArg": "--disable-subjective-billing", "disableSubjectiveAccountBilling": null, "_disableSubjectiveAccountBillingNodeosDefault": false, "_disableSubjectiveAccountBillingNodeosArg": "--disable-subjective-account-billing", - "disableSubjectiveP2pBilling": null, + "disableSubjectiveP2pBilling": true, "_disableSubjectiveP2pBillingNodeosDefault": 1, "_disableSubjectiveP2pBillingNodeosArg": "--disable-subjective-p2p-billing", - "disableSubjectiveApiBilling": null, + "disableSubjectiveApiBilling": true, "_disableSubjectiveApiBillingNodeosDefault": 1, "_disableSubjectiveApiBillingNodeosArg": "--disable-subjective-api-billing", "producerThreads": 2, @@ -2239,6 +2366,7 @@ The Performance Test Basic generates, by default, a report that details results "genesisPath": "tests/performance_tests/genesis.json", "maximumP2pPerHost": 5000, "maximumClients": 0, + "keepLogs": true, "loggingLevel": "info", "loggingDict": { "bios": "off" @@ -2246,25 +2374,40 @@ The Performance Test Basic generates, by default, a report that details results "prodsEnableTraceApi": false, "nodeosVers": "v4", "specificExtraNodeosArgs": { - "1": "--plugin eosio::trace_api_plugin " + "1": "--plugin eosio::trace_api_plugin ", + "2": "--plugin eosio::chain_api_plugin --plugin eosio::net_api_plugin --read-only-threads 0 " }, "_totalNodes": 2, + "_pNodes": 1, + "_producerNodeIds": [ + 0 + ], + "_validationNodeIds": [ + 1 + ], + "_apiNodeIds": [ + 2 + ], "nonProdsEosVmOcEnable": false, - "targetTps": 13501, + "apiNodesReadOnlyThreadCount": 0, + "targetTps": 13001, "testTrxGenDurationSec": 10, "tpsLimitPerGenerator": 4000, "numAddlBlocksToPrune": 2, - "logDirRoot": "performance_test/2023-04-05_14-35-59/testRunLogs", + "logDirRoot": "performance_test/2023-06-05_17-59-49/testRunLogs", "delReport": false, "quiet": false, "delPerfLogs": false, - "expectedTransactionsSent": 135010, + "expectedTransactionsSent": 130010, "printMissingTransactions": false, "userTrxDataFile": null, - "logDirBase": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test", - "logDirTimestamp": "2023-04-05_16-25-39", - "logDirTimestampedOptSuffix": "-13501", - "logDirPath": "performance_test/2023-04-05_14-35-59/testRunLogs/performance_test/2023-04-05_16-25-39-13501" + "endpointMode": "p2p", + "apiEndpoint": null, + "logDirBase": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test", + "logDirTimestamp": "2023-06-05_19-21-44", + "logDirTimestampedOptSuffix": "-13001", + "logDirPath": "performance_test/2023-06-05_17-59-49/testRunLogs/performance_test/2023-06-05_19-21-44-13001", + "userTrxData": "NOT CONFIGURED" }, "env": { "system": "Linux", diff --git a/tests/performance_tests/genesis.json b/tests/performance_tests/genesis.json index 57474c902d..868a3d6c12 100644 --- a/tests/performance_tests/genesis.json +++ b/tests/performance_tests/genesis.json @@ -11,7 +11,7 @@ "context_free_discount_net_usage_den": 100, "max_block_cpu_usage": 500000, "target_block_cpu_usage_pct": 500, - "max_transaction_cpu_usage": 90000, + "max_transaction_cpu_usage": 475000, "min_transaction_cpu_usage": 0, 
"max_transaction_lifetime": 3600, "deferred_trx_expiration_window": 600, diff --git a/tests/performance_tests/log_reader.py b/tests/performance_tests/log_reader.py index e9d63fdad8..70bb605fc1 100644 --- a/tests/performance_tests/log_reader.py +++ b/tests/performance_tests/log_reader.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -import os import sys import re import numpy as np @@ -11,9 +10,8 @@ sys.path.append(str(PurePath(PurePath(Path(__file__).absolute()).parent).parent)) from TestHarness import Utils, Account -from dataclasses import dataclass, asdict, field -from platform import release, system -from datetime import datetime +from dataclasses import dataclass, field +from datetime import datetime, timedelta from typing import List from pathlib import Path @@ -41,6 +39,7 @@ class TpsTestConfig: numTrxGensUsed: int = 0 targetTpsPerGenList: List[int] = field(default_factory=list) quiet: bool = False + printMissingTransactions: bool=False @dataclass class stats(): @@ -66,6 +65,8 @@ class trxData(): netUsageUs: int = 0 blockTime: datetime = None latency: float = 0 + acknowledged: str = "NA" + ackRespTimeUs: int = -1 _sentTimestamp: str = "" _calcdTimeEpoch: float = 0 @@ -191,6 +192,19 @@ def __str__(self): def assertEquality(self, other): assert self == other, f"Error: Actual log:\n{self}\ndid not match expected log:\n{other}" +@dataclass +class LogAnalysis: + guide: chainBlocksGuide + tpsStats: stats + blockSizeStats: stats + trxLatencyStats: basicStats + trxCpuStats: basicStats + trxNetStats: basicStats + trxAckStatsApplicable: str + trxAckStats: basicStats + prodWindows: productionWindows + notFound: list + def selectedOpen(path): return gzip.open if path.suffix == '.gz' else open @@ -224,13 +238,29 @@ def scrapeLogDroppedForkedBlocks(data: chainData, path): data.droppedBlocks[str(nodeNum).zfill(2)] = droppedBlocksByCurrentNode data.forkedBlocks[str(nodeNum).zfill(2)] = forkedBlocksByCurrentNode -def scrapeTrxGenLog(trxSent, path): +@dataclass +class sentTrx(): + sentTime: str = "" + acked: str = "" + ackResponseTimeUs: int = -1 + +@dataclass +class sentTrxExtTrace(): + sentTime: str = "" + acked: str = "" + ackResponseTimeUs: int = -1 + blockNum: int = -1 + cpuUsageUs: int = -1 + netUsageWords: int = -1 + blockTime: str = "" + +def scrapeTrxGenLog(trxSent: dict, path): #trxGenLogs/trx_data_output_*.txt selectedopen = selectedOpen(path) with selectedopen(path, 'rt') as f: - trxSent.update(dict([(x[0], x[1]) for x in (line.rstrip('\n').split(',') for line in f)])) + trxSent.update(dict([(x[0], sentTrx(x[1], x[2], x[3]) if len(x) == 4 else sentTrxExtTrace(x[1], x[2], x[3], x[4], x[5], x[6], x[7])) for x in (line.rstrip('\n').split(',') for line in f)])) -def scrapeTrxGenTrxSentDataLogs(trxSent, trxGenLogDirPath, quiet): +def scrapeTrxGenTrxSentDataLogs(trxSent: dict, trxGenLogDirPath, quiet): filesScraped = [] for fileName in trxGenLogDirPath.glob("trx_data_output_*.txt"): filesScraped.append(fileName) @@ -239,10 +269,15 @@ def scrapeTrxGenTrxSentDataLogs(trxSent, trxGenLogDirPath, quiet): if not quiet: print(f"Transaction Log Files Scraped: {filesScraped}") -def populateTrxSentTimestamp(trxSent: dict, trxDict: dict, notFound): +def populateTrxSentAndAcked(trxSent: dict, trxDict: dict, notFound): for sentTrxId in trxSent.keys(): - if sentTrxId in trxDict.keys(): - trxDict[sentTrxId].sentTimestamp = trxSent[sentTrxId] + if (isinstance(trxSent[sentTrxId], sentTrxExtTrace)): + trxDict[sentTrxId] = trxData(blockNum=trxSent[sentTrxId].blockNum, cpuUsageUs=trxSent[sentTrxId].cpuUsageUs, 
netUsageUs=trxSent[sentTrxId].netUsageWords, blockTime=trxSent[sentTrxId].blockTime, acknowledged=trxSent[sentTrxId].acked, ackRespTimeUs=trxSent[sentTrxId].ackResponseTimeUs) + trxDict[sentTrxId].sentTimestamp = trxSent[sentTrxId].sentTime + elif sentTrxId in trxDict.keys(): + trxDict[sentTrxId].sentTimestamp = trxSent[sentTrxId].sentTime + trxDict[sentTrxId].acknowledged = trxSent[sentTrxId].acked + trxDict[sentTrxId].ackRespTimeUs = trxSent[sentTrxId].ackResponseTimeUs else: notFound.append(sentTrxId) @@ -257,9 +292,9 @@ def updateBlockTotals(data: chainData): def writeTransactionMetrics(trxDict: dict, path): with open(path, 'wt') as transactionMetricsFile: - transactionMetricsFile.write("TransactionId,BlockNumber,BlockTime,CpuUsageUs,NetUsageUs,Latency,SentTimestamp,CalcdTimeEpoch\n") + transactionMetricsFile.write("TransactionId,BlockNumber,BlockTime,CpuUsageUs,NetUsageUs,Latency,SentTimestamp,CalcdTimeEpoch,Acknowledged,SentToAckDurationUs\n") for trxId, data in trxDict.items(): - transactionMetricsFile.write(f"{trxId},{data.blockNum},{data.blockTime},{data.cpuUsageUs},{data.netUsageUs},{data.latency},{data._sentTimestamp},{data._calcdTimeEpoch}\n") + transactionMetricsFile.write(f"{trxId},{data.blockNum},{data.blockTime},{data.cpuUsageUs},{data.netUsageUs},{data.latency},{data._sentTimestamp},{data._calcdTimeEpoch},{data.acknowledged},{data.ackRespTimeUs}\n") def getProductionWindows(prodDict: dict, data: chainData): prod = "" @@ -410,55 +445,21 @@ def calcTrxLatencyCpuNetStats(trxDict : dict): Returns: transaction latency stats as a basicStats object """ - trxLatencyCpuNetList = [(data.latency, data.cpuUsageUs, data.netUsageUs) for trxId, data in trxDict.items() if data.calcdTimeEpoch != 0] - - npLatencyCpuNetList = np.array(trxLatencyCpuNetList, dtype=float) - - return basicStats(float(np.min(npLatencyCpuNetList[:,0])), float(np.max(npLatencyCpuNetList[:,0])), float(np.average(npLatencyCpuNetList[:,0])), float(np.std(npLatencyCpuNetList[:,0])), len(npLatencyCpuNetList)), \ - basicStats(float(np.min(npLatencyCpuNetList[:,1])), float(np.max(npLatencyCpuNetList[:,1])), float(np.average(npLatencyCpuNetList[:,1])), float(np.std(npLatencyCpuNetList[:,1])), len(npLatencyCpuNetList)), \ - basicStats(float(np.min(npLatencyCpuNetList[:,2])), float(np.max(npLatencyCpuNetList[:,2])), float(np.average(npLatencyCpuNetList[:,2])), float(np.std(npLatencyCpuNetList[:,2])), len(npLatencyCpuNetList)) -def createReport(guide: chainBlocksGuide, tpsTestConfig: TpsTestConfig, tpsStats: stats, blockSizeStats: stats, trxLatencyStats: basicStats, trxCpuStats: basicStats, - trxNetStats: basicStats, forkedBlocks, droppedBlocks, prodWindows: productionWindows, notFound: dict, testStart: datetime, testFinish: datetime, - argsDict: dict, completedRun: bool, nodeosVers: str, numNodes: int) -> dict: - report = {} - report['completedRun'] = completedRun - report['testStart'] = testStart - report['testFinish'] = testFinish - report['Analysis'] = {} - report['Analysis']['BlockSize'] = asdict(blockSizeStats) - report['Analysis']['BlocksGuide'] = asdict(guide) - report['Analysis']['TPS'] = asdict(tpsStats) - report['Analysis']['TPS']['configTps'] = tpsTestConfig.targetTps - report['Analysis']['TPS']['configTestDuration'] = tpsTestConfig.testDurationSec - report['Analysis']['TPS']['tpsPerGenerator'] = tpsTestConfig.targetTpsPerGenList - report['Analysis']['TPS']['generatorCount'] = tpsTestConfig.numTrxGensUsed - report['Analysis']['TrxCPU'] = asdict(trxCpuStats) - report['Analysis']['TrxLatency'] = 
asdict(trxLatencyStats) - report['Analysis']['TrxNet'] = asdict(trxNetStats) - report['Analysis']['DroppedTransactions'] = len(notFound) - report['Analysis']['ProductionWindowsTotal'] = prodWindows.totalWindows - report['Analysis']['ProductionWindowsAverageSize'] = prodWindows.averageWindowSize - report['Analysis']['ProductionWindowsMissed'] = prodWindows.missedWindows - report['Analysis']['ForkedBlocks'] = {} - report['Analysis']['ForksCount'] = {} - report['Analysis']['DroppedBlocks'] = {} - report['Analysis']['DroppedBlocksCount'] = {} - for nodeNum in range(0, numNodes): - formattedNodeNum = str(nodeNum).zfill(2) - report['Analysis']['ForkedBlocks'][formattedNodeNum] = forkedBlocks[formattedNodeNum] - report['Analysis']['ForksCount'][formattedNodeNum] = len(forkedBlocks[formattedNodeNum]) - report['Analysis']['DroppedBlocks'][formattedNodeNum] = droppedBlocks[formattedNodeNum] - report['Analysis']['DroppedBlocksCount'][formattedNodeNum] = len(droppedBlocks[formattedNodeNum]) - report['args'] = argsDict - report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} - report['nodeosVersion'] = nodeosVers - return report + trxLatencyCpuNetAckList = [(data.latency, data.cpuUsageUs, data.netUsageUs, data.ackRespTimeUs) for trxId, data in trxDict.items() if data.calcdTimeEpoch != 0] + + npLatencyCpuNetAckList = np.array(trxLatencyCpuNetAckList, dtype=float) + + return basicStats(float(np.min(npLatencyCpuNetAckList[:,0])), float(np.max(npLatencyCpuNetAckList[:,0])), float(np.average(npLatencyCpuNetAckList[:,0])), float(np.std(npLatencyCpuNetAckList[:,0])), len(npLatencyCpuNetAckList)), \ + basicStats(float(np.min(npLatencyCpuNetAckList[:,1])), float(np.max(npLatencyCpuNetAckList[:,1])), float(np.average(npLatencyCpuNetAckList[:,1])), float(np.std(npLatencyCpuNetAckList[:,1])), len(npLatencyCpuNetAckList)), \ + basicStats(float(np.min(npLatencyCpuNetAckList[:,2])), float(np.max(npLatencyCpuNetAckList[:,2])), float(np.average(npLatencyCpuNetAckList[:,2])), float(np.std(npLatencyCpuNetAckList[:,2])), len(npLatencyCpuNetAckList)), \ + basicStats(float(np.min(npLatencyCpuNetAckList[:,3])), float(np.max(npLatencyCpuNetAckList[:,3])), float(np.average(npLatencyCpuNetAckList[:,3])), float(np.std(npLatencyCpuNetAckList[:,3])), len(npLatencyCpuNetAckList)) class LogReaderEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, datetime): return obj.isoformat() + if isinstance(obj, timedelta): + return str(obj) if isinstance(obj, PurePath): return str(obj) if obj is None: @@ -474,32 +475,39 @@ def default(self, obj): defaultStr = f"ERROR: {str(err)}" return defaultStr -def reportAsJSON(report: dict) -> json: - return json.dumps(report, indent=2, cls=LogReaderEncoder) +class JsonReportHandler: + def reportAsJSON(report: dict) -> json: + return json.dumps(report, indent=2, cls=LogReaderEncoder) -def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: ArtifactPaths, argsDict: dict, testStart: datetime=None, completedRun: bool=True, nodeosVers: str="") -> dict: + def exportReportAsJSON(report: json, exportPath): + with open(exportPath, 'wt') as f: + f.write(report) + +def analyzeLogResults(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: ArtifactPaths) -> LogAnalysis: scrapeLogBlockElapsedTime(data, artifacts.nodeosLogPath) scrapeLogDroppedForkedBlocks(data, artifacts.nodeosLogDir) trxSent = {} scrapeTrxGenTrxSentDataLogs(trxSent, artifacts.trxGenLogDirPath, tpsTestConfig.quiet) + trxAckStatsApplicable="NOT 
APPLICABLE" if list(trxSent.values())[0].acked == "NA" else "APPLICABLE" + notFound = [] - populateTrxSentTimestamp(trxSent, data.trxDict, notFound) + populateTrxSentAndAcked(trxSent, data.trxDict, notFound) prodDict = {} getProductionWindows(prodDict, data) if len(notFound) > 0: print(f"Transactions logged as sent but NOT FOUND in block!! lost {len(notFound)} out of {len(trxSent)}") - if argsDict.get("printMissingTransactions"): + if tpsTestConfig.printMissingTransactions: print(notFound) updateBlockTotals(data) populateTrxLatencies(data) writeTransactionMetrics(data.trxDict, artifacts.transactionMetricsDataPath) guide = calcChainGuide(data, tpsTestConfig.numBlocksToPrune) - trxLatencyStats, trxCpuStats, trxNetStats = calcTrxLatencyCpuNetStats(data.trxDict) + trxLatencyStats, trxCpuStats, trxNetStats, trxAckStats = calcTrxLatencyCpuNetStats(data.trxDict) tpsStats = scoreTransfersPerSecond(data, guide) blkSizeStats = calcBlockSizeStats(data, guide) prodWindows = calcProductionWindows(prodDict) @@ -507,18 +515,5 @@ def calcAndReport(data: chainData, tpsTestConfig: TpsTestConfig, artifacts: Arti if not tpsTestConfig.quiet: print(f"Blocks Guide: {guide}\nTPS: {tpsStats}\nBlock Size: {blkSizeStats}\nTrx Latency: {trxLatencyStats}\nTrx CPU: {trxCpuStats}\nTrx Net: {trxNetStats}") - start = None - finish = None - if testStart is not None: - start = testStart - finish = datetime.utcnow() - - report = createReport(guide=guide, tpsTestConfig=tpsTestConfig, tpsStats=tpsStats, blockSizeStats=blkSizeStats, trxLatencyStats=trxLatencyStats, - trxCpuStats=trxCpuStats, trxNetStats=trxNetStats, forkedBlocks=data.forkedBlocks, droppedBlocks=data.droppedBlocks, - prodWindows=prodWindows, notFound=notFound, testStart=start, testFinish=finish, argsDict=argsDict, completedRun=completedRun, - nodeosVers=nodeosVers, numNodes=data.numNodes) - return report - -def exportReportAsJSON(report: json, exportPath): - with open(exportPath, 'wt') as f: - f.write(report) + return LogAnalysis(guide=guide, tpsStats=tpsStats, blockSizeStats=blkSizeStats, trxLatencyStats=trxLatencyStats, trxCpuStats=trxCpuStats, trxNetStats=trxNetStats, + trxAckStatsApplicable=trxAckStatsApplicable, trxAckStats=trxAckStats, prodWindows=prodWindows, notFound=notFound) diff --git a/tests/performance_tests/performance_test.py b/tests/performance_tests/performance_test.py index a8cce91411..537f1926a2 100755 --- a/tests/performance_tests/performance_test.py +++ b/tests/performance_tests/performance_test.py @@ -5,7 +5,6 @@ import math import os import sys -import json import shutil from pathlib import Path, PurePath @@ -18,31 +17,17 @@ from dataclasses import dataclass, asdict, field from datetime import datetime from enum import Enum -from log_reader import LogReaderEncoder +from log_reader import JsonReportHandler class PerformanceTest: @dataclass class PerfTestSearchIndivResult: - @dataclass - class PerfTestBasicResult: - targetTPS: int = 0 - resultAvgTps: float = 0 - expectedTxns: int = 0 - resultTxns: int = 0 - tpsExpectMet: bool = False - trxExpectMet: bool = False - basicTestSuccess: bool = False - testAnalysisBlockCnt: int = 0 - logsDir: Path = Path("") - testStart: datetime = None - testEnd: datetime = None - success: bool = False searchTarget: int = 0 searchFloor: int = 0 searchCeiling: int = 0 - basicTestResult: PerfTestBasicResult = field(default_factory=PerfTestBasicResult) + basicTestResult: PerformanceTestBasic.PerfTestBasicResult = field(default_factory=PerformanceTestBasic.PerfTestBasicResult) @dataclass class PtConfig: @@ -50,6 
+35,7 @@ class PtConfig: finalDurationSec: int=300 delPerfLogs: bool=False maxTpsToTest: int=50000 + minTpsToTest: int=1 testIterationMinStep: int=500 tpsLimitPerGenerator: int=4000 delReport: bool=False @@ -62,7 +48,17 @@ class PtConfig: calcChainThreads: str="none" calcNetThreads: str="none" userTrxDataFile: Path=None - endpointApi: str="p2p" + endpointMode: str="p2p" + opModeCmd: str="" + + def __post_init__(self): + self.opModeDesc = "Block Producer Operational Mode" if self.opModeCmd == "testBpOpMode" else "API Node Operational Mode" if self.opModeCmd == "testApiOpMode" else "Undefined Operational Mode" + if self.maxTpsToTest < 1: + self.maxTpsToTest = 1 + if self.minTpsToTest < 1: + self.minTpsToTest = 1 + if self.maxTpsToTest < self.minTpsToTest: + self.minTpsToTest = self.maxTpsToTest @dataclass class TpsTestResult: @@ -103,7 +99,7 @@ def __init__(self, testHelperConfig: PerformanceTestBasic.TestHelperConfig=Perfo logDirTimestamp=f"{self.testsStart.strftime('%Y-%m-%d_%H-%M-%S')}") def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConfig, logDirRoot: Path, delReport: bool, quiet: bool, delPerfLogs: bool) -> TpsTestResult.PerfTestSearchResults: - floor = 1 + floor = self.ptConfig.minTpsToTest ceiling = self.ptConfig.maxTpsToTest binSearchTarget = self.ptConfig.maxTpsToTest minStep = self.ptConfig.testIterationMinStep @@ -114,15 +110,14 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf while ceiling >= floor: print(f"Running scenario: floor {floor} binSearchTarget {binSearchTarget} ceiling {ceiling}") - ptbResult = PerformanceTest.PerfTestSearchIndivResult.PerfTestBasicResult() - scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling, basicTestResult=ptbResult) + scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=binSearchTarget, searchFloor=floor, searchCeiling=ceiling) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=binSearchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=logDirRoot, delReport=delReport, - quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApi=self.ptConfig.endpointApi) + quiet=quiet, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=clusterConfig, ptbConfig=ptbConfig, testNamePath="performance_test") - testSuccessful = myTest.runTest() - if self.evaluateSuccess(myTest, testSuccessful, ptbResult): + myTest.runTest() + if myTest.testResult.testPassed: maxTpsAchieved = binSearchTarget maxTpsReport = myTest.report floor = binSearchTarget + minStep @@ -130,10 +125,10 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf else: ceiling = binSearchTarget - minStep - scenarioResult.basicTestResult = ptbResult + scenarioResult.basicTestResult = myTest.testResult searchResults.append(scenarioResult) if not self.ptConfig.quiet: - print(f"searchResult: {binSearchTarget} : {searchResults[-1]}") + print(f"binary search result -- target: {binSearchTarget} | result: {searchResults[-1]}") binSearchTarget = floor + (math.ceil(((ceiling - floor) / minStep) / 2) * minStep) @@ -141,9 +136,9 @@ def performPtbBinarySearch(self, clusterConfig: PerformanceTestBasic.ClusterConf def 
performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTestSearchResults: - # Default - Decrementing Max TPS in range [1, tpsInitial] - absFloor = 1 - tpsInitial = absFloor if tpsInitial <= 0 else tpsInitial + # Default - Decrementing Max TPS in range [minTpsToTest (def=1), tpsInitial] + absFloor = self.ptConfig.minTpsToTest + tpsInitial = absFloor if tpsInitial <= 0 or tpsInitial < absFloor else tpsInitial absCeiling = tpsInitial step = self.ptConfig.testIterationMinStep @@ -157,15 +152,14 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe while not maxFound: print(f"Running scenario: floor {absFloor} searchTarget {searchTarget} ceiling {absCeiling}") - ptbResult = PerformanceTest.PerfTestSearchIndivResult.PerfTestBasicResult() - scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling, basicTestResult=ptbResult) + scenarioResult = PerformanceTest.PerfTestSearchIndivResult(success=False, searchTarget=searchTarget, searchFloor=absFloor, searchCeiling=absCeiling) ptbConfig = PerformanceTestBasic.PtbConfig(targetTps=searchTarget, testTrxGenDurationSec=self.ptConfig.testDurationSec, tpsLimitPerGenerator=self.ptConfig.tpsLimitPerGenerator, numAddlBlocksToPrune=self.ptConfig.numAddlBlocksToPrune, logDirRoot=self.loggingConfig.ptbLogsDirPath, delReport=self.ptConfig.delReport, - quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointApi=self.ptConfig.endpointApi) + quiet=self.ptConfig.quiet, delPerfLogs=self.ptConfig.delPerfLogs, userTrxDataFile=self.ptConfig.userTrxDataFile, endpointMode=self.ptConfig.endpointMode) myTest = PerformanceTestBasic(testHelperConfig=self.testHelperConfig, clusterConfig=self.clusterConfig, ptbConfig=ptbConfig, testNamePath="performance_test") - testSuccessful = myTest.runTest() - if self.evaluateSuccess(myTest, testSuccessful, ptbResult): + myTest.runTest() + if myTest.testResult.testPassed: maxTpsAchieved = searchTarget maxTpsReport = myTest.report scenarioResult.success = True @@ -176,33 +170,13 @@ def performPtbReverseLinearSearch(self, tpsInitial: int) -> TpsTestResult.PerfTe maxFound = True searchTarget = max(searchTarget - step, absFloor) - scenarioResult.basicTestResult = ptbResult + scenarioResult.basicTestResult = myTest.testResult searchResults.append(scenarioResult) if not self.ptConfig.quiet: - print(f"searchResult: {searchTarget} : {searchResults[-1]}") + print(f"reverse linear search result -- target: {searchTarget} | result: {searchResults[-1]}") return PerformanceTest.TpsTestResult.PerfTestSearchResults(maxTpsAchieved=maxTpsAchieved, searchResults=searchResults, maxTpsReport=maxTpsReport) - def evaluateSuccess(self, test: PerformanceTestBasic, testSuccessful: bool, result: PerfTestSearchIndivResult.PerfTestBasicResult) -> bool: - result.targetTPS = test.ptbConfig.targetTps - result.expectedTxns = test.ptbConfig.expectedTransactionsSent - reportDict = test.report - result.testStart = reportDict["testStart"] - result.testEnd = reportDict["testFinish"] - result.resultAvgTps = reportDict["Analysis"]["TPS"]["avg"] - result.resultTxns = reportDict["Analysis"]["TrxLatency"]["samples"] - print(f"targetTPS: {result.targetTPS} expectedTxns: {result.expectedTxns} resultAvgTps: {result.resultAvgTps} resultTxns: {result.resultTxns}") - - result.tpsExpectMet = True if result.resultAvgTps >= result.targetTPS else abs(result.targetTPS - result.resultAvgTps) < 
100 - result.trxExpectMet = result.expectedTxns == result.resultTxns - result.basicTestSuccess = testSuccessful - result.testAnalysisBlockCnt = reportDict["Analysis"]["BlocksGuide"]["testAnalysisBlockCnt"] - result.logsDir = test.loggingConfig.logDirPath - - print(f"basicTestSuccess: {result.basicTestSuccess} tpsExpectationMet: {result.tpsExpectMet} trxExpectationMet: {result.trxExpectMet}") - - return result.basicTestSuccess and result.tpsExpectMet and result.trxExpectMet - class PluginThreadOpt(Enum): PRODUCER = "producer" CHAIN = "chain" @@ -279,6 +253,9 @@ def createTpsTestReport(self, tpsTestResult: TpsTestResult) -> dict: report['LongRunningMaxTpsAchieved'] = tpsTestResult.longRunningSearchResults.maxTpsAchieved report['tpsTestStart'] = tpsTestResult.tpsTestStart report['tpsTestFinish'] = tpsTestResult.tpsTestFinish + report['tpsTestDuration'] = tpsTestResult.tpsTestFinish - tpsTestResult.tpsTestStart + report['InitialSearchScenariosSummary'] = {tpsTestResult.binSearchResults.searchResults[x].searchTarget : "PASS" if tpsTestResult.binSearchResults.searchResults[x].success else "FAIL" for x in range(len(tpsTestResult.binSearchResults.searchResults))} + report['LongRunningSearchScenariosSummary'] = {tpsTestResult.longRunningSearchResults.searchResults[x].searchTarget : "PASS" if tpsTestResult.longRunningSearchResults.searchResults[x].success else "FAIL" for x in range(len(tpsTestResult.longRunningSearchResults.searchResults))} report['InitialSearchResults'] = {x: asdict(tpsTestResult.binSearchResults.searchResults[x]) for x in range(len(tpsTestResult.binSearchResults.searchResults))} report['InitialMaxTpsReport'] = tpsTestResult.binSearchResults.maxTpsReport report['LongRunningSearchResults'] = {x: asdict(tpsTestResult.longRunningSearchResults.searchResults[x]) for x in range(len(tpsTestResult.longRunningSearchResults.searchResults))} @@ -290,6 +267,8 @@ def createReport(self, producerThreadResult: PluginThreadOptResult=None, chainTh report = {} report['perfTestsBegin'] = self.testsStart report['perfTestsFinish'] = self.testsFinish + report['perfTestsDuration'] = self.testsFinish - self.testsStart + report['operationalMode'] = self.ptConfig.opModeDesc if tpsTestResult is not None: report.update(self.createTpsTestReport(tpsTestResult)) @@ -307,13 +286,6 @@ def createReport(self, producerThreadResult: PluginThreadOptResult=None, chainTh report['nodeosVersion'] = nodeosVers return report - def reportAsJSON(self, report: dict) -> json: - return json.dumps(report, indent=2, cls=LogReaderEncoder) - - def exportReportAsJSON(self, report: json, exportPath): - with open(exportPath, 'wt') as f: - f.write(report) - def testDirsCleanup(self): try: def removeArtifacts(path): @@ -437,13 +409,13 @@ def runTest(self): self.testsFinish = datetime.utcnow() self.report = self.createReport(producerThreadResult=prodResults, chainThreadResult=chainResults, netThreadResult=netResults, tpsTestResult=tpsTestResult, nodeosVers=self.clusterConfig.nodeosVers) - jsonReport = self.reportAsJSON(self.report) + jsonReport = JsonReportHandler.reportAsJSON(self.report) if not self.ptConfig.quiet: print(f"Full Performance Test Report: {jsonReport}") if not self.ptConfig.delReport: - self.exportReportAsJSON(jsonReport, self.loggingConfig.logDirPath/Path("report.json")) + JsonReportHandler.exportReportAsJSON(jsonReport, self.loggingConfig.logDirPath/Path("report.json")) if self.ptConfig.delPerfLogs: print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") @@ -487,6 +459,7 @@ def 
createPtParser(suppressHelp:bool=False): ptTpsParserGroup = ptParser.add_argument_group(title=None if suppressHelp else ptTpsGrpTitle, description=None if suppressHelp else ptTpsGrpDescription) ptTpsParserGroup.add_argument("--max-tps-to-test", type=int, help=argparse.SUPPRESS if suppressHelp else "The max target transfers realistic as ceiling of test range", default=50000) + ptTpsParserGroup.add_argument("--min-tps-to-test", type=int, help=argparse.SUPPRESS if suppressHelp else "The min target transfers to use as floor of test range", default=1) ptTpsParserGroup.add_argument("--test-iteration-duration-sec", type=int, help=argparse.SUPPRESS if suppressHelp else "The duration of transfer trx generation for each iteration of the test during the initial search (seconds)", default=150) ptTpsParserGroup.add_argument("--test-iteration-min-step", type=int, help=argparse.SUPPRESS if suppressHelp else "The step size determining granularity of tps result during initial search", default=500) ptTpsParserGroup.add_argument("--final-iterations-duration-sec", type=int, help=argparse.SUPPRESS if suppressHelp else "The duration of transfer trx generation for each final longer run iteration of the test during the final search (seconds)", default=300) @@ -511,7 +484,7 @@ def createPtParser(suppressHelp:bool=False): Eg: performance_test.py testBpOpMode --help") ptParserSubparsers = phParser.add_subparsers(title="Operational Modes", description=opModeDesc, - dest="Operational Mode sub-command", + dest="op_mode_sub_cmd", required=True, help="Currently supported operational mode sub-commands.") #Create the Block Producer Operational Mode Sub-Command and Parsers @@ -550,7 +523,6 @@ def parseArgs(): return args def main(): - args = PerfTestArgumentsHandler.parseArgs() Utils.Debug = args.v @@ -561,6 +533,7 @@ def main(): finalDurationSec=args.final_iterations_duration_sec, delPerfLogs=args.del_perf_logs, maxTpsToTest=args.max_tps_to_test, + minTpsToTest=args.min_tps_to_test, testIterationMinStep=args.test_iteration_min_step, tpsLimitPerGenerator=args.tps_limit_per_generator, delReport=args.del_report, @@ -573,7 +546,8 @@ def main(): calcChainThreads=args.calc_chain_threads, calcNetThreads=args.calc_net_threads, userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None, - endpointApi=args.endpoint_api) + endpointMode=args.endpoint_mode, + opModeCmd=args.op_mode_sub_cmd) myTest = PerformanceTest(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptConfig=ptConfig) diff --git a/tests/performance_tests/performance_test_basic.py b/tests/performance_tests/performance_test_basic.py index 42dbab30b1..bdd2163b86 100755 --- a/tests/performance_tests/performance_test_basic.py +++ b/tests/performance_tests/performance_test_basic.py @@ -9,7 +9,6 @@ import signal import json import log_reader -import inspect import traceback from pathlib import Path, PurePath @@ -19,8 +18,9 @@ from TestHarness import Account, Cluster, TestHelper, Utils, WalletMgr, TransactionGeneratorsLauncher, TpsTrxGensConfig from TestHarness.TestHelper import AppArgs from dataclasses import dataclass, asdict, field -from datetime import datetime +from datetime import datetime, timedelta from pathlib import Path +from platform import release, system class PerformanceTestBasic: @dataclass @@ -30,22 +30,37 @@ class PtbTpsTestResult: targetTpsPerGenList: list = field(default_factory=list) trxGenExitCodes: list = field(default_factory=list) + @dataclass + class PerfTestBasicResult: + testStart: datetime = None 
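+ # testEnd is recorded once log analysis completes; __post_init__ then derives testDuration and the pass/fail booleans from the raw result counts below.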
+ testEnd: datetime = None + testDuration: timedelta = None + testPassed: bool = False + testRunSuccessful: bool = False + testRunCompleted: bool = False + tpsExpectMet: bool = False + trxExpectMet: bool = False + targetTPS: int = 0 + resultAvgTps: float = 0 + expectedTxns: int = 0 + resultTxns: int = 0 + testAnalysisBlockCnt: int = 0 + logsDir: Path = Path("") + + def __post_init__(self): + self.testDuration = None if self.testStart is None or self.testEnd is None else self.testEnd - self.testStart + self.tpsExpectMet=True if self.resultAvgTps >= self.targetTPS else abs(self.targetTPS - self.resultAvgTps) < 100 + self.trxExpectMet=self.expectedTxns == self.resultTxns + self.testRunSuccessful = self.testRunCompleted and self.expectedTxns == self.resultTxns + self.testPassed = self.testRunSuccessful and self.tpsExpectMet and self.trxExpectMet + @dataclass class TestHelperConfig: - killAll: bool = True # clean_run - dontKill: bool = False # leave_running - keepLogs: bool = True dumpErrorDetails: bool = False delay: int = 1 nodesFile: str = None verbose: bool = False unshared: bool = False - _killEosInstances: bool = True - _killWallet: bool = True - - def __post_init__(self): - self._killEosInstances = not self.dontKill - self._killWallet = not self.dontKill @dataclass class ClusterConfig: @@ -79,11 +94,13 @@ class SpecifiedContract: producerNodeCount: int = 1 validationNodeCount: int = 1 apiNodeCount: int = 0 + dontKill: bool = False # leave_running extraNodeosArgs: ExtraNodeosArgs = field(default_factory=ExtraNodeosArgs) specifiedContract: SpecifiedContract = field(default_factory=SpecifiedContract) genesisPath: Path = Path("tests")/"performance_tests"/"genesis.json" maximumP2pPerHost: int = 5000 maximumClients: int = 0 + keepLogs: bool = True loggingLevel: str = "info" loggingDict: dict = field(default_factory=lambda: { "bios": "off" }) prodsEnableTraceApi: bool = False @@ -95,6 +112,7 @@ class SpecifiedContract: _validationNodeIds: list = field(default_factory=list) _apiNodeIds: list = field(default_factory=list) nonProdsEosVmOcEnable: bool = False + apiNodesReadOnlyThreadCount: int = 0 def __post_init__(self): self._totalNodes = self.producerNodeCount + self.validationNodeCount + self.apiNodeCount @@ -106,31 +124,36 @@ def __post_init__(self): def configureValidationNodes(): validationNodeSpecificNodeosStr = "" - if self.nodeosVers == "v2": + if "v2" in self.nodeosVers: validationNodeSpecificNodeosStr += '--plugin eosio::history_api_plugin --filter-on "*" ' else: #If prodsEnableTraceApi, then Cluster configures all nodes with trace_api_plugin so no need to duplicate here if not self.prodsEnableTraceApi: validationNodeSpecificNodeosStr += "--plugin eosio::trace_api_plugin " if self.nonProdsEosVmOcEnable: - validationNodeSpecificNodeosStr += "--eos-vm-oc-enable " + validationNodeSpecificNodeosStr += "--eos-vm-oc-enable all " if validationNodeSpecificNodeosStr: self.specificExtraNodeosArgs.update({f"{nodeId}" : validationNodeSpecificNodeosStr for nodeId in self._validationNodeIds}) def configureApiNodes(): apiNodeSpecificNodeosStr = "" apiNodeSpecificNodeosStr += "--plugin eosio::chain_api_plugin " + apiNodeSpecificNodeosStr += "--plugin eosio::net_api_plugin " + if "v4" in self.nodeosVers: + apiNodeSpecificNodeosStr += f"--read-only-threads {self.apiNodesReadOnlyThreadCount} " if apiNodeSpecificNodeosStr: self.specificExtraNodeosArgs.update({f"{nodeId}" : apiNodeSpecificNodeosStr for nodeId in self._apiNodeIds}) - configureValidationNodes() - configureApiNodes() + if 
self.validationNodeCount > 0: + configureValidationNodes() + if self.apiNodeCount > 0: + configureApiNodes() - assert self.nodeosVers != "v1" and self.nodeosVers != "v0", f"nodeos version {Utils.getNodeosVersion().split('.')[0]} is unsupported by performance test" - if self.nodeosVers == "v2": + assert "v1" not in self.nodeosVers and "v0" not in self.nodeosVers, f"nodeos version {Utils.getNodeosVersion()} is unsupported by performance test" + if "v2" in self.nodeosVers: self.writeTrx = lambda trxDataFile, blockNum, trx: [trxDataFile.write(f"{trx['trx']['id']},{blockNum},{trx['cpu_usage_us']},{trx['net_usage_words']}\n")] self.createBlockData = lambda block, blockTransactionTotal, blockNetTotal, blockCpuTotal: log_reader.blockData(blockId=block["payload"]["id"], blockNum=block['payload']['block_num'], transactions=blockTransactionTotal, net=blockNetTotal, cpu=blockCpuTotal, producer=block["payload"]["producer"], status=block["payload"]["confirmed"], _timestamp=block["payload"]["timestamp"]) - self.updateTrxDict = lambda blockNum, transaction, trxDict: trxDict.update(dict([(transaction['trx']['id'], log_reader.trxData(blockNum, transaction['cpu_usage_us'],transaction['net_usage_words']))])) + self.updateTrxDict = lambda blockNum, transaction, trxDict: trxDict.update(dict([(transaction['trx']['id'], log_reader.trxData(blockNum, transaction['cpu_usage_us'], transaction['net_usage_words']))])) else: self.writeTrx = lambda trxDataFile, blockNum, trx:[ trxDataFile.write(f"{trx['id']},{trx['block_num']},{trx['block_time']},{trx['cpu_usage_us']},{trx['net_usage_words']},{trx['actions']}\n") ] self.createBlockData = lambda block, blockTransactionTotal, blockNetTotal, blockCpuTotal: log_reader.blockData(blockId=block["payload"]["id"], blockNum=block['payload']['number'], transactions=blockTransactionTotal, net=blockNetTotal, cpu=blockCpuTotal, producer=block["payload"]["producer"], status=block["payload"]["status"], _timestamp=block["payload"]["timestamp"]) @@ -148,10 +171,14 @@ class PtbConfig: expectedTransactionsSent: int = field(default_factory=int, init=False) printMissingTransactions: bool=False userTrxDataFile: Path=None - endpointApi: str="p2p" + endpointMode: str="p2p" + apiEndpoint: str=None + def __post_init__(self): self.expectedTransactionsSent = self.testTrxGenDurationSec * self.targetTps + if (self.endpointMode == "http"): + self.apiEndpoint="/v1/chain/send_transaction2" @dataclass class LoggingConfig: @@ -168,6 +195,10 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.clusterConfig = clusterConfig self.ptbConfig = ptbConfig + #Results + self.ptbTpsTestResult = PerformanceTestBasic.PtbTpsTestResult() + self.testResult = PerformanceTestBasic.PerfTestBasicResult() + self.testHelperConfig.keepLogs = not self.ptbConfig.delPerfLogs Utils.Debug = self.testHelperConfig.verbose @@ -175,6 +206,7 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste self.emptyBlockGoal = 1 self.testStart = datetime.utcnow() + self.testEnd = self.testStart self.testNamePath = testNamePath self.loggingConfig = PerformanceTestBasic.LoggingConfig(logDirBase=Path(self.ptbConfig.logDirRoot)/self.testNamePath, logDirTimestamp=f"{self.testStart.strftime('%Y-%m-%d_%H-%M-%S')}", @@ -199,14 +231,11 @@ def __init__(self, testHelperConfig: TestHelperConfig=TestHelperConfig(), cluste # Setup cluster and its wallet manager self.walletMgr=WalletMgr(True) - self.cluster=Cluster(walletd=True, loggingLevel=self.clusterConfig.loggingLevel, 
loggingLevelDict=self.clusterConfig.loggingDict, - nodeosVers=self.clusterConfig.nodeosVers,unshared=self.testHelperConfig.unshared) + self.cluster=Cluster(loggingLevel=self.clusterConfig.loggingLevel, loggingLevelDict=self.clusterConfig.loggingDict, + nodeosVers=self.clusterConfig.nodeosVers,unshared=self.testHelperConfig.unshared, + keepRunning=self.clusterConfig.dontKill, keepLogs=self.clusterConfig.keepLogs) self.cluster.setWalletMgr(self.walletMgr) - def cleanupOldClusters(self): - self.cluster.killall(allInstances=self.testHelperConfig.killAll) - self.cluster.cleanup() - def testDirsCleanup(self, delReport: bool=False): try: def removeArtifacts(path): @@ -258,7 +287,7 @@ def fileOpenMode(self, filePath) -> str: def isOnBlockTransaction(self, transaction): # v2 history does not include onblock - if self.clusterConfig.nodeosVers == "v2": + if "v2" in self.clusterConfig.nodeosVers: return False else: if transaction['actions'][0]['account'] != 'eosio' or transaction['actions'][0]['action'] != 'onblock': @@ -368,10 +397,10 @@ def runTpsTest(self) -> PtbTpsTestResult: self.connectionPairList = [] def configureConnections(): - if(self.ptbConfig.endpointApi == "http"): + if(self.ptbConfig.endpointMode == "http"): for apiNodeId in self.clusterConfig._apiNodeIds: self.connectionPairList.append(f"{self.cluster.getNode(apiNodeId).host}:{self.cluster.getNode(apiNodeId).port}") - else: # endpointApi == p2p + else: # endpointMode == p2p for producerId in self.clusterConfig._producerNodeIds: self.connectionPairList.append(f"{self.cluster.getNode(producerId).host}:{self.cluster.getNodeP2pPort(producerId)}") @@ -396,6 +425,9 @@ def configureConnections(): print(f"Creating accounts specified in userTrxData: {self.userTrxDataDict['initAccounts']}") self.setupWalletAndAccounts(accountCnt=len(self.userTrxDataDict['initAccounts']), accountNames=self.userTrxDataDict['initAccounts']) abiFile = self.userTrxDataDict['abiFile'] + if 'apiEndpoint' in self.userTrxDataDict: + self.ptbConfig.apiEndpoint = self.userTrxDataDict['apiEndpoint'] + print(f'API Endpoint specified: {self.ptbConfig.apiEndpoint}') actionsDataJson = json.dumps(self.userTrxDataDict['actions']) @@ -420,13 +452,13 @@ def configureConnections(): self.cluster.biosNode.kill(signal.SIGTERM) self.data.startBlock = self.waitForEmptyBlocks(self.validationNode, self.emptyBlockGoal) - tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, connectionPairList=self.connectionPairList, endpointApi=self.ptbConfig.endpointApi) + tpsTrxGensConfig = TpsTrxGensConfig(targetTps=self.ptbConfig.targetTps, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, connectionPairList=self.connectionPairList) self.cluster.trxGenLauncher = TransactionGeneratorsLauncher(chainId=chainId, lastIrreversibleBlockId=lib_id, contractOwnerAccount=self.clusterConfig.specifiedContract.account.name, accts=','.join(map(str, self.accountNames)), privateKeys=','.join(map(str, self.accountPrivKeys)), trxGenDurationSec=self.ptbConfig.testTrxGenDurationSec, logDir=self.trxGenLogDirPath, abiFile=abiFile, actionsData=actionsDataJson, actionsAuths=actionsAuthsJson, - tpsTrxGensConfig=tpsTrxGensConfig) + tpsTrxGensConfig=tpsTrxGensConfig, endpointMode=self.ptbConfig.endpointMode, apiEndpoint=self.ptbConfig.apiEndpoint) trxGenExitCodes = self.cluster.trxGenLauncher.launch() print(f"Transaction Generator exit codes: {trxGenExitCodes}") @@ -443,7 +475,7 @@ def configureConnections(): if len(trxSent) != 
self.ptbConfig.expectedTransactionsSent: print(f"ERROR: Transactions generated: {len(trxSent)} does not match the expected number of transactions: {self.ptbConfig.expectedTransactionsSent}") blocksToWait = 2 * self.ptbConfig.testTrxGenDurationSec + 10 - trxSent = self.validationNode.waitForTransactionsInBlockRange(trxSent, self.data.startBlock, blocksToWait) + trxNotFound = self.validationNode.waitForTransactionsInBlockRange(trxSent, self.data.startBlock, blocksToWait) self.data.ceaseBlock = self.validationNode.getHeadBlockNum() return PerformanceTestBasic.PtbTpsTestResult(completedRun=completedRun, numGeneratorsUsed=tpsTrxGensConfig.numGenerators, @@ -460,40 +492,83 @@ def prepArgs(self) -> dict: def captureLowLevelArtifacts(self): try: - pid = os.getpid() shutil.move(f"{self.cluster.nodeosLogPath}", f"{self.varLogsDirPath}") except Exception as e: print(f"Failed to move '{self.cluster.nodeosLogPath}' to '{self.varLogsDirPath}': {type(e)}: {e}") - etcEosioDir = Path("etc")/"eosio" - for path in os.listdir(etcEosioDir): - if path == "launcher": - try: - # Need to copy here since testnet.template is only generated at compile time then reused, therefore - # it needs to remain in etc/eosio/launcher for subsequent tests. - shutil.copytree(etcEosioDir/Path(path), self.etcEosioLogsDirPath/Path(path)) - except Exception as e: - print(f"Failed to copy '{etcEosioDir}/{path}' to '{self.etcEosioLogsDirPath}/{path}': {type(e)}: {e}") - else: - try: - shutil.move(etcEosioDir/Path(path), self.etcEosioLogsDirPath/Path(path)) - except Exception as e: - print(f"Failed to move '{etcEosioDir}/{path}' to '{self.etcEosioLogsDirPath}/{path}': {type(e)}: {e}") - + def createReport(self, logAnalysis: log_reader.LogAnalysis, tpsTestConfig: log_reader.TpsTestConfig, argsDict: dict, testResult: PerfTestBasicResult) -> dict: + report = {} + report['targetApiEndpointType'] = self.ptbConfig.endpointMode + report['targetApiEndpoint'] = self.ptbConfig.apiEndpoint if self.ptbConfig.apiEndpoint is not None else "NA for P2P" + report['Result'] = asdict(testResult) + report['Analysis'] = {} + report['Analysis']['BlockSize'] = asdict(logAnalysis.blockSizeStats) + report['Analysis']['BlocksGuide'] = asdict(logAnalysis.guide) + report['Analysis']['TPS'] = asdict(logAnalysis.tpsStats) + report['Analysis']['TPS']['configTps'] = tpsTestConfig.targetTps + report['Analysis']['TPS']['configTestDuration'] = tpsTestConfig.testDurationSec + report['Analysis']['TPS']['tpsPerGenerator'] = tpsTestConfig.targetTpsPerGenList + report['Analysis']['TPS']['generatorCount'] = tpsTestConfig.numTrxGensUsed + report['Analysis']['TrxCPU'] = asdict(logAnalysis.trxCpuStats) + report['Analysis']['TrxLatency'] = asdict(logAnalysis.trxLatencyStats) + report['Analysis']['TrxLatency']['units'] = "seconds" + report['Analysis']['TrxNet'] = asdict(logAnalysis.trxNetStats) + report['Analysis']['TrxAckResponseTime'] = asdict(logAnalysis.trxAckStats) + report['Analysis']['TrxAckResponseTime']['measurementApplicable'] = logAnalysis.trxAckStatsApplicable + report['Analysis']['TrxAckResponseTime']['units'] = "microseconds" + report['Analysis']['ExpectedTransactions'] = testResult.expectedTxns + report['Analysis']['DroppedTransactions'] = len(logAnalysis.notFound) + report['Analysis']['ProductionWindowsTotal'] = logAnalysis.prodWindows.totalWindows + report['Analysis']['ProductionWindowsAverageSize'] = logAnalysis.prodWindows.averageWindowSize + report['Analysis']['ProductionWindowsMissed'] = logAnalysis.prodWindows.missedWindows + report['Analysis']['ForkedBlocks'] 
= {} + report['Analysis']['ForksCount'] = {} + report['Analysis']['DroppedBlocks'] = {} + report['Analysis']['DroppedBlocksCount'] = {} + for nodeNum in range(0, self.data.numNodes): + formattedNodeNum = str(nodeNum).zfill(2) + report['Analysis']['ForkedBlocks'][formattedNodeNum] = self.data.forkedBlocks[formattedNodeNum] + report['Analysis']['ForksCount'][formattedNodeNum] = len(self.data.forkedBlocks[formattedNodeNum]) + report['Analysis']['DroppedBlocks'][formattedNodeNum] = self.data.droppedBlocks[formattedNodeNum] + report['Analysis']['DroppedBlocksCount'][formattedNodeNum] = len(self.data.droppedBlocks[formattedNodeNum]) + report['args'] = argsDict + report['args']['userTrxData'] = self.userTrxDataDict if self.ptbConfig.userTrxDataFile is not None else "NOT CONFIGURED" + report['env'] = {'system': system(), 'os': os.name, 'release': release(), 'logical_cpu_count': os.cpu_count()} + report['nodeosVersion'] = self.clusterConfig.nodeosVers + return report def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): args = self.prepArgs() artifactsLocate = log_reader.ArtifactPaths(nodeosLogDir=self.nodeosLogDir, nodeosLogPath=self.nodeosLogPath, trxGenLogDirPath=self.trxGenLogDirPath, blockTrxDataPath=self.blockTrxDataPath, blockDataPath=self.blockDataPath, transactionMetricsDataPath=self.transactionMetricsDataPath) tpsTestConfig = log_reader.TpsTestConfig(targetTps=self.ptbConfig.targetTps, testDurationSec=self.ptbConfig.testTrxGenDurationSec, tpsLimitPerGenerator=self.ptbConfig.tpsLimitPerGenerator, - numBlocksToPrune=self.ptbConfig.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, - targetTpsPerGenList=testResult.targetTpsPerGenList, quiet=self.ptbConfig.quiet) - self.report = log_reader.calcAndReport(data=self.data, tpsTestConfig=tpsTestConfig, artifacts=artifactsLocate, argsDict=args, testStart=self.testStart, - completedRun=testResult.completedRun,nodeosVers=self.clusterConfig.nodeosVers) + numBlocksToPrune=self.ptbConfig.numAddlBlocksToPrune, numTrxGensUsed=testResult.numGeneratorsUsed, targetTpsPerGenList=testResult.targetTpsPerGenList, + quiet=self.ptbConfig.quiet, printMissingTransactions=self.ptbConfig.printMissingTransactions) + self.logAnalysis = log_reader.analyzeLogResults(data=self.data, tpsTestConfig=tpsTestConfig, artifacts=artifactsLocate) + self.testEnd = datetime.utcnow() + + self.testResult = PerformanceTestBasic.PerfTestBasicResult(targetTPS=self.ptbConfig.targetTps, resultAvgTps=self.logAnalysis.tpsStats.avg, expectedTxns=self.ptbConfig.expectedTransactionsSent, + resultTxns=self.logAnalysis.trxLatencyStats.samples, testRunCompleted=self.ptbTpsTestResult.completedRun, + testAnalysisBlockCnt=self.logAnalysis.guide.testAnalysisBlockCnt, logsDir=self.loggingConfig.logDirPath, + testStart=self.testStart, testEnd=self.testEnd) + + print(f"targetTPS: {self.testResult.targetTPS} expectedTxns: {self.testResult.expectedTxns} resultAvgTps: {self.testResult.resultAvgTps} resultTxns: {self.testResult.resultTxns}") + + if not self.ptbTpsTestResult.completedRun: + for exitCode in self.ptbTpsTestResult.trxGenExitCodes: + if exitCode != 0: + print(f"Error: Transaction Generator exited with error {exitCode}") + + if self.ptbTpsTestResult.completedRun and self.ptbConfig.expectedTransactionsSent != self.data.totalTransactions: + print(f"Error: Transactions received: {self.data.totalTransactions} did not match expected total: {self.ptbConfig.expectedTransactionsSent}") + + print(f"testRunSuccessful: {self.testResult.testRunSuccessful} testPassed: 
{self.testResult.testPassed} tpsExpectationMet: {self.testResult.tpsExpectMet} trxExpectationMet: {self.testResult.trxExpectMet}") + + self.report = self.createReport(logAnalysis=self.logAnalysis, tpsTestConfig=tpsTestConfig, argsDict=args, testResult=self.testResult) jsonReport = None if not self.ptbConfig.quiet or not self.ptbConfig.delReport: - jsonReport = log_reader.reportAsJSON(self.report) + jsonReport = log_reader.JsonReportHandler.reportAsJSON(self.report) if not self.ptbConfig.quiet: print(self.data) @@ -501,44 +576,34 @@ def analyzeResultsAndReport(self, testResult: PtbTpsTestResult): print(f"Report:\n{jsonReport}") if not self.ptbConfig.delReport: - log_reader.exportReportAsJSON(jsonReport, self.reportPath) + log_reader.JsonReportHandler.exportReportAsJSON(jsonReport, self.reportPath) def preTestSpinup(self): - self.cleanupOldClusters() self.testDirsCleanup() self.testDirsSetup() + self.walletMgr.launch() if self.launchCluster() == False: self.errorExit('Failed to stand up cluster.') def postTpsTestSteps(self): self.queryBlockTrxData(self.validationNode, self.blockDataPath, self.blockTrxDataPath, self.data.startBlock, self.data.ceaseBlock) + self.cluster.shutdown() + self.walletMgr.shutdown() def runTest(self) -> bool: - testSuccessful = False try: # Kill any existing instances and launch cluster TestHelper.printSystemInfo("BEGIN") self.preTestSpinup() - self.ptbTestResult = self.runTpsTest() + self.ptbTpsTestResult = self.runTpsTest() self.postTpsTestSteps() self.captureLowLevelArtifacts() - self.analyzeResultsAndReport(self.ptbTestResult) - - testSuccessful = self.ptbTestResult.completedRun - - if not self.PtbTpsTestResult.completedRun: - for exitCode in self.ptbTestResult.trxGenExitCodes: - if exitCode != 0: - print(f"Error: Transaction Generator exited with error {exitCode}") - - if testSuccessful and self.ptbConfig.expectedTransactionsSent != self.data.totalTransactions: - testSuccessful = False - print(f"Error: Transactions received: {self.data.totalTransactions} did not match expected total: {self.ptbConfig.expectedTransactionsSent}") + self.analyzeResultsAndReport(self.ptbTpsTestResult) except: traceback.print_exc() @@ -549,11 +614,7 @@ def runTest(self) -> bool: TestHelper.shutdown( cluster=self.cluster, walletMgr=self.walletMgr, - testSuccessful=testSuccessful, - killEosInstances=self.testHelperConfig._killEosInstances, - killWallet=self.testHelperConfig._killWallet, - keepLogs=False, - cleanRun=self.testHelperConfig.killAll, + testSuccessful=self.testResult.testRunSuccessful, dumpErrorDetails=self.testHelperConfig.dumpErrorDetails ) @@ -561,11 +622,10 @@ def runTest(self) -> bool: print(f"Cleaning up logs directory: {self.loggingConfig.logDirPath}") self.testDirsCleanup(self.ptbConfig.delReport) - return testSuccessful + return self.testResult.testRunSuccessful def setupTestHelperConfig(args) -> TestHelperConfig: - return PerformanceTestBasic.TestHelperConfig(killAll=args.clean_run, dontKill=args.leave_running, keepLogs=not args.del_perf_logs, - dumpErrorDetails=args.dump_error_details, delay=args.d, verbose=args.v) + return PerformanceTestBasic.TestHelperConfig(dumpErrorDetails=args.dump_error_details, delay=args.d, verbose=args.v) def setupClusterConfig(args) -> ClusterConfig: @@ -576,42 +636,46 @@ def setupClusterConfig(args) -> ClusterConfig: blockLogRetainBlocks=args.block_log_retain_blocks, chainStateDbSizeMb=args.chain_state_db_size_mb, abiSerializerMaxTimeMs=990000) - producerPluginArgs = 
ProducerPluginArgs(disableSubjectiveBilling=args.disable_subjective_billing, + producerPluginArgs = ProducerPluginArgs(disableSubjectiveApiBilling=args.disable_subjective_billing, + disableSubjectiveP2pBilling=args.disable_subjective_billing, cpuEffortPercent=args.cpu_effort_percent, producerThreads=args.producer_threads, maxTransactionTime=-1) httpPluginArgs = HttpPluginArgs(httpMaxBytesInFlightMb=args.http_max_bytes_in_flight_mb, httpMaxInFlightRequests=args.http_max_in_flight_requests, httpMaxResponseTimeMs=args.http_max_response_time_ms, httpThreads=args.http_threads) netPluginArgs = NetPluginArgs(netThreads=args.net_threads, maxClients=0) - nodeosVers=Utils.getNodeosVersion().split('.')[0] - resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=not nodeosVers == "v2") + nodeosVers=Utils.getNodeosVersion() + resourceMonitorPluginArgs = ResourceMonitorPluginArgs(resourceMonitorNotShutdownOnThresholdExceeded=not "v2" in nodeosVers) ENA = PerformanceTestBasic.ClusterConfig.ExtraNodeosArgs extraNodeosArgs = ENA(chainPluginArgs=chainPluginArgs, httpPluginArgs=httpPluginArgs, producerPluginArgs=producerPluginArgs, netPluginArgs=netPluginArgs, resourceMonitorPluginArgs=resourceMonitorPluginArgs) SC = PerformanceTestBasic.ClusterConfig.SpecifiedContract specifiedContract=SC(contractDir=args.contract_dir, wasmFile=args.wasm_file, abiFile=args.abi_file, account=Account(args.account_name)) - return PerformanceTestBasic.ClusterConfig(producerNodeCount=args.producer_nodes, validationNodeCount=args.validation_nodes, apiNodeCount=args.api_nodes, + return PerformanceTestBasic.ClusterConfig(dontKill=args.leave_running, keepLogs=not args.del_perf_logs, + producerNodeCount=args.producer_nodes, validationNodeCount=args.validation_nodes, apiNodeCount=args.api_nodes, genesisPath=args.genesis, prodsEnableTraceApi=args.prods_enable_trace_api, extraNodeosArgs=extraNodeosArgs, specifiedContract=specifiedContract, loggingLevel=args.cluster_log_lvl, - nodeosVers=nodeosVers, nonProdsEosVmOcEnable=args.non_prods_eos_vm_oc_enable) + nodeosVers=nodeosVers, nonProdsEosVmOcEnable=args.non_prods_eos_vm_oc_enable, + apiNodesReadOnlyThreadCount=args.api_nodes_read_only_threads) class PtbArgumentsHandler(object): @staticmethod def _createBaseArgumentParser(defEndpointApiDef: str, defProdNodeCnt: int, defValidationNodeCnt: int, defApiNodeCnt: int, suppressHelp: bool=False): testHelperArgParser=TestHelper.createArgumentParser(includeArgs={"-d","--dump-error-details","-v","--leave-running" - ,"--clean-run","--unshared"}, suppressHelp=suppressHelp) + ,"--unshared"}, suppressHelp=suppressHelp) ptbBaseParser = argparse.ArgumentParser(parents=[testHelperArgParser], add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter) ptbBaseGrpTitle="Performance Test Basic Base" ptbBaseGrpDescription="Performance Test Basic base configuration items." ptbBaseParserGroup = ptbBaseParser.add_argument_group(title=None if suppressHelp else ptbBaseGrpTitle, description=None if suppressHelp else ptbBaseGrpDescription) - ptbBaseParserGroup.add_argument("--endpoint-api", type=str, help=argparse.SUPPRESS if suppressHelp else "Endpointt API mode (\"p2p\", \"http\"). \ + ptbBaseParserGroup.add_argument("--endpoint-mode", type=str, help=argparse.SUPPRESS if suppressHelp else "Endpoint mode (\"p2p\", \"http\"). \ In \"p2p\" mode transactions will be directed to the p2p endpoint on a producer node. 
\ In \"http\" mode transactions will be directed to the http endpoint on an api node.", choices=["p2p", "http"], default=defEndpointApiDef) ptbBaseParserGroup.add_argument("--producer-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "Producing nodes count", default=defProdNodeCnt) ptbBaseParserGroup.add_argument("--validation-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "Validation nodes count", default=defValidationNodeCnt) ptbBaseParserGroup.add_argument("--api-nodes", type=int, help=argparse.SUPPRESS if suppressHelp else "API nodes count", default=defApiNodeCnt) + ptbBaseParserGroup.add_argument("--api-nodes-read-only-threads", type=int, help=argparse.SUPPRESS if suppressHelp else "API nodes read only threads count for use with read-only transactions", default=0) ptbBaseParserGroup.add_argument("--tps-limit-per-generator", type=int, help=argparse.SUPPRESS if suppressHelp else "Maximum amount of transactions per second a single generator can have.", default=4000) ptbBaseParserGroup.add_argument("--genesis", type=str, help=argparse.SUPPRESS if suppressHelp else "Path to genesis.json", default="tests/performance_tests/genesis.json") ptbBaseParserGroup.add_argument("--num-blocks-to-prune", type=int, help=argparse.SUPPRESS if suppressHelp else ("The number of potentially non-empty blocks, in addition to leading and trailing size 0 blocks, " @@ -712,7 +776,7 @@ def main(): delPerfLogs=args.del_perf_logs, printMissingTransactions=args.print_missing_transactions, userTrxDataFile=Path(args.user_trx_data_file) if args.user_trx_data_file is not None else None, - endpointApi=args.endpoint_api) + endpointMode=args.endpoint_mode) myTest = PerformanceTestBasic(testHelperConfig=testHelperConfig, clusterConfig=testClusterConfig, ptbConfig=ptbConfig) diff --git a/tests/performance_tests/readOnlyTrxData.json b/tests/performance_tests/readOnlyTrxData.json new file mode 100644 index 0000000000..9c6a367b9f --- /dev/null +++ b/tests/performance_tests/readOnlyTrxData.json @@ -0,0 +1,14 @@ +{ + "initAccounts": ["payloadless"], + "abiFile": "unittests/test-contracts/payloadless/payloadless.abi", + "apiEndpoint": "/v1/chain/send_read_only_transaction", + "actions": [ + { + "actionName": "doit", + "actionData": { + }, + "actionAuthAcct": "payloadless", + "authorization": {} + } + ] +} diff --git a/tests/plugin_http_api_test.py b/tests/plugin_http_api_test.py index a27b07fe5c..33755e482f 100755 --- a/tests/plugin_http_api_test.py +++ b/tests/plugin_http_api_test.py @@ -7,42 +7,79 @@ import unittest import socket import re +import shlex +import argparse +import sys +import signal +from pathlib import Path from TestHarness import Account, Node, TestHelper, Utils, WalletMgr, ReturnType +class HttpCategoryConfig: + categories = ["chain_ro", "chain_rw", "db_size", "net_ro", "net_rw", "producer_ro", + "producer_rw", "snapshot", "trace_api", "prometheus", "test_control"] + default_port = int(TestHelper.DEFAULT_PORT) + def __init__(self, use_category: bool): + if use_category: + self.ports = dict(zip(HttpCategoryConfig.categories, range( + HttpCategoryConfig.default_port, HttpCategoryConfig.default_port+len(HttpCategoryConfig.categories)))) + else: + self.ports = None + + def nodeosArgs(self): + if not self.ports: + return "" + args = list(map( + lambda item: f"--http-category-address {item[0]},{TestHelper.LOCAL_HOST}:{item[1]}", self.ports.items())) + + # at this moment, cleos cannot handle split api endpoints, put all these categories in the same default endpoint + cleos_categories = 
["chain_rw", + "producer_ro", "producer_rw", "trace_api"] + args += list(map( + lambda item: f"--http-category-address {item},{TestHelper.LOCAL_HOST}:{HttpCategoryConfig.default_port}", cleos_categories)) + + return " ".join(["--http-server-address http-category-address"] + args) + + def port(self, category: str): + return self.ports[category] if self.ports else HttpCategoryConfig.default_port + + class PluginHttpTest(unittest.TestCase): sleep_s = 2 base_wallet_cmd_str = f"http://{TestHelper.LOCAL_HOST}:{TestHelper.DEFAULT_WALLET_PORT}" keosd = WalletMgr(True, TestHelper.DEFAULT_PORT, TestHelper.LOCAL_HOST, TestHelper.DEFAULT_WALLET_PORT, TestHelper.LOCAL_HOST) node_id = 1 - nodeos = Node(TestHelper.LOCAL_HOST, TestHelper.DEFAULT_PORT, node_id, walletMgr=keosd) - data_dir = Utils.getNodeDataDir(node_id) - config_dir = Utils.getNodeConfigDir(node_id) + data_dir = Path(Utils.getNodeDataDir(node_id)) + config_dir = Path(Utils.getNodeConfigDir(node_id)) empty_content_dict = {} http_post_invalid_param = '{invalid}' EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" EOSIO_ACCT_PUBLIC_DEFAULT_KEY = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + def endpoint(self, category: str): + return f'http://{self.nodeos.host}:{category_config.port(category)}' + # make a fresh data dir def createDataDir(self): - if os.path.exists(self.data_dir): + if self.data_dir.exists(): shutil.rmtree(self.data_dir) - os.makedirs(self.data_dir) + self.data_dir.mkdir(parents=True) # make a fresh config dir def createConfigDir(self): - if os.path.exists(self.config_dir): + if self.config_dir.exists(): shutil.rmtree(self.config_dir) - os.makedirs(self.config_dir) + self.config_dir.mkdir() + + # kill nodeos. keosd shuts down automatically + def killNodes(self): + self.nodeos.kill(signal.SIGTERM) - # kill nodeos and keosd and clean up dirs + # clean up dirs def cleanEnv(self) : - self.keosd.killall(True) - WalletMgr.cleanup() - Node.killAllNodeos() - if os.path.exists(Utils.DataPath): + if self.data_dir.exists(): shutil.rmtree(Utils.DataPath) - if os.path.exists(self.config_dir): + if self.config_dir.exists(): shutil.rmtree(self.config_dir) time.sleep(self.sleep_s) @@ -58,8 +95,10 @@ def startEnv(self) : nodeos_flags = (" --data-dir=%s --config-dir=%s --trace-dir=%s --trace-no-abis --access-control-allow-origin=%s " "--contracts-console --http-validate-host=%s --verbose-http-errors --max-transaction-time -1 --abi-serializer-max-time-ms 30000 --http-max-response-time-ms 30000 " "--p2p-peer-address localhost:9011 --resource-monitor-not-shutdown-on-threshold-exceeded ") % (self.data_dir, self.config_dir, self.data_dir, "\'*\'", "false") + nodeos_flags += category_config.nodeosArgs() + start_nodeos_cmd = ("%s -e -p eosio %s %s ") % (Utils.EosServerPath, nodeos_plugins, nodeos_flags) - self.nodeos.launchCmd(start_nodeos_cmd, self.node_id) + self.nodeos = Node(TestHelper.LOCAL_HOST, TestHelper.DEFAULT_PORT, self.node_id, self.data_dir, self.config_dir, shlex.split(start_nodeos_cmd), walletMgr=self.keosd) time.sleep(self.sleep_s*2) self.nodeos.waitForBlock(1, timeout=30) @@ -87,14 +126,16 @@ def activateAllBuiltinProtocolFeatures(self): def test_ChainApi(self) : resource = "chain" command = "get_info" + endpoint=self.endpoint("chain_ro") + # get_info without parameter - ret_json = self.nodeos.processUrllibRequest(resource, command) + ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint) self.assertIn("server_version", ret_json["payload"]) # get_info with empty 
content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertIn("server_version", ret_json["payload"])
         # get_info with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # activate the builtin protocol features and get some useful data
@@ -107,40 +148,34 @@ def test_ChainApi(self) :
             allFeatureCodenames.append(s['specification'][0]['value'])
         self.assertEqual(len(allFeatureDigests), len(allFeatureCodenames))
-        # Default limit set in get_activated_protocol_features_params
-        ACT_FEATURE_DEFAULT_LIMIT = 10 if len(allFeatureCodenames) > 10 else len(allFeatureCodenames)
-
         # Actual expected activated features total
         ACT_FEATURE_CURRENT_EXPECTED_TOTAL = len(allFeatureCodenames)
-        # Extemely high value to attempt to always get full list of activated features
-        ACT_FEATURE_EXTREME = 10000
-
         # get_consensus_parameters without parameter
         command = "get_consensus_parameters"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertIn("chain_config", ret_json["payload"])
         self.assertIn("wasm_config", ret_json["payload"])
         # get_consensus_parameters with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertIn("chain_config", ret_json["payload"])
         self.assertIn("wasm_config", ret_json["payload"])
         # get_consensus_parameters with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_activated_protocol_features without parameter
         command = "get_activated_protocol_features"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
-        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_DEFAULT_LIMIT)
+        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_CURRENT_EXPECTED_TOTAL)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
             self.assertTrue(dict_feature['feature_digest'] in allFeatureDigests)
         # get_activated_protocol_features with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
-        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_DEFAULT_LIMIT)
+        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_CURRENT_EXPECTED_TOTAL)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
             self.assertTrue(dict_feature['feature_digest'] in allFeatureDigests)
         for index, _ in enumerate(ret_json["payload"]["activated_protocol_features"]):
@@ -148,45 +183,45 @@ def test_ChainApi(self) :
                 self.assertTrue(ret_json["payload"]["activated_protocol_features"][index - 1]["activation_ordinal"] < ret_json["payload"]["activated_protocol_features"][index]["activation_ordinal"])
         # get_activated_protocol_features with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_activated_protocol_features with 1st param
         payload = {"lower_bound":1}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
-        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_DEFAULT_LIMIT)
+        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_CURRENT_EXPECTED_TOTAL-1) # -1 since lower_bound=1
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
             self.assertTrue(dict_feature['feature_digest'] in allFeatureDigests)
         # get_activated_protocol_features with 2nd param
         payload = {"upper_bound":1000}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
-        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_DEFAULT_LIMIT)
+        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_CURRENT_EXPECTED_TOTAL)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
             self.assertTrue(dict_feature['feature_digest'] in allFeatureDigests)
         # get_activated_protocol_features with 2nd param
         upper_bound_param = 7
         payload = {"upper_bound":upper_bound_param}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
             self.assertTrue(dict_feature['feature_digest'] in allFeatureDigests)
             self.assertTrue(dict_feature['activation_ordinal'] <= upper_bound_param)
         # get_activated_protocol_features with 3rd param
-        payload = {"limit":1}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        payload = {"limit":1} # ignored by nodeos
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
-        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), 1)
+        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_CURRENT_EXPECTED_TOTAL)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
             self.assertTrue(dict_feature['feature_digest'] in allFeatureDigests)
         # get_activated_protocol_features with 3rd param to get expected full list of activated features
-        payload = {"limit":ACT_FEATURE_CURRENT_EXPECTED_TOTAL}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        payload = {"limit":ACT_FEATURE_CURRENT_EXPECTED_TOTAL} # ignored by nodeos
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
         self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_CURRENT_EXPECTED_TOTAL)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
@@ -196,10 +231,9 @@ def test_ChainApi(self) :
         for digest in allFeatureDigests:
             assert digest in str(ret_json["payload"]["activated_protocol_features"]), f"ERROR: Expected active feature \'{feature}\' not found in returned list."
-        # get_activated_protocol_features with 3rd param set extremely high to attempt to catch the
-        # addition of new features and fail and cause this test to be updated.
-        payload = {"limit":ACT_FEATURE_EXTREME}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        # get_activated_protocol_features with 3rd param set extremely high
+        payload = {"limit":999999} # ignored by nodeos
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
         self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_CURRENT_EXPECTED_TOTAL)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
@@ -207,17 +241,17 @@ def test_ChainApi(self) :
         # get_activated_protocol_features with 4th param
         payload = {"search_by_block_num":"true"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
-        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_DEFAULT_LIMIT)
+        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_CURRENT_EXPECTED_TOTAL)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
             self.assertTrue(dict_feature['feature_digest'] in allFeatureDigests)
         # get_activated_protocol_features with 5th param
         payload = {"reverse":"true"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
-        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_DEFAULT_LIMIT)
+        self.assertEqual(len(ret_json["payload"]["activated_protocol_features"]), ACT_FEATURE_CURRENT_EXPECTED_TOTAL)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
             self.assertTrue(dict_feature['feature_digest'] in allFeatureDigests)
         for index, _ in enumerate(ret_json["payload"]["activated_protocol_features"]):
@@ -230,216 +264,216 @@ def test_ChainApi(self) :
                    "limit":10,
                    "search_by_block_num":"true",
                    "reverse":"true"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["activated_protocol_features"]), list)
         for dict_feature in ret_json["payload"]["activated_protocol_features"]:
             self.assertTrue(dict_feature['feature_digest'] in allFeatureDigests)
         # get_block with empty parameter
         command = "get_block"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block with valid parameter
         payload = {"block_num_or_id":1}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["payload"]["block_num"], 1)
         # get_raw_block with empty parameter
         command = "get_raw_block"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block with valid parameter
         payload = {"block_num_or_id":1}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertTrue("action_mroot" in ret_json["payload"])
         # get_block_header with empty parameter
         command = "get_block_header"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block with valid parameters
         payload = {"block_num_or_id":1, "include_extensions": True}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertTrue("id" in ret_json["payload"])
         self.assertTrue("signed_block_header" in ret_json["payload"])
         self.assertTrue("block_extensions" in ret_json["payload"])
         payload = {"block_num_or_id":1, "include_extensions": False}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertTrue("id" in ret_json["payload"])
         self.assertTrue("signed_block_header" in ret_json["payload"])
         self.assertFalse("block_extensions" in ret_json["payload"])
         # get_block_info with empty parameter
         command = "get_block_info"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block_info with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block_info with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_block_info with valid parameter
         payload = {"block_num":1}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["payload"]["block_num"], 1)
         # get_block_header_state with empty parameter
         command = "get_block_header_state"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_block_header_state with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_block_header_state with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_block_header_state with valid parameter, the irreversible is not available, unknown block number
         payload = {"block_num_or_id":1}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3100002)
         # get_account with empty parameter
         command = "get_account"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_account with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_account with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_account with valid parameter
         payload = {"account_name":"default"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_code with empty parameter
         command = "get_code"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_code with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_code with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_code with valid parameter
         payload = {"account_name":"default"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_code_hash with empty parameter
         command = "get_code_hash"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_code_hash with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_code_hash with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_code_hash with valid parameter
         payload = {"account_name":"default"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_abi with empty parameter
         command = "get_abi"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_abi with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_abi with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_abi with valid parameter
         payload = {"account_name":"default"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_raw_code_and_abi with empty parameter
         command = "get_raw_code_and_abi"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_raw_code_and_abi with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_raw_code_and_abi with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_raw_code_and_abi with valid parameter
         payload = {"account_name":"default"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_raw_abi with empty parameter
         command = "get_raw_abi"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_raw_abi with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_raw_abi with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_raw_abi with valid parameter
         payload = {"account_name":"default"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_table_rows with empty parameter
         command = "get_table_rows"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_table_rows with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_table_rows with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_table_rows with valid parameter
@@ -451,20 +485,20 @@ def test_ChainApi(self) :
                   "key_type":"i128",
                   "lower_bound":"0x0000000000000000D0F2A472A8EB6A57",
                   "upper_bound":"0xFFFFFFFFFFFFFFFFD0F2A472A8EB6A57"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_table_by_scope with empty parameter
         command = "get_table_by_scope"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_table_by_scope with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_table_by_scope with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_table_by_scope with valid parameter
@@ -473,104 +507,104 @@ def test_ChainApi(self) :
                   "index_position":2,
                   "lower_bound":"0x0000000000000000D0F2A472A8EB6A57",
                   "upper_bound":"0xFFFFFFFFFFFFFFFFD0F2A472A8EB6A57"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 500)
         # get_currency_balance with empty parameter
         command = "get_currency_balance"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_currency_balance with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_currency_balance with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_currency_balance with valid parameter
         payload = {"code":"eosio.token", "account":"unknown"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_currency_stats with empty parameter
         command = "get_currency_stats"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_currency_stats with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_currency_stats with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_currency_stats with valid parameter
         payload = {"code":"eosio.token","symbol":"SYS"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         # get_producers with empty parameter
         command = "get_producers"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_producers with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_producers with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_producers with valid parameter
         payload = {"json":"true","lower_bound":""}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["rows"]), list)
         # get_producer_schedule with empty parameter
         command = "get_producer_schedule"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["active"]), dict)
         # get_producer_schedule with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["active"]), dict)
         # get_producer_schedule with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_scheduled_transactions with empty parameter
         command = "get_scheduled_transactions"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_scheduled_transactions with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_scheduled_transactions with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_scheduled_transactions with valid parameter
         payload = {"json":"true","lower_bound":""}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(type(ret_json["payload"]["transactions"]), list)
         # get_required_keys with empty parameter
         command = "get_required_keys"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_required_keys with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_required_keys with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_required_keys with valid parameter
@@ -584,20 +618,49 @@ def test_ChainApi(self) :
                   "available_keys":["EOS4toFS3YXEQCkuuw1aqDLrtHim86Gz9u3hBdcBw5KNPZcursVHq",
                                     "EOS7d9A3uLe6As66jzN8j44TXJUqJSK3bFjjEEqR4oTvNAB3iM9SA",
                                     "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"]}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 500)
         # get_transaction_id with empty parameter
         command = "get_transaction_id"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_transaction_id with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_transaction_id with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
+        self.assertEqual(ret_json["code"], 400)
+        self.assertEqual(ret_json["error"]["code"], 3200006)
+        # get_transaction_id with missing actions
+        payload_no_actions = {"expiration":"2020-08-01T07:15:49",
+                              "ref_block_num": 34881,
+                              "ref_block_prefix":2972818865,
+                              "max_net_usage_words":0,
+                              "max_cpu_usage_ms":0,
+                              "delay_sec":0,
+                              "context_free_actions":[],
+                              "transaction_extensions": [],
+                              "signatures": ["SIG_K1_KeqfqiZu1GwUxQb7jzK9Fdks6HFaVBQ9AJtCZZj56eG9qGgvVMVtx8EerBdnzrhFoX437sgwtojf2gfz6S516Ty7c22oEp"],
+                              "context_free_data": []}
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload_no_actions)
+        self.assertEqual(ret_json["code"], 400)
+        self.assertEqual(ret_json["error"]["code"], 3200006)
+        # get_transaction_id with invalid actions
+        payload_invalid_actions = {"expiration":"2020-08-01T07:15:49",
+                                   "ref_block_num": 34881,
+                                   "ref_block_prefix":2972818865,
+                                   "max_net_usage_words":0,
+                                   "max_cpu_usage_ms":0,
+                                   "delay_sec":0,
+                                   "context_free_actions":[],
+                                   "actions": "hello_world",
+                                   "transaction_extensions": [],
+                                   "signatures": ["SIG_K1_KeqfqiZu1GwUxQb7jzK9Fdks6HFaVBQ9AJtCZZj56eG9qGgvVMVtx8EerBdnzrhFoX437sgwtojf2gfz6S516Ty7c22oEp"],
+                                   "context_free_data": []}
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload_invalid_actions)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_transaction_id with valid parameter
@@ -612,38 +675,56 @@ def test_ChainApi(self) :
                   "transaction_extensions": [],
                   "signatures": ["SIG_K1_KeqfqiZu1GwUxQb7jzK9Fdks6HFaVBQ9AJtCZZj56eG9qGgvVMVtx8EerBdnzrhFoX437sgwtojf2gfz6S516Ty7c22oEp"],
                   "context_free_data": []}
-        ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw).decode('ascii')
+        ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii')
         self.assertEqual(ret_str, "\"0be762a6406bab15530e87f21e02d1c58e77944ee55779a76f4112e3b65cac48\"")
+        # transaction that has hex_data
+        payload_hex = {"expiration":"2020-08-01T07:15:49",
+                       "ref_block_num": 34881,
+                       "ref_block_prefix":2972818865,
+                       "max_net_usage_words":0,
+                       "max_cpu_usage_ms":0,
+                       "delay_sec":0,
+                       "context_free_actions":[],
+                       "actions":[{"account":"eosio.token","name": "transfer","authorization": [{"actor": "han","permission": "active"}],
+                                   "data": "{\"entry\":774831,\"miner\":\"eosminer1111\",\"nonce\":139429}\"}",
+                                   "hex_data": "000000000000a6690000000000ea305501000000000000000453595300000000016d"}],
+                       "transaction_extensions": [],
+                       "signatures": ["SIG_K1_KeqfqiZu1GwUxQb7jzK9Fdks6HFaVBQ9AJtCZZj56eG9qGgvVMVtx8EerBdnzrhFoX437sgwtojf2gfz6S516Ty7c22oEp"],
+                       "context_free_data": []}
+        ret_str = self.nodeos.processUrllibRequest(resource, command, payload_hex, returnType=ReturnType.raw).decode('ascii')
+        self.assertEqual(ret_str, "\"0be762a6406bab15530e87f21e02d1c58e77944ee55779a76f4112e3b65cac48\"")
+
         # push_block with empty parameter
+        endpoint=self.endpoint("chain_rw")
         command = "push_block"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # push_block with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
        self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # push_block with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # push_block with valid parameter
         payload = {"block":"signed_block"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(len(ret_json["payload"]), 0)
         # push_transaction with empty parameter
         command = "push_transaction"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # push_transaction with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # push_transaction with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # push_transaction with valid parameter
@@ -651,20 +732,20 @@ def test_ChainApi(self) :
                   "compression": "true",
                   "packed_context_free_data": "context_free_data",
                   "packed_trx": "packed_trx"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 500)
         # push_transactions with empty parameter
         command = "push_transactions"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # push_transactions with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # push_transactions with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # push_transactions with valid parameter
@@ -672,20 +753,20 @@ def test_ChainApi(self) :
                   "compression": "true",
                   "packed_context_free_data": "context_free_data",
                   "packed_trx": "packed_trx"}]
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertIn("transaction_id", ret_json["payload"][0])
         # send_transaction with empty parameter
         command = "send_transaction"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # send_transaction with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # send_transaction with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # send_transaction with valid parameter
@@ -693,130 +774,133 @@ def test_ChainApi(self) :
                   "compression": "true",
                   "packed_context_free_data": "context_free_data",
                   "packed_trx": "packed_trx"}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 500)

     # test all net api
     def test_NetApi(self) :
         resource = "net"
-
+        endpoint=self.endpoint("net_rw")
         # connect with empty parameter
         command = "connect"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # connect with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         payload = "localhost"
-        ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw).decode('ascii')
+        ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii')
         self.assertEqual("\"added connection\"", ret_str)
         # disconnect with empty parameter
         command = "disconnect"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # disconnect with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # disconnect with valid parameter
         payload = "localhost123"
-        ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw).decode('ascii')
+        ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii')
         self.assertEqual("\"no known connection for host\"", ret_str)
         # status with empty parameter
+        endpoint=self.endpoint("net_ro")
         command = "status"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # status with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # status with valid parameter
         payload = "localhost"
-        ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw).decode('ascii')
+        ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii')
         self.assertEqual(ret_str, "null")
         # connections with empty parameter
         command = "connections"
-        ret_str = self.nodeos.processUrllibRequest(resource, command, returnType=ReturnType.raw).decode('ascii')
+        ret_str = self.nodeos.processUrllibRequest(resource, command, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii')
         self.assertIn("\"peer\":\"localhost:9011\"", ret_str)
         # connections with empty content parameter
-        ret_str = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, returnType=ReturnType.raw).decode('ascii')
+        ret_str = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii')
         self.assertIn("\"peer\":\"localhost:9011\"", ret_str)
         # connections with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)

     # test all producer api
     def test_ProducerApi(self) :
         resource = "producer"
-
+        endpoint=self.endpoint("producer_rw")
         # pause with empty parameter
         command = "pause"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["payload"]["result"], "ok")
         # pause with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["payload"]["result"], "ok")
         # pause with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # resume with empty parameter
         command = "resume"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["payload"]["result"], "ok")
         # resume with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["payload"]["result"], "ok")
         # resume with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
+        endpoint=self.endpoint("producer_ro")
         # paused with empty parameter
         command = "paused"
-        ret_str = self.nodeos.processUrllibRequest(resource, command, returnType=ReturnType.raw).decode('ascii')
+        ret_str = self.nodeos.processUrllibRequest(resource, command, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii')
         self.assertEqual(ret_str, "false")
         # paused with empty content parameter
-        ret_str = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, returnType=ReturnType.raw).decode('ascii')
+        ret_str = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii')
         self.assertEqual(ret_str, "false")
         # paused with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_runtime_options with empty parameter
         command = "get_runtime_options"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertIn("max_transaction_time", ret_json["payload"])
         # get_runtime_options with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertIn("max_transaction_time", ret_json["payload"])
         # get_runtime_options with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # update_runtime_options with empty parameter
+        endpoint=self.endpoint("producer_rw")
         command = "update_runtime_options"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # update_runtime_options with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # update_runtime_options with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # update_runtime_options with valid parameter
@@ -827,60 +911,61 @@ def test_ProducerApi(self) :
                   "subjective_cpu_leeway_us":0,
                   "incoming_defer_ratio":1.0,
                   "greylist_limit":100}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertIn(ret_json["payload"]["result"], "ok")
         # add_greylist_accounts with empty parameter
         command = "add_greylist_accounts"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # add_greylist_accounts with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # add_greylist_accounts with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # add_greylist_accounts with valid parameter
         payload = {"accounts":["test1", "test2"]}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertIn(ret_json["payload"]["result"], "ok")
         # remove_greylist_accounts with empty parameter
         command = "remove_greylist_accounts"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # remove_greylist_accounts with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # remove_greylist_accounts with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # remove_greylist_accounts with valid parameter
         payload = {"accounts":["test1", "test2"]}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertIn(ret_json["payload"]["result"], "ok")
         # get_greylist with empty parameter
+        endpoint=self.endpoint("producer_ro")
         command = "get_greylist"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertIn("accounts", ret_json["payload"])
         # get_greylist with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertIn("accounts", ret_json["payload"])
         # get_greylist with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # get_whitelist_blacklist with empty parameter
         command = "get_whitelist_blacklist"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertIn("actor_whitelist", ret_json["payload"])
         self.assertIn("actor_blacklist", ret_json["payload"])
         self.assertIn("contract_whitelist", ret_json["payload"])
@@ -888,7 +973,7 @@ def test_ProducerApi(self) :
         self.assertIn("action_blacklist", ret_json["payload"])
         self.assertIn("key_blacklist", ret_json["payload"])
         # get_whitelist_blacklist with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertIn("actor_whitelist", ret_json["payload"])
         self.assertIn("actor_blacklist", ret_json["payload"])
         self.assertIn("contract_whitelist", ret_json["payload"])
@@ -896,21 +981,22 @@ def test_ProducerApi(self) :
         self.assertIn("action_blacklist", ret_json["payload"])
         self.assertIn("key_blacklist", ret_json["payload"])
         # get_whitelist_blacklist with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # set_whitelist_blacklist with empty parameter
+        endpoint=self.endpoint("producer_rw")
         command = "set_whitelist_blacklist"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # set_whitelist_blacklist with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # set_whitelist_blacklist with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # set_whitelist_blacklist with valid parameter
@@ -920,122 +1006,127 @@ def test_ProducerApi(self) :
                   "contract_blacklist":["test5"],
                   "action_blacklist":[],
                   "key_blacklist":[]}
-        ret_json = self.nodeos.processUrllibRequest(resource, command, payload)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint)
         self.assertIn(ret_json["payload"]["result"], "ok")
         # get_integrity_hash with empty parameter
+        endpoint=self.endpoint("producer_rw")
         command = "get_integrity_hash"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertIn("head_block_id", ret_json["payload"])
         self.assertIn("integrity_hash", ret_json["payload"])
         # get_integrity_hash with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertIn("head_block_id", ret_json["payload"])
         self.assertIn("integrity_hash", ret_json["payload"])
         # get_integrity_hash with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # create_snapshot with empty parameter
+        endpoint=self.endpoint("snapshot")
         command = "create_snapshot"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertIn("head_block_id", ret_json["payload"])
         self.assertIn("snapshot_name", ret_json["payload"])
         # get_scheduled_protocol_feature_activations with empty parameter
+        endpoint=self.endpoint("producer_ro")
         command = "get_scheduled_protocol_feature_activations"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertIn("protocol_features_to_activate", ret_json["payload"])
         # get_scheduled_protocol_feature_activations with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertIn("protocol_features_to_activate", ret_json["payload"])
         # get_scheduled_protocol_feature_activations with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # schedule_protocol_feature_activations with empty parameter
+        endpoint=self.endpoint("producer_rw")
         command = "schedule_protocol_feature_activations"
-        ret_json = self.nodeos.processUrllibRequest(resource, command)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # schedule_protocol_feature_activations with empty content parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
         self.assertEqual(ret_json["error"]["code"], 3200006)
         # schedule_protocol_feature_activations with invalid parameter
-        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param)
+        ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint)
         self.assertEqual(ret_json["code"], 400)
self.assertEqual(ret_json["error"]["code"], 3200006) # schedule_protocol_feature_activations with valid parameter payload = {"protocol_features_to_activate":[]} - ret_json = self.nodeos.processUrllibRequest(resource, command, payload) + ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint) self.assertIn(ret_json["payload"]["result"], "ok") # get_supported_protocol_features with empty parameter + endpoint=self.endpoint("producer_ro") command = "get_supported_protocol_features" - ret_json = self.nodeos.processUrllibRequest(resource, command) + ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint) self.assertIn("feature_digest", ret_json["payload"][0]) self.assertIn("subjective_restrictions", ret_json["payload"][0]) # get_supported_protocol_features with empty content parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint) self.assertIn("feature_digest", ret_json["payload"][0]) self.assertIn("subjective_restrictions", ret_json["payload"][0]) # get_supported_protocol_features with invalid parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) self.assertEqual(ret_json["error"]["code"], 3200006) # get_supported_protocol_features with 1st parameter payload = {"exclude_disabled":"true"} - ret_json = self.nodeos.processUrllibRequest(resource, command, payload) + ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint) self.assertIn("feature_digest", ret_json["payload"][0]) self.assertIn("subjective_restrictions", ret_json["payload"][0]) # get_supported_protocol_features with 2nd parameter payload = {"exclude_unactivatable":"true"} - ret_json = self.nodeos.processUrllibRequest(resource, command, payload) + ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint) self.assertIn("feature_digest", ret_json["payload"][0]) self.assertIn("subjective_restrictions", ret_json["payload"][0]) # get_supported_protocol_features with valid parameter payload = {"exclude_disabled":"true", "exclude_unactivatable":"true"} - ret_json = self.nodeos.processUrllibRequest(resource, command, payload) + ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint) self.assertIn("feature_digest", ret_json["payload"][0]) self.assertIn("subjective_restrictions", ret_json["payload"][0]) # get_account_ram_corrections with empty parameter command = "get_account_ram_corrections" - ret_json = self.nodeos.processUrllibRequest(resource, command) + ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) self.assertEqual(ret_json["error"]["code"], 3200006) # get_account_ram_corrections with empty content parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) self.assertEqual(ret_json["error"]["code"], 3200006) # get_account_ram_corrections with invalid parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param) + ret_json = 
self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) self.assertEqual(ret_json["error"]["code"], 3200006) # get_account_ram_corrections with valid parameter payload = {"lower_bound":"", "upper_bound":"", "limit":1, "reverse":"false"} - ret_json = self.nodeos.processUrllibRequest(resource, command, payload) + ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint) self.assertIn("rows", ret_json["payload"]) # get_unapplied_transactions with empty parameter command = "get_unapplied_transactions" - ret_json = self.nodeos.processUrllibRequest(resource, command) + ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint) self.assertIn("size", ret_json["payload"]) self.assertIn("incoming_size", ret_json["payload"]) # get_unapplied_transactions with empty content parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint) self.assertIn("size", ret_json["payload"]) self.assertIn("incoming_size", ret_json["payload"]) # get_unapplied_transactions with invalid parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) self.assertEqual(ret_json["error"]["code"], 3200006) # get_unapplied_transactions with valid parameter payload = {"lower_bound":"", "limit":1, "time_limit_ms":500} - ret_json = self.nodeos.processUrllibRequest(resource, command, payload) + ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint) self.assertIn("trxs", ret_json["payload"]) # test all wallet api @@ -1276,72 +1367,72 @@ def test_WalletApi(self) : # test all test control api def test_TestControlApi(self) : resource = "test_control" - + endpoint = self.endpoint("test_control") # kill_node_on_producer with empty parameter command = "kill_node_on_producer" - ret_json = self.nodeos.processUrllibRequest(resource, command) + ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) self.assertEqual(ret_json["error"]["code"], 3200006) # kill_node_on_producer with empty content parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) self.assertEqual(ret_json["error"]["code"], 3200006) # kill_node_on_producer with invalid parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) self.assertEqual(ret_json["error"]["code"], 3200006) # kill_node_on_producer with valid parameter payload = {"name":"auser", "where_in_sequence":12, "based_on_lib":"true"} - ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw).decode('ascii') + ret_str = self.nodeos.processUrllibRequest(resource, command, payload, returnType=ReturnType.raw, endpoint=endpoint).decode('ascii') self.assertIn("{}", ret_str) # test all trace api def test_TraceApi(self) 
: resource = "trace_api" - + endpoint = self.endpoint(resource) # get_block with empty parameter command = "get_block" - ret_json = self.nodeos.processUrllibRequest(resource, command) + ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) # get_block with empty content parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) # get_block with invalid parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) # get_block with valid parameter payload = {"block_num":1} - ret_json = self.nodeos.processUrllibRequest(resource, command, payload) + ret_json = self.nodeos.processUrllibRequest(resource, command, payload, endpoint=endpoint) self.assertEqual(ret_json["code"], 404) self.assertEqual(ret_json["error"]["code"], 0) # test all db_size api def test_DbSizeApi(self) : resource = "db_size" - + endpoint = self.endpoint(resource) # get with empty parameter command = "get" - ret_json = self.nodeos.processUrllibRequest(resource, command) + ret_json = self.nodeos.processUrllibRequest(resource, command, endpoint=endpoint) self.assertIn("free_bytes", ret_json["payload"]) self.assertIn("used_bytes", ret_json["payload"]) self.assertIn("size", ret_json["payload"]) self.assertIn("indices", ret_json["payload"]) # get with empty content parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.empty_content_dict, endpoint=endpoint) self.assertIn("free_bytes", ret_json["payload"]) self.assertIn("used_bytes", ret_json["payload"]) self.assertIn("size", ret_json["payload"]) self.assertIn("indices", ret_json["payload"]) # get with invalid parameter - ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param) + ret_json = self.nodeos.processUrllibRequest(resource, command, self.http_post_invalid_param, endpoint=endpoint) self.assertEqual(ret_json["code"], 400) # test prometheus api def test_prometheusApi(self) : resource = "prometheus" command = "metrics" - endpointPrometheus = f'http://{self.nodeos.host}:9101' - ret_text = self.nodeos.processUrllibRequest(resource, command, returnType = ReturnType.raw, method="GET", endpoint=endpointPrometheus).decode() + endpoint = self.endpoint(resource) + ret_text = self.nodeos.processUrllibRequest(resource, command, returnType = ReturnType.raw, method="GET", endpoint=endpoint).decode() # filter out all empty lines or lines starting with '#' data_lines = filter(lambda line: len(line) > 0 and line[0]!='#', ret_text.split('\n')) # converting each line into a key value pair and then constructing a dictionary out of all the pairs @@ -1351,13 +1442,13 @@ def test_prometheusApi(self) : self.assertTrue(int(metrics["blocks_produced"]) > 1) self.assertTrue(int(metrics["last_irreversible"]) > 1) - ret = self.nodeos.processUrllibRequest(resource, "m", returnType = ReturnType.raw, method="GET", silentErrors= True, endpoint=endpointPrometheus) + ret = self.nodeos.processUrllibRequest(resource, "m", returnType = ReturnType.raw, method="GET", silentErrors= True, endpoint=endpoint) self.assertTrue(ret == 404)
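The dictionary construction referenced by that comment falls inside the elided hunk, but the input is the standard Prometheus text exposition format (one `name value` pair per non-comment line). A minimal stand-alone sketch of the parse, where `parse_prometheus_metrics` is a hypothetical helper name (the test builds the dict inline):

```python
def parse_prometheus_metrics(text: str) -> dict:
    """Parse Prometheus text exposition output into {metric_name: value_string}.

    Mirrors the filter in the test above: drop empty lines and '#' comment
    lines, then split each remaining "name value" line into a key/value pair.
    """
    data_lines = (line for line in text.split('\n') if len(line) > 0 and line[0] != '#')
    return dict(line.split(maxsplit=1) for line in data_lines)

# Example: parse_prometheus_metrics("blocks_produced 12\nlast_irreversible 5")
# returns {"blocks_produced": "12", "last_irreversible": "5"}, so assertions
# like int(metrics["blocks_produced"]) > 1 work directly on the result.
```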
def test_multipleRequests(self): """Test keep-alive ability of HTTP plugin. Handle multiple requests in a single session""" host = self.nodeos.host - port = self.nodeos.port + port = category_config.port("chain_ro") addr = (host, port) body1 = '{ "block_num_or_id": "1" }\r\n' body2 = '{ "block_num_or_id": "2" }\r\n' @@ -1452,7 +1543,24 @@ def setUpClass(self): @classmethod def tearDownClass(self): - self.cleanEnv(self) + global keepLogs + self.killNodes(self) + if unittest.TestResult().wasSuccessful() and not keepLogs: + self.cleanEnv(self) + if __name__ == "__main__": + test_category = os.environ.get("PLUGIN_HTTP_TEST_CATEGORY") == "ON" + category_config = HttpCategoryConfig(test_category) + + parser = argparse.ArgumentParser() + parser.add_argument('--keep-logs', action='store_true') + parser.add_argument('unittest_args', nargs=argparse.REMAINDER) + + args = parser.parse_args() + global keepLogs + keepLogs = args.keep_logs + + # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone) + sys.argv[1:] = args.unittest_args unittest.main() diff --git a/tests/prod_preactivation_test.py b/tests/prod_preactivation_test.py index 3a968132e2..46b4376b49 100755 --- a/tests/prod_preactivation_test.py +++ b/tests/prod_preactivation_test.py @@ -18,7 +18,7 @@ cmdError=Utils.cmdError args = TestHelper.parse_args({"--host","--port","--defproducera_prvt_key","--defproducerb_prvt_key" - ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run" + ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios" ,"--sanity-test","--wallet-port","--unshared"}) server=args.host port=args.port @@ -26,22 +26,17 @@ defproduceraPrvtKey=args.defproducera_prvt_key defproducerbPrvtKey=args.defproducerb_prvt_key dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs dontLaunch=args.dont_launch -dontKill=args.leave_running prodCount=2 onlyBios=args.only_bios -killAll=args.clean_run sanityTest=args.sanity_test walletPort=args.wallet_port Utils.Debug=debug localTest=True -cluster=Cluster(host=server, port=port, walletd=True, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey, unshared=args.unshared) +cluster=Cluster(host=server, port=port, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey, unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill dontBootstrap=sanityTest WalletdName=Utils.EosWalletName @@ -54,8 +49,6 @@ Print("PORT: %d" % (port)) if localTest and not dontLaunch: - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") if cluster.launch(pnodes=prodCount, totalNodes=prodCount, prodCount=1, onlyBios=onlyBios, dontBootstrap=dontBootstrap, @@ -168,7 +161,7 @@ testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/read_only_trx_test.py b/tests/read_only_trx_test.py index 9f5bcda898..752f9775b7 100755 --- a/tests/read_only_trx_test.py +++ b/tests/read_only_trx_test.py @@ -22,12 +22,12 @@ appArgs=AppArgs() appArgs.add(flag="--read-only-threads", type=int, help="number of
read-only threads", default=0) appArgs.add(flag="--num-test-runs", type=int, help="number of times to run the tests", default=1) -appArgs.add_bool(flag="--eos-vm-oc-enable", help="enable eos-vm-oc") +appArgs.add(flag="--eos-vm-oc-enable", type=str, help="specify eos-vm-oc-enable option", default="auto") appArgs.add(flag="--wasm-runtime", type=str, help="if set to eos-vm-oc, must compile with EOSIO_EOS_VM_OC_DEVELOPER", default="eos-vm-jit") args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed" ,"--dump-error-details","-v","--leave-running" - ,"--clean-run","--keep-logs","--unshared"}, applicationSpecificArgs=appArgs) + ,"--keep-logs","--unshared"}, applicationSpecificArgs=appArgs) pnodes=args.p topo=args.s @@ -41,23 +41,18 @@ nodesFile=args.nodes_file dontLaunch=nodesFile is not None seed=args.seed -dontKill=args.leave_running dumpErrorDetails=args.dump_error_details -killAll=args.clean_run -keepLogs=args.keep_logs numTestRuns=args.num_test_runs -killWallet=not dontKill -killEosInstances=not dontKill -if nodesFile is not None: - killEosInstances=False - Utils.Debug=debug testSuccessful=False errorInThread=False +noOC = args.eos_vm_oc_enable == "none" +allOC = args.eos_vm_oc_enable == "all" random.seed(seed) # Use a fixed seed for repeatability. -cluster=Cluster(walletd=True,unshared=args.unshared) +# all debuglevel so that "executing ${h} with eos vm oc" is logged +cluster=Cluster(loggingLevel="all", unshared=args.unshared, keepRunning=True if nodesFile is not None else args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" @@ -69,6 +64,15 @@ userAccountName = "user" payloadlessAccountName = "payloadless" +def getCodeHash(node, account): + # Example get code result: code hash: 67d0598c72e2521a1d588161dad20bbe9f8547beb5ce6d14f3abd550ab27d3dc + cmd = f"get code {account}" + codeHash = node.processCleosCmd(cmd, cmd, silentErrors=False, returnType=ReturnType.raw) + if codeHash is None: errorExit(f"Unable to get code {account} from node {node.nodeId}") + else: codeHash = codeHash.split(' ')[2].strip() + if Utils.Debug: Utils.Print(f"{account} code hash: {codeHash}") + return codeHash + def startCluster(): global total_nodes global producerNode @@ -85,28 +89,28 @@ def startCluster(): errorExit("Failed to initilize nodes from Json string.") total_nodes=len(cluster.getNodes()) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() print("Stand up walletd") if walletMgr.launch() is False: errorExit("Failed to stand up keosd.") - else: - cluster.killall(allInstances=killAll) - cluster.cleanup() Print ("producing nodes: %d, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") # set up read-only options for API node specificExtraNodeosArgs={} # producer nodes will be mapped to 0 through pnodes-1, so the number pnodes is the no-producing API node specificExtraNodeosArgs[pnodes]=" --plugin eosio::net_api_plugin" + specificExtraNodeosArgs[pnodes]+=" --read-only-write-window-time-us " + specificExtraNodeosArgs[pnodes]+=" 10000 " + specificExtraNodeosArgs[pnodes]+=" --read-only-read-window-time-us " + specificExtraNodeosArgs[pnodes]+=" 490000 " + specificExtraNodeosArgs[pnodes]+=" --eos-vm-oc-cache-size-mb " + specificExtraNodeosArgs[pnodes]+=" 1 " # set small so there is churn specificExtraNodeosArgs[pnodes]+=" 
--read-only-threads " specificExtraNodeosArgs[pnodes]+=str(args.read_only_threads) if args.eos_vm_oc_enable: - specificExtraNodeosArgs[pnodes]+=" --eos-vm-oc-enable" + specificExtraNodeosArgs[pnodes]+=" --eos-vm-oc-enable " + specificExtraNodeosArgs[pnodes]+=args.eos_vm_oc_enable if args.wasm_runtime: specificExtraNodeosArgs[pnodes]+=" --wasm-runtime " specificExtraNodeosArgs[pnodes]+=args.wasm_runtime @@ -122,6 +126,12 @@ def startCluster(): producerNode = cluster.getNode() apiNode = cluster.nodes[-1] + eosioCodeHash = getCodeHash(producerNode, "eosio.token") + # eosio.* should be using oc unless oc tierup disabled + Utils.Print(f"search: executing {eosioCodeHash} with eos vm oc") + found = producerNode.findInLog(f"executing {eosioCodeHash} with eos vm oc") + assert( found or (noOC and not found) ) + def deployTestContracts(): Utils.Print("create test accounts") testAccount = Account(testAccountName) @@ -258,6 +268,10 @@ def basicTests(): assert(results[0]) apiNode.waitForTransactionInBlock(results[1]['transaction_id']) + testAccountCodeHash = getCodeHash(producerNode, testAccountName) + found = producerNode.findInLog(f"executing {testAccountCodeHash} with eos vm oc") + assert( (allOC and found) or not found ) + # verify the return value (age) from read-only is the same as created. Print("Send a read-only Get transaction to verify previous Insert") results = sendTransaction(testAccountName, 'getage', {"user": userAccountName}, opts='--read') @@ -292,7 +306,7 @@ def chainApiTests(): runReadOnlyTrxAndRpcInParallel("chain", "get_currency_balance", code=200, payload = {"code":"eosio.token", "account":testAccountName}) runReadOnlyTrxAndRpcInParallel("chain", "get_currency_stats", fieldIn="SYS", payload = {"code":"eosio.token", "symbol":"SYS"}) runReadOnlyTrxAndRpcInParallel("chain", "get_required_keys", code=400) - runReadOnlyTrxAndRpcInParallel("chain", "get_transaction_id", code=200, payload = {"ref_block_num":"1"}) + runReadOnlyTrxAndRpcInParallel("chain", "get_transaction_id", code=400, payload = {"ref_block_num":"1"}) runReadOnlyTrxAndRpcInParallel("chain", "push_block", code=202, payload = {"block":"signed_block"}) runReadOnlyTrxAndRpcInParallel("chain", "get_producer_schedule", "active") runReadOnlyTrxAndRpcInParallel("chain", "get_scheduled_transactions", "transactions", payload = {"json":"true","lower_bound":""}) @@ -331,7 +345,7 @@ def runEverythingParallel(): testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) errorCode = 0 if testSuccessful else 1 -exit(errorCode) +exit(errorCode) \ No newline at end of file diff --git a/tests/resource_monitor_plugin_test.py b/tests/resource_monitor_plugin_test.py index e50db5fac3..b946247a08 100755 --- a/tests/resource_monitor_plugin_test.py +++ b/tests/resource_monitor_plugin_test.py @@ -192,7 +192,7 @@ def testAll(): ["Space usage warning"], 6) -args = TestHelper.parse_args({"--keep-logs","--dump-error-details","-v","--leave-running","--clean-run"}) +args = TestHelper.parse_args({"--keep-logs","--dump-error-details","-v","--leave-running","--unshared"}) debug=args.v pnodes=1 topo="mesh" @@ -202,36 +202,21 @@ def testAll(): killCount=1 killSignal=Utils.SigKillTag -killEosInstances= not args.leave_running dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -killAll=args.clean_run seed=1 Utils.Debug=debug testSuccessful=False -cluster=Cluster(walletd=True) - 
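The read_only_trx_test.py changes above turn `--eos-vm-oc-enable` into a tri-state string option and verify OC tierup by grepping the nodeos log for the `executing <code hash> with eos vm oc` line. A condensed sketch of that expectation as a strict decision table; the diff's own asserts are slightly weaker, and `expect_oc_execution`/`check_tierup` are hypothetical helpers, not part of the harness:

```python
# Strict restatement of the tierup checks in read_only_trx_test.py above.
# --eos-vm-oc-enable: "none" disables OC, "all" tiers up every contract,
# "auto" (the default) tiers up only eosio.* system contracts.
def expect_oc_execution(mode: str, is_system_contract: bool) -> bool:
    if mode == "none":
        return False
    if mode == "all":
        return True
    return is_system_contract  # "auto"

def check_tierup(node, code_hash, mode, is_system_contract):
    # node.findInLog and the log line format come straight from the diff;
    # the surrounding assertion logic is illustrative.
    found = node.findInLog(f"executing {code_hash} with eos vm oc")
    assert found == expect_oc_execution(mode, is_system_contract), \
        f"unexpected OC tierup for {code_hash} with --eos-vm-oc-enable={mode}"
```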
try: TestHelper.printSystemInfo("BEGIN") - cluster.setChainStrategy(chainSyncStrategyStr) - - cluster.killall(allInstances=killAll) - cluster.cleanup() - - if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo,delay=delay, dontBootstrap=True) is False: - errorExit("Failed to stand up eos cluster.") - cluster.killall(allInstances=killAll) - testAll() testSuccessful=True finally: if debug: Print("Cleanup in finally block.") cleanDirectories() - TestHelper.shutdown(cluster, None, testSuccessful, killEosInstances, False, keepLogs, killAll, dumpErrorDetails) exitCode = 0 if testSuccessful else 1 if debug: Print("Exiting test, exit value %d." % (exitCode)) diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index 1e891cab95..8af30bac1d 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -18,7 +18,7 @@ errorExit=Utils.errorExit args=TestHelper.parse_args({"-p","-d","-s","-c","--kill-sig","--kill-count","--keep-logs" - ,"--dump-error-details","-v","--leave-running","--clean-run","--unshared"}) + ,"--dump-error-details","-v","--leave-running","--unshared"}) pnodes=args.p topo=args.s delay=args.d @@ -27,17 +27,14 @@ total_nodes = pnodes killCount=args.kill_count if args.kill_count > 0 else 1 killSignal=args.kill_sig -killEosInstances= not args.leave_running dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -killAll=args.clean_run seed=1 Utils.Debug=debug testSuccessful=False random.seed(seed) # Use a fixed seed for repeatability. -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) try: @@ -47,11 +44,6 @@ cluster.setChainStrategy(chainSyncStrategyStr) cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - Print ("producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s" % ( pnodes, topo, delay, chainSyncStrategyStr)) @@ -103,7 +95,7 @@ errorExit("Cluster sync wait failed.") Print ("Relaunch dead cluster nodes instances.") - if cluster.relaunchEosInstances(cachePopen=True) is False: + if cluster.relaunchEosInstances() is False: errorExit("Failed to relaunch Eos instances") Print("nodeos instances relaunched.") @@ -120,7 +112,7 @@ if not cluster.waitOnClusterSync(): errorExit("Cluster sync wait failed.") - if killEosInstances: + if not args.leave_running: atLeastOne=False for node in cluster.getNodes(): if node.popenProc is not None: @@ -130,7 +122,7 @@ testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killEosInstances, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/ship_streamer.cpp b/tests/ship_streamer.cpp index 296ff9af34..94a3c40fc9 100644 --- a/tests/ship_streamer.cpp +++ b/tests/ship_streamer.cpp @@ -10,6 +10,8 @@ #include #include +#include +#include #include #include @@ -111,8 +113,9 @@ int main(int argc, char* argv[]) { stream.write(boost::asio::buffer(request_type.json_to_bin(request_sb.GetString(), [](){}))); stream.read_message_max(0); - // block_num, block_id - std::map block_ids; + // Each 
block_num can have multiple block_ids since forks are possible + // block_num, block_id + std::map<uint32_t, std::set<std::string>> block_ids; bool is_first = true; for(;;) { boost::beast::flat_buffer buffer; @@ -134,28 +137,49 @@ int main(int argc, char* argv[]) { eosio::check(result_document[1]["head"].HasMember("block_id"), "'head' does not contain 'block_id'"); eosio::check(result_document[1]["head"]["block_id"].IsString(), "'head.block_id' isn't a string"); + // stream what was received + if(is_first) { + std::cout << "[" << std::endl; + is_first = false; + } else { + std::cout << "," << std::endl; + } + std::cout << "{ \"get_blocks_result_v0\":" << std::endl; + + rapidjson::StringBuffer result_sb; + rapidjson::PrettyWriter<rapidjson::StringBuffer> result_writer(result_sb); + result_document[1].Accept(result_writer); + std::cout << result_sb.GetString() << std::endl << "}" << std::endl; + + // validate after streaming, so that invalid entry is included in the output uint32_t this_block_num = 0; if( result_document[1].HasMember("this_block") && result_document[1]["this_block"].IsObject() ) { - if( result_document[1]["this_block"].HasMember("block_num") && result_document[1]["this_block"]["block_num"].IsUint() ) { - this_block_num = result_document[1]["this_block"]["block_num"].GetUint(); + const auto& this_block = result_document[1]["this_block"]; + if( this_block.HasMember("block_num") && this_block["block_num"].IsUint() ) { + this_block_num = this_block["block_num"].GetUint(); } std::string this_block_id; - if( result_document[1]["this_block"].HasMember("block_id") && result_document[1]["this_block"]["block_id"].IsString() ) { - this_block_id = result_document[1]["this_block"]["block_id"].GetString(); + if( this_block.HasMember("block_id") && this_block["block_id"].IsString() ) { + this_block_id = this_block["block_id"].GetString(); } std::string prev_block_id; - if( result_document[1]["prev_block"].HasMember("block_id") && result_document[1]["prev_block"]["block_id"].IsString() ) { - prev_block_id = result_document[1]["prev_block"]["block_id"].GetString(); + if( result_document[1].HasMember("prev_block") && result_document[1]["prev_block"].IsObject() ) { + const auto& prev_block = result_document[1]["prev_block"]; + if ( prev_block.HasMember("block_id") && prev_block["block_id"].IsString() ) { + prev_block_id = prev_block["block_id"].GetString(); + } } if( !irreversible_only && !this_block_id.empty() && !prev_block_id.empty() ) { // verify forks were sent if (block_ids.count(this_block_num-1)) { - if (block_ids[this_block_num-1] != prev_block_id) { - std::cerr << "Received block: << " << this_block_num << " that does not link to previous: " << block_ids[this_block_num-1] << std::endl; + if (block_ids[this_block_num-1].count(prev_block_id) == 0) { + std::cerr << "Received block: " << this_block_num << " that does not link to previous: "; + std::copy(block_ids[this_block_num-1].begin(), block_ids[this_block_num-1].end(), std::ostream_iterator<std::string>(std::cerr, " ")); + std::cerr << std::endl; + return 1; } } - block_ids[this_block_num] = this_block_id; + block_ids[this_block_num].insert(this_block_id); if( result_document[1]["last_irreversible"].HasMember("block_num") && result_document[1]["last_irreversible"]["block_num"].IsUint() ) { uint32_t lib_num = result_document[1]["last_irreversible"]["block_num"].GetUint(); @@ -168,19 +192,6 @@ int main(int argc, char* argv[]) { } - if(is_first) { - std::cout << "[" << std::endl; - is_first = false; - } else { - std::cout << "," << std::endl; - } - std::cout << "{ \"get_blocks_result_v0\":" <<
std::endl; - - rapidjson::StringBuffer result_sb; - rapidjson::PrettyWriter result_writer(result_sb); - result_document[1].Accept(result_writer); - std::cout << result_sb.GetString() << std::endl << "}" << std::endl; - if( this_block_num == end_block_num ) break; } diff --git a/tests/ship_streamer_test.py b/tests/ship_streamer_test.py index 7d3816dfd3..580a010966 100755 --- a/tests/ship_streamer_test.py +++ b/tests/ship_streamer_test.py @@ -7,7 +7,7 @@ import signal import sys -from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL +from TestHarness import Cluster, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys from TestHarness.TestHelper import AppArgs ############################################################### @@ -31,14 +31,11 @@ appArgs = AppArgs() extraArgs = appArgs.add(flag="--num-clients", type=int, help="How many ship_streamers should be started", default=1) -args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--unshared"}, applicationSpecificArgs=appArgs) +args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--unshared"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -killAll=args.clean_run walletPort=TestHelper.DEFAULT_WALLET_PORT totalProducerNodes=2 @@ -49,21 +46,23 @@ walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName shipTempDir=None +def getLatestSnapshot(nodeId): + snapshotDir = os.path.join(Utils.getNodeDataDir(nodeId), "snapshots") + snapshotDirContents = os.listdir(snapshotDir) + assert len(snapshotDirContents) > 0 + snapshotDirContents.sort() + return os.path.join(snapshotDir, snapshotDirContents[-1]) + try: TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") - # *** setup topogrophy *** # "bridge" shape connects defprocera through defproducerc (3 in node0) to each other and defproduceru (1 in node1) @@ -71,7 +70,7 @@ shipNodeNum = 1 specificExtraNodeosArgs={} - specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts --trace-history --chain-state-history --plugin eosio::net_api_plugin " + specificExtraNodeosArgs[shipNodeNum]="--plugin eosio::state_history_plugin --disable-replay-opts --trace-history --chain-state-history --plugin eosio::net_api_plugin --plugin eosio::producer_api_plugin " # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin " @@ -94,7 +93,7 @@ shipNode = cluster.getNode(shipNodeNum) - accounts=cluster.createAccountKeys(6) + accounts=createAccountKeys(6) if accounts is None: Utils.errorExit("FAILURE - create keys") @@ -123,13 +122,18 @@ trans=node.regproducer(cluster.defProducerAccounts[prod], "http://mysite.com", 0, waitForTransBlock=False, exitOnError=True) # create accounts via eosio as otherwise a bid is needed + transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) for account in accounts: Print(f"Create new account {account.name} via {cluster.eosioAccount.name} with 
private key: {account.activePrivateKey}") - trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=True, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True) - transferAmount="100000000.0000 {0}".format(CORE_SYMBOL) + trans=nonProdNode.createInitializeAccount(account, cluster.eosioAccount, stakedDeposit=0, waitForTransBlock=False, stakeNet=10000, stakeCPU=10000, buyRAM=10000000, exitOnError=True) + nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + for account in accounts: Print(f"Transfer funds {transferAmount} from account {cluster.eosioAccount.name} to {account.name}") - nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=True) + trans=nonProdNode.transferFunds(cluster.eosioAccount, account, transferAmount, "test transfer", waitForTransBlock=False) + nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) + for account in accounts: trans=nonProdNode.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=False, exitOnError=True) + nonProdNode.waitForTransBlockIfNeeded(trans, True, exitOnError=True) # *** vote using accounts *** @@ -150,6 +154,19 @@ cluster.waitOnClusterSync(blockAdvancing=3) Print("Shutdown unneeded bios node") cluster.biosNode.kill(signal.SIGTERM) + + Print("Configure and launch txn generators") + targetTpsPerGenerator = 10 + testTrxGenDurationSec=60*60 + numTrxGenerators=2 + cluster.launchTrxGenerators(contractOwnerAcctName=cluster.eosioAccount.name, acctNamesList=[accounts[0].name, accounts[1].name], + acctPrivKeysList=[accounts[0].activePrivateKey,accounts[1].activePrivateKey], nodeId=prodNode1.nodeId, + tpsPerGenerator=targetTpsPerGenerator, numGenerators=numTrxGenerators, durationSec=testTrxGenDurationSec, + waitToComplete=False) + + status = cluster.waitForTrxGeneratorsSpinup(nodeId=prodNode1.nodeId, numGenerators=numTrxGenerators) + assert status is not None and status is not False, "ERROR: Failed to spinup Transaction Generators" + prodNode0.waitForProducer("defproducerc") block_range = 350 @@ -177,10 +194,10 @@ Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") # Generate a fork - forkAtProducer="defproducera" prodNode1Prod="defproduceru" preKillBlockNum=nonProdNode.getBlockNum() preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum) + forkAtProducer="defproducer" + chr(ord(preKillBlockProducer[-1])+2) nonProdNode.killNodeOnProducer(producer=forkAtProducer, whereInSequence=1) Print(f"Current block producer {preKillBlockProducer} fork will be at producer {forkAtProducer}") prodNode0.waitForProducer(forkAtProducer) @@ -192,7 +209,8 @@ Utils.errorExit("Bridge did not shutdown"); Print("Fork started") - prodNode0.waitForProducer("defproducerb") # wait for fork to progress a bit + forkProgress="defproducer" + chr(ord(forkAtProducer[-1])+3) + prodNode0.waitForProducer(forkProgress) # wait for fork to progress a bit Print("Restore fork") Print("Relaunching the non-producing bridge node to connect the producing nodes again") @@ -204,7 +222,7 @@ nonProdNode.waitForProducer(forkAtProducer) nonProdNode.waitForProducer(prodNode1Prod) afterForkBlockNum = nonProdNode.getBlockNum() - if int(afterForkBlockNum) > int(end_block_num): + if int(afterForkBlockNum) < int(end_block_num): Utils.errorExit(f"Did not stream long enough {end_block_num} to cover the fork {afterForkBlockNum}, increase block_range {block_range}") Print(f"Stopping all {args.num_clients} clients") @@ 
-226,14 +244,66 @@ block_num += 1 assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}" + Print("Generate snapshot") + shipNode.createSnapshot() + Print("Shutdown state_history_plugin nodeos") shipNode.kill(signal.SIGTERM) + Print("Shutdown bridge node") + nonProdNode.kill(signal.SIGTERM) + + Print("Test starting ship from snapshot") + Utils.rmNodeDataDir(shipNodeNum) + isRelaunchSuccess = shipNode.relaunch(chainArg=" --snapshot {}".format(getLatestSnapshot(shipNodeNum))) + assert isRelaunchSuccess, "relaunch from snapshot failed" + + afterSnapshotBlockNum = shipNode.getBlockNum() + + Print("Verify we can stream from ship after start from a snapshot with no incoming trxs") + start_block_num = afterSnapshotBlockNum + block_range = 0 + end_block_num = start_block_num + block_range + cmd = f"{shipClient} --start-block-num {start_block_num} --end-block-num {end_block_num} --fetch-block --fetch-traces --fetch-deltas" + if Utils.Debug: Utils.Print(f"cmd: {cmd}") + clients = [] + files = [] + starts = [] + for i in range(0, args.num_clients): + start = time.perf_counter() + outFile = open(f"{shipClientFilePrefix}{i}_snapshot.out", "w") + errFile = open(f"{shipClientFilePrefix}{i}_snapshot.err", "w") + Print(f"Start client {i}") + popen=Utils.delayedCheckOutput(cmd, stdout=outFile, stderr=errFile) + starts.append(time.perf_counter()) + clients.append((popen, cmd)) + files.append((outFile, errFile)) + Print(f"Client {i} started, Ship node head is: {shipNode.getBlockNum()}") + + Print(f"Stopping all {args.num_clients} clients") + for index, (popen, _), (out, err), start in zip(range(len(clients)), clients, files, starts): + popen.wait() + Print(f"Stopped client {index}. Ran for {time.perf_counter() - start:.3f} seconds.") + out.close() + err.close() + outFile = open(f"{shipClientFilePrefix}{index}_snapshot.out", "r") + data = json.load(outFile) + block_num = start_block_num + for i in data: + # fork can cause block numbers to be repeated + this_block_num = i['get_blocks_result_v0']['this_block']['block_num'] + if this_block_num < block_num: + block_num = this_block_num + assert block_num == this_block_num, f"{block_num} != {this_block_num}" + assert isinstance(i['get_blocks_result_v0']['deltas'], str) # verify deltas in result + block_num += 1 + assert block_num-1 == end_block_num, f"{block_num-1} != {end_block_num}" + testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) if shipTempDir is not None: - if testSuccessful and not keepLogs: + if testSuccessful and not args.keep_logs: shutil.rmtree(shipTempDir, ignore_errors=True) errorCode = 0 if testSuccessful else 1 diff --git a/tests/ship_test.py b/tests/ship_test.py index fcd69e4801..c8d3fbee9a 100755 --- a/tests/ship_test.py +++ b/tests/ship_test.py @@ -30,7 +30,7 @@ extraArgs = appArgs.add(flag="--num-requests", type=int, help="How many requests that each ship_client requests", default=1) extraArgs = appArgs.add(flag="--num-clients", type=int, help="How many ship_clients should be started", default=1) extraArgs = appArgs.add_bool(flag="--unix-socket", help="Run ship over unix socket") -args = TestHelper.parse_args({"-p", "-n","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--unshared"}, applicationSpecificArgs=appArgs) 
+args = TestHelper.parse_args({"-p", "-n","--dump-error-details","--keep-logs","-v","--leave-running","--unshared"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v totalProducerNodes=args.p @@ -39,17 +39,12 @@ totalNodes=totalProducerNodes+1 totalNonProducerNodes=totalNodes-totalProducerNodes totalProducers=totalProducerNodes -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -killAll=args.clean_run walletPort=TestHelper.DEFAULT_WALLET_PORT walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName shipTempDir=None @@ -58,8 +53,6 @@ TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") specificExtraNodeosArgs={} # non-producing nodes are at the end of the cluster's nodes, so reserving the last one for state_history_plugin @@ -196,9 +189,9 @@ testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) if shipTempDir is not None: - if testSuccessful and not keepLogs: + if testSuccessful and not args.keep_logs: shutil.rmtree(shipTempDir, ignore_errors=True) errorCode = 0 if testSuccessful else 1 diff --git a/tests/split_blocklog_replay_test.py b/tests/split_blocklog_replay_test.py new file mode 100755 index 0000000000..ae7c24ffd8 --- /dev/null +++ b/tests/split_blocklog_replay_test.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 + +import os +import shutil +import time +import signal +from TestHarness import Node, TestHelper, Utils + +node_id = 1 +nodeos = Node(TestHelper.LOCAL_HOST, TestHelper.DEFAULT_PORT, node_id) +data_dir = Utils.getNodeDataDir(node_id) +config_dir = Utils.getNodeConfigDir(node_id) +if os.path.exists(data_dir): + shutil.rmtree(data_dir) +os.makedirs(data_dir) +if not os.path.exists(config_dir): + os.makedirs(config_dir) + +try: + start_nodeos_cmd = f"{Utils.EosServerPath} -e -p eosio --data-dir={data_dir} --config-dir={config_dir} --blocks-log-stride 10" \ + " --plugin=eosio::http_plugin --plugin=eosio::chain_api_plugin --http-server-address=localhost:8888" + + nodeos.launchCmd(start_nodeos_cmd, node_id) + time.sleep(2) + nodeos.waitForBlock(30) + nodeos.kill(signal.SIGTERM) + + nodeos.relaunch(chainArg="--replay-blockchain") + + time.sleep(2) + assert nodeos.waitForBlock(31) +finally: + # clean up + Node.killAllNodeos() diff --git a/tests/subjective_billing_test.py b/tests/subjective_billing_test.py index 55052f5830..57416307df 100755 --- a/tests/subjective_billing_test.py +++ b/tests/subjective_billing_test.py @@ -17,7 +17,7 @@ args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed" ,"--dump-error-details","-v","--leave-running" - ,"--clean-run","--keep-logs","--unshared"}) + ,"--keep-logs","--unshared"}) pnodes=args.p topo=args.s @@ -27,21 +27,13 @@ nodesFile=args.nodes_file dontLaunch=nodesFile is not None seed=args.seed -dontKill=args.leave_running dumpErrorDetails=args.dump_error_details -killAll=args.clean_run -keepLogs=args.keep_logs - -killWallet=not dontKill -killEosInstances=not 
dontKill -if nodesFile is not None: - killEosInstances=False Utils.Debug=debug testSuccessful=False random.seed(seed) # Use a fixed seed for repeatability. -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=True if nodesFile is not None else args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" @@ -56,14 +48,9 @@ errorExit("Failed to initilize nodes from Json string.") total_nodes=len(cluster.getNodes()) - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() print("Stand up walletd") if walletMgr.launch() is False: errorExit("Failed to stand up keosd.") - else: - cluster.killall(allInstances=killAll) - cluster.cleanup() Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" % (pnodes, total_nodes-pnodes, topo, delay)) @@ -94,7 +81,7 @@ cluster.createAccountAndVerify(account2, cluster.eosioAccount, stakedDeposit=1000, stakeCPU=1) Print("Validating accounts after bootstrap") - cluster.validateAccounts([account1, account2]) + cluster.validateAccounts([account1, account2], testSysAccounts=False) node = cluster.getNode() @@ -188,7 +175,7 @@ testSuccessful = True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, dumpErrorDetails) errorCode = 0 if testSuccessful else 1 -exit(errorCode) \ No newline at end of file +exit(errorCode) diff --git a/tests/terminate-scenarios-test.py b/tests/terminate-scenarios-test.py index 1018335ab3..561a8529f2 100755 --- a/tests/terminate-scenarios-test.py +++ b/tests/terminate-scenarios-test.py @@ -18,7 +18,7 @@ errorExit=Utils.errorExit args=TestHelper.parse_args({"-d","-s","-c","--kill-sig","--keep-logs" - ,"--dump-error-details","-v","--leave-running","--clean-run" + ,"--dump-error-details","-v","--leave-running" ,"--terminate-at-block","--unshared"}) pnodes=1 topo=args.s @@ -27,10 +27,7 @@ debug=args.v total_nodes = pnodes killSignal=args.kill_sig -killEosInstances= not args.leave_running dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -killAll=args.clean_run terminate=args.terminate_at_block seed=1 @@ -38,7 +35,7 @@ testSuccessful=False random.seed(seed) # Use a fixed seed for repeatability. 
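The cluster construction that follows is the same lifecycle migration applied throughout this patch: the `killAll`/`dontKill`/`keepLogs` bookkeeping and the pre-launch `killall()`/`cleanup()` calls collapse into `Cluster` constructor arguments, and `TestHelper.shutdown` drops to outcome-only parameters. A condensed before/after sketch of the convention, using the argument names exactly as they appear in these diffs:

```python
from TestHarness import Cluster, TestHelper, WalletMgr

args = TestHelper.parse_args({"--dump-error-details", "-v", "--leave-running",
                              "--keep-logs", "--unshared"})

# Old convention (removed throughout this patch):
#   killAll = args.clean_run; keepLogs = args.keep_logs
#   cluster = Cluster(walletd=True, unshared=args.unshared)
#   cluster.killall(allInstances=killAll); cluster.cleanup()
#   ...
#   TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances,
#                       killWallet, keepLogs, killAll, dumpErrorDetails)

# New convention: lifecycle flags are constructor arguments and the harness
# owns pre-launch cleanup, so shutdown only needs the outcome flags.
cluster = Cluster(unshared=args.unshared,
                  keepRunning=args.leave_running,  # replaces dontKill/killEosInstances
                  keepLogs=args.keep_logs)         # replaces the keepLogs global
walletMgr = WalletMgr(True)
testSuccessful = False
try:
    cluster.setWalletMgr(walletMgr)
    # ... launch the cluster and run the scenario ...
    testSuccessful = True
finally:
    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful,
                        dumpErrorDetails=args.dump_error_details)
```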
-cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) walletMgr=WalletMgr(True) try: @@ -48,11 +45,6 @@ cluster.setChainStrategy(chainSyncStrategyStr) cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() - walletMgr.killall(allInstances=killAll) - walletMgr.cleanup() - Print ("producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s" % ( pnodes, topo, delay, chainSyncStrategyStr)) @@ -76,13 +68,13 @@ if nodeArg != "": if chainSyncStrategyStr == "hardReplay": nodeArg += " --truncate-at-block %d" % terminate - if cluster.relaunchEosInstances(cachePopen=True, nodeArgs=nodeArg, waitForTerm=(terminate > 0)) is False: + if cluster.relaunchEosInstances(nodeArgs=nodeArg, waitForTerm=(terminate > 0)) is False: errorExit("Failed to relaunch Eos instance") Print("nodeos instance relaunched.") testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killEosInstances, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tests/test_read_only_trx.cpp b/tests/test_read_only_trx.cpp new file mode 100644 index 0000000000..49134a54a7 --- /dev/null +++ b/tests/test_read_only_trx.cpp @@ -0,0 +1,251 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + + +namespace { +using namespace eosio; +using namespace eosio::chain; +using namespace eosio::test_utils; + +auto make_unique_trx() { + static uint64_t nextid = 0; + ++nextid; + account_name creator = config::system_account_name; + signed_transaction trx; + trx.expiration = fc::time_point_sec{fc::time_point::now() + fc::seconds( nextid % 50 == 0 ? 0 : 60 )}; // fail some transactions via expiration + if( nextid % 10 == 0 ) { + // fail some for authorization (read-only transaction should not have authorization) + trx.actions.emplace_back( vector{{creator, config::active_name}}, testit{nextid} ); + } else { + vector no_auth{}; + trx.actions.emplace_back( no_auth, testit{nextid} ); + } + return std::make_shared( std::move(trx) ); +} +} + +BOOST_AUTO_TEST_SUITE(read_only_trxs) + +enum class app_init_status { failed, succeeded }; + +void test_configs_common(std::vector& specific_args, app_init_status expected_status) { + fc::temp_directory temp; + appbase::scoped_app app; + auto temp_dir_str = temp.path().string(); + + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + argv.insert( argv.end(), specific_args.begin(), specific_args.end() ); + + // app->initialize() returns a boolean. BOOST_CHECK_EQUAL cannot compare + a boolean with an app_init_status directly + bool rc = (expected_status == app_init_status::succeeded) ? true : false; + bool result = false; + try { + result = app->initialize( argv.size(), (char**) &argv[0]); + } catch(...)
{} + BOOST_CHECK_EQUAL( result, rc ); +} + +// --read-only-thread not allowed on producer node +BOOST_AUTO_TEST_CASE(read_only_on_producer) { + std::vector specific_args = {"-p", "eosio", "-e", "--read-only-threads", "2" }; + test_configs_common(specific_args, app_init_status::failed); +} + +// read_window_time must be greater than max_transaction_time + 10ms +BOOST_AUTO_TEST_CASE(invalid_read_window_time) { + std::vector specific_args = { "--read-only-threads", "2", "--max-transaction-time", "10", "--read-only-write-window-time-us", "50000", "--read-only-read-window-time-us", "20000" }; // 20000 not greater than --max-transaction-time (10ms) + 10000us (minimum margin) + test_configs_common(specific_args, app_init_status::failed); +} + +// if --read-only-threads is not configured, read-only trx related configs should +// not be checked +BOOST_AUTO_TEST_CASE(not_check_configs_if_no_read_only_threads) { + std::vector specific_args = { "--max-transaction-time", "10", "--read-only-write-window-time-us", "50000", "--read-only-read-window-time-us", "20000" }; // 20000 not greater than --max-transaction-time (10ms) + 10000us (minimum margin) + test_configs_common(specific_args, app_init_status::succeeded); +} + +void test_trxs_common(std::vector& specific_args, bool test_disable_tierup = false) { + try { + fc::scoped_exit> on_exit = []() { + chain::wasm_interface_collection::test_disable_tierup = false; + }; + chain::wasm_interface_collection::test_disable_tierup = test_disable_tierup; + + using namespace std::chrono_literals; + fc::temp_directory temp; + appbase::scoped_app app; + auto temp_dir_str = temp.path().string(); + producer_plugin::set_test_mode(true); + + std::atomic next_calls = 0; + std::atomic num_get_account_calls = 0; + std::atomic num_posts = 0; + std::atomic trace_with_except = 0; + std::atomic trx_match = true; + const size_t num_pushes = 4242; + + { + std::promise> plugin_promise; + std::future> plugin_fut = plugin_promise.get_future(); + std::thread app_thread( [&]() { + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = {"test", "--data-dir", temp_dir_str.c_str(), "--config-dir", temp_dir_str.c_str()}; + argv.insert(argv.end(), specific_args.begin(), specific_args.end()); + app->initialize(argv.size(), (char**)&argv[0]); + app->find_plugin()->chain(); + app->startup(); + plugin_promise.set_value({app->find_plugin(), app->find_plugin()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); + } ); + fc::scoped_exit> on_except = [&](){ + if (app_thread.joinable()) + app_thread.join(); + }; + + auto[prod_plug, chain_plug] = plugin_fut.get(); + + activate_protocol_features_set_bios_contract(app, chain_plug); + + for( size_t i = 1; i <= num_pushes; ++i ) { + auto ptrx = i % 3 == 0 ? 
make_unique_trx() : make_bios_ro_trx(chain_plug->chain()); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug, &num_get_account_calls]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_account(chain_apis::read_only::get_account_params{.account_name=config::system_account_name}, fc::time_point::now()+fc::seconds(90)); + ++num_get_account_calls; + }); + app->executor().post( priority::low, exec_queue::read_only, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &app]() { + ++num_posts; + bool return_failure_traces = true; + app->get_method()(ptrx, + false, // api_trx + transaction_metadata::trx_type::read_only, // trx_type + return_failure_traces, + [ptrx, &next_calls, &trace_with_except, &trx_match, return_failure_traces] + (const next_function_variant& result) { + if( !std::holds_alternative( result ) && !std::get( result )->except ) { + if( std::get( result )->id != ptrx->id() ) { + elog( "trace not for trx ${id}: ${t}", + ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); + trx_match = false; + } + } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { + elog( "trace with except ${e}", + ("e", fc::json::to_pretty_string( *std::get( result ) )) ); + ++trace_with_except; + } + ++next_calls; + }); + }); + app->executor().post( priority::low, exec_queue::read_only, [&chain_plug=chain_plug]() { + chain_plug->get_read_only_api(fc::seconds(90)).get_consensus_parameters(chain_apis::read_only::get_consensus_parameters_params{}, fc::time_point::now()+fc::seconds(90)); + }); + } + + // Wait long enough such that all transactions are executed + auto start = fc::time_point::now(); + auto hard_deadline = start + fc::seconds(10); // To protect against waiting forever + while ( (next_calls < num_pushes || num_get_account_calls < num_pushes) && fc::time_point::now() < hard_deadline ){ + std::this_thread::sleep_for( 100ms ); + } + + app->quit(); + } + + BOOST_CHECK_EQUAL( trace_with_except, 0 ); // should not have any traces with except in it + BOOST_CHECK_EQUAL( num_pushes, num_posts ); + BOOST_CHECK_EQUAL( num_pushes, next_calls.load() ); + BOOST_CHECK_EQUAL( num_pushes, num_get_account_calls.load() ); + BOOST_CHECK( trx_match.load() ); // trace should match the transaction + } FC_LOG_AND_RETHROW() +} + +// test read-only trxs on main thread (no --read-only-threads) +BOOST_AUTO_TEST_CASE(no_read_only_threads) { + std::vector specific_args = { "-p", "eosio", "-e", "--abi-serializer-max-time-ms=999" }; + test_trxs_common(specific_args); +} + +// test read-only trxs on 1 threads (with --read-only-threads) +BOOST_AUTO_TEST_CASE(with_1_read_only_threads) { + std::vector specific_args = { "-p", "eosio", "-e", + "--read-only-threads=1", + "--max-transaction-time=10", + "--abi-serializer-max-time-ms=999", + "--read-only-write-window-time-us=100000", + "--read-only-read-window-time-us=40000" }; + test_trxs_common(specific_args); +} + +// test read-only trxs on 3 threads (with --read-only-threads) +BOOST_AUTO_TEST_CASE(with_3_read_only_threads) { + std::vector specific_args = { "-p", "eosio", "-e", + "--read-only-threads=3", + "--max-transaction-time=10", + "--abi-serializer-max-time-ms=999", + "--read-only-write-window-time-us=100000", + "--read-only-read-window-time-us=40000" }; + test_trxs_common(specific_args); +} + +// test read-only trxs on 3 threads (with --read-only-threads) +BOOST_AUTO_TEST_CASE(with_3_read_only_threads_no_tierup) { + std::vector specific_args = { "-p", 
"eosio", "-e", + "--read-only-threads=3", + "--max-transaction-time=10", + "--abi-serializer-max-time-ms=999", + "--read-only-write-window-time-us=100000", + "--read-only-read-window-time-us=40000" }; + test_trxs_common(specific_args, true); +} + +// test read-only trxs on 8 separate threads (with --read-only-threads) +BOOST_AUTO_TEST_CASE(with_8_read_only_threads) { + std::vector specific_args = { "-p", "eosio", "-e", + "--read-only-threads=8", + "--eos-vm-oc-enable=none", + "--max-transaction-time=10", + "--abi-serializer-max-time-ms=999", + "--read-only-write-window-time-us=10000", + "--read-only-read-window-time-us=400000" }; + test_trxs_common(specific_args); +} + +// test read-only trxs on 8 separate threads (with --read-only-threads) +BOOST_AUTO_TEST_CASE(with_8_read_only_threads_no_tierup) { + std::vector specific_args = { "-p", "eosio", "-e", + "--read-only-threads=8", + "--eos-vm-oc-enable=none", + "--max-transaction-time=10", + "--abi-serializer-max-time-ms=999", + "--read-only-write-window-time-us=10000", + "--read-only-read-window-time-us=400000" }; + test_trxs_common(specific_args, true); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/tests/test_snapshot_scheduler.cpp b/tests/test_snapshot_scheduler.cpp index 9267405113..84c4410d5d 100644 --- a/tests/test_snapshot_scheduler.cpp +++ b/tests/test_snapshot_scheduler.cpp @@ -8,13 +8,14 @@ using namespace eosio; using namespace eosio::chain; using snapshot_request_information = snapshot_scheduler::snapshot_request_information; +using snapshot_request_params = snapshot_scheduler::snapshot_request_params; using snapshot_request_id_information = snapshot_scheduler::snapshot_request_id_information; BOOST_AUTO_TEST_SUITE(producer_snapshot_scheduler_tests) BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { fc::logger log; - producer_plugin scheduler; + snapshot_scheduler scheduler; { // add/remove test @@ -30,19 +31,14 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { return e.to_detail_string().find("Duplicate snapshot request") != std::string::npos; }); - snapshot_request_id_information sri_delete_1 = {.snapshot_request_id = 0}; - scheduler.unschedule_snapshot(sri_delete_1); - + scheduler.unschedule_snapshot(0); BOOST_CHECK_EQUAL(1, scheduler.get_snapshot_requests().snapshot_requests.size()); - snapshot_request_id_information sri_delete_none = {.snapshot_request_id = 2}; - BOOST_CHECK_EXCEPTION(scheduler.unschedule_snapshot(sri_delete_none), snapshot_request_not_found, [](const fc::assert_exception& e) { + BOOST_CHECK_EXCEPTION(scheduler.unschedule_snapshot(0), snapshot_request_not_found, [](const fc::assert_exception& e) { return e.to_detail_string().find("Snapshot request not found") != std::string::npos; }); - snapshot_request_id_information sri_delete_2 = {.snapshot_request_id = 1}; - scheduler.unschedule_snapshot(sri_delete_2); - + scheduler.unschedule_snapshot(1); BOOST_CHECK_EQUAL(0, scheduler.get_snapshot_requests().snapshot_requests.size()); snapshot_request_information sri_large_spacing = {.block_spacing = 1000, .start_block_num = 5000, .end_block_num = 5010}; @@ -65,15 +61,19 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { std::future> plugin_fut = plugin_promise.get_future(); std::thread app_thread([&]() { - fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); - std::vector argv = - {"test", "--data-dir", temp.c_str(), "--config-dir", temp.c_str(), - "-p", "eosio", "-e", "--disable-subjective-billing=true"}; - app->initialize(argv.size(), (char**) &argv[0]); - app->startup(); - plugin_promise.set_value( - 
{app->find_plugin<producer_plugin>(), app->find_plugin<chain_plugin>()}); - app->exec(); + try { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector<const char*> argv = + {"test", "--data-dir", temp.c_str(), "--config-dir", temp.c_str(), + "-p", "eosio", "-e"}; + app->initialize(argv.size(), (char**) &argv[0]); + app->startup(); + plugin_promise.set_value( + {app->find_plugin<producer_plugin>(), app->find_plugin<chain_plugin>()}); + app->exec(); + return; + } FC_LOG_AND_DROP() + BOOST_CHECK(!"app threw exception see logged error"); }); auto [prod_plug, chain_plug] = plugin_fut.get(); @@ -87,42 +87,57 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { if (!pp->get_snapshot_requests().snapshot_requests.empty()) { const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests; - auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num) { + auto validate_snapshot_request = [&](uint32_t sid, uint32_t block_num, uint32_t spacing = 0, bool fuzzy_start = false) { auto it = find_if(snapshot_requests.begin(), snapshot_requests.end(), [sid](const snapshot_scheduler::snapshot_schedule_information& obj) {return obj.snapshot_request_id == sid;}); if (it != snapshot_requests.end()) { auto& pending = it->pending_snapshots; if (pending.size()==1) { - BOOST_CHECK_EQUAL(block_num, pending.begin()->head_block_num); + // pending snapshot block number + auto pbn = pending.begin()->head_block_num; + + // first pending snapshot + auto ps_start = (spacing != 0) ? (spacing + (pbn%spacing)) : pbn; + + if (!fuzzy_start) { + BOOST_CHECK_EQUAL(block_num, ps_start); + } + else { + int diff = block_num - ps_start; + BOOST_CHECK(std::abs(diff) <= 5); // accept +/- 5 blocks if start block not specified + } } return true; } return false; }; - BOOST_REQUIRE(validate_snapshot_request(0, 9)); // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires - BOOST_REQUIRE(validate_snapshot_request(4, 12)); // snapshot #4 should have pending snapshot at block # at the moment of scheduling (2) plus 10 = 12 + BOOST_REQUIRE(validate_snapshot_request(0, 9, 8)); // snapshot #0 should have pending snapshot at block #9 (8 + 1) and it never expires + BOOST_REQUIRE(validate_snapshot_request(4, 12, 10, true)); // snapshot #4 should have a pending snapshot at the block number at the moment of scheduling (2) plus 10 = 12 + BOOST_REQUIRE(validate_snapshot_request(5, 10, 10)); // snapshot #5 should have pending snapshot at block #10, #20 etc } }); - snapshot_request_information sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 1"}; - snapshot_request_information sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2 that will never happen"}; - snapshot_request_information sri3 = {.block_spacing = 2, .start_block_num = 0, .end_block_num = 3, .snapshot_description = "Example of recurring snapshot 3 that will expire"}; - snapshot_request_information sri4 = {.start_block_num = 1, .snapshot_description = "One time snapshot on first block"}; - snapshot_request_information sri5 = {.block_spacing = 10, .snapshot_description = "Recurring every 10 blocks snapshot starting now"}; + snapshot_request_params sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 1"}; + snapshot_request_params sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring
snapshot 2 that won't happen in test"}; + snapshot_request_params sri3 = {.block_spacing = 2, .start_block_num = 0, .end_block_num = 3, .snapshot_description = "Example of recurring snapshot 3 that will expire"}; + snapshot_request_params sri4 = {.start_block_num = 1, .snapshot_description = "One time snapshot on first block"}; + snapshot_request_params sri5 = {.block_spacing = 10, .snapshot_description = "Recurring every 10 blocks snapshot starting now"}; + snapshot_request_params sri6 = {.block_spacing = 10, .start_block_num = 0, .snapshot_description = "Recurring every 10 blocks snapshot starting from 0"}; pp->schedule_snapshot(sri1); pp->schedule_snapshot(sri2); pp->schedule_snapshot(sri3); pp->schedule_snapshot(sri4); pp->schedule_snapshot(sri5); + pp->schedule_snapshot(sri6); - // all five snapshot requests should be present now - BOOST_CHECK_EQUAL(5, pp->get_snapshot_requests().snapshot_requests.size()); + // all six snapshot requests should be present now + BOOST_CHECK_EQUAL(6, pp->get_snapshot_requests().snapshot_requests.size()); - empty_blocks_fut.wait_for(std::chrono::seconds(6)); + empty_blocks_fut.wait_for(std::chrono::seconds(10)); // two of the snapshots are done here and requests, corresponding to them should be deleted - BOOST_CHECK_EQUAL(3, pp->get_snapshot_requests().snapshot_requests.size()); + BOOST_CHECK_EQUAL(4, pp->get_snapshot_requests().snapshot_requests.size()); // check whether no pending snapshots present for a snapshot with id 0 const auto& snapshot_requests = pp->get_snapshot_requests().snapshot_requests; @@ -141,8 +156,8 @@ BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { std::vector<snapshot_scheduler::snapshot_schedule_information> ssi; db.set_path(temp / "snapshots"); db >> ssi; - BOOST_CHECK_EQUAL(3, ssi.size()); - BOOST_CHECK_EQUAL(ssi.begin()->block_spacing, sri1.block_spacing); + BOOST_CHECK_EQUAL(4, ssi.size()); + BOOST_CHECK_EQUAL(ssi.begin()->block_spacing, *sri1.block_spacing); } catch(...)
{ throw; } diff --git a/tests/trace_plugin_test.py b/tests/trace_plugin_test.py index 8f6f7955f2..6fb0376413 100755 --- a/tests/trace_plugin_test.py +++ b/tests/trace_plugin_test.py @@ -6,44 +6,33 @@ import unittest import os -from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL +from TestHarness import Cluster, Node, TestHelper, Utils, WalletMgr, CORE_SYMBOL, createAccountKeys testSuccessful = True class TraceApiPluginTest(unittest.TestCase): - sleep_s = 1 - cluster=Cluster(walletd=True, defproduceraPrvtKey=None) + cluster=Cluster(defproduceraPrvtKey=None) walletMgr=WalletMgr(True) accounts = [] cluster.setWalletMgr(walletMgr) - # kill nodeos and keosd and clean up dir - def cleanEnv(self, shouldCleanup: bool) : - self.cluster.killall(allInstances=True) - if shouldCleanup: - self.cluster.cleanup() - self.walletMgr.killall(allInstances=True) - if shouldCleanup: - self.walletMgr.cleanup() - # start keosd and nodeos def startEnv(self) : account_names = ["alice", "bob", "charlie"] abs_path = os.path.abspath(os.getcwd() + '/unittests/contracts/eosio.token/eosio.token.abi') traceNodeosArgs = " --trace-rpc-abi eosio.token=" + abs_path - self.cluster.launch(totalNodes=1, extraNodeosArgs=traceNodeosArgs) + self.cluster.launch(totalNodes=2, extraNodeosArgs=traceNodeosArgs) self.walletMgr.launch() testWalletName="testwallet" testWallet=self.walletMgr.create(testWalletName, [self.cluster.eosioAccount, self.cluster.defproduceraAccount]) self.cluster.validateAccounts(None) - self.accounts=Cluster.createAccountKeys(len(account_names)) - node = self.cluster.getNode(0) + self.accounts=createAccountKeys(len(account_names)) + node = self.cluster.getNode(1) for idx in range(len(account_names)): self.accounts[idx].name = account_names[idx] self.walletMgr.importKey(self.accounts[idx], testWallet) for account in self.accounts: - node.createInitializeAccount(account, self.cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000000, waitForTransBlock=True, exitOnError=True) - time.sleep(self.sleep_s) + node.createInitializeAccount(account, self.cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000000, waitForTransBlock=True if account == self.accounts[-1] else False, exitOnError=True) def get_block(self, params: str, node: Node) -> json: resource = "trace_api" @@ -70,7 +59,7 @@ def test_TraceApi(self) : self.assertEqual(node.getAccountEosBalanceStr(self.accounts[0].name), Utils.deduceAmount(expectedAmount, xferAmount)) self.assertEqual(node.getAccountEosBalanceStr(self.accounts[1].name), Utils.addAmount(expectedAmount, xferAmount)) - time.sleep(self.sleep_s) + node.waitForBlock(blockNum) # verify trans via node api before calling trace_api RPC blockFromNode = node.getBlock(blockNum) @@ -108,12 +97,11 @@ def test_TraceApi(self) : @classmethod def setUpClass(self): - self.cleanEnv(self, shouldCleanup=True) self.startEnv(self) @classmethod def tearDownClass(self): - self.cleanEnv(self, shouldCleanup=testSuccessful) + TraceApiPluginTest.cluster.testFailed = not testSuccessful if __name__ == "__main__": unittest.main() diff --git a/tests/trx_finality_status_forked_test.py b/tests/trx_finality_status_forked_test.py index 31b179231d..8ee00b49ab 100755 --- a/tests/trx_finality_status_forked_test.py +++ b/tests/trx_finality_status_forked_test.py @@ -21,7 +21,7 @@ errorExit=Utils.errorExit -args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", +args = 
TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running", "--wallet-port","--unshared"}) Utils.Debug=args.v totalProducerNodes=2 @@ -29,17 +29,12 @@ totalNodes=totalProducerNodes+totalNonProducerNodes maxActiveProducers=3 totalProducers=maxActiveProducers -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running -killAll=args.clean_run walletPort=args.wallet_port walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -51,8 +46,6 @@ TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") specificExtraNodeosArgs={} # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node @@ -116,7 +109,7 @@ cluster.validateAccounts([account1]) # *** Killing the "bridge" node *** - Print("Sending command to kill \"bridge\" node to separate the 2 producer groups.") + Print('Sending command to kill "bridge" node to separate the 2 producer groups.') # kill at the beginning of the production window for defproducera, so there is time for the fork for # defproducerc to grow before it would overtake the fork for defproducera and defproducerb killAtProducer="defproducera" @@ -143,7 +136,8 @@ def getState(status): return status["state"] transferAmount = 10 - prodC.transferFunds(cluster.eosioAccount, account1, f"{transferAmount}.0000 {CORE_SYMBOL}", "fund account") + transfer = prodC.transferFunds(cluster.eosioAccount, account1, f"{transferAmount}.0000 {CORE_SYMBOL}", "fund account") + transBlockNum = transfer['processed']['block_num'] transId = prodC.getLastTrackedTransactionId() retStatus = prodC.getTransactionStatus(transId) state = getState(retStatus) @@ -162,7 +156,6 @@ def getState(status): # since the Bridge node is killed when this producer is producing its last block in its window, there is plenty of time for the transfer to be # sent before the first block is created, but adding this to ensure it is in one of these blocks numTries = 2 - preInfo = prodC.getInfo() while numTries > 0: retStatus = prodC.getTransactionStatus(transId) state = getState(retStatus) @@ -171,8 +164,6 @@ def getState(status): numTries -= 1 assert prodC.waitForNextBlock(), "Production node C should continue to advance, even after bridge node is killed" - postInfo = prodC.getInfo() - Print(f"getTransactionStatus returned status: {json.dumps(retStatus, indent=1)}") assert state == inBlockState, \ f"ERROR: getTransactionStatus didn't return \"{inBlockState}\" state." @@ -183,9 +174,12 @@ def getState(status): if not nonProdNode.relaunch(): errorExit(f"Failure - (non-production) node {nonProdNode.nodeNum} should have restarted") - Print("Wait for LIB to move, which indicates prodC has forked out the branch") - assert prodC.waitForLibToAdvance(60), \ - "ERROR: Network did not reach consensus after bridge node was restarted." + while prodC.getInfo()['last_irreversible_block_num'] < transBlockNum: + Print("Wait for LIB to move, which indicates prodC may have forked out the branch") + assert prodC.waitForLibToAdvance(60), \ + "ERROR: Network did not reach consensus after bridge node was restarted." 
+ if prodC.getTransactionStatus(transId)['state'] == forkedOutState: + break retStatus = prodC.getTransactionStatus(transId) state = getState(retStatus) @@ -234,7 +228,7 @@ def getState(status): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) errorCode = 0 if testSuccessful else 1 exit(errorCode) diff --git a/tests/trx_finality_status_test.py b/tests/trx_finality_status_test.py index b31f4ed15f..0ec660518f 100755 --- a/tests/trx_finality_status_test.py +++ b/tests/trx_finality_status_test.py @@ -28,24 +28,19 @@ errorExit=Utils.errorExit appArgs=AppArgs() -args = TestHelper.parse_args({"-n", "--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--unshared"}) +args = TestHelper.parse_args({"-n", "--dump-error-details","--keep-logs","-v","--leave-running","--unshared"}) Utils.Debug=args.v pnodes=3 totalNodes=args.n if totalNodes<=pnodes+2: totalNodes=pnodes+2 -cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -dontKill=args.leave_running prodCount=1 -killAll=args.clean_run walletPort=TestHelper.DEFAULT_WALLET_PORT walletMgr=WalletMgr(True, port=walletPort) testSuccessful=False -killEosInstances=not dontKill -killWallet=not dontKill WalletdName=Utils.EosWalletName ClientName="cleos" @@ -57,8 +52,6 @@ TestHelper.printSystemInfo("BEGIN") cluster.setWalletMgr(walletMgr) - cluster.killall(allInstances=killAll) - cluster.cleanup() Print("Stand up cluster") successDuration = 60 failure_duration = 40 @@ -214,7 +207,7 @@ def validate(status, knownTrx=True): testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, dumpErrorDetails=dumpErrorDetails) exitCode = 0 if testSuccessful else 1 exit(exitCode) diff --git a/tests/trx_generator/main.cpp b/tests/trx_generator/main.cpp index 50c6bb1c54..60c20e3f7e 100644 --- a/tests/trx_generator/main.cpp +++ b/tests/trx_generator/main.cpp @@ -62,6 +62,7 @@ int main(int argc, char** argv) { ("abi-file", bpo::value(&user_trx_config._abi_data_file_path), "The path to the contract abi file to use for the supplied transaction action data") ("actions-data", bpo::value(&user_trx_config._actions_data_json_file_or_str), "The json actions data file or json actions data description string to use") ("actions-auths", bpo::value(&user_trx_config._actions_auths_json_file_or_str), "The json actions auth file or json actions auths description string to use, containting authAcctName to activePrivateKey pairs.") + ("api-endpoint", bpo::value(&provider_config._api_endpoint), "The api endpoint to direct transactions to. Defaults to: '/v1/chain/send_transaction2'") ("peer-endpoint-type", bpo::value(&provider_config._peer_endpoint_type)->default_value("p2p"), "Identify the peer endpoint api type to determine how to send transactions. Allowable 'p2p' and 'http'. 
Default: 'p2p'") ("peer-endpoint", bpo::value(&provider_config._peer_endpoint)->default_value("127.0.0.1"), "set the peer endpoint to send transactions to") ("port", bpo::value(&provider_config._port)->default_value(9876), "set the peer endpoint port to send transactions to") diff --git a/tests/trx_generator/trx_generator.cpp b/tests/trx_generator/trx_generator.cpp index 7dc1ea46d5..dda297422f 100644 --- a/tests/trx_generator/trx_generator.cpp +++ b/tests/trx_generator/trx_generator.cpp @@ -146,13 +146,16 @@ namespace eosio::testing { for (size_t i = 0; i < action_array.size(); ++i) { auto action_mvo = fc::mutable_variant_object(action_array[i]); locate_key_words_in_action_mvo(acct_gen_fields_out[i], action_mvo, key_word); + if(acct_gen_fields_out[i].empty()) { + acct_gen_fields_out.erase(i); + } } } void update_key_word_fields_in_sub_action(const std::string& key, fc::mutable_variant_object& action_mvo, const std::string& action_inner_key, const std::string& key_word) { if (action_mvo.find(action_inner_key) != action_mvo.end()) { - auto inner = action_mvo[action_inner_key].get_object(); + const auto& inner = action_mvo[action_inner_key].get_object(); if (inner.find(key) != inner.end()) { fc::mutable_variant_object inner_mvo = fc::mutable_variant_object(inner); inner_mvo.set(key, key_word); @@ -210,10 +213,15 @@ namespace eosio::testing { } EOS_RETHROW_EXCEPTIONS(chain::transaction_type_exception, "Fail to parse unpacked action data JSON") - chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); - chain::name auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); + std::vector auth = {}; + if (action_mvo["authorization"].get_object().find("actor") != action_mvo["authorization"].get_object().end() && + action_mvo["authorization"].get_object().find("permission") != action_mvo["authorization"].get_object().end()) { + chain::name auth_actor = chain::name(action_mvo["authorization"].get_object()["actor"].as_string()); + chain::name auth_perm = chain::name(action_mvo["authorization"].get_object()["permission"].as_string()); + auth.push_back({auth_actor, auth_perm}); + } - return chain::action({{auth_actor, auth_perm}}, _config._contract_owner_account, action_name, std::move(packed_action_data)); + return chain::action(auth, _config._contract_owner_account, action_name, std::move(packed_action_data)); }); return actions; @@ -235,7 +243,7 @@ namespace eosio::testing { const std::string gen_acct_name_per_trx("ACCT_PER_TRX"); - auto action_array = unpacked_actions_data_json.get_array(); + const auto& action_array = unpacked_actions_data_json.get_array(); _unpacked_actions.reserve(action_array.size()); std::transform(action_array.begin(), action_array.end(), std::back_inserter(_unpacked_actions), [&](const auto& var) { @@ -278,8 +286,8 @@ namespace eosio::testing { } bool trx_generator_base::tear_down() { - _provider.log_trxs(_config._log_dir); _provider.teardown(); + _provider.log_trxs(_config._log_dir); ilog("Sent transactions: ${cnt}", ("cnt", _txcount)); ilog("Tear down p2p transaction provider"); diff --git a/tests/trx_generator/trx_provider.cpp b/tests/trx_generator/trx_provider.cpp index d468bed734..9e6a51f817 100644 --- a/tests/trx_generator/trx_provider.cpp +++ b/tests/trx_generator/trx_provider.cpp @@ -34,6 +34,37 @@ namespace eosio::testing { return send_buffer; } + void provider_connection::init_and_connect() { + _connection_thread_pool.start( + 1, [](const fc::exception& e) { elog("provider_connection 
exception ${e}", ("e", e)); }); + connect(); + }; + + void provider_connection::cleanup_and_disconnect() { + disconnect(); + _connection_thread_pool.stop(); + }; + + fc::time_point provider_connection::get_trx_ack_time(const eosio::chain::transaction_id_type& trx_id) { + fc::time_point time_acked; + std::lock_guard lock(_trx_ack_map_lock); + auto search = _trxs_ack_time_map.find(trx_id); + if (search != _trxs_ack_time_map.end()) { + time_acked = search->second; + } else { + elog("get_trx_ack_time - Transaction acknowledge time not found for transaction with id: ${id}", + ("id", trx_id)); + time_acked = fc::time_point::min(); + } + return time_acked; + } + + void provider_connection::trx_acknowledged(const eosio::chain::transaction_id_type& trx_id, + const fc::time_point& ack_time) { + std::lock_guard lock(_trx_ack_map_lock); + _trxs_ack_time_map[trx_id] = ack_time; + } + void p2p_connection::connect() { ilog("Attempting P2P connection to ${ip}:${port}.", ("ip", _config._peer_endpoint)("port", _config._port)); tcp::resolver r(_connection_thread_pool.get_executor()); @@ -51,6 +82,11 @@ namespace eosio::testing { void p2p_connection::send_transaction(const chain::packed_transaction& trx) { send_buffer_type msg = create_send_buffer(trx); _p2p_socket.send(boost::asio::buffer(*msg)); + trx_acknowledged(trx.id(), fc::time_point::min()); //using min to identify ack time as not applicable for p2p + } + + acked_trx_trace_info p2p_connection::get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) { + return {}; } void http_connection::connect() {} @@ -70,8 +106,11 @@ namespace eosio::testing { } } + bool http_connection::needs_response_trace_info() { + return _config._api_endpoint == "/v1/chain/send_read_only_transaction"; + } + void http_connection::send_transaction(const chain::packed_transaction& trx) { - const std::string target = "/v1/chain/send_transaction2"s; const int http_version = 11; const std::string content_type = "application/json"s; @@ -83,21 +122,80 @@ namespace eosio::testing { http_client_async::http_request_params params{_connection_thread_pool.get_executor(), _config._peer_endpoint, _config._port, - target, + _config._api_endpoint, http_version, content_type}; http_client_async::async_http_request( params, std::move(msg_body), - [&acked = _acknowledged](boost::beast::error_code ec, - boost::beast::http::response response) { - ++acked; - if (response.result() != boost::beast::http::status::accepted) { - elog("async_http_request Failed with response http status code: ${status}", ("status", response.result_int())); + [this, trx_id = trx.id()](boost::beast::error_code ec, + boost::beast::http::response response) { + ++this->_acknowledged; + trx_acknowledged(trx_id, fc::time_point::now()); + + if (this->needs_response_trace_info() && response.result() == boost::beast::http::status::ok) { + try { + fc::variant resp_json = fc::json::from_string(response.body()); + if (resp_json.is_object() && resp_json.get_object().contains("processed")) { + const auto& processed = resp_json["processed"]; + const auto& block_num = processed["block_num"].as_uint64(); + const auto& block_time = processed["block_time"].as_string(); + std::string status = "failed"; + uint32_t net = 0; + uint32_t cpu = 0; + if (processed.get_object().contains("receipt")) { + const auto& receipt = processed["receipt"]; + if (receipt.is_object()) { + status = receipt["status"].as_string(); + net = receipt["net_usage_words"].as_uint64() * 8; + cpu = receipt["cpu_usage_us"].as_uint64(); + } + if (status == 
"executed") { + record_trx_info(trx_id, block_num, cpu, net, block_time); + } else { + elog("async_http_request Transaction receipt status not executed: ${string}", + ("string", response.body())); + } + } else { + elog("async_http_request Transaction failed, no receipt: ${string}", + ("string", response.body())); + } + } else { + elog("async_http_request Transaction failed, transaction not processed: ${string}", + ("string", response.body())); + } + } + EOS_RETHROW_EXCEPTIONS(chain::json_parse_exception, "Fail to parse JSON from string: ${string}", + ("string", response.body())); + } + + if (!(response.result() == boost::beast::http::status::accepted || + response.result() == boost::beast::http::status::ok)) { + elog("async_http_request Failed with response http status code: ${status}", + ("status", response.result_int())); } }); ++_sent; } + void http_connection::record_trx_info(const eosio::chain::transaction_id_type& trx_id, uint32_t block_num, + uint32_t cpu_usage_us, uint32_t net_usage_words, + const std::string& block_time) { + std::lock_guard lock(_trx_info_map_lock); + _acked_trx_trace_info_map.insert({trx_id, {true, block_num, cpu_usage_us, net_usage_words, block_time}}); + } + + acked_trx_trace_info http_connection::get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) { + acked_trx_trace_info info; + std::lock_guard lock(_trx_info_map_lock); + auto search = _acked_trx_trace_info_map.find(trx_id); + if (search != _acked_trx_trace_info_map.end()) { + info = search->second; + } else { + elog("get_acked_trx_trace_info - Acknowledged transaction trace info not found for transaction with id: ${id}", ("id", trx_id)); + } + return info; + } + trx_provider::trx_provider(const provider_base_config& provider_config) { if (provider_config._peer_endpoint_type == "http") { _conn.emplace(provider_config); @@ -121,8 +219,25 @@ namespace eosio::testing { fileName << log_dir << "/trx_data_output_" << getpid() << ".txt"; std::ofstream out(fileName.str()); - for (logged_trx_data data : _sent_trx_data) { - out << std::string(data._trx_id) << ","<< data._sent_timestamp.to_iso_string() << "\n"; + for (const logged_trx_data& data : _sent_trx_data) { + fc::time_point acked = _peer_connection->get_trx_ack_time(data._trx_id); + std::string acked_str; + fc::microseconds ack_round_trip_us; + if (fc::time_point::min() == acked) { + acked_str = "NA"; + ack_round_trip_us = fc::microseconds(-1); + } else { + acked_str = acked.to_iso_string(); + ack_round_trip_us = acked - data._timestamp; + } + out << std::string(data._trx_id) << "," << data._timestamp.to_iso_string() << "," << acked_str << "," + << ack_round_trip_us.count(); + + acked_trx_trace_info info = _peer_connection->get_acked_trx_trace_info(data._trx_id); + if (info._valid) { + out << "," << info._block_num << "," << info._cpu_usage_us << "," << info._net_usage_words << "," << info._block_time; + } + out << "\n"; } out.close(); } diff --git a/tests/trx_generator/trx_provider.hpp b/tests/trx_generator/trx_provider.hpp index c0e5713a1e..8f8dd9200d 100644 --- a/tests/trx_generator/trx_provider.hpp +++ b/tests/trx_generator/trx_provider.hpp @@ -9,6 +9,7 @@ #include #include #include +#include using namespace std::chrono_literals; @@ -17,21 +18,40 @@ namespace eosio::testing { struct logged_trx_data { eosio::chain::transaction_id_type _trx_id; - fc::time_point _sent_timestamp; + fc::time_point _timestamp; - explicit logged_trx_data(eosio::chain::transaction_id_type trx_id, fc::time_point sent=fc::time_point::now()) : - _trx_id(trx_id), 
_sent_timestamp(sent) {} + explicit logged_trx_data(eosio::chain::transaction_id_type trx_id, fc::time_point time_of_interest=fc::time_point::now()) : + _trx_id(trx_id), _timestamp(time_of_interest) {} }; struct provider_base_config { std::string _peer_endpoint_type = "p2p"; std::string _peer_endpoint = "127.0.0.1"; unsigned short _port = 9876; + // The api endpoint is not used for p2p connections since transactions are streamed directly to the p2p endpoint + std::string _api_endpoint = "/v1/chain/send_transaction2"; std::string to_string() const { std::ostringstream ss; - ss << "endpoint type: " << _peer_endpoint_type << " peer_endpoint: " << _peer_endpoint << " port: " << _port; - return std::move(ss).str(); + ss << "Provider base config endpoint type: " << _peer_endpoint_type << " peer_endpoint: " << _peer_endpoint + << " port: " << _port << " api endpoint: " << _api_endpoint; + return ss.str(); + } + }; + + struct acked_trx_trace_info { + bool _valid = false; + uint32_t _block_num = 0; + uint32_t _cpu_usage_us = 0; + uint32_t _net_usage_words = 0; + std::string _block_time = ""; + + std::string to_string() const { + std::ostringstream ss; + ss << "Acked Transaction Trace Info " + << "valid: " << _valid << " block num: " << _block_num << " cpu usage us: " << _cpu_usage_us + << " net usage words: " << _net_usage_words << " block time: " << _block_time; + return ss.str(); } }; @@ -39,22 +59,20 @@ namespace eosio::testing { const provider_base_config& _config; eosio::chain::named_thread_pool _connection_thread_pool; + std::mutex _trx_ack_map_lock; + std::map<eosio::chain::transaction_id_type, fc::time_point> _trxs_ack_time_map; + explicit provider_connection(const provider_base_config& provider_config) : _config(provider_config) {} virtual ~provider_connection() = default; - void init_and_connect() { - _connection_thread_pool.start( - 1, [](const fc::exception& e) { elog("provider_connection exception ${e}", ("e", e)); }); - connect(); - }; - - void cleanup_and_disconnect() { - disconnect(); - _connection_thread_pool.stop(); - }; + void init_and_connect(); + void cleanup_and_disconnect(); + fc::time_point get_trx_ack_time(const eosio::chain::transaction_id_type& trx_id); + void trx_acknowledged(const eosio::chain::transaction_id_type& trx_id, const fc::time_point& ack_time); + virtual acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) = 0; virtual void send_transaction(const chain::packed_transaction& trx) = 0; private: @@ -63,17 +81,24 @@ }; struct http_connection : public provider_connection { + std::mutex _trx_info_map_lock; + std::map<eosio::chain::transaction_id_type, acked_trx_trace_info> _acked_trx_trace_info_map; + std::atomic _acknowledged{0}; std::atomic _sent{0}; explicit http_connection(const provider_base_config& provider_config) : provider_connection(provider_config) {} - void send_transaction(const chain::packed_transaction& trx); + void send_transaction(const chain::packed_transaction& trx) final; + void record_trx_info(const eosio::chain::transaction_id_type& trx_id, uint32_t block_num, uint32_t cpu_usage_us, + uint32_t net_usage_words, const std::string& block_time); + acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override final; private: void connect() override final; void disconnect() override final; + bool needs_response_trace_info(); }; struct p2p_connection : public provider_connection { @@ -83,7 +108,9 @@ : provider_connection(provider_config) , _p2p_socket(_connection_thread_pool.get_executor()) {} - void send_transaction(const
chain::packed_transaction& trx); + void send_transaction(const chain::packed_transaction& trx) final; + + acked_trx_trace_info get_acked_trx_trace_info(const eosio::chain::transaction_id_type& trx_id) override final; private: void connect() override final; @@ -104,19 +131,17 @@ namespace eosio::testing { std::vector _sent_trx_data; }; - using fc::time_point; - struct tps_test_stats { - uint32_t total_trxs = 0; - uint32_t trxs_left = 0; - uint32_t trxs_sent = 0; - time_point start_time; - time_point expected_end_time; - time_point last_run; - time_point next_run; - int64_t time_to_next_trx_us = 0; - fc::microseconds trx_interval; - uint32_t expected_sent; + uint32_t total_trxs = 0; + uint32_t trxs_left = 0; + uint32_t trxs_sent = 0; + fc::time_point start_time; + fc::time_point expected_end_time; + fc::time_point last_run; + fc::time_point next_run; + int64_t time_to_next_trx_us = 0; + fc::microseconds trx_interval; + uint32_t expected_sent; }; constexpr int64_t min_sleep_us = 1; @@ -150,7 +175,7 @@ namespace eosio::testing { std::string to_string() const { std::ostringstream ss; ss << "Trx Tps Tester Config: duration: " << _gen_duration_seconds << " target tps: " << _target_tps; - return std::move(ss).str(); + return ss.str(); }; }; diff --git a/tests/validate-dirty-db.py b/tests/validate-dirty-db.py index b4f060263d..b5453eba10 100755 --- a/tests/validate-dirty-db.py +++ b/tests/validate-dirty-db.py @@ -19,7 +19,7 @@ Print=Utils.Print errorExit=Utils.errorExit -args = TestHelper.parse_args({"--keep-logs","--dump-error-details","-v","--leave-running","--clean-run","--unshared"}) +args = TestHelper.parse_args({"--keep-logs","--dump-error-details","-v","--leave-running","--unshared"}) debug=args.v pnodes=1 topo="mesh" @@ -29,10 +29,7 @@ killCount=1 killSignal=Utils.SigKillTag -killEosInstances= not args.leave_running dumpErrorDetails=args.dump_error_details -keepLogs=args.keep_logs -killAll=args.clean_run seed=1 Utils.Debug=debug @@ -63,16 +60,13 @@ def runNodeosAndGetOutput(myTimeout=3, nodeosLogPath=f"{Utils.TestLogRoot}"): return (True, output) random.seed(seed) # Use a fixed seed for repeatability. 
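# The lifecycle pattern repeated across the test-script changes in this diff,
# sketched here for reference (keyword names exactly as used above; TestHarness
# is assumed to handle killing nodeos/keosd and log cleanup from these flags):
#
#     cluster = Cluster(unshared=args.unshared, keepRunning=args.leave_running,
#                       keepLogs=args.keep_logs)
#     try:
#         ...  # test body; no upfront killall()/cleanup() calls needed
#         testSuccessful = True
#     finally:
#         TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful,
#                             dumpErrorDetails=dumpErrorDetails)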
-cluster=Cluster(walletd=True,unshared=args.unshared) +cluster=Cluster(unshared=args.unshared, keepRunning=args.leave_running, keepLogs=args.keep_logs) try: TestHelper.printSystemInfo("BEGIN") cluster.setChainStrategy(chainSyncStrategyStr) - cluster.killall(allInstances=killAll) - cluster.cleanup() - Print ("producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s" % ( pnodes, topo, delay, chainSyncStrategyStr)) @@ -83,7 +77,9 @@ def runNodeosAndGetOutput(myTimeout=3, nodeosLogPath=f"{Utils.TestLogRoot}"): node=cluster.getNode(0) Print("Kill cluster nodes.") - cluster.killall(allInstances=killAll) + for node in cluster.nodes: + node.kill(signal.SIGKILL) + cluster.biosNode.kill(signal.SIGKILL) Print("Restart nodeos repeatedly to ensure dirty database flag sticks.") timeout=6 @@ -111,9 +107,9 @@ def runNodeosAndGetOutput(myTimeout=3, nodeosLogPath=f"{Utils.TestLogRoot}"): testSuccessful=True finally: if debug: Print("Cleanup in finally block.") - TestHelper.shutdown(cluster, None, testSuccessful, killEosInstances, False, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, None, testSuccessful, dumpErrorDetails) if debug: Print("Exiting test, exit value 0.") exitCode = 0 if testSuccessful else 1 -exit(exitCode) \ No newline at end of file +exit(exitCode) diff --git a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py index 83266dd3bc..1bd675a19f 100755 --- a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py +++ b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py @@ -280,9 +280,6 @@ def produceNewAccounts(): print(i, name) f.write(' {"name":"%s", "pvt":"%s", "pub":"%s"},\n' % (name, r[1], r[2])) -def stepKillAll(): - run('killall keosd nodeos || true') - sleep(1.5) def stepStartWallet(): startWallet() importKeys() @@ -394,7 +391,6 @@ def stepLog(): parser = argparse.ArgumentParser() commands = [ - ('k', 'kill', stepKillAll, True, "Kill all nodeos and keosd processes"), ('w', 'wallet', stepStartWallet, True, "Start keosd, create wallet, fill with keys"), ('b', 'boot', stepStartBoot, True, "Start boot node"), ('s', 'sys', createSystemAccounts, True, "Create system accounts (eosio.*)"), diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index cdab740d58..3506096632 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -40,18 +41,33 @@ FC_REFLECT(act_sig, (sig) ) BOOST_AUTO_TEST_SUITE(abi_tests) +#ifdef NDEBUG fc::microseconds max_serialization_time = fc::seconds(1); // some test machines are very slow +#else +fc::microseconds max_serialization_time = fc::microseconds::maximum(); // don't check in debug builds +#endif + +static fc::time_point get_deadline() { + return fc::time_point::now().safe_add(max_serialization_time); +} // verify that round trip conversion, via bytes, reproduces the exact same data fc::variant verify_byte_round_trip_conversion( const abi_serializer& abis, const type_name& type, const fc::variant& var ) { auto bytes = abis.variant_to_binary(type, var, abi_serializer::create_yield_function( max_serialization_time )); + auto b = abis.variant_to_binary(type, var, max_serialization_time ); + BOOST_TEST( b == bytes ); auto var2 = abis.binary_to_variant(type, bytes, abi_serializer::create_yield_function( max_serialization_time )); + auto var3 = abis.binary_to_variant(type, b, max_serialization_time); - std::string r = fc::json::to_string(var2, fc::time_point::now() + 
max_serialization_time); + std::string r2 = fc::json::to_string(var2, get_deadline()); + std::string r3 = fc::json::to_string(var3, get_deadline()); + BOOST_TEST( r2 == r3 ); auto bytes2 = abis.variant_to_binary(type, var2, abi_serializer::create_yield_function( max_serialization_time )); + auto bytes3 = abis.variant_to_binary(type, var3, max_serialization_time); + BOOST_TEST( bytes2 == bytes3 ); BOOST_TEST( fc::to_hex(bytes) == fc::to_hex(bytes2) ); @@ -63,10 +79,16 @@ void verify_round_trip_conversion( const abi_serializer& abis, const type_name& auto var = fc::json::from_string(json); auto bytes = abis.variant_to_binary(type, var, abi_serializer::create_yield_function( max_serialization_time )); BOOST_REQUIRE_EQUAL(fc::to_hex(bytes), hex); + auto b = abis.variant_to_binary(type, var, max_serialization_time); + BOOST_REQUIRE_EQUAL(fc::to_hex(b), hex); auto var2 = abis.binary_to_variant(type, bytes, abi_serializer::create_yield_function( max_serialization_time )); - BOOST_REQUIRE_EQUAL(fc::json::to_string(var2, fc::time_point::now() + max_serialization_time), expected_json); + BOOST_REQUIRE_EQUAL(fc::json::to_string(var2, get_deadline()), expected_json); + auto var3 = abis.binary_to_variant(type, b, max_serialization_time ); + BOOST_REQUIRE_EQUAL(fc::json::to_string(var3, get_deadline()), expected_json); auto bytes2 = abis.variant_to_binary(type, var2, abi_serializer::create_yield_function( max_serialization_time )); BOOST_REQUIRE_EQUAL(fc::to_hex(bytes2), hex); + auto b2 = abis.variant_to_binary(type, var3, max_serialization_time); + BOOST_REQUIRE_EQUAL(fc::to_hex(b2), hex); } void verify_round_trip_conversion( const abi_serializer& abis, const type_name& type, const std::string& json, const std::string& hex ) @@ -87,17 +109,28 @@ fc::variant verify_type_round_trip_conversion( const abi_serializer& abis, const { try { auto bytes = abis.variant_to_binary(type, var, abi_serializer::create_yield_function( max_serialization_time )); + auto b = abis.variant_to_binary(type, var, max_serialization_time); T obj; abi_serializer::from_variant(var, obj, get_resolver(), abi_serializer::create_yield_function( max_serialization_time )); + T obj2; + abi_serializer::from_variant(var, obj2, get_resolver(), max_serialization_time); + fc::variant var2; abi_serializer::to_variant(obj, var2, get_resolver(), abi_serializer::create_yield_function( max_serialization_time )); - std::string r = fc::json::to_string(var2, fc::time_point::now() + max_serialization_time); + fc::variant var3; + abi_serializer::to_variant(obj2, var3, get_resolver(), max_serialization_time); + std::string r2 = fc::json::to_string(var2, get_deadline()); + std::string r3 = fc::json::to_string(var3, get_deadline()); + BOOST_TEST( r2 == r3 ); auto bytes2 = abis.variant_to_binary(type, var2, abi_serializer::create_yield_function( max_serialization_time )); + auto b3 = abis.variant_to_binary(type, var3, max_serialization_time); + BOOST_TEST( bytes2 == b3 ); + BOOST_TEST( b == b3 ); BOOST_TEST( fc::to_hex(bytes) == fc::to_hex(bytes2) ); @@ -1572,11 +1605,16 @@ BOOST_AUTO_TEST_CASE(packed_transaction) )====="; fc::variant var; abi_serializer::to_variant(packed_txn, var, get_resolver(fc::json::from_string(packed_transaction_abi).as()), abi_serializer::create_yield_function( max_serialization_time )); + fc::variant var2; + abi_serializer::to_variant(packed_txn, var2, get_resolver(fc::json::from_string(packed_transaction_abi).as()), max_serialization_time); chain::packed_transaction packed_txn2; abi_serializer::from_variant(var, packed_txn2, 
get_resolver(fc::json::from_string(packed_transaction_abi).as()), abi_serializer::create_yield_function( max_serialization_time )); + chain::packed_transaction packed_txn3; + abi_serializer::from_variant(var2, packed_txn3, get_resolver(fc::json::from_string(packed_transaction_abi).as()), max_serialization_time); const auto txn2 = packed_txn2.get_transaction(); + const auto txn3 = packed_txn3.get_transaction(); BOOST_REQUIRE_EQUAL(txn.ref_block_num, txn2.ref_block_num); BOOST_REQUIRE_EQUAL(txn.ref_block_prefix, txn2.ref_block_prefix); @@ -1589,6 +1627,19 @@ BOOST_AUTO_TEST_CASE(packed_transaction) verify_action_equal(txn.actions[i], txn2.actions[i]); BOOST_REQUIRE_EQUAL(txn.max_net_usage_words.value, txn2.max_net_usage_words.value); BOOST_REQUIRE_EQUAL(txn.max_cpu_usage_ms, txn2.max_cpu_usage_ms); + + BOOST_REQUIRE_EQUAL(txn.ref_block_num, txn3.ref_block_num); + BOOST_REQUIRE_EQUAL(txn.ref_block_prefix, txn3.ref_block_prefix); + BOOST_REQUIRE(txn.expiration == txn3.expiration); + BOOST_REQUIRE_EQUAL(txn.context_free_actions.size(), txn3.context_free_actions.size()); + for (unsigned int i = 0; i < txn.context_free_actions.size(); ++i) + verify_action_equal(txn.context_free_actions[i], txn3.context_free_actions[i]); + BOOST_REQUIRE_EQUAL(txn.actions.size(), txn3.actions.size()); + for (unsigned int i = 0; i < txn.actions.size(); ++i) + verify_action_equal(txn.actions[i], txn3.actions[i]); + BOOST_REQUIRE_EQUAL(txn.max_net_usage_words.value, txn3.max_net_usage_words.value); + BOOST_REQUIRE_EQUAL(txn.max_cpu_usage_ms, txn3.max_cpu_usage_ms); + } FC_LOG_AND_RETHROW() } BOOST_AUTO_TEST_CASE(abi_type_repeat) @@ -1929,6 +1980,113 @@ BOOST_AUTO_TEST_CASE(abi_type_loop) } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(abi_std_optional) +{ try { + const char* repeat_abi = R"=====( + { + "version": "eosio::abi/1.2", + "types": [], + "structs": [ + { + "name": "fees", + "base": "", + "fields": [ + { + "name": "gas_price", + "type": "uint64?" + }, + { + "name": "miner_cut", + "type": "uint32?" + }, + { + "name": "bridge_fee", + "type": "uint32?" 
+ } + ] + } + ], + "actions": [ + { + "name": "fees", + "type": "fees", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "variants": [], + "action_results": [] + } + )====="; + + abi_serializer abis(fc::json::from_string(repeat_abi).as(), abi_serializer::create_yield_function( max_serialization_time )); + { + // check conversion when all optional members are provided + std::string test_data = R"=====( + { + "gas_price" : "42", + "miner_cut" : "2", + "bridge_fee" : "2" + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } + + { + // check conversion when the first optional member is missing + std::string test_data = R"=====( + { + "miner_cut" : "2", + "bridge_fee" : "2" + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } + + { + // check conversion when the second optional member is missing + std::string test_data = R"=====( + { + "gas_price" : "42", + "bridge_fee" : "2" + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } + + { + // check conversion when the last optional member is missing + std::string test_data = R"=====( + { + "gas_price" : "42", + "miner_cut" : "2", + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } + + { + // check conversion when all optional members are missing + std::string test_data = R"=====( + { + } + )====="; + + auto var = fc::json::from_string(test_data); + verify_byte_round_trip_conversion(abis, "fees", var); + } + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_CASE(abi_type_redefine) { try { // inifinite loop in types @@ -2014,7 +2172,7 @@ BOOST_AUTO_TEST_CASE(abi_type_nested_in_vector) } )====="; - BOOST_CHECK_THROW( abi_serializer abis(fc::json::from_string(repeat_abi).as(), abi_serializer::create_yield_function( max_serialization_time )), fc::exception ); + BOOST_CHECK_THROW( abi_serializer abis(fc::json::from_string(repeat_abi).as(), abi_serializer::create_yield_function( max_serialization_time )), parse_error_exception ); } FC_LOG_AND_RETHROW() } @@ -2094,6 +2252,7 @@ BOOST_AUTO_TEST_CASE(abi_large_array) static_cast(0xff), static_cast(0x08)}; BOOST_CHECK_THROW( abis.binary_to_variant( "hi[]", bin, abi_serializer::create_yield_function( max_serialization_time ) );, fc::exception ); + BOOST_CHECK_THROW( abis.binary_to_variant( "hi[]", bin, max_serialization_time );, fc::exception ); } FC_LOG_AND_RETHROW() } @@ -2207,7 +2366,9 @@ BOOST_AUTO_TEST_CASE(abi_recursive_structs) abi_serializer abis(fc::json::from_string(abi_str).as(), abi_serializer::create_yield_function( max_serialization_time )); string hi_data = "{\"user\":\"eosio\"}"; auto bin = abis.variant_to_binary("hi2", fc::json::from_string(hi_data), abi_serializer::create_yield_function( max_serialization_time )); + auto bin2 = abis.variant_to_binary("hi2", fc::json::from_string(hi_data), max_serialization_time); BOOST_CHECK_THROW( abis.binary_to_variant("hi", bin, abi_serializer::create_yield_function( max_serialization_time ));, fc::exception ); + BOOST_CHECK_THROW( abis.binary_to_variant("hi", bin2, max_serialization_time);, fc::exception ); } FC_LOG_AND_RETHROW() } @@ -2219,11 +2380,12 @@ BOOST_AUTO_TEST_CASE(abi_very_deep_structs) abi_serializer abis( fc::json::from_string( large_nested_abi ).as(), abi_serializer::create_yield_function( max_serialization_time ) ); string hi_data = 
"{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":{\"f1\":0}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}"; BOOST_CHECK_THROW( abis.variant_to_binary( "s98", fc::json::from_string( hi_data ), abi_serializer::create_yield_function( max_serialization_time ) ), fc::exception ); + BOOST_CHECK_THROW( abis.variant_to_binary( "s98", fc::json::from_string( hi_data ), max_serialization_time ), fc::exception ); } FC_LOG_AND_RETHROW() } // Infinite recursion of abi_serializer in struct definitions -BOOST_AUTO_TEST_CASE(abi_very_deep_structs_1ms) +BOOST_AUTO_TEST_CASE(abi_very_deep_structs_1us) { try { BOOST_CHECK_THROW( @@ -2293,20 +2455,32 @@ BOOST_AUTO_TEST_CASE(abi_large_signature) .sig = sig }); - fc::variant var; - auto start = fc::time_point::now(); - bool check_data = true; - try { - abi_serializer::to_variant( large_act, var, get_resolver( fc::json::from_string( abi_str ).as() ), - abi_serializer::create_yield_function( fc::milliseconds( 1 ) ) ); - } catch( abi_serialization_deadline_exception& ) { - // can be thrown if check_deadline is tripped after deadline in to_base58 is tripped - check_data = false; + { + fc::variant var; + auto start = fc::time_point::now(); + bool check_data = true; + try { + abi_serializer::to_variant( large_act, var, get_resolver( fc::json::from_string( abi_str ).as() ), + abi_serializer::create_yield_function( fc::milliseconds( 1 ) ) ); + } catch( abi_serialization_deadline_exception& ) { + // can be thrown if check_deadline is tripped after deadline in to_base58 is tripped + check_data = false; + } + auto stop = fc::time_point::now(); + // Give it a leaway of 50ms + BOOST_CHECK_LE( (stop - start).count(), 51 * 1000 ); + if( check_data ) { + BOOST_CHECK( var.get_object().contains( "data" ) ); + BOOST_CHECK( var.get_object().contains( "hex_data" ) ); + } } - auto stop = fc::time_point::now(); - // Give it a leaway of 50ms - BOOST_CHECK_LE( (stop - start).count(), 51*1000 ); - if( check_data ) { + { + fc::variant var; + auto start = fc::time_point::now(); + abi_serializer::to_variant( large_act, var, get_resolver( fc::json::from_string( abi_str ).as() ), fc::milliseconds(1) ); + auto stop = fc::time_point::now(); + // Give it a leaway of 50ms + BOOST_CHECK_LE( (stop - start).count(), 51 * 1000 ); BOOST_CHECK( var.get_object().contains( "data" ) ); BOOST_CHECK( var.get_object().contains( "hex_data" ) ); } @@ -2373,9 +2547,20 @@ BOOST_AUTO_TEST_CASE(variants) BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["4", 5, 6])"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing variant 'v1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", 
fc::json::from_string(R"(9)"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing variant 'v1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"([4])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing variant 'v1") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"([4, 5])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Encountered non-string as first item of input array while processing variant 'v1") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["4", 5, 6])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Expected input to be an array of two items while processing variant 'v1'") ); + // type is not valid within this variant BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["int9", 21])"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_starts_with("Specified type 'int9' in input array is not valid within the variant 'v1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["int9", 21])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Specified type 'int9' in input array is not valid within the variant 'v1'") ); verify_round_trip_conversion(abis, "v1", R"(["int8",21])", "0015"); verify_round_trip_conversion(abis, "v1", R"(["string","abcd"])", "010461626364"); @@ -2467,10 +2652,14 @@ BOOST_AUTO_TEST_CASE(extend) // missing i1 BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_starts_with("Missing field 'i1' in input object while processing struct") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5})"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Missing field 'i1' in input object while processing struct") ); // Unexpected 'a' BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5,"i1":6,"a":[8,9,10]})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_starts_with("Unexpected field 'a' found in input object while processing struct") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"({"i0":5,"i1":6,"a":[8,9,10]})"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Unexpected field 'a' found in input object while processing struct") ); verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6})", "0506"); verify_round_trip_conversion(abis, "s", R"({"i0":5,"i1":6,"i2":7})", "050607"); @@ -2486,6 +2675,8 @@ BOOST_AUTO_TEST_CASE(extend) BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"i0":1})"), abi_serializer::create_yield_function( max_serialization_time )), abi_exception, fc_exception_message_starts_with("Encountered field 'i2' without binary extension designation while processing struct") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"i0":1})"), max_serialization_time), + abi_exception, fc_exception_message_starts_with("Encountered field 'i2' without binary extension designation while processing 
struct") ); } FC_LOG_AND_RETHROW() @@ -2522,9 +2713,13 @@ BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_array) BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([])"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_starts_with("Early end to input array specifying the fields of struct") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Early end to input array specifying the fields of struct") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([1,2])"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_starts_with("Early end to input array specifying the fields of struct") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s", fc::json::from_string(R"([1,2])"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Early end to input array specifying the fields of struct") ); verify_round_trip_conversion(abis, "s", R"([1,2,3])", "010203", R"({"i0":1,"i1":2,"i2":3})"); @@ -2554,9 +2749,13 @@ BOOST_AUTO_TEST_CASE(abi_serialize_incomplete_json_object) BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_starts_with("Missing field 'f0' in input object") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({})"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Missing field 'f0' in input object") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":{"i0":1}})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_starts_with("Missing field 'i1' in input object") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":{"i0":1}})"), max_serialization_time), + pack_exception, fc_exception_message_starts_with("Missing field 'i1' in input object") ); verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1,"i1":2},"i2":3})", "010203"); @@ -2585,6 +2784,8 @@ BOOST_AUTO_TEST_CASE(abi_serialize_json_mismatching_type) BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":1,"i1":2})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Unexpected input encountered while processing struct 's2.f0'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2", fc::json::from_string(R"({"f0":1,"i1":2})"), max_serialization_time), + pack_exception, fc_exception_message_is("Unexpected input encountered while processing struct 's2.f0'") ); verify_round_trip_conversion(abis, "s2", R"({"f0":{"i0":1},"i1":2})", "0102"); @@ -2608,7 +2809,8 @@ BOOST_AUTO_TEST_CASE(abi_serialize_json_empty_name) try { abi_serializer abis( fc::json::from_string(abi).as(), abi_serializer::create_yield_function( max_serialization_time ) ); - auto bin = abis.variant_to_binary("s1", fc::json::from_string(R"({"":1})"), abi_serializer::create_yield_function( max_serialization_time )); + auto bin1 = abis.variant_to_binary("s1", fc::json::from_string(R"({"":1})"), abi_serializer::create_yield_function( max_serialization_time )); + auto bin2 = abis.variant_to_binary("s1", fc::json::from_string(R"({"":1})"), max_serialization_time); 
verify_round_trip_conversion(abis, "s1", R"({"":1})", "01"); @@ -2661,34 +2863,52 @@ BOOST_AUTO_TEST_CASE(abi_serialize_detailed_error_messages) BOOST_CHECK_EXCEPTION( abis.variant_to_binary("bar", fc::json::from_string(R"({"f0":{"i0":1},"i2":3})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's2.f0'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("bar", fc::json::from_string(R"({"f0":{"i0":1},"i2":3})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's2.f0'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i2":3})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's3'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i2":3})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's3'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["s2",{}]})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Specified type 's2' in input array is not valid within the variant 's3.f3'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["s2",{}]})"), max_serialization_time), + pack_exception, fc_exception_message_is("Specified type 's2' in input array is not valid within the variant 's3.f3'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11},"i2":13}]})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's3.f3..f0'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11},"i2":13}]})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's3.f3..f0'") ); verify_round_trip_conversion(abis, "s3", R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}]})", "010203010b0c0d"); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["s3",{"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}],"f5":0}])"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Unexpected field 'f5' found in input object while processing struct 'v1.'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["s3",{"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}],"f5":0}])"), max_serialization_time), + pack_exception, fc_exception_message_is("Unexpected field 'f5' found in input object while processing struct 'v1.'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["s4",{"f0":[0,1],"f1":[{"i0":2,"i1":3},{"i1":5}]}])"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 'v1..f1[1]'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", 
fc::json::from_string(R"(["s4",{"f0":[0,1],"f1":[{"i0":2,"i1":3},{"i1":5}]}])"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 'v1..f1[1]'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2[]", fc::json::from_string(R"([{"f0":{"i0":1,"i1":2},"i2":3},{"f0":{"i0":4},"i2":6}])"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 'ARRAY[1].f0'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2[]", fc::json::from_string(R"([{"f0":{"i0":1,"i1":2},"i2":3},{"f0":{"i0":4},"i2":6}])"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 'ARRAY[1].f0'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s5", fc::json::from_string(R"({"f0":[["bar",{"f0":{"i0":1,"i1":2},"i2":3}],["foo",{"f0":{"i0":4},"i2":6}]]})"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's5.f0[1]..f0'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s5", fc::json::from_string(R"({"f0":[["bar",{"f0":{"i0":1,"i1":2},"i2":3}],["foo",{"f0":{"i0":4},"i2":6}]]})"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's5.f0[1]..f0'") ); verify_round_trip_conversion( abis, "s1arrayarray", R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":5,"i1":6},{"i0":7,"i1":8},{"i0":9,"i1":10}]])", "0202010203040305060708090a"); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s1arrayarray", fc::json::from_string(R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":6,"i1":6},{"i0":7,"i1":8},{"i1":10}]])"), abi_serializer::create_yield_function( max_serialization_time )), pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 'ARRAY[1][2]'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s1arrayarray", fc::json::from_string(R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":6,"i1":6},{"i0":7,"i1":8},{"i1":10}]])"), max_serialization_time), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 'ARRAY[1][2]'") ); } FC_LOG_AND_RETHROW() } @@ -2738,37 +2958,62 @@ BOOST_AUTO_TEST_CASE(abi_serialize_short_error_messages) BOOST_CHECK_EXCEPTION( abis.variant_to_binary("bar", fc::json::from_string(R"({"f0":{"i0":1},"i2":3})"), abi_serializer::create_yield_function( max_serialization_time ), true), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("bar", fc::json::from_string(R"({"f0":{"i0":1},"i2":3})"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", fc::json::from_string(R"({"i0":1,"i2":3})"), abi_serializer::create_yield_function( max_serialization_time ), true ), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 'very_very_very_very_very_very_very_very_very_very_long_...ame_s3'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", + 
fc::json::from_string(R"({"i0":1,"i2":3})"), max_serialization_time, true ), + pack_exception, + fc_exception_message_is("Missing field 'i1' in input object while processing struct 'very_very_very_very_very_very_very_very_very_very_long_...ame_s3'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["s2",{}]})"), abi_serializer::create_yield_function( max_serialization_time ), true ), pack_exception, fc_exception_message_is("Specified type 's2' in input array is not valid within the variant 'v2'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", + fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["s2",{}]})"), max_serialization_time, true ), + pack_exception, fc_exception_message_is("Specified type 's2' in input array is not valid within the variant 'v2'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11},"i2":13}]})"), abi_serializer::create_yield_function( max_serialization_time ), true ), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "very_very_very_very_very_very_very_very_very_very_long_struct_name_s3", + fc::json::from_string(R"({"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11},"i2":13}]})"), max_serialization_time, true ), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "v1", fc::json::from_string(R"(["very_very_very_very_very_very_very_very_very_very_long_struct_name_s3",{"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}],"very_very_very_very_very_very_very_very_very_very_long_field_name_f5":0}])"), abi_serializer::create_yield_function( max_serialization_time ), true ), pack_exception, fc_exception_message_is("Unexpected field 'very_very_very_very_very_very_very_very_very_very_long_...ame_f5' found in input object while processing struct 'very_very_very_very_very_very_very_very_very_very_long_...ame_s3'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary( "v1", + fc::json::from_string(R"(["very_very_very_very_very_very_very_very_very_very_long_struct_name_s3",{"i0":1,"i1":2,"i2":3,"f3":["bar",{"f0":{"i0":11,"i1":12},"i2":13}],"very_very_very_very_very_very_very_very_very_very_long_field_name_f5":0}])"), + max_serialization_time, true ), + pack_exception, + fc_exception_message_is("Unexpected field 'very_very_very_very_very_very_very_very_very_very_long_...ame_f5' found in input object while processing struct 'very_very_very_very_very_very_very_very_very_very_long_...ame_s3'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["s4",{"f0":[0,1],"f1":[{"i0":2,"i1":3},{"i1":5}]}])"), abi_serializer::create_yield_function( max_serialization_time ), true), pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 's1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("v1", fc::json::from_string(R"(["s4",{"f0":[0,1],"f1":[{"i0":2,"i1":3},{"i1":5}]}])"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 's1'") ); BOOST_CHECK_EXCEPTION( 
abis.variant_to_binary("s2[]", fc::json::from_string(R"([{"f0":{"i0":1,"i1":2},"i2":3},{"f0":{"i0":4},"i2":6}])"), abi_serializer::create_yield_function( max_serialization_time ), true), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s2[]", fc::json::from_string(R"([{"f0":{"i0":1,"i1":2},"i2":3},{"f0":{"i0":4},"i2":6}])"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s5", fc::json::from_string(R"({"f0":[["bar",{"f0":{"i0":1,"i1":2},"i2":3}],["foo",{"f0":{"i0":4},"i2":6}]]})"), abi_serializer::create_yield_function( max_serialization_time ), true), pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s5", fc::json::from_string(R"({"f0":[["bar",{"f0":{"i0":1,"i1":2},"i2":3}],["foo",{"f0":{"i0":4},"i2":6}]]})"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i1' in input object while processing struct 's1'") ); BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s1arrayarray", fc::json::from_string(R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":6,"i1":6},{"i0":7,"i1":8},{"i1":10}]])"), abi_serializer::create_yield_function( max_serialization_time ), true), pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 's1'") ); + BOOST_CHECK_EXCEPTION( abis.variant_to_binary("s1arrayarray", fc::json::from_string(R"([[{"i0":1,"i1":2},{"i0":3,"i1":4}],[{"i0":6,"i1":6},{"i0":7,"i1":8},{"i1":10}]])"), max_serialization_time, true), + pack_exception, fc_exception_message_is("Missing field 'i0' in input object while processing struct 's1'") ); } FC_LOG_AND_RETHROW() } @@ -2816,38 +3061,59 @@ BOOST_AUTO_TEST_CASE(abi_deserialize_detailed_error_messages) // Test to verify that array of optinal doesn't throw exception abi_serializer abis( fc::json::from_string(abi).as(), abi_serializer::create_yield_function( max_serialization_time ) ); BOOST_CHECK_NO_THROW( abis.binary_to_variant("s4", fc::variant("030101000103").as(), abi_serializer::create_yield_function( max_serialization_time )) ); + BOOST_CHECK_NO_THROW( abis.binary_to_variant("s4", fc::variant("030101000103").as(), max_serialization_time) ); try { BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("020102").as(), abi_serializer::create_yield_function( max_serialization_time )), unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'f1' of struct 's2'") ); + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("020102").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'f1' of struct 's2'") ); BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("0201020103").as(), abi_serializer::create_yield_function( max_serialization_time )), unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'i1' of struct 's2.f1[0]'") ); + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("0201020103").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'i1' of struct 's2.f1[0]'") ); BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("020102ff").as(), 
abi_serializer::create_yield_function( max_serialization_time )), unpack_exception, fc_exception_message_is("Unable to unpack size of array 's2.f1'") ); + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s2", fc::variant("020102ff").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack size of array 's2.f1'") ); BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s3", fc::variant("010203").as(), abi_serializer::create_yield_function( max_serialization_time )), abi_exception, fc_exception_message_is("Encountered field 'i5' without binary extension designation while processing struct 's3'") ); + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s3", fc::variant("010203").as(), max_serialization_time), + abi_exception, fc_exception_message_is("Encountered field 'i5' without binary extension designation while processing struct 's3'") ); BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s3", fc::variant("02010304").as(), abi_serializer::create_yield_function( max_serialization_time )), abi_exception, fc_exception_message_is("Encountered field 'i5' without binary extension designation while processing struct 's3'") ); + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s3", fc::variant("02010304").as(), max_serialization_time), + abi_exception, fc_exception_message_is("Encountered field 'i5' without binary extension designation while processing struct 's3'") ); BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s4", fc::variant("020101").as(), abi_serializer::create_yield_function( max_serialization_time )), unpack_exception, fc_exception_message_is("Unable to unpack optional of built-in type 'int8' while processing 's4.f0[1]'") ); + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s4", fc::variant("020101").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack optional of built-in type 'int8' while processing 's4.f0[1]'") ); BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("02010102").as(), abi_serializer::create_yield_function( max_serialization_time )), unpack_exception, fc_exception_message_is("Unable to unpack presence flag of optional 's5.f0[1]'") ); + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("02010102").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack presence flag of optional 's5.f0[1]'") ); BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("0001").as(), abi_serializer::create_yield_function( max_serialization_time )), unpack_exception, fc_exception_message_is("Unable to unpack tag of variant 's5.f1[0]'") ); + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("0001").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unable to unpack tag of variant 's5.f1[0]'") ); BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("00010501").as(), abi_serializer::create_yield_function( max_serialization_time )), unpack_exception, fc_exception_message_is("Unpacked invalid tag (5) for variant 's5.f1[0]'") ); + BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("00010501").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Unpacked invalid tag (5) for variant 's5.f1[0]'") ); BOOST_CHECK_EXCEPTION( abis.binary_to_variant("s5", fc::variant("00010101").as(), abi_serializer::create_yield_function( max_serialization_time )), unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'i1' of struct 's5.f1[0].'") ); + BOOST_CHECK_EXCEPTION( 
abis.binary_to_variant("s5", fc::variant("00010101").as(), max_serialization_time), + unpack_exception, fc_exception_message_is("Stream unexpectedly ended; unable to unpack field 'i1' of struct 's5.f1[0].'") ); } FC_LOG_AND_RETHROW() } @@ -2972,12 +3238,22 @@ BOOST_AUTO_TEST_CASE(abi_to_variant__add_action__good_return_value) auto abidef = fc::json::from_string(abi).as(); abi_serializer abis(abi_def(abidef), abi_serializer::create_yield_function(max_serialization_time)); - mutable_variant_object mvo; - eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_yield_function(max_serialization_time)); - eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); - std::string res = fc::json::to_string(mvo, fc::time_point::now() + max_serialization_time); + { + mutable_variant_object mvo; + eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_yield_function(max_serialization_time), fc::microseconds{}); + eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); + std::string res = fc::json::to_string(mvo, get_deadline()); - BOOST_CHECK_EQUAL(res, expected_json); + BOOST_CHECK_EQUAL(res, expected_json); + } + { + mutable_variant_object mvo; + eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_depth_yield_function(), max_serialization_time); + eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); + std::string res = fc::json::to_string(mvo, get_deadline()); + + BOOST_CHECK_EQUAL(res, expected_json); + } } BOOST_AUTO_TEST_CASE(abi_to_variant__add_action__bad_return_value) @@ -2997,12 +3273,22 @@ BOOST_AUTO_TEST_CASE(abi_to_variant__add_action__bad_return_value) auto abidef = fc::json::from_string(abi).as(); abi_serializer abis(abi_def(abidef), abi_serializer::create_yield_function(max_serialization_time)); - mutable_variant_object mvo; - eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_yield_function(max_serialization_time)); - eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); - std::string res = fc::json::to_string(mvo, fc::time_point::now() + max_serialization_time); + { + mutable_variant_object mvo; + eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_yield_function(max_serialization_time), fc::microseconds{}); + eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); + std::string res = fc::json::to_string(mvo, get_deadline()); - BOOST_CHECK_EQUAL(res, expected_json); + BOOST_CHECK_EQUAL(res, expected_json); + } + { + mutable_variant_object mvo; + eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_depth_yield_function(), max_serialization_time); + eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); + std::string res = fc::json::to_string(mvo, get_deadline()); + + BOOST_CHECK_EQUAL(res, expected_json); + } } BOOST_AUTO_TEST_CASE(abi_to_variant__add_action__no_return_value) @@ -3032,12 +3318,22 @@ BOOST_AUTO_TEST_CASE(abi_to_variant__add_action__no_return_value) auto abidef = fc::json::from_string(abi).as(); abi_serializer abis(abi_def(abidef), abi_serializer::create_yield_function(max_serialization_time)); - mutable_variant_object mvo; - eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_yield_function(max_serialization_time)); - eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); - std::string res = 
fc::json::to_string(mvo, fc::time_point::now() + max_serialization_time); + { + mutable_variant_object mvo; + eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_yield_function(max_serialization_time), fc::microseconds{}); + eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); + std::string res = fc::json::to_string(mvo, get_deadline()); + + BOOST_CHECK_EQUAL(res, expected_json); + } + { + mutable_variant_object mvo; + eosio::chain::impl::abi_traverse_context ctx(abi_serializer::create_depth_yield_function(), max_serialization_time); + eosio::chain::impl::abi_to_variant::add(mvo, "action_traces", at, get_resolver(abidef), ctx); + std::string res = fc::json::to_string(mvo, get_deadline()); - BOOST_CHECK_EQUAL(res, expected_json); + BOOST_CHECK_EQUAL(res, expected_json); + } } BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/block_log_extract.cpp b/unittests/block_log_extract.cpp index a4c5c6ce59..49fa0a87eb 100644 --- a/unittests/block_log_extract.cpp +++ b/unittests/block_log_extract.cpp @@ -57,7 +57,9 @@ BOOST_FIXTURE_TEST_CASE(extract_from_middle, block_log_extract_fixture) try { block_log new_log(output_dir.path()); auto id = gs.compute_chain_id(); - BOOST_REQUIRE_EQUAL(new_log.extract_chain_id(output_dir.path()), id); + auto extracted_id = new_log.extract_chain_id(output_dir.path()); + BOOST_REQUIRE(extracted_id.has_value()); + BOOST_REQUIRE_EQUAL(*extracted_id, id); BOOST_REQUIRE_EQUAL(new_log.first_block_num(), 3); BOOST_REQUIRE_EQUAL(new_log.head()->block_num(), 7); @@ -73,7 +75,9 @@ BOOST_FIXTURE_TEST_CASE(extract_from_start, block_log_extract_fixture) try { block_log new_log(output_dir.path()); auto id = gs.compute_chain_id(); - BOOST_REQUIRE_EQUAL(new_log.extract_chain_id(output_dir.path()), id); + auto extracted_id = new_log.extract_chain_id(output_dir.path()); + BOOST_REQUIRE(extracted_id.has_value()); + BOOST_REQUIRE_EQUAL(*extracted_id, id); BOOST_REQUIRE_EQUAL(new_log.first_block_num(), 1); BOOST_REQUIRE_EQUAL(new_log.head()->block_num(), 7); @@ -92,7 +96,9 @@ BOOST_FIXTURE_TEST_CASE(reextract_from_start, block_log_extract_fixture) try { block_log new_log(output_dir2.path()); auto id = gs.compute_chain_id(); - BOOST_REQUIRE_EQUAL(new_log.extract_chain_id(output_dir2.path()), id); + auto extracted_id = new_log.extract_chain_id(output_dir2.path()); + BOOST_REQUIRE(extracted_id.has_value()); + BOOST_REQUIRE_EQUAL(*extracted_id, id); BOOST_REQUIRE_EQUAL(new_log.first_block_num(), 1); BOOST_REQUIRE_EQUAL(new_log.head()->block_num(), 6); @@ -107,7 +113,9 @@ BOOST_FIXTURE_TEST_CASE(extract_to_end, block_log_extract_fixture) try { block_log new_log(output_dir.path()); auto id = gs.compute_chain_id(); - BOOST_REQUIRE_EQUAL(new_log.extract_chain_id(output_dir.path()), id); + auto extracted_id = new_log.extract_chain_id(output_dir.path()); + BOOST_REQUIRE(extracted_id.has_value()); + BOOST_REQUIRE_EQUAL(*extracted_id, id); BOOST_REQUIRE_EQUAL(new_log.first_block_num(), 5); BOOST_REQUIRE_EQUAL(new_log.head()->block_num(), 12); diff --git a/unittests/chain_tests.cpp b/unittests/chain_tests.cpp index 1be6138dee..c94c10c2bd 100644 --- a/unittests/chain_tests.cpp +++ b/unittests/chain_tests.cpp @@ -144,4 +144,45 @@ BOOST_AUTO_TEST_CASE( decompressed_size_under_limit ) try { } FC_LOG_AND_RETHROW() +// verify accepted_block signals validated blocks +BOOST_AUTO_TEST_CASE( signal_validated_blocks ) try { + tester chain; + tester validator; + + block_state_ptr accepted_bsp; + auto c = chain.control->accepted_block.connect([&](const 
block_state_ptr& b) { + BOOST_CHECK(b); + BOOST_CHECK(chain.control->fetch_block_state_by_id(b->id) == b); + BOOST_CHECK(chain.control->fetch_block_state_by_number(b->block_num) == b); // verify it can be found (has to be validated) + BOOST_CHECK(chain.control->fetch_block_by_id(b->id) == b->block); + BOOST_CHECK(chain.control->fetch_block_by_number(b->block_num) == b->block); + BOOST_REQUIRE(chain.control->fetch_block_header_by_number(b->block_num)); + BOOST_CHECK(chain.control->fetch_block_header_by_number(b->block_num)->calculate_id() == b->id); + BOOST_REQUIRE(chain.control->fetch_block_header_by_id(b->id)); + BOOST_CHECK(chain.control->fetch_block_header_by_id(b->id)->calculate_id() == b->id); + accepted_bsp = b; + }); + block_state_ptr validated_bsp; + auto c2 = validator.control->accepted_block.connect([&](const block_state_ptr& b) { + BOOST_CHECK(b); + BOOST_CHECK(validator.control->fetch_block_state_by_id(b->id) == b); + BOOST_CHECK(validator.control->fetch_block_state_by_number(b->block_num) == b); // verify it can be found (has to be validated) + BOOST_CHECK(validator.control->fetch_block_by_id(b->id) == b->block); + BOOST_CHECK(validator.control->fetch_block_by_number(b->block_num) == b->block); + BOOST_REQUIRE(validator.control->fetch_block_header_by_number(b->block_num)); + BOOST_CHECK(validator.control->fetch_block_header_by_number(b->block_num)->calculate_id() == b->id); + BOOST_REQUIRE(validator.control->fetch_block_header_by_id(b->id)); + BOOST_CHECK(validator.control->fetch_block_header_by_id(b->id)->calculate_id() == b->id); + validated_bsp = b; + }); + + chain.produce_blocks(1); + validator.push_block(accepted_bsp->block); + + auto trace_ptr = chain.create_account("hello"_n); + chain.produce_block(); + validator.push_block(accepted_bsp->block); + +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/deep-mind/deep-mind.log b/unittests/deep-mind/deep-mind.log index 1ae670da49..d2be7b1237 100644 --- a/unittests/deep-mind/deep-mind.log +++ b/unittests/deep-mind/deep-mind.log @@ -23,17 +23,17 @@ DMLOG ABIDUMP ABI eosio DmVvc2lvOjphYmkvMS4wBwxhY2NvdW50X25hbWUEbmFtZQ9wZXJtaXNz DMLOG ABIDUMP END DMLOG START_BLOCK 2 DMLOG FEATURE_OP ACTIVATE 0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd {"feature_digest":"0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd","subjective_restrictions":{"enabled":true,"preactivation_required":false,"earliest_allowed_activation_time":"1970-01-01T00:00:00.000"},"description_digest":"64fe7df32e9b86be2b296b3f81dfd527f84e82b98e363bc97e40bc7a83733310","dependencies":[],"protocol_feature_type":"builtin","specification":[{"name":"builtin_feature_codename","value":"PREACTIVATE_FEATURE"}]} -DMLOG TRX_OP CREATE onblock ef240e45433c433de4061120632aa06e32ec3e77048abf55c62e0612c22548ed 01e10b5e01005a31e44100000000010000000000ea305500000000221acfa4010000000000ea305500000000a8ed323274003b3d4b000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044423079ed372a4dda0bf89c3a594df409eaa8c1535451b7d5ca6a3d7a376912000000000000000000 DMLOG CREATION_OP ROOT 0 DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1262304001,"value_ex":0,"consumed":0},"cpu_usage":{"last_ordinal":1262304001,"value_ex":579,"consumed":100},"ram_usage":2724} +DMLOG TRX_OP CREATE onblock ef240e45433c433de4061120632aa06e32ec3e77048abf55c62e0612c22548ed 
01e10b5e01005a31e44100000000010000000000ea305500000000221acfa4010000000000ea305500000000a8ed323274003b3d4b000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044423079ed372a4dda0bf89c3a594df409eaa8c1535451b7d5ca6a3d7a376912000000000000000000 DMLOG APPLIED_TRANSACTION 2 ef240e45433c433de4061120632aa06e32ec3e77048abf55c62e0612c22548ed02000000013b3d4b010000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e801006400000000000000000000000000000000000000000001010000010000000000ea305506d4766d9dbedb630ad9546f583a9809539cf09d38fd1554b4216503113ff4e501000000000000000100000000000000010000000000ea3055010000000000000000000000000000ea30550000000000ea305500000000221acfa4010000000000ea305500000000a8ed323274003b3d4b000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044423079ed372a4dda0bf89c3a594df409eaa8c1535451b7d5ca6a3d7a37691200000000000000000000000000000000ef240e45433c433de4061120632aa06e32ec3e77048abf55c62e0612c22548ed02000000013b3d4b010000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e80000000000000000 DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":0,"value_ex":0,"consumed":0},"average_block_cpu_usage":{"last_ordinal":0,"value_ex":0,"consumed":0},"pending_net_usage":0,"pending_cpu_usage":100,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1048576,"virtual_cpu_limit":200000} DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":2,"value_ex":0,"consumed":0},"average_block_cpu_usage":{"last_ordinal":2,"value_ex":833334,"consumed":100},"pending_net_usage":0,"pending_cpu_usage":0,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1049625,"virtual_cpu_limit":200200} -DMLOG ACCEPTED_BLOCK 2 02000000020000000000000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010001000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba10100000000000000010000000000ea305502000000010000000000ea305500000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e8013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0020701fd1d2d6fbca71ad1df5bd09a987d6863f301b93acfc1c34857e4b2f53821a0b4ca8483cf594f845f3f4fc155dbbc98009cb9c7b7b60d449f922dc00abcb0f0000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0001013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0020701fd1d2d6fbca71ad1df5bd09a987d6863f301b93acfc1c34857e4b2f53821a0b4ca8483cf594f845f3f4fc155dbbc98009cb9c7b7b60d449f922dc00abcb0f000000 +DMLOG ACCEPTED_BLOCK 2 
02000000020000000000000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010001000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba10100000000000000010000000000ea305502000000010000000000ea305500000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e8013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0020701fd1d2d6fbca71ad1df5bd09a987d6863f301b93acfc1c34857e4b2f53821a0b4ca8483cf594f845f3f4fc155dbbc98009cb9c7b7b60d449f922dc00abcb0f0000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0001013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0020701fd1d2d6fbca71ad1df5bd09a987d6863f301b93acfc1c34857e4b2f53821a0b4ca8483cf594f845f3f4fc155dbbc98009cb9c7b7b60d449f922dc00abcb0f000001 DMLOG START_BLOCK 3 -DMLOG TRX_OP CREATE onblock da9fbe9042e1bc9bd64d7a4506534d492107a29f79ad671c1fea19ae3fb70eb4 01e10b5e02005132b41600000000010000000000ea305500000000221acfa4010000000000ea305500000000a8ed32329801013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd000000 DMLOG CREATION_OP ROOT 0 DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1262304002,"value_ex":0,"consumed":0},"cpu_usage":{"last_ordinal":1262304002,"value_ex":1157,"consumed":101},"ram_usage":2724} +DMLOG TRX_OP CREATE onblock da9fbe9042e1bc9bd64d7a4506534d492107a29f79ad671c1fea19ae3fb70eb4 01e10b5e02005132b41600000000010000000000ea305500000000221acfa4010000000000ea305500000000a8ed32329801013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd000000 DMLOG APPLIED_TRANSACTION 3 
da9fbe9042e1bc9bd64d7a4506534d492107a29f79ad671c1fea19ae3fb70eb403000000023b3d4b01000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd01006400000000000000000000000000000000000000000001010000010000000000ea3055ccfe3b56076237b0b6da2f580652ee1420231b96d3d96b28183769ac932c9e5902000000000000000200000000000000010000000000ea3055020000000000000000000000000000ea30550000000000ea305500000000221acfa4010000000000ea305500000000a8ed32329801013b3d4b0000000000ea30550000000000015ab65a885a31e441ac485ebd2aeba87bf7ee6e7bcc40bf3a24506ba1000000000000000000000000000000000000000000000000000000000000000062267e8b11d7d8f28e1f991a4de2b08cf92500861af2795765bdc9263cd6f4cd000000000001000021010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd00000000000000000000da9fbe9042e1bc9bd64d7a4506534d492107a29f79ad671c1fea19ae3fb70eb403000000023b3d4b01000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd0000000000000000 DMLOG CREATION_OP ROOT 0 DMLOG RAM_OP 0 eosio code add setcode eosio 180494 177770 @@ -121,7 +121,7 @@ DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1 DMLOG APPLIED_TRANSACTION 3 04ba316cf9ddd86690833edc0f4548f8c07f0d66c09dca029b0a1fb96f16c62803000000023b3d4b01000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd0100d007000010000000000000000080000000000000000001010000010000000000ea3055302a2f1713925c939a997367c967b457bfc2c580304f9686b1de22fc5946e40616000000000000001600000000000000010000000000ea3055160000000000000001010000000000ea30550000000000ea30550000002a9bed3232010000000000ea305500000000a8ed32322035c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b0000000000000000000004ba316cf9ddd86690833edc0f4548f8c07f0d66c09dca029b0a1fb96f16c62803000000023b3d4b01000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd0000000000000000 DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":2,"value_ex":0,"consumed":0},"average_block_cpu_usage":{"last_ordinal":2,"value_ex":833334,"consumed":100},"pending_net_usage":9440,"pending_cpu_usage":40100,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1049625,"virtual_cpu_limit":200200} DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":3,"value_ex":78666667,"consumed":9440},"average_block_cpu_usage":{"last_ordinal":3,"value_ex":334993056,"consumed":40101},"pending_net_usage":0,"pending_cpu_usage":0,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1050675,"virtual_cpu_limit":200400} -DMLOG ACCEPTED_BLOCK 3 
03000000030000000200000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100012d5b1b639d6ae94fcdd0536b224644931573d1ccb2a0c548613cd1feea18888b0200000000000000010000000000ea305503000000010000000000ea305502000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a09139800000000000000205d7ce507e9dbea47687e80fceaf2794b22bd883902adeb8c97de9f7283b614b0590bc4251ba5410cb035f88e60ffdf6fccecd10d83edfe36021227d1ee9e18830000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0001023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a09139800000000000000205d7ce507e9dbea47687e80fceaf2794b22bd883902adeb8c97de9f7283b614b0590bc4251ba5410cb035f88e60ffdf6fccecd10d83edfe36021227d1ee9e18831400d0070000fb05010100203b7de491b51d3d74624078bc2c5dc4420985f0350afb6923a5585b5621750c9f126d7cff0efeade2068c7b618fc754b2abb5bff8cdb9bd0ecb4432b72ae1ed380100a82f78daed5c7b8c5ce755ff1ef7357b67e3ebc6d94c3609f9e662d0b8a4659bb8eb2575dbbddbc476694b9cca2dfea3b0bbd99d647776bdbb9e1da70e0adead081045158a7894b6405524a4d21424545aa8cacb0d0815a94891fa20414284ff2a025511a245ad54737ee77cf7ceeccb71f09a87545b9e7be77b9cef7ce79cef3cbf71f44fe94f1bf5d03d9f1951f447e343fdf3d87be873f2879efef473830dea77fff59e7bbef7f440d3bfd197d9f57368d1bfa54767949ab11b9736d48cd9b8840f7a0b372ed11f35136cf0436fe80dfac0b80dbc2afa67f84d6306e6063201ad97a8ff9234d00880f033d54c84469e48cd68b03c8b3ea54dd0909531c1fc52d0b0ed95c70e2dae4f3fd29eed5de8b6a767e77a8b8fcdf6daf32a42d7cd6bdd76d9548e51317aeaedd5f5c5d5e9d9f5f576b7a72c9aa273ed73ebed9e4af025c3b4d595e9f9d9deecf4fae2cfb4558d9b09defcf4409f1a2aa7cead3d2e53ebddf6f90b8b40e6426f41a568ba89e04eaf75171f5b5c6e3f4ac8d519393476dbebab17ba73ede9e5c5738bbd75358c9e70f6e155c24ae17d44a6aeaeadaeb7e7f1327f61aedd5d5737a1d3a1f3e1e5d5b9a5b985d9c595e9b5d9eeecb9768ffae9756e8956e29db9475f6918efa23e77a1db6daff4a67b8be7daea00d316339982ed81b579743afff0f4238b2bf3d38be347558696da34d17361b9b778af3a88ef0707693c3db73adf56868958aed36dcfb5097257d61a2280580ef09890d1fac2ec3d6f1c57af61e4a877bdb74a6445ffcd681aa6a60b6bf3e02dda0ed993275414abb8369444511c0f0d594b9f517c8b1e31237624a07ff4371cd123d60e51efd0adb7da86ff63ab8f46725b10ea353d34145aad7434623774b17959a51baaf8d45f568fb8a6c3d9b5b5e5c7d5eb6a07b42a745a7bfdd83d47c727ee7bd39b87fe66539f0854767bbaa9b5dd3093f2d7a9078655417f5be683f4a5c81ecb752737e3f44d5a9f9cccad539d22ee1417cfe76a9c1a9c29b29e53ef1ad64e4faa62e3c4b0a9dbb45007e81ff5e90e663b4d2fe83d39aca9bdf8cdcb2a33ce1e489d4d8d4ac7b5def8415a6e29a755c64d9d66d262f59651832ba175dc6cd2f3ad0a40313352c533b4f3ffd03ada2854d3601718b7043ccf3b757258611fef0076d96d07d2ecce62649cc0127ae5968b8d4e1e38ddc96ecbb17da75c405b74f67c6e4ed034553cd1c92da19207457c3ed70f0c1b0c21ac685a71b19387d4d78c9c75da192c1c776901daf9131d02648088f62d173b2e62184ec68434c5f29bca465367881c84970c54f4d1c22c80549d0a2430a126fe9ede4b742b469a9637a28be0ed843e6191fd00d024d49de6bd366d0a5a6777d2dc74429b0dde36f5df9e6bec7a5859225a9339fce1c9dc60ae39a894d39e26292146a426345d7a93f272c2484b6b9e2e1154e1a0398c01a6a8778011febd839629d7b3d
95d34d54c62415e4c31a2584ca6381a31acea26051d200bf4245168a23feb1ca6d5d2043cd2d9e1eda8f8f61f4e43950da9f42744a85e22fae9c3a08b2e5e0021137ecde82da8ded0adb2d78ef257a75be822622d65756a7949d1bae92fd774c0846b1104fa0872b354c43fcee7e5eb2cceaa08c0b2a62194695a9245a3dc961b6c411509c9112f456fcd80799088f838bb54d8415018cf5c23410b00c783082a10f50e84dded3abb44840118013088481f4a76fd881cda17441ad78fc81dfb8288bb7e440eef0b22adeb47e4ee7d4164ecfa1139ba2f884c5c3f22c7f70591cb6a174cf45e9898014c4c05e33982a10750d17ba2a2050223a0592d1118361ae9778cd51be612eb3957aa3975c4aadc4cb9a78eab14d660aa456f43fc36466f357e9ba03728426c01e32d8f870db33cdef01bc66b7ec378b62d9fc883fbd4017a0b8ae4b1fbd44dfc96d1db30bf35e8ad8e193c2eaec645d5b8b01a17f0fa0d5edf1c57b70aee99c7e5f60a97d10a97db2a5c1abc0b8cbbb9dae36baa3d1eacf69809ce8a9118e10581c42db234bd1d1264d57dea2e2107b5fd4035eece6adc1d6459c844b286602bf4adefd3fe7f92f6da533efd522076fd194daed5619535e0fa38f56e78155bff121a57aefcf1b77ee7d73ffde2d44f929380af57ae7cf6db5fc35720b9b9b9f9fca7fff04f3e72cf43c356be5efe95ef50ef43c3817cddfc230c7ef770e22c7c910f12ba05b9544fd1d3d923f6297dccb263414ecb8f8ed693d42f71e55b1f7e71ea3dbcc4339f7cf1c57ff8e047bef6f98d3ed0bfffbddfa0efef1e8e05ea3c3dc8c59e119833c76c4b409205c8de305a8f539ef639d94705e5437ffbf257805a244096e9419a6541802c1cb3ce03719decded17a94fab537bffde13e10c0fc28808402e4494c08c8c5f6fbdba4fd251e4ed2c9de385a0f531979861ee1b8392de34e1fb3137ed844273b365a0ffcb01e3da271b326c3d68ed9861fd6e8643f365ab77ed83be9118f9b5332ecd4313be98791a20538e3c73d013cc6cd451977f198cdfcb8ac931d1fad6b3fec7df4a88d9bb332ecec313be6878d75b2b78c52f891dd415f9ed190a6d7283eb3194e0bf99b27b324fdb2d131046c8ce4ab19389231e8eea0198a568f24ccc8823c7e4064cec5c507d8f58eb3db9a86d1a0a6039d62ed3cbbc37007e32c240f3f2848d65b2e98526010b5769ab010ae038f30f1b0e277b025f8f92fc012a09310635fd260540df077b6d2bce4647f5eea12572b34fae9bc53d4007b414c1f3719351cc2e45a47da98c714f14094031716fa8220d5eabc4ea926751db1ae09479bbacec3d7e6082462fb1461abca25c5157dde4507b51a2086c978c36344650a3d2378e671fa73468757a36d79743d753d30ed296b52d09ec5612f0283b22d4fd91dd44c795b25e102f218997a4c0750d45614c9842289d0ac0145dae9d3e6886dbd0245a283666f5a0cf7652e3b927edb50e84a24f9b8b911f2f6450ad6157d667654f6725c1e13781095c6095c40a756866653a3bc550e555cd032934211daf1045303a7069d09efb9ea4c8ed96760595ee05e97205a1662d29e4bb22a1c7fa6ae9359cfe89cb9c55d2f6881ee71268c99452f700b562d5b1a1523aec20199181db4bb70e1e346d870f3e0d1c79cac96feaa3511197562c7a6be91227a4a1e93f2382d8fb3c29aa3f218ab38045e819050a478bb8c2816e738036dbe496c7b2b734d58365171658c8f34c2d75d5846ebcdc8eced1c6b0d722c138e3564d24cae847bf4581304060ec559728fe871baa9f138454a891e93cda1abf069c8c125c2790976e1d4a6de7960ee4ebf6775c207e6867108142639236748b4227fcf8884fefb560ebe02cf66fa3cdbd4b229614a764ab856bb1ad78840bb706d53ced910b85613ae65c0d8d5ae81718cc54bb2c31a2ca4eaaf98418892b289d978cc2ec8db647f6dac54cd430309821d9c450e083949b2b45f31bbb673bbb9f7b9f5d2f05e4e35e586844ea48239adfc6095dd46019b2246227596a5a3900f24d5c897ec33dbed18927e2e14b3ff4db5b71e8e2b5d9c94ba38f1eb267d5d9c6c93aaa4b4fd7071f6949a44a4060a93c5252b46af76aa9f17f9a8ed38d5a72be161d1b986537d7a40386604cfb395626a99fbd91010518ab173cd9a77ad2db8572bbef6ec575ffbe030ab7ea44c3397c7d43ab6ec7d8b182e223fcef421e535c0d2a77032e9f85b56ebe8815339b682d93966a4d726348cef82e03b431009d0e9a53c06b221840833428f28fca9af13a231231a6e4174461ef38209a000d1b08f682888f2bc15993a2f324be42e6596e6cd88d6f1d0e22c4fa5fdf440fb99b23d19907119c6f957efacdd4fed792a6a1ab27f2015ce672d957a25426f3763619dfd083b3a2f3e074727ad952a33fd4598347de34ddae92d7af1ecdede06fb1ba52dfb22f46243ccbad8b2c957f040763767c99ee6ec2a0ec8cc80ffb1b6c5b5d8
d59c5d456f95562cbc8a15bb8c8481bec479f2cb8a83576477103b2134297833766a03e859f16345c3e5014e2ce144f8fbe347e87338f7d17ff9cc37de40bccf5038390595c4d11069b50772d522cd826f2758303e7b993d600b7e247ed49492c8ee0436d4cac3615d2f87d4113d31a3127ecb3a651878d20f7e6058a7a20b8abb3b790492d3493b816202e9da850e1020c1715cd2e19ac0034c1412e8900b3329c7b818a4a038c326b5442e947a482ee11feb6eff967ecc4af4b0a93df57212ab2306e25629e6b054cca1e742d857cce136e90dbd62862e15511a70ca4eeda2a343d6d1c66ba3ad815acb1c45be8e75370825dac2727c717440afb364676ff3ca3de21e7a1b14e6ad2e40eca2bd1db718648f2a151f5d9be326fa1af179c04a964f23407ad373ff00fdbc66e20a9868a6e24b34d070054ab45329e15f30da6e38613b54129f42944b2cca25c1d2568a599fe40cc08a40086639cbca8bf9c04cb15c21c6dd3f90287bec23b44687a34186a6010df5a3dc6e83a6fb395d55ca871ec8e932b4f4dff50d2261b00709d51e2095b84c7b8084d0ecdfa6bf6e593346bcf1a069a6147c3bae9271dabb19d2f18e2ca7f470d0d4db7989efc2d471029d4b6e48579071e69a73cee2097b75459d7711f21379d4fbfd27096e54c49d664487980c1249ee79d2435ea9f20e12d9526d891c083a7af613b97950aaaa2e5ecadeeb7bcb8de5c949d699d0facebc0b03a983cc81613726c1eee85b728274a564f0835229d2eeb4f5cbd2495adaa14e7857b52a5bc14dd007466aba21a8e469a2b7d124d84a934068120dd224649a18a189014d42170dd0049ed95b0cb248f5bedcb868a9703bd0447291c8da1c40b3e93940be207c54a4a6b886bc7b117510e2401155977b7f1545d441506511065af8da8aa8bb2162b13bfbaa8ba8af0e9143fb8248e3fa11b9635f1071d78fc8e17d41a475fd88dcbd2f888c5d3f2247f7059189eb47e4f8be20b27b11752f4caeb188ba072aba84b05b11f5b7c52f0ff7d1fa243badcfa0a68d5cb2cdfa88ed89c5ba180a3b617822313ce4122f650f55db492aa32ac3c5b925e55d591f52c61c4103346f04d4499660a128307e701712259ca6a0686e2bb738620389fe53f74397cc27502417c677740825f24bab6b48755e104ec1521e88c7b8f1ce61d6e6e46052e81dba402e3489b3cf8fa03f5130266727d7127d87f065450042870b65e4efa896783641cea40b386e534211cd496d89d4789ce65d6a7642602ea55261d877e1a00417a5b0469efa6b46c81821b6fe0b6b62899edd12a79ce47a13416de4108f3b1855443db8d34456556e6d69dc1c433585c2a0f0a4bfcf147074c48d4027e4ea1c9132aceea269dcb2cb0ee54c30d0ed0301b22bf0edfa910ba49183f2e21b12d20588700a0d3bcc63b343a374ba98ce0a914bc8ac629a6cad8684a5810d61c3622925253cf062a7b86bcbd8d82585e3b1a0d551445308dce98108b526112af5d4ab6b75779010321fe9dd61c70f725aa32665158d143697eb10a2b01cc41c82e32d92405471e94a3e90612401c97eca45083c25b8268fb4d1d41e0ce8076632174bd2a67fa5ad2106a2649c079c11d2888b9504c57fc69b03ba4896dcfc1037be2c3b66998e24f0e18f983d667203d9e6e771760b4d8c789c4cfcd873c20fe2dfe94e19df97c5a6b314ac09050981a3ac1d5bd9ad0c0195f7337251b13375c94553fa09faf8d9f7de4e6c232e51b0fa5d4d7e93d4cd82c39c1c3a46b84cf2da25da4ffb1217d21d874a0a071c1712754422ac5c05e864ef1b958188092d5f02909091a01ecd43cf46f60724b28fd9aa7b26c6583e41264cea100a706249b344b44b6622b49296b48eeb94c50a30904f218e9b5c4f844a75c8b130982d4c948a59fa211b0a0b858d14ae8b0ae228c9ee0c4228a4b96bb72004210dc270e5d930600b1c3026c54f683635ab00d6fa688af860cb443a244c1583c0389a4a7e01d9bc3728f5641e4c4d3cf524498b2e363ad80cf5b1f9206340d0ab2081149a08de95e7fc098c40c9b084430c670cf840c2c30f80c1001c72a3194cc61aa744850e3d04b1b03d3ab8d9413ec822bd068f000b0550d7b21ea77848e6d0820405be34e44ba3c3bb979b21d294f9a6ac6c324898105f3eef85321bd08c03a944affa37399518f854a264b612a46b78e9665837e93605c7df919d97b17e9c682fbe3dbc5d7dd9d216f910179773b795c36d3596d57b7a3f85d95244a87095c41ae3ab3cbe7a2fd4522e197c1fc80d02f26553a9bb6d92b5975c9529ea3da1226175581e8e9d003afca4be5a223c8d1dd6b1ca4d86d089879b7c07a5515d1e6079e220f730fc4f674e6e99ea7c4a6fcbec5b315b97b3f59eb3ab0923db26f00ea026b3fed1701dc9cabe6d5492748924e97c0ed7882d6435fae7b86830703b4af160f1a12cd9b407799af2ae171cad3c821f620a5c698a59f511d988b0c5f7a8016e3f2
91dc2ab0777d1456fbf1dd503b80a996be23700e23d231d6c71ef05b7b3011d3bf7fefb062960728e82342d8b6b900cc5e50dbec311c38292e1586a4afa350f91f328e15902d5b4151ce636bcf6509cd8a85526bf902f5e62d5e00b4f7cc58ebdddca313462bd02c9e921b5ca387a6374204d9fd7261057f07f5de10d68ba6d6a8ec28b4a668ed804fecbeb540c5394c5d81d5f712a95e0a70ced28d8eedc5edb8e1a7e478d6bd851c38f7ba51d855e77e73bb7c585403f322b4766db062503831a25811a7bd801efdd8148311e194556f468346b4cab1ae221176535ef4aa65ff6d6eed590ea1a69b4cfc4317b11a74ca76571b9a9bfb6b2295454fcae08e7607b2565b3aaa404a2baab4a4a807d04be9262717acec8035703032e989c159d754a640147f079ae90f81a37d0872a65dff3ac04ce72a710f181af81841c78579d196a20b6ac8184acb2b8936f32c9302e78707dade56f56a20632263d6b825352ba0e16c569cb65eec0578e41c4c1dab154bf387e0dfaa5635b2e17c0a3adc0700c2faa861597e8700e1ffad5e320f5fa3b9b280b2c81e86e0616488598c1f5dbefe7769ac8451714c7a02d898f57d1edb4a36dea1dc96dafe17d65bcf82a3dd99b868e47bf293ef9d5676f19d0f2b401d6f296b53c59956552f441a5e80df39698a53c4dfd83ec68f9e6aab746f596f937291396399eb1dd6d848574f66d44c0587438c5cd2ca9ec036cf37f0b0de3ebb0c8d80d9a1672b079a95dac8b45a2e2f439ee36e2e48b8db192b550550564771bc377292cdb98a735bb4ffca3a5fdf47ccec8e3b4f77ce450ca314cf8d69fe8047a3f22878e20fcdaff19f79e7434a3c746ebefac0dca7bf7dfbc36328542a6edb820b046600432719855c908c5604614532916a51dc32363fdba353d22d40c25b264e141fc88e82de6f851fa0349af1889da620490914b38808c3880440e860248c3c16513f65ae35786fd00d2ec08206309203d9c12f92a808ca6b80254c19100d29401a447c5226ea72f6500697d00197b3be92355e5d713a3238999b16dc1a2646ac606e245d6be134c3ebc8d41b32bcfd0ec6ed1e3c48a97becfd8ffff8cf51750b65c46aa38fcb211ed36e06ddc30edc657387689ea5ae68c04575f54db8239f95583c21d259e3d51a9c80984574c3ab62bd2debfb351fa2b49df5f09d88a559dc9167f25e0247f69659ca9fc9586f82b6ec05f69f5fd9506dfb13c25f8bc593c83898168ef7819edb16790fea93656c29531b92dc3e9b631e7adb35c01e3727499d6e15008d849b3385d64ef9638319907d92dcef6af04245d64f6d8be210d990cdc472248b8432a9797f8f46523e3e668992de55ca7de35d729a1aa53e9b3b8ea53ba3241e5b634cec1ad82dbf229f257908c2c9ec50b0e635956966141f1157268c47b09e0bdc470e7254625ff212e1ae2bd9832f41c702bb4fca25bfb4b4174e61acb79826461243f15364c32fc34462ea121730a88b0635c868d7c0e5c2e0918c13f3ec1ee2049d102d7fe49ea16fc85002be94fc0ae8acafc3b702f455adcf7b5f2e46906e10294915cc077a9785d5d9574627f8904bb8a21f13edb8a7ed9063b20a15ccd22152117b762a0148b24c4e5c5ad7e469696ab344d799b2b4dffd1a6fc93fef49d8fcc2e2eb7e75d6fd5cd2e2fafcecdf6da6e6df6d1f6ba5a7db8d39eebd197f575e95fecb5bbb3bdd5ee34ded7ddca6acf2daeb87317967b8bd38b2bf3ed8b8a7f0c99def9fe2e0d55ed6e77b5ebf07f5b2cae3c5a4d567cacd310ed8a33e0e9bd73b32b0036476db4baacbb0ed8bdd98797a9e111374bfd0bedae9b5b5de97567e77a8aeb00e9eb77e0786e757ef191c7f744efe581e5fcd06b5cee63cfa9f44df21f4350bb47786176e551225777f1dc6cf771b7d47edcbd7fa1bde22163d7b32b1ebe62cd9ae66bddd5deeadceab2f3ff71488969ffff18e132651a3cdac61cb22ce9dd1756da17d70806ed50684aa83eb278b13d3ffdf0e3bdf63ab05cef752fcc097569ee1f349552ff05ee7357f400d00700008101010100204b21f3cba072cc493e70861540df4677b498b0505a8b8e2a346b85a0c2dd2fc4263c4a7d8629026c4eb594ad96fac2bfe5f8ffebb9c841c353920b7b8ec11abc0100d90778da8d563b8f1c4510eedbf7e37cf209d9e60808402496c0dcdaac4e8ece01090112afe83043ef74ed4e6b677a86ee9edd5b3b2121b049888d842c84c0c1456702eb20b036424242c2e00408800c24fe03d53db3f33a58e860b6bbeaebeaeaaaafaab7f55bff9d1a796df0e5798263c37cc89f2fbe657e1eb8c7cb92e0de5f83c1eded95e4fded2d08150faf5ea5237e69f7855db2d3c199e351e5915a339c0b900d4103681849dff5c09daa3818bc34ec5057f319d54036b6c640752cc1617c024a17515d1a6b2f945c2f48a3ab3d09ca0b7dd68ab9d097078d292cd4267e9c39f089a70faea351378c85563b11c8802bf44c383e
ccc0cf20cd39e55a9d31df4c766ee487eed4f528174e4425baab412ab2fd44400f1dab73046827567402f6ece195a73495139455b44ee4ead4bb1db3594b2a94b929fa51367179f0f4882adc00722dea6c6edb0798d3452a7fd60d858643ed8c2598c8297bf18227220efe2f948148a1851bbb515c72a47ce34cbbeec655133b0106781de0c9aa059f8f41f3200b19833148090c41870e1c465c528b9b73c1c2798a3a57b5c2c0cfe276de28b9f0b90027552b7e6375c085d35a0691f6ac7a7768c39351b2a4eabb54b8e0dba3486d2b597131b1f0b3553ab68cff9c15a9dec3adc83b0327b5764a645b3bbd7c77b2ce294f6a755cf4a278e473d7c1692b91a74e75d083a9b5d828596cb8218364a6175132eb4b782fe61202581d2b906ec926dcee4a2cd2302de6ec9354785ea52d5bd5900bda21ea652849adab4030243b676debdc60af83126d32d91c2d34a85341c20682e6d233ab41b8f02f154e6a05e4e9b897c2b319c990c52e3a859123b533d932bbdf76c276c527c2e4b21ceb4d8cd8aa8bb1b56dac6d90260d1b8db10c036bbaa54063abace4ba8ea2241c3da3f77980ddaa92bd2e7628c7629ab617f54c2527174b05a6ae8a8236da3229af186acd0293fea689c65e7716ccb0eb61a892b5e548eeca2475a55ec7d3d32658c78357533c329d62a2b5eda28a6cb492c93f3758e35524f9ac128236578e11276e742c286468aca330a42cf661ab98b783ebbd58643cafff27cf7b71c4685a678db575669c5f1543c3e0735af70bef07a975ec4a819b769132cbcc6379f1637c36f3278f7c7debe2cb1f7c7eadd434c8feb73fdd3bfaf4956223c0f1fcb4fec587792193fd4fee3cc31edc2956278e5f1fdd7cfc59566c1fbd39fc19d8d14999a138ee42707492b171f5c0afa848c877af9e78c7cb22f570ec3f77fb789951c882be4940930cf4f0d1db6fdc5f16528fe3ddaf0eee2fb324e3d8fb1e057942cd851ffef1fb8fc5fcd920f8af3f2e66c9fcffb84b7ff865b7ce875708c9ff60d8f137aa5a1fa900d00700001001010020742877c36a520b152b1337ea1ecd37b0c98ad07289c32fec392e7eebab9f0ac71f7bc8c718cfa75317b2e15702372a9222c4616783ee7b3f0ec6358f8c328eea00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232201a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b72410000d00700001001010020058e23368b919493d6ac61d27f66b829a53893e88ddde857d3b82d913960960d22fa36f397752b98c295e3b31927f740127c0a99e76f8bfeea88f44466b8fbfd00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea990000d0070000100101001f43fe868e263d8134cf705aa85e26ce78ebb058edd558865fa3d240f5cb9e50c2389e9c8276eac800b7233a552045b2e79124c97e5156a0649849cc7f5d09eee600005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f0000d0070000100101001f29e82b08ccf15e2187f29fea11ee3f4974f41b51e45b19f353348d8848b86fb71cadd88630456b7a1c60803c7b402487d41fbf18f0b0a13b4cca1f740447938300005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff5260000d0070000100101002047a8b784c3765b5c63ac52e3d8461b80bc2d3e3f62434f8accb277d9f2487cfd3c0728fcd26b5119a11288e5db46bc5b547877e220971609d1cef8cba443340800005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed32322068dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974280000d007000010010100203e701fbafd4149bc95b55a6bfc3b78246f5c2668ccc05ed4059a36ceb38f140b31e3b69e15f2579571e5bde39e034947271599c200e540b3949112bef163074c00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c430000d0070000100101001f0cc7352e60f4f8476783d6d1b48766a111c56fee2c1a552e76a75c92bc17de172f994ffc854c09717c904054819ca7a17379ddecaf531c439b35337ba099b81300005206e10b5e02005
132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232208ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4050000d0070000100101002040965063a83be2d53b36c8d7e0775f503c2caa1407e586314562aace52c272fe60659e196413a6c9db4168470bcabb9a5851121c10c7b665f363f6cd4d1e4bda00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232202652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed250000d0070000100101002074ea7468b2a031c4cd53bf10ec3ac66b0c4b5c8779e045f1ef8d9c7b116be649217ff340107d0163397b99918ee2ce822b66cd6fce7b385af97a04671136e2ee00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0000d007000010010100204dfb21ca5140582379bc026792c16b4cf97827143a4a9cd99ae70b3e6016cd6316bcbb9f1cb1233f12a0bbcd9debafa64724d0459b5c8d3cb67ceddfb2e3962500005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d670000d0070000100101002033446a3a94ade71dff3edb786259679487ab701bbc147490b1d4159fecf545fa22fee0698db16bf616465e5cebb985bfc4d9ed1ec4a55e38997dd4b4bbc427eb00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c20000d0070000100101001f3f67edd35bf731a07f40c638e8812112cd7d1baa39ec7dac4a1b2f0c83ac8bd53689b56dba69a7386e3860a6f8976695ac0bc2b5dacae91080f1d54df2dac0c000005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b44767070000d0070000100101001f1e030564013603d54f9e983b63cd940f8ff09ae038b14813f4021bb0c09ebb640d90cb4f8d57be2809f492a51737b671a5f549d4efa8e7efdaeaa9663c09d1ad00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450710000d007000010010100205cea642eecf05568ce8c5564e63349eea3b816108914ba2ab5efffbb8ea467265f0b6d474f03ed02a3bf529fd6e55a595cbf8dd1adf4311cb9c51e862f8a535400005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232205443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b40000d0070000100101001f4556076cc86e0840bf69664f1ef8fcd4d91abda313d08e7840d24ba45cb429cf12b7d3a1f64250c19d1b975e7b107853beff70ebfc4c27c44f825dc05cdc9cd600005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e990000d0070000100101001f354d903ad0f2c6cc9d9a377d681ffaa00475d1e559e48074b4c8cce3111d5c172903b2f179ad4d736dda4e7d1b6a859baeab9dde5e5e495ce09733ec4650634400005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb400000d0070000100101001f1766fa716a828da244c9ce52919b7a19acb38dbd110d1bb0039bb2477c17e4465dceecb8330ed5ee9de1330930dfcfa1a5e8149ce8536a82c0093642adf7328200005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232206bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc0000d00700001001010020488923db1c78fa430a3a9eab75f4ee467c7b9a3d3b4eb3bd08e183c82ef79b9102a4d2a7d1ec79c96b404911ae1b10f579bd82a660011c1ca2b872b30ef7dcac00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed323201000000
0000ea305500000000a8ed32322035c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b000000 +DMLOG ACCEPTED_BLOCK 3 03000000030000000200000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100012d5b1b639d6ae94fcdd0536b224644931573d1ccb2a0c548613cd1feea18888b0200000000000000010000000000ea305503000000010000000000ea305502000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a09139800000000000000205d7ce507e9dbea47687e80fceaf2794b22bd883902adeb8c97de9f7283b614b0590bc4251ba5410cb035f88e60ffdf6fccecd10d83edfe36021227d1ee9e18830000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001010ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd0001023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a09139800000000000000205d7ce507e9dbea47687e80fceaf2794b22bd883902adeb8c97de9f7283b614b0590bc4251ba5410cb035f88e60ffdf6fccecd10d83edfe36021227d1ee9e18831400d0070000fb05010100203b7de491b51d3d74624078bc2c5dc4420985f0350afb6923a5585b5621750c9f126d7cff0efeade2068c7b618fc754b2abb5bff8cdb9bd0ecb4432b72ae1ed380100a82f78daed5c7b8c5ce755ff1ef7357b67e3ebc6d94c3609f9e662d0b8a4659bb8eb2575dbbddbc476694b9cca2dfea3b0bbd99d647776bdbb9e1da70e0adead081045158a7894b6405524a4d21424545aa8cacb0d0815a94891fa20414284ff2a025511a245ad54737ee77cf7ceeccb71f09a87545b9e7be77b9cef7ce79cef3cbf71f44fe94f1bf5d03d9f1951f447e343fdf3d87be873f2879efef473830dea77fff59e7bbef7f440d3bfd197d9f57368d1bfa54767949ab11b9736d48cd9b8840f7a0b372ed11f35136cf0436fe80dfac0b80dbc2afa67f84d6306e6063201ad97a8ff9234d00880f033d54c84469e48cd68b03c8b3ea54dd0909531c1fc52d0b0ed95c70e2dae4f3fd29eed5de8b6a767e77a8b8fcdf6daf32a42d7cd6bdd76d9548e51317aeaedd5f5c5d5e9d9f5f576b7a72c9aa273ed73ebed9e4af025c3b4d595e9f9d9deecf4fae2cfb4558d9b09defcf4409f1a2aa7cead3d2e53ebddf6f90b8b40e6426f41a568ba89e04eaf75171f5b5c6e3f4ac8d519393476dbebab17ba73ede9e5c5738bbd75358c9e70f6e155c24ae17d44a6aeaeadaeb7e7f1327f61aedd5d5737a1d3a1f3e1e5d5b9a5b985d9c595e9b5d9eeecb9768ffae9756e8956e29db9475f6918efa23e77a1db6daff4a67b8be7daea00d316339982ed81b579743afff0f4238b2bf3d38be347558696da34d17361b9b778af3a88ef0707693c3db73adf56868958aed36dcfb5097257d61a2280580ef09890d1fac2ec3d6f1c57af61e4a877bdb74a6445ffcd681aa6a60b6bf3e02dda0ed993275414abb8369444511c0f0d594b9f517c8b1e31237624a07ff4371cd123d60e51efd0adb7da86ff63ab8f46725b10ea353d34145aad7434623774b17959a51baaf8d45f568fb8a6c3d9b5b5e5c7d5eb6a07b42a745a7bfdd83d47c727ee7bd39b87fe66539f0854767bbaa9b5dd3093f2d7a9078655417f5be683f4a5c81ecb752737e3f44d5a9f9cccad539d22ee1417cfe76a9c1a9c29b29e53ef1ad64e4faa62e3c4b0a9dbb45007e81ff5e90e663b4d2fe83d39aca9bdf8cdcb2a33ce1e489d4d8d4ac7b5def8415a6e29a755c64d9d66d262f59651832ba175dc6cd2f3ad0a40313352c533b4f3ffd03ada2854d3601718b7043ccf3b757258611fef0076d96d07d2ecce62649cc0127ae5968b8d4e1e38ddc96ecbb17da75c405b74f67c6e4ed034553cd1c92da19207457c3ed70f0c1b0c21ac685a71b19387d4d78c9c75da192c1c776901daf9131d02648088f62d173b2e62184ec68434c5f29bca465367881c84970c54f4d1c22c80549d0a2430a126fe9ede4b742b469a9637a28be0ed843e6191fd00d024d49de6bd366d0a5a6777d2dc74429b0dde
36f5df9e6bec7a5859225a9339fce1c9dc60ae39a894d39e26292146a426345d7a93f272c2484b6b9e2e1154e1a0398c01a6a8778011febd839629d7b3d95d34d54c62415e4c31a2584ca6381a31acea26051d200bf4245168a23feb1ca6d5d2043cd2d9e1eda8f8f61f4e43950da9f42744a85e22fae9c3a08b2e5e0021137ecde82da8ded0adb2d78ef257a75be822622d65756a7949d1bae92fd774c0846b1104fa0872b354c43fcee7e5eb2cceaa08c0b2a62194695a9245a3dc961b6c411509c9112f456fcd80799088f838bb54d8415018cf5c23410b00c783082a10f50e84dded3abb44840118013088481f4a76fd881cda17441ad78fc81dfb8288bb7e440eef0b22adeb47e4ee7d4164ecfa1139ba2f884c5c3f22c7f70591cb6a174cf45e9898014c4c05e33982a10750d17ba2a2050223a0592d1118361ae9778cd51be612eb3957aa3975c4aadc4cb9a78eab14d660aa456f43fc36466f357e9ba03728426c01e32d8f870db33cdef01bc66b7ec378b62d9fc883fbd4017a0b8ae4b1fbd44dfc96d1db30bf35e8ad8e193c2eaec645d5b8b01a17f0fa0d5edf1c57b70aee99c7e5f60a97d10a97db2a5c1abc0b8cbbb9dae36baa3d1eacf69809ce8a9118e10581c42db234bd1d1264d57dea2e2107b5fd4035eece6adc1d6459c844b286602bf4adefd3fe7f92f6da533efd522076fd194daed5619535e0fa38f56e78155bff121a57aefcf1b77ee7d73ffde2d44f929380af57ae7cf6db5fc35720b9b9b9f9fca7fff04f3e72cf43c356be5efe95ef50ef43c3817cddfc230c7ef770e22c7c910f12ba05b9544fd1d3d923f6297dccb263414ecb8f8ed693d42f71e55b1f7e71ea3dbcc4339f7cf1c57ff8e047bef6f98d3ed0bfffbddfa0efef1e8e05ea3c3dc8c59e119833c76c4b409205c8de305a8f539ef639d94705e5437ffbf257805a244096e9419a6541802c1cb3ce03719decded17a94fab537bffde13e10c0fc28808402e4494c08c8c5f6fbdba4fd251e4ed2c9de385a0f531979861ee1b8392de34e1fb3137ed844273b365a0ffcb01e3da271b326c3d68ed9861fd6e8643f365ab77ed83be9118f9b5332ecd4313be98791a20538e3c73d013cc6cd451977f198cdfcb8ac931d1fad6b3fec7df4a88d9bb332ecec313be6878d75b2b78c52f891dd415f9ed190a6d7283eb3194e0bf99b27b324fdb2d131046c8ce4ab19389231e8eea0198a568f24ccc8823c7e4064cec5c507d8f58eb3db9a86d1a0a6039d62ed3cbbc37007e32c240f3f2848d65b2e98526010b5769ab010ae038f30f1b0e277b025f8f92fc012a09310635fd260540df077b6d2bce4647f5eea12572b34fae9bc53d4007b414c1f3719351cc2e45a47da98c714f14094031716fa8220d5eabc4ea926751db1ae09479bbacec3d7e6082462fb1461abca25c5157dde4507b51a2086c978c36344650a3d2378e671fa73468757a36d79743d753d30ed296b52d09ec5612f0283b22d4fd91dd44c795b25e102f218997a4c0750d45614c9842289d0ac0145dae9d3e6886dbd0245a283666f5a0cf7652e3b927edb50e84a24f9b8b911f2f6450ad6157d667654f6725c1e13781095c6095c40a756866653a3bc550e555cd032934211daf1045303a7069d09efb9ea4c8ed96760595ee05e97205a1662d29e4bb22a1c7fa6ae9359cfe89cb9c55d2f6881ee71268c99452f700b562d5b1a1523aec20199181db4bb70e1e346d870f3e0d1c79cac96feaa3511197562c7a6be91227a4a1e93f2382d8fb3c29aa3f218ab38045e819050a478bb8c2816e738036dbe496c7b2b734d58365171658c8f34c2d75d5846ebcdc8eced1c6b0d722c138e3564d24cae847bf4581304060ec559728fe871baa9f138454a891e93cda1abf069c8c125c2790976e1d4a6de7960ee4ebf6775c207e6867108142639236748b4227fcf8884fefb560ebe02cf66fa3cdbd4b229614a764ab856bb1ad78840bb706d53ced910b85613ae65c0d8d5ae81718cc54bb2c31a2ca4eaaf98418892b289d978cc2ec8db647f6dac54cd430309821d9c450e083949b2b45f31bbb673bbb9f7b9f5d2f05e4e35e586844ea48239adfc6095dd46019b2246227596a5a3900f24d5c897ec33dbed18927e2e14b3ff4db5b71e8e2b5d9c94ba38f1eb267d5d9c6c93aaa4b4fd7071f6949a44a4060a93c5252b46af76aa9f17f9a8ed38d5a72be161d1b986537d7a40386604cfb395626a99fbd91010518ab173cd9a77ad2db8572bbef6ec575ffbe030ab7ea44c3397c7d43ab6ec7d8b182e223fcef421e535c0d2a77032e9f85b56ebe8815339b682d93966a4d726348cef82e03b431009d0e9a53c06b221840833428f28fca9af13a231231a6e4174461ef38209a000d1b08f682888f2bc15993a2f324be42e6596e6cd88d6f1d0e22c4fa5fdf440fb99b23d19907119c6f957efacdd4fed792a6a1ab27f2015ce672d957a25426f3763619dfd083
b3a2f3e074727ad952a33fd4598347de34ddae92d7af1ecdede06fb1ba52dfb22f46243ccbad8b2c957f040763767c99ee6ec2a0ec8cc80ffb1b6c5b5d8d59c5d456f95562cbc8a15bb8c8481bec479f2cb8a83576477103b2134297833766a03e859f16345c3e5014e2ce144f8fbe347e87338f7d17ff9cc37de40bccf5038390595c4d11069b50772d522cd826f2758303e7b993d600b7e247ed49492c8ee0436d4cac3615d2f87d4113d31a3127ecb3a651878d20f7e6058a7a20b8abb3b790492d3493b816202e9da850e1020c1715cd2e19ac0034c1412e8900b3329c7b818a4a038c326b5442e947a482ee11feb6eff967ecc4af4b0a93df57212ab2306e25629e6b054cca1e742d857cce136e90dbd62862e15511a70ca4eeda2a343d6d1c66ba3ad815acb1c45be8e75370825dac2727c717440afb364676ff3ca3de21e7a1b14e6ad2e40eca2bd1db718648f2a151f5d9be326fa1af179c04a964f23407ad373ff00fdbc66e20a9868a6e24b34d070054ab45329e15f30da6e38613b54129f42944b2cca25c1d2568a599fe40cc08a40086639cbca8bf9c04cb15c21c6dd3f90287bec23b44687a34186a6010df5a3dc6e83a6fb395d55ca871ec8e932b4f4dff50d2261b00709d51e2095b84c7b8084d0ecdfa6bf6e593346bcf1a069a6147c3bae9271dabb19d2f18e2ca7f470d0d4db7989efc2d471029d4b6e48579071e69a73cee2097b75459d7711f21379d4fbfd27096e54c49d664487980c1249ee79d2435ea9f20e12d9526d891c083a7af613b97950aaaa2e5ecadeeb7bcb8de5c949d699d0facebc0b03a983cc81613726c1eee85b728274a564f0835229d2eeb4f5cbd2495adaa14e7857b52a5bc14dd007466aba21a8e469a2b7d124d84a934068120dd224649a18a189014d42170dd0049ed95b0cb248f5bedcb868a9703bd0447291c8da1c40b3e93940be207c54a4a6b886bc7b117510e2401155977b7f1545d441506511065af8da8aa8bb2162b13bfbaa8ba8af0e9143fb8248e3fa11b9635f1071d78fc8e17d41a475fd88dcbd2f888c5d3f2247f7059189eb47e4f8be20b27b11752f4caeb188ba072aba84b05b11f5b7c52f0ff7d1fa243badcfa0a68d5cb2cdfa88ed89c5ba180a3b617822313ce4122f650f55db492aa32ac3c5b925e55d591f52c61c4103346f04d4499660a128307e701712259ca6a0686e2bb738620389fe53f74397cc27502417c677740825f24bab6b48755e104ec1521e88c7b8f1ce61d6e6e46052e81dba402e3489b3cf8fa03f5130266727d7127d87f065450042870b65e4efa896783641cea40b386e534211cd496d89d4789ce65d6a7642602ea55261d877e1a00417a5b0469efa6b46c81821b6fe0b6b62899edd12a79ce47a13416de4108f3b1855443db8d34456556e6d69dc1c433585c2a0f0a4bfcf147074c48d4027e4ea1c9132aceea269dcb2cb0ee54c30d0ed0301b22bf0edfa910ba49183f2e21b12d20588700a0d3bcc63b343a374ba98ce0a914bc8ac629a6cad8684a5810d61c3622925253cf062a7b86bcbd8d82585e3b1a0d551445308dce98108b526112af5d4ab6b75779010321fe9dd61c70f725aa32665158d143697eb10a2b01cc41c82e32d92405471e94a3e90612401c97eca45083c25b8268fb4d1d41e0ce8076632174bd2a67fa5ad2106a2649c079c11d2888b9504c57fc69b03ba4896dcfc1037be2c3b66998e24f0e18f983d667203d9e6e771760b4d8c789c4cfcd873c20fe2dfe94e19df97c5a6b314ac09050981a3ac1d5bd9ad0c0195f7337251b13375c94553fa09faf8d9f7de4e6c232e51b0fa5d4d7e93d4cd82c39c1c3a46b84cf2da25da4ffb1217d21d874a0a071c1712754422ac5c05e864ef1b958188092d5f02909091a01ecd43cf46f60724b28fd9aa7b26c6583e41264cea100a706249b344b44b6622b49296b48eeb94c50a30904f218e9b5c4f844a75c8b130982d4c948a59fa211b0a0b858d14ae8b0ae228c9ee0c4228a4b96bb72004210dc270e5d930600b1c3026c54f683635ab00d6fa688af860cb443a244c1583c0389a4a7e01d9bc3728f5641e4c4d3cf524498b2e363ad80cf5b1f9206340d0ab2081149a08de95e7fc098c40c9b084430c670cf840c2c30f80c1001c72a3194cc61aa744850e3d04b1b03d3ab8d9413ec822bd068f000b0550d7b21ea77848e6d0820405be34e44ba3c3bb979b21d294f9a6ac6c324898105f3eef85321bd08c03a944affa37399518f854a264b612a46b78e9665837e93605c7df919d97b17e9c682fbe3dbc5d7dd9d216f910179773b795c36d3596d57b7a3f85d95244a87095c41ae3ab3cbe7a2fd4522e197c1fc80d02f26553a9bb6d92b5975c9529ea3da1226175581e8e9d003afca4be5a223c8d1dd6b1ca4d86d089879b7c07a5515d1e6079e220f730fc4f674e6e99ea7c4a6fcbec5b315b97b3f59eb3ab0923db26f00ea026b3fed170
1dc9cabe6d5492748924e97c0ed7882d6435fae7b86830703b4af160f1a12cd9b407799af2ae171cad3c821f620a5c698a59f511d988b0c5f7a8016e3f291dc2ab0777d1456fbf1dd503b80a996be23700e23d231d6c71ef05b7b3011d3bf7fefb062960728e82342d8b6b900cc5e50dbec311c38292e1586a4afa350f91f328e15902d5b4151ce636bcf6509cd8a85526bf902f5e62d5e00b4f7cc58ebdddca313462bd02c9e921b5ca387a6374204d9fd7261057f07f5de10d68ba6d6a8ec28b4a668ed804fecbeb540c5394c5d81d5f712a95e0a70ced28d8eedc5edb8e1a7e478d6bd851c38f7ba51d855e77e73bb7c585403f322b4766db062503831a25811a7bd801efdd8148311e194556f468346b4cab1ae221176535ef4aa65ff6d6eed590ea1a69b4cfc4317b11a74ca76571b9a9bfb6b2295454fcae08e7607b2565b3aaa404a2baab4a4a807d04be9262717acec8035703032e989c159d754a640147f079ae90f81a37d0872a65dff3ac04ce72a710f181af81841c78579d196a20b6ac8184acb2b8936f32c9302e78707dade56f56a20632263d6b825352ba0e16c569cb65eec0578e41c4c1dab154bf387e0dfaa5635b2e17c0a3adc0700c2faa861597e8700e1ffad5e320f5fa3b9b280b2c81e86e0616488598c1f5dbefe7769ac8451714c7a02d898f57d1edb4a36dea1dc96dafe17d65bcf82a3dd99b868e47bf293ef9d5676f19d0f2b401d6f296b53c59956552f441a5e80df39698a53c4dfd83ec68f9e6aab746f596f937291396399eb1dd6d848574f66d44c0587438c5cd2ca9ec036cf37f0b0de3ebb0c8d80d9a1672b079a95dac8b45a2e2f439ee36e2e48b8db192b550550564771bc377292cdb98a735bb4ffca3a5fdf47ccec8e3b4f77ce450ca314cf8d69fe8047a3f22878e20fcdaff19f79e7434a3c746ebefac0dca7bf7dfbc36328542a6edb820b046600432719855c908c5604614532916a51dc32363fdba353d22d40c25b264e141fc88e82de6f851fa0349af1889da620490914b38808c3880440e860248c3c16513f65ae35786fd00d2ec08206309203d9c12f92a808ca6b80254c19100d29401a447c5226ea72f6500697d00197b3be92355e5d713a3238999b16dc1a2646ac606e245d6be134c3ebc8d41b32bcfd0ec6ed1e3c48a97becfd8ffff8cf51750b65c46aa38fcb211ed36e06ddc30edc657387689ea5ae68c04575f54db8239f95583c21d259e3d51a9c80984574c3ab62bd2debfb351fa2b49df5f09d88a559dc9167f25e0247f69659ca9fc9586f82b6ec05f69f5fd9506dfb13c25f8bc593c83898168ef7819edb16790fea93656c29531b92dc3e9b631e7adb35c01e3727499d6e15008d849b3385d64ef9638319907d92dcef6af04245d64f6d8be210d990cdc472248b8432a9797f8f46523e3e668992de55ca7de35d729a1aa53e9b3b8ea53ba3241e5b634cec1ad82dbf229f257908c2c9ec50b0e635956966141f1157268c47b09e0bdc470e7254625ff212e1ae2bd9832f41c702bb4fca25bfb4b4174e61acb79826461243f15364c32fc34462ea121730a88b0635c868d7c0e5c2e0918c13f3ec1ee2049d102d7fe49ea16fc85002be94fc0ae8acafc3b702f455adcf7b5f2e46906e10294915cc077a9785d5d9574627f8904bb8a21f13edb8a7ed9063b20a15ccd22152117b762a0148b24c4e5c5ad7e469696ab344d799b2b4dffd1a6fc93fef49d8fcc2e2eb7e75d6fd5cd2e2fafcecdf6da6e6df6d1f6ba5a7db8d39eebd197f575e95fecb5bbb3bdd5ee34ded7ddca6acf2daeb87317967b8bd38b2bf3ed8b8a7f0c99def9fe2e0d55ed6e77b5ebf07f5b2cae3c5a4d567cacd310ed8a33e0e9bd73b32b0036476db4baacbb0ed8bdd98797a9e111374bfd0bedae9b5b5de97567e77a8aeb00e9eb77e0786e757ef191c7f744efe581e5fcd06b5cee63cfa9f44df21f4350bb47786176e551225777f1dc6cf771b7d47edcbd7fa1bde22163d7b32b1ebe62cd9ae66bddd5deeadceab2f3ff71488969ffff18e132651a3cdac61cb22ce9dd1756da17d70806ed50684aa83eb278b13d3ffdf0e3bdf63ab05cef752fcc097569ee1f349552ff05ee7357f400d00700008101010100204b21f3cba072cc493e70861540df4677b498b0505a8b8e2a346b85a0c2dd2fc4263c4a7d8629026c4eb594ad96fac2bfe5f8ffebb9c841c353920b7b8ec11abc0100d90778da8d563b8f1c4510eedbf7e37cf209d9e60808402496c0dcdaac4e8ece01090112afe83043ef74ed4e6b677a86ee9edd5b3b2121b049888d842c84c0c1456702eb20b036424242c2e00408800c24fe03d53db3f33a58e860b6bbeaebeaeaaaafaab7f55bff9d1a796df0e5798263c37cc89f2fbe657e1eb8c7cb92e0de5f83c1eded95e4fded2d08150faf5ea5237e69f7855db2d3c199e351e5915a339c0b900d4103681849dff5c09daa3818bc34ec5057f319d5403
6b6c640752cc1617c024a17515d1a6b2f945c2f48a3ab3d09ca0b7dd68ab9d097078d292cd4267e9c39f089a70faea351378c85563b11c8802bf44c383eccc0cf20cd39e55a9d31df4c766ee487eed4f528174e4425baab412ab2fd44400f1dab73046827567402f6ece195a73495139455b44ee4ead4bb1db3594b2a94b929fa51367179f0f4882adc00722dea6c6edb0798d3452a7fd60d858643ed8c2598c8297bf18227220efe2f948148a1851bbb515c72a47ce34cbbeec655133b0106781de0c9aa059f8f41f3200b19833148090c41870e1c465c528b9b73c1c2798a3a57b5c2c0cfe276de28b9f0b90027552b7e6375c085d35a0691f6ac7a7768c39351b2a4eabb54b8e0dba3486d2b597131b1f0b3553ab68cff9c15a9dec3adc83b0327b5764a645b3bbd7c77b2ce294f6a755cf4a278e473d7c1692b91a74e75d083a9b5d828596cb8218364a6175132eb4b782fe61202581d2b906ec926dcee4a2cd2302de6ec9354785ea52d5bd5900bda21ea652849adab4030243b676debdc60af83126d32d91c2d34a85341c20682e6d233ab41b8f02f154e6a05e4e9b897c2b319c990c52e3a859123b533d932bbdf76c276c527c2e4b21ceb4d8cd8aa8bb1b56dac6d90260d1b8db10c036bbaa54063abace4ba8ea2241c3da3f77980ddaa92bd2e7628c7629ab617f54c2527174b05a6ae8a8236da3229af186acd0293fea689c65e7716ccb0eb61a892b5e548eeca2475a55ec7d3d32658c78357533c329d62a2b5eda28a6cb492c93f3758e35524f9ac128236578e11276e742c286468aca330a42cf661ab98b783ebbd58643cafff27cf7b71c4685a678db575669c5f1543c3e0735af70bef07a975ec4a819b769132cbcc6379f1637c36f3278f7c7debe2cb1f7c7eadd434c8feb73fdd3bfaf4956223c0f1fcb4fec587792193fd4fee3cc31edc2956278e5f1fdd7cfc59566c1fbd39fc19d8d14999a138ee42707492b171f5c0afa848c877af9e78c7cb22f570ec3f77fb789951c882be4940930cf4f0d1db6fdc5f16528fe3ddaf0eee2fb324e3d8fb1e057942cd851ffef1fb8fc5fcd920f8af3f2e66c9fcffb84b7ff865b7ce875708c9ff60d8f137aa5a1fa900d00700001001010020742877c36a520b152b1337ea1ecd37b0c98ad07289c32fec392e7eebab9f0ac71f7bc8c718cfa75317b2e15702372a9222c4616783ee7b3f0ec6358f8c328eea00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232201a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b72410000d00700001001010020058e23368b919493d6ac61d27f66b829a53893e88ddde857d3b82d913960960d22fa36f397752b98c295e3b31927f740127c0a99e76f8bfeea88f44466b8fbfd00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea990000d0070000100101001f43fe868e263d8134cf705aa85e26ce78ebb058edd558865fa3d240f5cb9e50c2389e9c8276eac800b7233a552045b2e79124c97e5156a0649849cc7f5d09eee600005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f0000d0070000100101001f29e82b08ccf15e2187f29fea11ee3f4974f41b51e45b19f353348d8848b86fb71cadd88630456b7a1c60803c7b402487d41fbf18f0b0a13b4cca1f740447938300005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff5260000d0070000100101002047a8b784c3765b5c63ac52e3d8461b80bc2d3e3f62434f8accb277d9f2487cfd3c0728fcd26b5119a11288e5db46bc5b547877e220971609d1cef8cba443340800005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed32322068dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974280000d007000010010100203e701fbafd4149bc95b55a6bfc3b78246f5c2668ccc05ed4059a36ceb38f140b31e3b69e15f2579571e5bde39e034947271599c200e540b3949112bef163074c00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c430000d0070000100101001f0cc7352e60f4f8476783d6d1
b48766a111c56fee2c1a552e76a75c92bc17de172f994ffc854c09717c904054819ca7a17379ddecaf531c439b35337ba099b81300005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232208ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4050000d0070000100101002040965063a83be2d53b36c8d7e0775f503c2caa1407e586314562aace52c272fe60659e196413a6c9db4168470bcabb9a5851121c10c7b665f363f6cd4d1e4bda00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232202652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed250000d0070000100101002074ea7468b2a031c4cd53bf10ec3ac66b0c4b5c8779e045f1ef8d9c7b116be649217ff340107d0163397b99918ee2ce822b66cd6fce7b385af97a04671136e2ee00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0000d007000010010100204dfb21ca5140582379bc026792c16b4cf97827143a4a9cd99ae70b3e6016cd6316bcbb9f1cb1233f12a0bbcd9debafa64724d0459b5c8d3cb67ceddfb2e3962500005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d670000d0070000100101002033446a3a94ade71dff3edb786259679487ab701bbc147490b1d4159fecf545fa22fee0698db16bf616465e5cebb985bfc4d9ed1ec4a55e38997dd4b4bbc427eb00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232204fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c20000d0070000100101001f3f67edd35bf731a07f40c638e8812112cd7d1baa39ec7dac4a1b2f0c83ac8bd53689b56dba69a7386e3860a6f8976695ac0bc2b5dacae91080f1d54df2dac0c000005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b44767070000d0070000100101001f1e030564013603d54f9e983b63cd940f8ff09ae038b14813f4021bb0c09ebb640d90cb4f8d57be2809f492a51737b671a5f549d4efa8e7efdaeaa9663c09d1ad00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450710000d007000010010100205cea642eecf05568ce8c5564e63349eea3b816108914ba2ab5efffbb8ea467265f0b6d474f03ed02a3bf529fd6e55a595cbf8dd1adf4311cb9c51e862f8a535400005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232205443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b40000d0070000100101001f4556076cc86e0840bf69664f1ef8fcd4d91abda313d08e7840d24ba45cb429cf12b7d3a1f64250c19d1b975e7b107853beff70ebfc4c27c44f825dc05cdc9cd600005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e990000d0070000100101001f354d903ad0f2c6cc9d9a377d681ffaa00475d1e559e48074b4c8cce3111d5c172903b2f179ad4d736dda4e7d1b6a859baeab9dde5e5e495ce09733ec4650634400005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed323220d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb400000d0070000100101001f1766fa716a828da244c9ce52919b7a19acb38dbd110d1bb0039bb2477c17e4465dceecb8330ed5ee9de1330930dfcfa1a5e8149ce8536a82c0093642adf7328200005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed3232206bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc0000d00700001001010020488923db1c78fa430a3a9eab75f4ee467c7b9a3d3b4eb3bd08e183c82ef79b9102a4d2a7d1ec79c96
b404911ae1b10f579bd82a660011c1ca2b872b30ef7dcac00005206e10b5e02005132b41600000000010000000000ea30550000002a9bed3232010000000000ea305500000000a8ed32322035c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b000001
 DMLOG START_BLOCK 4
 DMLOG FEATURE_OP ACTIVATE 1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241 {"feature_digest":"1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241","subjective_restrictions":{"enabled":true,"preactivation_required":true,"earliest_allowed_activation_time":"1970-01-01T00:00:00.000"},"description_digest":"f3c3d91c4603cde2397268bfed4e662465293aab10cd9416db0d442b8cec2949","dependencies":[],"protocol_feature_type":"builtin","specification":[{"name":"builtin_feature_codename","value":"ONLY_LINK_TO_EXISTING_PERMISSION"}]}
 DMLOG FEATURE_OP ACTIVATE ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99 {"feature_digest":"ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99","subjective_restrictions":{"enabled":true,"preactivation_required":true,"earliest_allowed_activation_time":"1970-01-01T00:00:00.000"},"description_digest":"9908b3f8413c8474ab2a6be149d3f4f6d0421d37886033f27d4759c47a26d944","dependencies":[],"protocol_feature_type":"builtin","specification":[{"name":"builtin_feature_codename","value":"REPLACE_DEFERRED"}]}
@@ -141,9 +141,9 @@ DMLOG FEATURE_OP ACTIVATE bcd2a26394b36614fd4894241d3c451ab0f6fd110958c342307362
 DMLOG FEATURE_OP ACTIVATE d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40 {"feature_digest":"d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40","subjective_restrictions":{"enabled":true,"preactivation_required":true,"earliest_allowed_activation_time":"1970-01-01T00:00:00.000"},"description_digest":"8139e99247b87f18ef7eae99f07f00ea3adf39ed53f4d2da3f44e6aa0bfd7c62","dependencies":[],"protocol_feature_type":"builtin","specification":[{"name":"builtin_feature_codename","value":"CONFIGURABLE_WASM_LIMITS2"}]}
 DMLOG FEATURE_OP ACTIVATE 6bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc {"feature_digest":"6bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc","subjective_restrictions":{"enabled":true,"preactivation_required":true,"earliest_allowed_activation_time":"1970-01-01T00:00:00.000"},"description_digest":"68d6405cb8df3de95bd834ebb408196578500a9f818ff62ccc68f60b932f7d82","dependencies":[],"protocol_feature_type":"builtin","specification":[{"name":"builtin_feature_codename","value":"CRYPTO_PRIMITIVES"}]}
 DMLOG FEATURE_OP ACTIVATE 35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b {"feature_digest":"35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b","subjective_restrictions":{"enabled":true,"preactivation_required":true,"earliest_allowed_activation_time":"1970-01-01T00:00:00.000"},"description_digest":"e5d7992006e628a38c5e6c28dd55ff5e57ea682079bf41fef9b3cced0f46b491","dependencies":[],"protocol_feature_type":"builtin","specification":[{"name":"builtin_feature_codename","value":"GET_BLOCK_NUM"}]}
-DMLOG TRX_OP CREATE onblock 89d482b30a16a7019245292abb20393bdde4ccd1ff2331b1039efc77ef70b7b9 0000000000000000000000000000010000000000ea305500000000221acfa4010000000000ea305500000000a8ed323274023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a091398000000000000000000
 DMLOG CREATION_OP ROOT 0
 DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1262304003,"value_ex":54635,"consumed":1},"cpu_usage":{"last_ordinal":1262304003,"value_ex":233234,"consumed":101},"ram_usage":180802}
+DMLOG TRX_OP CREATE onblock 89d482b30a16a7019245292abb20393bdde4ccd1ff2331b1039efc77ef70b7b9 0000000000000000000000000000010000000000ea305500000000221acfa4010000000000ea305500000000a8ed323274023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a091398000000000000000000
 DMLOG APPLIED_TRANSACTION 4 89d482b30a16a7019245292abb20393bdde4ccd1ff2331b1039efc77ef70b7b904000000033b3d4b01000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef01006400000000000000000000000000000000000000000001010000010000000000ea30552786c63374c90aa8c0387b85266147ed008f53eb19b299f255e57ea78b33f69417000000000000001700000000000000010000000000ea3055170000000000000001010000000000ea30550000000000ea305500000000221acfa4010000000000ea305500000000a8ed323274023b3d4b0000000000ea305500000000000213588be25132b4167ced6df22b5439e376d5a20284190bb94a43e3e86c50d366bd80731342402e85b2ddc0052985fd31301156b938d7325ded2582756e40bfbc4f83b79f8de2f5d0c5394ffcc2f724830bb6b5ed9dcd5dbb4a0913980000000000000000000000000000000089d482b30a16a7019245292abb20393bdde4ccd1ff2331b1039efc77ef70b7b904000000033b3d4b01000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef0000000000000000
 DMLOG CREATION_OP ROOT 0
 DMLOG RAM_OP 0 eosio code update setcode eosio 199492 18690
@@ -156,11 +156,11 @@ DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1
 DMLOG APPLIED_TRANSACTION 4 d276f624b0262b174fe373d5bcb026f4b5c87dd77d794b246b77a75e4d22525504000000033b3d4b01000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef0100d00700008201000000000000000010040000000000000001010000010000000000ea30552deb8b0eef2f2bfd027d20727a96e4b30eb6ccdc27488670d57bf488395c48fc19000000000000001900000000000000010000000000ea3055190000000000000002020000000000ea30550000000000ea305500000000b863b2c2010000000000ea305500000000a8ed323293120000000000ea305589120e656f73696f3a3a6162692f312e320117626c6f636b5f7369676e696e675f617574686f726974792276617269616e745f626c6f636b5f7369676e696e675f617574686f726974795f763019086162695f686173680002056f776e6572046e616d6504686173680b636865636b73756d32353608616374697661746500010e666561747572655f6469676573740b636865636b73756d32353609617574686f726974790004097468726573686f6c640675696e743332046b6579730c6b65795f7765696768745b5d086163636f756e7473197065726d697373696f6e5f6c6576656c5f7765696768745b5d0577616974730d776169745f7765696768745b5d1a626c6f636b5f7369676e696e675f617574686f726974795f76300002097468726573686f6c640675696e743332046b6579730c6b65795f7765696768745b5d15626c6f636b636861696e5f706172616d65746572730011136d61785f626c6f636b5f6e65745f75736167650675696e7436341a7461726765745f626c6f636b5f6e65745f75736167655f7063740675696e743332196d61785f7472616e73616374696f6e5f6e65745f75736167650675696e7433321e626173655f7065725f7472616e73616374696f6e5f6e65745f75736167650675696e743332106e65745f75736167655f6c65657761790675696e74333223636f6e746578745f667265655f646973636f756e745f6e65745f75736167655f6e756d0675696e74333223636f6e746578745f667265655f646973636f756e745f6e65745f75736167655f64656e0675696e743332136d61785f626c6f636b5f6370755f75736167650675696e7433321a7461726765745f626c6f636b5f6370755f75736167655f7063740675696e743332196d61785f7472616e73616374696f6e5f6370755f75736167650675696e743332196d696e5f7472616e73616374696f6e5f6370755f75736167650675696e743332186d61785f7472616e73616374696f6e5f6c69666574696d650675696e7433321e64656665727265645f7472785f65787069726174696f6e5f77696e646f770675696e743332156d61785f7472616e73616374696f6e5f64656c61790675696e743332166d61785f696e6c696e655f616374696f6e5f73697a650675696e743332176d61785f696e6c696e655f616374696f6e5f64657074680675696e743136136d61785f617574686f726974795f64657074680675696e7431360b63616e63656c64656c617900020e63616e63656c696e675f61757468107065726d697373696f6e5f6c6576656c067472785f69640b636865636b73756d3235360a64656c657465617574680002076163636f756e74046e616d650a7065726d697373696f6e046e616d650a6b65795f7765696768740002036b65790a7075626c69635f6b6579067765696768740675696e743136086c696e6b617574680004076163636f756e74046e616d6504636f6465046e616d650474797065046e616d650b726571756972656d656e74046e616d650a6e65776163636f756e7400040763726561746f72046e616d65046e616d65046e616d65056f776e657209617574686f726974790661637469766509617574686f72697479076f6e6572726f7200020973656e6465725f69640775696e743132380873656e745f747278056279746573107065726d697373696f6e5f6c6576656c0002056163746f72046e616d650a7065726d697373696f6e046e616d65177065726d697373696f6e5f6c6576656c5f77656967687400020a7065726d697373696f6e107065726d697373696f6e5f6c6576656c067765696768740675696e7431361270726f64756365725f617574686f7269747900020d70726f64756365725f6e616d65046e616d6509617574686f7269747917626c6f636b5f7369676e696e675f617574686f726974790c72657161637469766174656400010e666561747572655f6469676573740b636865636b73756d323536077265716175746800010466726f6d046e616d65067365746162690002076163636f756e74046e616d65036162690562797465730a736574616c696d6974730004076163636f756e74046e616d650972616d5f627974657305696e7436340a6e65745f77656967687405696e7436340a6370755f77656967687405696e74363407736574636f64650004076163636f756e74046e616d6506766d747970650575696e743809766d76657273696f6e0575696e743804636f646505627974657309736574706172616d73000106706172616d7315626c6f636b636861696e5f706172616d657465727307736574707269760002076163636f756e74046e616d650769735f707269760575696e74380873657470726f64730001087363686564756c651470726f64756365725f617574686f726974795b5d0a756e6c696e6b617574680003076163636f756e74046e616d6504636f6465046e616d650474797065046e616d650a757064617465617574680004076163636f756e74046e616d650a7065726d697373696f6e046e616d6506706172656e74046e616d65046175746809617574686f726974790b776169745f776569676874000208776169745f7365630675696e743332067765696768740675696e743136100000002a9bed32320861637469766174650000bc892a4585a6410b63616e63656c64656c6179000040cbdaa8aca24a0a64656c65746561757468000000002d6b03a78b086c696e6b617574680000409e9a2264b89a0a6e65776163636f756e7400000000e0d27bd5a4076f6e6572726f7200905436db6564acba0c72657161637469766174656400000000a0656dacba07726571617574680000000000b863b2c206736574616269000000ce4eba68b2c20a736574616c696d6974730000000040258ab2c207736574636f6465000000c0d25c53b3c209736574706172616d730000000060bb5bb3c207736574707269760000000038d15bb3c20873657470726f6473000040cbdac0e9e2d40a756e6c696e6b61757468000040cbdaa86c52d50a757064617465617574680001000000a061d3dc31036936340000086162695f68617368000000012276617269616e745f626c6f636b5f7369676e696e675f617574686f726974795f7630011a626c6f636b5f7369676e696e675f617574686f726974795f76300000000000000000000000d276f624b0262b174fe373d5bcb026f4b5c87dd77d794b246b77a75e4d22525504000000033b3d4b01000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef010000000000ea3055890000000000000000000000000000
 DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":3,"value_ex":78666667,"consumed":9440},"average_block_cpu_usage":{"last_ordinal":3,"value_ex":334993056,"consumed":40101},"pending_net_usage":7920,"pending_cpu_usage":4100,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1050675,"virtual_cpu_limit":200400}
 DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":4,"value_ex":144011111,"consumed":7999},"average_block_cpu_usage":{"last_ordinal":4,"value_ex":366368114,"consumed":4433},"pending_net_usage":0,"pending_cpu_usage":0,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1051726,"virtual_cpu_limit":200600}
-DMLOG ACCEPTED_BLOCK 4 04000000040000000300000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010003000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd2d5b1b639d6ae94fcdd0536b224644931573d1ccb2a0c548613cd1feea18888ba1daaeb8ca4a99a2fdb182ebb462ceb26de69d76f024586207a1159226ea43de0300000000000000010000000000ea305504000000010000000000ea305503000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b8
1a37a5afb3b47cedc8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead45071d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0001033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b001f5505dc47adebb683205c6cd618df9385777f5ffb29d307479ace7cc07533ec5d3b9e648ee95c2a1dbea8d971cd9e980c6185867556035db58109f2678ce179e30200d0070000dc060101002026bcc48f2a2332b2fcb39133cc9226405049c824dba512d79415009b403b107d7dade3a3173c6cc9657b7662dd1fd267156e076ac91df0aea1d8172e65d676fe0100b13578daed3c0b90645755f7f33e3dfdba77df862569665773fb31606fdcc8246c66c710cdbe91ec872424c480c10fb3bd33bd3bddf3dd9e9e65d7c2edc56ce1f2b10a1525099602150d0906ab02456128ac4d245504b58c24a108258a965552201acb2a410a89e773efebd73d33614327912a9964a7efbbef9e73ef3dff73eeed09ff317a8b166ffef2e3e705fc48fc25be3efe46f8bdef15ef7ae0e17c87f8c837afbcf22fefcc75fd393cd45717b147de215f765888c3aadb158775177f7ba7e147760f0b79fab0d73d7d1abafc2efe409fe60f95fd3e6ddf89c3018251bf3c0df84e3b4c80f6340d94d023bb841861e10560a769795497401899821ef5b43fa61b4b27a2d923d3479b4bb3d3cd893d42634fa9b1bcda5c9eaeafae36da1d21b12b9e596bb71b4b9de97663a6d13cd1680b0fbbfdfa91651822b05de6f1d3ab73f52baf9a108a70f7faaee09edca8abaeb892fb62dbd76eae341667af9818e7ee208f69641ad633b7d069be5af8f8ecf55006795c2303482258f032ac777abe714a04d863561b9de9230bcb33f33373f5e6d2f44abd5d5f6c741aed5568cecc376679c7c162637166e5940809e6d8f78329e0b08b11f54a7b796579b5318b8dd9b51918234688aa8e849de66283c9b71dd1d6673a40d0dc684255586937973aabd30bbc9a8b1c8972bb291256e0de6a67b9dd20f645d4d56e1c5f6b424f7dad33274ad8b58517d63cd15c681c83d596b1f325d8d96eac2eafb5671ad30bcdc56667556cc1372fdb781fd38d93622b41aeb41bb4ec7aa7317db451efacb51b2226aaf1b2f9617b73d5bd9d76c367c53666393c2f2f4dcfd63bf5e9d5e6af36c445d40d7867a773ef9818dbf202393db33cdb102fc1fe226c1e49885b273e95a1636d651697857ddb7b949c83b54bbdff3af1d26db1d816c771292ea8f8
e2822a5c22652c2bfc5390f643560af8290ad094ee9f2ac882829f82e7cb15592efb5a0a195cacbb323d735e445d91fed323d9473822fdfacacac229f18a918ba44865547ae39b7ee1cdbff84bbff296e9d9c6d1e247df2daff3445c8bbea28511f1e5f1981146bec1db3775e6cc7f3f71efd93bffeeaeae51f08cddf16b7ce8beff8bbff9bb1fb8f78dbdde6b5cef47fff6f3a2d7fd33d87de66b0f7cf3fce3efcee1f8593bfa530ffec7adbdde6ba1f7db7ffcc4173ef2a1c71fcae1d84738ce3cf1d90f3f35d5eb4db9f7ccb7eec8f7be967b3ff69dbffffaddb9f9aee3ee4f7eeb8bf9c1fbb1f753ff75f7fbefcaf71ea0b1e77fe73b7d630f52efeffdf5379ec8f7dec078bffc271ff8e283b9e95e4fdd8f3df0f14fdf7965aff7267feabdf73ef5d457de7367dfe09b7d23782be71fbce349718bb72fa5e7a71efff8377ffddf456ca2c11f3f37dcf1a96fcf80e0edfd0852115f11b5a5ee2a98b42c52b115ba525153ef81e7343e91c856a226e0897bcfee4bb411ad746b2b5d399e8809e8004095c61d23ae477068cb168e37121a5519c16bb94fa4dd03f8367def7911cbc8e848896842caeecb618ef904de4ca81200c30ca59a123df467f6612f4e938a6b05e28d5e2d551796268dd851956a9f06fcf12b135196a9a8f21ad3445d5796466d13a224a2c8882840f1fd89c8079d17f67711172bf6ea7369f47a5ec857c57c22c78070b0f42f09d844819a31b4bcac85af45fcaa517a34b286af60c5f3f116e8f9aa688d89e8c305582192b30618e09797a8f9347c1defff21d83f7556556414c014e2ada38676eea505a2b587bdadaa7628005a6f0cad532f07ed65d0a5a1a0e3a1a0b70f055d190a7ae750d06628e8b1a1a06b4341ef1e0a7a7c28e83d43414f0e057dcd50d0e7453fb8dc0c5ce5c05506fe3080cbef0f2e07c0115844df915a76d569b23be3f38926b38373c5adb83025aa5e1a57d1a02968fb60e2f08551ad6aa0d0de02ce436561fca9c23b8c67822b84389b785393e7ce567d78d78575190fde55bd12dbdf54eea7b5ddcbd61720f76a304d0e76af16381ddafb5655e0ec386b44a63c51a901d3cd686240d39b5e033ccea73be9c9563c026b2cb4766980341a3f3c9cc8db8b9103cd07561f440dec2398fef7435802f98087db7f5af0fe352820fa01684d815daec04777af8e611a01e8120f97ac0921d1c0832da277bbeb5f3ff16f57800b10e8450ed2fa00227e7502f41e379a377efe61763b0678378eef692df11ea68f17ab37d0b077fd050e03c1887725021711034d6aadc4c736ec03569804ae5d6925a16d1b8d688185f884988d029f9014f0c33385f9781c0885fd00047bc151013fc7ee3944a24d5a6cbbf438ef9cc6d44066e6ecc3843acc1308761c7648a13da166a1090e88b8185f35ca780a791a59425aacf1c5893fa1f6102961647c2bb274ea349055be0d7efdda9b76c01a90668226cdb880f403c0ddc4055d533b13851fe6268057f826860fe6564aa0ca3057419e2ab0819d25e40434ed7a693b1a6c583c1121a2c355e120c5756574dc97453c5325f1af2b8bdebc205707500daa02506a8c57e65ac876501b4321050a83421c0a70000b044a3261aae2272e7627e187099f26df7d36d0234e2f311e505558f618cf297005a0bce06690bb3e29034ceb83f8d73030d80f4107a804ae02175652107bc0f37e5c2374e2c2029238fe0f5407561957133fedb2edf84f1453248f9fbedd3b54c646fcca6a98ca6a017b81ba1c19cd27e1188caf824e49901c3fae8eb2d0062cb456460324fcb80980d6b60304d8b56adc52b8fe9042b7f4a9cf9296042664e10a310cf114ef30bdb46f10ca7f7504f080c92802f9c0981a2f350006b6230273e293425755cae6a444468c46b5aa65dabecf66a484562c3265b2621159b112bc032be69b08ad58c45aea3b2bf6c9476805ca9450c89483652be66330da22bb19e1b4c001b4625e66c57c67c5ecf468b500054ed93364a496b00cd4a06a84734548359e3232457c61b0a788d0f050e307126a9353f322da6dea8a7b9a2f41ed40fa820965f003f50ad0a3dd82373b9daced4c7cb2382036a85a3ebe8ce123ca54cbc05380aa85f295a9d608cd6789ad415841af006301f5208420989495d8c328b0df671da0f8d504d11d4a2b30cfa40506ac73152cc418597eb4a982d9f01932966a5facc834b005fd06751ac97ec0633b48bf023686011a438c77802f199530f682e7b8f70c84f480f6f4802d126b0a4d15b28e26e019ff80e551a737b7c88da079ab51f0eea15946bb63836c8f788dfe17d685161276b3670a25a016278455583328c00cfa6806fdcc0c8200a17df882eef96da00c980672f1b493445f232ee1989e1ebd2973ee1aa1d11a4ed5a0a5a8350e2d49ad496821b8a6712fc1d63d600b0a27ae161793aec7d07a29b52ad0da8e4030e06a11521f8e0bb2717e36cec325f0fc93efc0a9dfc159e199f3cf8
8dbcf4247c5767cf5dadb294ca0a7a72fbdfd2cbe2c9cc3c76f6fc557156c7f3724a01ab4bfa76fa7659f3d7b96350fa6895fe0692a442024da366a21212fa216922fee4bb3e47c7a291bd2730f6741184a23d804930bc20c0981228e422c7697967ed79a647c8fbf807af3696511d801c623483ffae47d4fa216a6f761a31abab1206434cc4b9fb8efc9cbc040827683bde9242198e5e0d08e24c4e5800517148b8145ad2cc4bb303492e40dd005a08ca2d7a82c2490ff49544f819a0eab26b9c72c34e04108dea2180f8c6f60734b5e6ec8cea7d0024753951a737270431ece024113cf225921503ee1f50ac47699be617409dd34353ecf510c097e34341e394d9ad29234a429614710e08a7837bc0d39daf351ed04aed6c368881d30600083012b0a7145214282d372ee59a2f991687e607b38130564d8e94167f498d29eb34431e95b8a1b22ef0e53810b535872e0552339381d4ffcf9d45fbbbf1a90d79110e7834f0ccfa17fbc0577a3881296e7be5ba0b206024d02a6d0645589b8cf8a297d1bc18469777f5959d4a48da556fc86513281840b8247df5293e8e75b6a021961533eca09c8487c8b8838c441cf0e9efec632c584269c8f7fce02520465c29be80d587294e788832f8f3f7001881da9047a1afffc28f945b0607faa155ab09efd42e31d5ff23c9b3191993191993191376382cd98c8cc98c8cc98f8ff67c6c01fed483f0399e34ee0d54fa79fa6161ba8e8f7fb5d0e57a884e595425ee123f34a5b5e493b85a216f34adac928d3b4bc82d63d60bc9857125d30f10a5bcc2b89ccf49057d8c7bc72e3fc6c9ce7720b8d44d48344d47d44d4fd44d43922ea1c11754644dde3154d13bfc0d3548840cc2b6c31afb0c5bc8268257e39c52cc09d7b7e1410fcb005042afe310a02bef410e709cca83f93d2735ea497d441f8fae38957b6358702565cc809908de53a437ac63b44eee89555c55998e7b2304042cff1a5a316810586219053c9a8972f615ec2193b2760e86d2996f5d293c7d3c2817c6ea5b3b16cb25f2b35ac1c97cdcb7549f0052e2dc2e0fd47483640f2a8629918ef930996081485e72803b6c034c61f98db417f0bab2c355bd3225cf96c1ba3bd1a2663353586f542aa8b38ee939060b9718ca21475e0d9e02e4138e9e0e4b3c059808b070068e129859e992c531e5968c553a35c732924544ea1fd95b8aa628b7acf485bd1b3a54fc8bd3546bd9ace8728563e44cbb8fd91befae8e58202215e32058e182a3d238e03a19e91c713d5b9ff2cb2260d5b90850655346a67e4f1b20d9228e6f339e653102c91bbc2b80883611731791868e3070f641642587a6399abaecc232f72317d308f549b4ffc9b7284633cc8ea00d2e52c9930beade8dea1fab2880207e65ea2e78d3c44e125d0e36a215c1202dbd55c48ee42d46fab45dae51eb8274c3cba379465fa0cfc8428e8444a78a9d2ef4197e67c4361bea1b19000f98626c1d556788d8664838bd8982f00ed2025a8eade5a6c44ebd68041782f039230d13cd57f608a8328fe482dda3a05f11187fa05ae7f62c4ef5118ee22faad363b7840290da2017a76463eaf8a762b7fdc6c156d42ddc6927b704335bb95c5fe736240c1080fcafd3e869e84a7c7a8743289824f0a40551eece51c0975cb157ab9c4a077e99b139f4bd472af46dde71a3696be6bb6dcb94b1f64ac57b2364de6b4e91aa74d6724aad30d439a37d8f893e769e3d0f9375279cc80ff6be797f37a032e0ff43b334758740b8cbe5c405a693037c1a45a1c882bce21dea760439ecb6136db50c56ea8f2836ea86281bfdf867cebc693f5bb82bcda77236337321e1854c80daa38fa54360909b0b216308de8cc20ae16f0a3521d21528d9802961ee29791f5861d7d48cba8dfc7a159264a65d574df5205abcdde40a5d91b23b3e05bb50315dfce1f311f0081fe54587f0a2eb6a16debf8d028ad88bd48bc778ab4d0e0f903f8203063f1a8f52a86e49bb18e51decffd220972f57e0fac7b80f5fe80d688050520f8e116566390fcada4805707ba586ef7d285563282188a88b96809bfd24a22b2afa6c819bf971eccd8d7e23a658931682e491e6c9952aa3ae9b947042afe368922927651e97d10ce229d40446684d2f2c844a648af50950b3c8c174d55573a13d35c1ef172bef3af34955b47723904791b4d07817400c8a75a1b39511da1a6a04cb03967af02ae929ca84f4ed447278a35a610a559630d639d130d73859324c0c442b31fd558b37d1d3b502c50e0d1d58d5c418709fd0ed0dd84b6c29368c7108d041dc918a29121459c204286447890099dc010bcef01ee2bda6f7de4c1967d07f03ebe2b3306c5c57560487990213e533a807d4596da452aaf61593fa25748ed11c79002d599606b052415e851c444c07584ae1424b874a2f93c8b0b43db498b49e9e3eb49441082b165e5c43f0c7411b8a97b25ab5e4ca03926f090c82e2cc03a786235c7b31f30
8b6b155a4ea90497fa6d0933f106a387ec5cc9fa6fc74a908930c53c8c2a9810385c8bfa4f12e5cf03c7fc350c27f07887aa753af5d61294a010e209f6cf28841aab7d9ec3a3b1ed7720bcd06415500ed0108160a1448c60e0d7db4da5b5f9be702f552f178210858abc85002b6a8129ce5723d603b06f10c348148e80d959c40e143b3eb1915464e4e846422f9ed8082b4f396992287a5bb87c285136b73279b79050da32e85616db2d5955b4831208db93b6078935e74607a6cca31811e1468be0c32a126b2660ad68a73924a528aa5cd2364cc24b4920c3650cab0a1c56791c568d70a9142660e10cd8ee05ce6e85fd760fd79bf0d1295d5020bbb795ed9ec8d93de9e0c9d4fa4ecd0ad6eef9836aa699e0b24ac55ddad0161b086ee5235eaae386561b4d9915a38c2b29e31b5b81fcbca47890f7af32d5c3c85b5bed188cddbcaac7aeda1d7af36da9090e55b14e4d38a4b39a7e967a687cd3673565de6a9281cbac261eb8f5520f0fad26397f14504d6567b69a9435f8f7c487a3774a0ad406cf8a5f88c8d606a5607a6e748e15da3fe58eed36b97676fd600aa3f83ce4c2ae9d6d048da1c8855d3b7b6ed0f150d0db8782ae0c05bd7328683314f4d850d0b5a1a0770f053d3e14f49ea1a0278782be6628e875d7ce3603bfc06b679b69f846d7cedef7c3520485f4852c183984f778be4711933de87b7a1363bae172fc81e5f8b81c9f8de914a54aeeaa4b1a57f9865d16856417cdf07a01e1edbfe4e2e36595d09a5e9fcf90f11c0f231d94df117bb9a54891eb637cb98597a8f87649112fb70426a2cb2d015d6e29620201599e09f0720b5f08c2d4b7ef72cb88292221471cac5bfe089ac911dc174ebbee728b72975b94bba207c1133c0c5ed12bda3c12270a90123c1f67ac1c68e72fab59a20d5c59d3f6ca5a855b7c658d8a248020e40ac93f034d0a5c22f1f91a99dcab6fe3eac8cd994f8b5bf1647605eb735a7bee44e5874612fa448078ef5bde8779de17f2bc0f99f7851cefc30be1bd6f42e4bdef60dd827daa2058de8717c4fb7003de8719ef7de63dcf373cefedf5d5ab334e3e29b5ee3fbce7545373b40671dee4e2fd7c0c2ee8ce027f65c0f878c0eedb0376778f80ef29daf0b5a6c693e07e4a08368771170f037730af28a5a300173a21bafdee432eba654329d0ce12a8072e022b152eb78b29b7db4aa7f3fcfd81ef71c15b0f6c8ebf0d40970f38d9a34c48df8317d340bc60912063b740f21276b8ceddbe5a6ccd5d2611eb2eb078587e5128d43267d1a54d00037b37974278266f4044cae10c37c6e90f04819c6cf81855de3aeaee2b0478af84614a7952445f9154b450b9fd733dc2c8ab54ccf91fefdac35d7b9bef5a6fb46bcd3e70fdae75ffae6397cf795cc87118f5c618e58053b67b8e6f1b1d60f2252c10944ab16854f196c7a6fd279474390d1588e9da479ab4129b3ad65a57293105adcbc5582a8fefa02abc40d9153db1e00b25ee664c4dd56838a4bbf204caf9f11d0915d7f1545fd0acd19dba77917abd14a27abd8852e8e430643994b62874951ae73387679d7ecb854cbf65f3e99db687681bbcf52a1f824e83ca7fe2e14ce5430c22fdbe8cc9cbaef1af579681708c260ed14c849c0c0f9a8959c80c7bb24281199651d064e1718c978be1d6ef57b1271bd82c85809caea6559af34599e43ef922cc02025eed0f8c3741a50750d9cb669009c5bf3c1afdcf7ab3c40a01d93e4c058aa45191eeafdaca817e91ec945d818f2b805fc7936007d5dfa93c414ecc043ba8a463e732feeb379c6b83d3c79c5556f4ff28d90fb6343407a46ac03abc50f7317be46de84b171cae507d767beecb17aac4a75e5ebbc75b5751d838dbe12b851bd9562fae8ff69b6a3ab2a55bc75cfdb1676a826eca93553cc88720028b531ed5015d15b9d2a2d7ecbef3c5298fab4eaa2aec8977c4851e4a86e85e229daf494a3df6ba3a5dcc9721f11805fa4a94ab7cf0111bf1d1f1c96f706dde5be7e65eccdabc3de00ed1a2afafcd1fb187db7db579cc5a2617b0366fa9ea2355b5bdcc9f04842f57f2c3d760ce06a86a6bac1a565670c1505636f7736573c73aa3e399d13e9109364a5103a4ef1e62e3ba2c154f481163bf4d8d3ee4cb527f497d7d517d72b157541fe7a2bae2a23a857414fdabac20adb282b4ca0ad28a0bd2dcea2fb94f6e5072cf95da8933546affa3df7e74eca6b24cef86cf5ca51d47e8f4b71ebdfb223cb747fec8ce86757689d23cb9d0abad031f6d6d1d39daabadff409b42cab8d6645fe53dbb00edaaeff4ed9a8d8ad1f4151557959754952743deabcae3418d2dc303d222be26b92bf59de8905bf6b3339df759c9a3ea7189cbea11ddaf85416f6bbd4a8ad780db464f01c8b900ed56c791a75ba4bd5d4bcbb2c967ee78c973c74b24bbb672aef86c490c6a80e4fd20767b7937ca7d6582a5d35d74f807feda8476c7adc3d98a70bdadd06c2b34db8a70d056a
05de0fb1f7c99b9d5f327e18141150cf98b3e035e2b1c74cbf64b1ff06879566b51c97c904c7c595d58bb3ace5fe9c03331d8139fc5d0c5264e2b4576e53cee3d17f8608d9f4b74bb04268a8f2195c7d906dc40f77576090bb1d90c6260063130033d3fbfc84e4ad92574ca1e29597456667be8dc7b87ce3d3b74ee79373eefe6b319bebb0fb8c7f6aa31a3507bed552d11fd8bc70710e745cea1a3462b32bbfcbc4509694fd6f80b59a40a052a596401b03ab08163f7dda58c81c8cdd61efa44243bea9d6cd195e4752242ae9788273311a138a39fecd4d52f28d4d52f2bf65a8e1517c50e2517c2ec7ea1b60ba876e563de8df078ebf0b0f1c4af39262a9e1b7d6e342389a0afae3a111beba35e5eca72d453eba9a7d653af276eeefb1f0371d0792ae67c10bf2593bbe48ca4ae9030e2b7644998e99a204b5c0dcfca21e44bf43c5fee40eccc1de5ce7d576e2a2b5bc42b5e4f2618bf3a836200c46b733c83be0cff144400de906274fe86430003ecb72cc0fce35d109fac2702e35776e8eb6d74b5cd435df0ca744b80bd1187c6907a1fa7c818298733611cee8e8dc5848dd75dfc0cefe2cbecd16e66ecf27a9b11ee79c1cfb769241f213339630e9969c3db9df33136dd84d8f627c1317dc3d7f41736a25347ebcd85c6ace92c9bfac2c2f24cbdd3302bf5638d55b1b6d438b9d298e9c0cb46bbbddc36cd2573b479b2313b7de454a7b16a669697563bedb599ce725b74e61a66798987f19fbb3133f5a5a5e58e39d280d602ce30db6c03b68553c2fda4ef7c48444f79f609ff3c0ee06bd4174dbdd3692cae7478556f6d376949ab1d83b33496664551d83fc3033861a51d93fd359eab8d58692f77966796178c1bd35c35b890de5fece13f80a5e09f7e6ef30a5cab0fff02f8b77ca405dbc101ab0c00c3db7520c634b6794ea0d8e2da42a739dd5c9a6d9c144c1ffc9b40cda563d978612935032f6046c6bb8ab09dfa9105e8386aeaf07eaed1469277dab091f5932f2ecf368f9eda645e3b811d734113f05f1d6a3b2ececcd5978e0135dacdc57afb94996f9c326f9d6b2c5994b89bfa9245bc294171dffdf43cd6e80811022d0bf06f04fe15e15f24f04fb108d15c3a515f68ce9a13f576b3be847bc2ad3c17a1045b1b795f4b00d7ff02b86d754e00d007000082010101001f5be06ae972efdca372f145881855ab035fc9b761243d0042d6d39703ea6e94a86cd3b37db4b18cdc3f21d6403bc80f91005cf01ed88a02858ec4a7b9fe9ba77f0100e60778da8d56bd6f1c45149ffb3edf25966308c6292882682205e24bb0ac54a6a0a1a0e0a340262c733bef6e47b73bbb9999bdf3414d116890e88284228420852ba7890c85752d121209690205d081c4ffc09bd9bdddbd353e3cc57ebcf7e6cdbcf77eef37d3faa3fb418dbcf7f4f131c151310ff2f7b577cdeba17b385b10dcffa7d7fb627d2eb9b3be0aa1e2e1cd9bb4cf5fd97ab957d9e8fba13b72141f0a2e860e8db5174aaea797c754722ab4738ade195fdb6ca313c7a3ca23d546381120eb8206503792aeeb813b5271d07b75bb4d5dcdc75403a9ac0e80ea5882c3f810942e5aad64ae497d457b129417faac1973a1aff7ea2398aa73f87026c0879edebb854edd30165a6d462003ae3028e1f830063f33694c28d7eabc7966b24ba78743aa6758f6a29def7a940b27a212c3d52015b9f04c40f7d35409d04eace810ac93ed1b97349543d065ad13b93a5d66d34cd6920a653285712cbab8de7ba14f154e00b9d46a2df7ed034ce83495bfe88642c3be7606124ce6954d5c6127220ece6aca40a4a68588dd285ed8c862c4997659c465179b01267899c1f3650f3e1f80e641963206039012181aed3bb01f7149addd840b164e52ab8b652f0cfc2c6fcf1925173e17e0a46ac53f9a2fb07152cb20d29e556f6ddbf4e4f02aaaba2e152ef87629525d4dfee6685c2bc3b969f6cf59b1553a38157167cc49b5953682edbd4e3e3bf9cfb14baa35fce94471dfe7ae839fcd449e6eaa8d3b18598ff5058f753764907ce969947c7525dc8eb98400e6cb0a845b3209a7bb129b3c4cc9207b240c917779d3b202e48256887a194aec43058221d8396bd9cdf576da28d1a6928dfe54833a912424207497ae594ec2c6290c41aa05cb93795f48cf7a244316bbb8a99ca6aae7336116651ece69c47a0e93372744b694115bc6d254a4521fc830b0fe9b0a34b26ea9ec35142599e918bdcf0324be52215790ac1c6bd3b0b4d4314d9dc4980a4c8b15052df465aa5f72d41c0706090d93989d9571304602c4ac25ff162ec956b08ada52a4229566f2f1dfec69d689241f97826a71e51871e2b86d8d4286ceda0a93c4621f9e3d5994bd5b9d586450aefd2f943b71c468da4af5a5ad644298e3bd6eecf352770b470ca9b6ed9f0237258a4520ade1417ce54b3c94f35391fc70e7caeb9f7cfbda022f90dd1f9fde3ff8fa8d62afe3b83aaa7df759deab64f7
abbb97d9c3bbc506c4f1fba38f9f7c93f5d3e7ef6cff0aece068117938ee41707094a16c7e7d98438c909fde3cf20e674548e1d87de9d3c359060d428e1fbdfff68359a1d8383efc7eefc12c2b2b8e9d9f519097d00477fcd79fbf146b6503f6df7a52ac88b9c9dca38f7fdbaaf1ed1b84e4f70dbcf59ce182525976da937f01d36959fa0000
+DMLOG ACCEPTED_BLOCK 4 04000000040000000300000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010003000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cd2d5b1b639d6ae94fcdd0536b224644931573d1ccb2a0c548613cd1feea18888ba1daaeb8ca4a99a2fdb182ebb462ceb26de69d76f024586207a1159226ea43de0300000000000000010000000000ea305504000000010000000000ea305503000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add8010000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b001f5505dc47adebb683205c6cd618df9385777f5ffb29d307479ace7cc07533ec5d3b9e648ee95c2a1dbea8d971cd9e980c6185867556035db58109f2678ce179e30000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001130ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b72412652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b447670735c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c25443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b468dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974286bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead45071d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526ef43112c6543b88d
b2283a2e077278c315ae2c84719a8b25f25cc88565fbea99f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0001033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b001f5505dc47adebb683205c6cd618df9385777f5ffb29d307479ace7cc07533ec5d3b9e648ee95c2a1dbea8d971cd9e980c6185867556035db58109f2678ce179e30200d0070000dc060101002026bcc48f2a2332b2fcb39133cc9226405049c824dba512d79415009b403b107d7dade3a3173c6cc9657b7662dd1fd267156e076ac91df0aea1d8172e65d676fe0100b13578daed3c0b90645755f7f33e3dfdba77df862569665773fb31606fdcc8246c66c710cdbe91ec872424c480c10fb3bd33bd3bddf3dd9e9e65d7c2edc56ce1f2b10a1525099602150d0906ab02456128ac4d245504b58c24a108258a965552201acb2a410a89e773efebd73d33614327912a9964a7efbbef9e73ef3dff73eeed09ff317a8b166ffef2e3e705fc48fc25be3efe46f8bdef15ef7ae0e17c87f8c837afbcf22fefcc75fd393cd45717b147de215f765888c3aadb158775177f7ba7e147760f0b79fab0d73d7d1abafc2efe409fe60f95fd3e6ddf89c3018251bf3c0df84e3b4c80f6340d94d023bb841861e10560a769795497401899821ef5b43fa61b4b27a2d923d3479b4bb3d3cd893d42634fa9b1bcda5c9eaeafae36da1d21b12b9e596bb71b4b9de97663a6d13cd1680b0fbbfdfa91651822b05de6f1d3ab73f52baf9a108a70f7faaee09edca8abaeb892fb62dbd76eae341667af9818e7ee208f69641ad633b7d069be5af8f8ecf55006795c2303482258f032ac777abe714a04d863561b9de9230bcb33f33373f5e6d2f44abd5d5f6c741aed5568cecc376679c7c162637166e5940809e6d8f78329e0b08b11f54a7b796579b5318b8dd9b51918234688aa8e849de66283c9b71dd1d6673a40d0dc684255586937973aabd30bbc9a8b1c8972bb291256e0de6a67b9dd20f645d4d56e1c5f6b424f7dad33274ad8b58517d63cd15c681c83d596b1f325d8d96eac2eafb5671ad30bcdc56667556cc1372fdb781fd38d93622b41aeb41bb4ec7aa7317db451efacb51b2226aaf1b2f9617b73d5bd9d76c367c53666393c2f2f4dcfd63bf5e9d5e6af36c445d40d7867a773ef9818dbf202393db33cdb102fc1fe226c1e49885b273e95a1636d651697857ddb7b949c83b54bbdff3af1d26db1d816c771292ea8f8e2822a5c22652c2bfc5390f643560af8290ad094ee9f2ac882829f82e7cb15592efb5a0a195cacbb323d735e445d91fed323d9473822fdfacacac229f18a918ba44865547ae39b7ee1cdbff84bbff296e9d9c6d1e247df2daff3445c8bbea28511f1e5f1981146bec1db3775e6cc7f3f71efd93bffeeaeae51f08cddf16b7ce8beff8bbff9bb1fb8f78dbdde6b5cef47fff6f3a2d7fd33d87de66b0f7cf3fce3efcee1f8593bfa530ffec7adbdde6ba1f7db7ffcc4173ef2a1c71fcae1d84738ce3cf1d90f3f35d5eb4db9f7ccb7eec8f
7be967b3ff69dbffffaddb9f9aee3ee4f7eeb8bf9c1fbb1f753ff75f7fbefcaf71ea0b1e77fe73b7d630f52efeffdf5379ec8f7dec078bffc271ff8e283b9e95e4fdd8f3df0f14fdf7965aff7267feabdf73ef5d457de7367dfe09b7d23782be71fbce349718bb72fa5e7a71efff8377ffddf456ca2c11f3f37dcf1a96fcf80e0edfd0852115f11b5a5ee2a98b42c52b115ba525153ef81e7343e91c856a226e0897bcfee4bb411ad746b2b5d399e8809e8004095c61d23ae477068cb168e37121a5519c16bb94fa4dd03f8367def7911cbc8e848896842caeecb618ef904de4ca81200c30ca59a123df467f6612f4e938a6b05e28d5e2d551796268dd851956a9f06fcf12b135196a9a8f21ad3445d5796466d13a224a2c8882840f1fd89c8079d17f67711172bf6ea7369f47a5ec857c57c22c78070b0f42f09d844819a31b4bcac85af45fcaa517a34b286af60c5f3f116e8f9aa688d89e8c305582192b30618e09797a8f9347c1defff21d83f7556556414c014e2ada38676eea505a2b587bdadaa7628005a6f0cad532f07ed65d0a5a1a0e3a1a0b70f055d190a7ae750d06628e8b1a1a06b4341ef1e0a7a7c28e83d43414f0e057dcd50d0e7453fb8dc0c5ce5c05506fe3080cbef0f2e07c0115844df915a76d569b23be3f38926b38373c5adb83025aa5e1a57d1a02968fb60e2f08551ad6aa0d0de02ce436561fca9c23b8c67822b84389b785393e7ce567d78d78575190fde55bd12dbdf54eea7b5ddcbd61720f76a304d0e76af16381ddafb5655e0ec386b44a63c51a901d3cd686240d39b5e033ccea73be9c9563c026b2cb4766980341a3f3c9cc8db8b9103cd07561f440dec2398fef7435802f98087db7f5af0fe352820fa01684d815daec04777af8e611a01e8120f97ac0921d1c0832da277bbeb5f3ff16f57800b10e8450ed2fa00227e7502f41e379a377efe61763b0678378eef692df11ea68f17ab37d0b077fd050e03c1887725021711034d6aadc4c736ec03569804ae5d6925a16d1b8d688185f884988d029f9014f0c33385f9781c0885fd00047bc151013fc7ee3944a24d5a6cbbf438ef9cc6d44066e6ecc3843acc1308761c7648a13da166a1090e88b8185f35ca780a791a59425aacf1c5893fa1f6102961647c2bb274ea349055be0d7efdda9b76c01a90668226cdb880f403c0ddc4055d533b13851fe6268057f826860fe6564aa0ca3057419e2ab0819d25e40434ed7a693b1a6c583c1121a2c355e120c5756574dc97453c5325f1af2b8bdebc205707500daa02506a8c57e65ac876501b4321050a83421c0a70000b044a3261aae2272e7627e187099f26df7d36d0234e2f311e505558f618cf297005a0bce06690bb3e29034ceb83f8d73030d80f4107a804ae02175652107bc0f37e5c2374e2c2029238fe0f5407561957133fedb2edf84f1453248f9fbedd3b54c646fcca6a98ca6a017b81ba1c19cd27e1188caf824e49901c3fae8eb2d0062cb456460324fcb80980d6b60304d8b56adc52b8fe9042b7f4a9cf9296042664e10a310cf114ef30bdb46f10ca7f7504f080c92802f9c0981a2f350006b6230273e293425755cae6a444468c46b5aa65dabecf66a484562c3265b2621159b112bc032be69b08ad58c45aea3b2bf6c9476805ca9450c89483652be66330da22bb19e1b4c001b4625e66c57c67c5ecf468b500054ed93364a496b00cd4a06a84734548359e3232457c61b0a788d0f050e307126a9353f322da6dea8a7b9a2f41ed40fa820965f003f50ad0a3dd82373b9daced4c7cb2382036a85a3ebe8ce123ca54cbc05380aa85f295a9d608cd6789ad415841af006301f5208420989495d8c328b0df671da0f8d504d11d4a2b30cfa40506ac73152cc418597eb4a982d9f01932966a5facc834b005fd06751ac97ec0633b48bf023686011a438c77802f199530f682e7b8f70c84f480f6f4802d126b0a4d15b28e26e019ff80e551a737b7c88da079ab51f0eea15946bb63836c8f788dfe17d685161276b3670a25a016278455583328c00cfa6806fdcc0c8200a17df882eef96da00c980672f1b493445f232ee1989e1ebd2973ee1aa1d11a4ed5a0a5a8350e2d49ad496821b8a6712fc1d63d600b0a27ae161793aec7d07a29b52ad0da8e4030e06a11521f8e0bb2717e36cec325f0fc93efc0a9dfc159e199f3cf88dbcf4247c5767cf5dadb294ca0a7a72fbdfd2cbe2c9cc3c76f6fc557156c7f3724a01ab4bfa76fa7659f3d7b96350fa6895fe0692a442024da366a21212fa216922fee4bb3e47c7a291bd2730f6741184a23d804930bc20c0981228e422c7697967ed79a647c8fbf807af3696511d801c623483ffae47d4fa216a6f761a31abab1206434cc4b9fb8efc9cbc040827683bde9242198e5e0d08e24c4e5800517148b8145ad2cc4bb303492e40dd005a08ca2d7a82c2490ff49544f819a0eab26b9c72c34e04108dea2180f8c6f60734b5e
6ec8cea7d0024753951a737270431ece024113cf225921503ee1f50ac47699be617409dd34353ecf510c097e34341e394d9ad29234a429614710e08a7837bc0d39daf351ed04aed6c368881d30600083012b0a7145214282d372ee59a2f991687e607b38130564d8e94167f498d29eb34431e95b8a1b22ef0e53810b535872e0552339381d4ffcf9d45fbbbf1a90d79110e7834f0ccfa17fbc0577a3881296e7be5ba0b206024d02a6d0645589b8cf8a297d1bc18469777f5959d4a48da556fc86513281840b8247df5293e8e75b6a021961533eca09c8487c8b8838c441cf0e9efec632c584269c8f7fce02520465c29be80d587294e788832f8f3f7001881da9047a1afffc28f945b0607faa155ab09efd42e31d5ff23c9b3191993191993191376382cd98c8cc98c8cc98f8ff67c6c01fed483f0399e34ee0d54fa79fa6161ba8e8f7fb5d0e57a884e595425ee123f34a5b5e493b85a216f34adac928d3b4bc82d63d60bc9857125d30f10a5bcc2b89ccf49057d8c7bc72e3fc6c9ce7720b8d44d48344d47d44d4fd44d43922ea1c11754644dde3154d13bfc0d3548840cc2b6c31afb0c5bc8268257e39c52cc09d7b7e1410fcb005042afe310a02bef410e709cca83f93d2735ea497d441f8fae38957b6358702565cc809908de53a437ac63b44eee89555c55998e7b2304042cff1a5a316810586219053c9a8972f615ec2193b2760e86d2996f5d293c7d3c2817c6ea5b3b16cb25f2b35ac1c97cdcb7549f0052e2dc2e0fd47483640f2a8629918ef930996081485e72803b6c034c61f98db417f0bab2c355bd3225cf96c1ba3bd1a2663353586f542aa8b38ee939060b9718ca21475e0d9e02e4138e9e0e4b3c059808b070068e129859e992c531e5968c553a35c732924544ea1fd95b8aa628b7acf485bd1b3a54fc8bd3546bd9ace8728563e44cbb8fd91befae8e58202215e32058e182a3d238e03a19e91c713d5b9ff2cb2260d5b90850655346a67e4f1b20d9228e6f339e653102c91bbc2b80883611731791868e3070f641642587a6399abaecc232f72317d308f549b4ffc9b7284633cc8ea00d2e52c9930beade8dea1fab2880207e65ea2e78d3c44e125d0e36a215c1202dbd55c48ee42d46fab45dae51eb8274c3cba379465fa0cfc8428e8444a78a9d2ef4197e67c4361bea1b19000f98626c1d556788d8664838bd8982f00ed2025a8eade5a6c44ebd68041782f039230d13cd57f608a8328fe482dda3a05f11187fa05ae7f62c4ef5118ee22faad363b7840290da2017a76463eaf8a762b7fdc6c156d42ddc6927b704335bb95c5fe736240c1080fcafd3e869e84a7c7a8743289824f0a40551eece51c0975cb157ab9c4a077e99b139f4bd472af46dde71a3696be6bb6dcb94b1f64ac57b2364de6b4e91aa74d6724aad30d439a37d8f893e769e3d0f9375279cc80ff6be797f37a032e0ff43b334758740b8cbe5c405a693037c1a45a1c882bce21dea760439ecb6136db50c56ea8f2836ea86281bfdf867cebc693f5bb82bcda77236337321e1854c80daa38fa54360909b0b216308de8cc20ae16f0a3521d21528d9802961ee29791f5861d7d48cba8dfc7a159264a65d574df5205abcdde40a5d91b23b3e05bb50315dfce1f311f0081fe54587f0a2eb6a16debf8d028ad88bd48bc778ab4d0e0f903f8203063f1a8f52a86e49bb18e51decffd220972f57e0fac7b80f5fe80d688050520f8e116566390fcada4805707ba586ef7d285563282188a88b96809bfd24a22b2afa6c819bf971eccd8d7e23a658931682e491e6c9952aa3ae9b947042afe368922927651e97d10ce229d40446684d2f2c844a648af50950b3c8c174d55573a13d35c1ef172bef3af34955b47723904791b4d07817400c8a75a1b39511da1a6a04cb03967af02ae929ca84f4ed447278a35a610a559630d639d130d73859324c0c442b31fd558b37d1d3b502c50e0d1d58d5c418709fd0ed0dd84b6c29368c7108d041dc918a29121459c204286447890099dc010bcef01ee2bda6f7de4c1967d07f03ebe2b3306c5c57560487990213e533a807d4596da452aaf61593fa25748ed11c79002d599606b052415e851c444c07584ae1424b874a2f93c8b0b43db498b49e9e3eb49441082b165e5c43f0c7411b8a97b25ab5e4ca03926f090c82e2cc03a786235c7b31f308b6b155a4ea90497fa6d0933f106a387ec5cc9fa6fc74a908930c53c8c2a9810385c8bfa4f12e5cf03c7fc350c27f07887aa753af5d61294a010e209f6cf28841aab7d9ec3a3b1ed7720bcd06415500ed0108160a1448c60e0d7db4da5b5f9be702f552f178210858abc85002b6a8129ce5723d603b06f10c348148e80d959c40e143b3eb1915464e4e846422f9ed8082b4f396992287a5bb87c285136b73279b79050da32e85616db2d5955b4831208db93b6078935e74607a6cca31811e1468be0c32a126b2660ad68a73924a528aa5
cd2364cc24b4920c3650cab0a1c56791c568d70a9142660e10cd8ee05ce6e85fd760fd79bf0d1295d5020bbb795ed9ec8d93de9e0c9d4fa4ecd0ad6eef9836aa699e0b24ac55ddad0161b086ee5235eaae386561b4d9915a38c2b29e31b5b81fcbca47890f7af32d5c3c85b5bed188cddbcaac7aeda1d7af36da9090e55b14e4d38a4b39a7e967a687cd3673565de6a9281cbac261eb8f5520f0fad26397f14504d6567b69a9435f8f7c487a3774a0ad406cf8a5f88c8d606a5607a6e748e15da3fe58eed36b97676fd600aa3f83ce4c2ae9d6d048da1c8855d3b7b6ed0f150d0db8782ae0c05bd7328683314f4d850d0b5a1a0770f053d3e14f49ea1a0278782be6628e875d7ce3603bfc06b679b69f846d7cedef7c3520485f4852c183984f778be4711933de87b7a1363bae172fc81e5f8b81c9f8de914a54aeeaa4b1a57f9865d16856417cdf07a01e1edbfe4e2e36595d09a5e9fcf90f11c0f231d94df117bb9a54891eb637cb98597a8f87649112fb70426a2cb2d015d6e29620201599e09f0720b5f08c2d4b7ef72cb88292221471cac5bfe089ac911dc174ebbee728b72975b94bba207c1133c0c5ed12bda3c12270a90123c1f67ac1c68e72fab59a20d5c59d3f6ca5a855b7c658d8a248020e40ac93f034d0a5c22f1f91a99dcab6fe3eac8cd994f8b5bf1647605eb735a7bee44e5874612fa448078ef5bde8779de17f2bc0f99f7851cefc30be1bd6f42e4bdef60dd827daa2058de8717c4fb7003de8719ef7de63dcf373cefedf5d5ab334e3e29b5ee3fbce7545373b40671dee4e2fd7c0c2ee8ce027f65c0f878c0eedb0376778f80ef29daf0b5a6c693e07e4a08368771170f037730af28a5a300173a21bafdee432eba654329d0ce12a8072e022b152eb78b29b7db4aa7f3fcfd81ef71c15b0f6c8ebf0d40970f38d9a34c48df8317d340bc60912063b740f21276b8ceddbe5a6ccd5d2611eb2eb078587e5128d43267d1a54d00037b37974278266f4044cae10c37c6e90f04819c6cf81855de3aeaee2b0478af84614a7952445f9154b450b9fd733dc2c8ab54ccf91fefdac35d7b9bef5a6fb46bcd3e70fdae75ffae6397cf795cc87118f5c618e58053b67b8e6f1b1d60f2252c10944ab16854f196c7a6fd279474390d1588e9da479ab4129b3ad65a57293105adcbc5582a8fefa02abc40d9153db1e00b25ee664c4dd56838a4bbf204caf9f11d0915d7f1545fd0acd19dba77917abd14a27abd8852e8e430643994b62874951ae73387679d7ecb854cbf65f3e99db687681bbcf52a1f824e83ca7fe2e14ce5430c22fdbe8cc9cbaef1af579681708c260ed14c849c0c0f9a8959c80c7bb24281199651d064e1718c978be1d6ef57b1271bd82c85809caea6559af34599e43ef922cc02025eed0f8c3741a50750d9cb669009c5bf3c1afdcf7ab3c40a01d93e4c058aa45191eeafdaca817e91ec945d818f2b805fc7936007d5dfa93c414ecc043ba8a463e732feeb379c6b83d3c79c5556f4ff28d90fb6343407a46ac03abc50f7317be46de84b171cae507d767beecb17aac4a75e5ebbc75b5751d838dbe12b851bd9562fae8ff69b6a3ab2a55bc75cfdb1676a826eca93553cc88720028b531ed5015d15b9d2a2d7ecbef3c5298fab4eaa2aec8977c4851e4a86e85e229daf494a3df6ba3a5dcc9721f11805fa4a94ab7cf0111bf1d1f1c96f706dde5be7e65eccdabc3de00ed1a2afafcd1fb187db7db579cc5a2617b0366fa9ea2355b5bdcc9f04842f57f2c3d760ce06a86a6bac1a565670c1505636f7736573c73aa3e399d13e9109364a5103a4ef1e62e3ba2c154f481163bf4d8d3ee4cb527f497d7d517d72b157541fe7a2bae2a23a857414fdabac20adb282b4ca0ad28a0bd2dcea2fb94f6e5072cf95da8933546affa3df7e74eca6b24cef86cf5ca51d47e8f4b71ebdfb223cb747fec8ce86757689d23cb9d0abad031f6d6d1d39daabadff409b42cab8d6645fe53dbb00edaaeff4ed9a8d8ad1f4151557959754952743deabcae3418d2dc303d222be26b92bf59de8905bf6b3339df759c9a3ea7189cbea11ddaf85416f6bbd4a8ad780db464f01c8b900ed56c791a75ba4bd5d4bcbb2c967ee78c973c74b24bbb672aef86c490c6a80e4fd20767b7937ca7d6582a5d35d74f807feda8476c7adc3d98a70bdadd06c2b34db8a70d056a05de0fb1f7c99b9d5f327e18141150cf98b3e035e2b1c74cbf64b1ff06879566b51c97c904c7c595d58bb3ace5fe9c03331d8139fc5d0c5264e2b4576e53cee3d17f8608d9f4b74bb04268a8f2195c7d906dc40f77576090bb1d90c6260063130033d3fbfc84e4ad92574ca1e29597456667be8dc7b87ce3d3b74ee79373eefe6b319bebb0fb8c7f6aa31a3507bed552d11fd8bc70710e745cea1a3462b32bbfcbc4509694fd6f80b59a40a052a596401b03ab08163f7dda58c81c8cdd61efa44243bea9d6cd195e4752242ae97882733
11a138a39fecd4d52f28d4d52f2bf65a8e1517c50e2517c2ec7ea1b60ba876e563de8df078ebf0b0f1c4af39262a9e1b7d6e342389a0afae3a111beba35e5eca72d453eba9a7d653af276eeefb1f0371d0792ae67c10bf2593bbe48ca4ae9030e2b7644998e99a204b5c0dcfca21e44bf43c5fee40eccc1de5ce7d576e2a2b5bc42b5e4f2618bf3a836200c46b733c83be0cff144400de906274fe86430003ecb72cc0fce35d109fac2702e35776e8eb6d74b5cd435df0ca744b80bd1187c6907a1fa7c818298733611cee8e8dc5848dd75dfc0cefe2cbecd16e66ecf27a9b11ee79c1cfb769241f213339630e9969c3db9df33136dd84d8f627c1317dc3d7f41736a25347ebcd85c6ace92c9bfac2c2f24cbdd3302bf5638d55b1b6d438b9d298e9c0cb46bbbddc36cd2573b479b2313b7de454a7b16a669697563bedb599ce725b74e61a66798987f19fbb3133f5a5a5e58e39d280d602ce30db6c03b68553c2fda4ef7c48444f79f609ff3c0ee06bd4174dbdd3692cae7478556f6d376949ab1d83b33496664551d83fc3033861a51d93fd359eab8d58692f77966796178c1bd35c35b890de5fece13f80a5e09f7e6ef30a5cab0fff02f8b77ca405dbc101ab0c00c3db7520c634b6794ea0d8e2da42a739dd5c9a6d9c144c1ffc9b40cda563d978612935032f6046c6bb8ab09dfa9105e8386aeaf07eaed1469277dab091f5932f2ecf368f9eda645e3b811d734113f05f1d6a3b2ececcd5978e0135dacdc57afb94996f9c326f9d6b2c5994b89bfa9245bc294171dffdf43cd6e80811022d0bf06f04fe15e15f24f04fb108d15c3a515f68ce9a13f576b3be847bc2ad3c17a1045b1b795f4b00d7ff02b86d754e00d007000082010101001f5be06ae972efdca372f145881855ab035fc9b761243d0042d6d39703ea6e94a86cd3b37db4b18cdc3f21d6403bc80f91005cf01ed88a02858ec4a7b9fe9ba77f0100e60778da8d56bd6f1c45149ffb3edf25966308c6292882682205e24bb0ac54a6a0a1a0e0a340262c733bef6e47b73bbb9999bdf3414d116890e88284228420852ba7890c85752d121209690205d081c4ffc09bd9bdddbd353e3cc57ebcf7e6cdbcf77eef37d3faa3fb418dbcf7f4f131c151310ff2f7b577cdeba17b385b10dcffa7d7fb627d2eb9b3be0aa1e2e1cd9bb4cf5fd97ab957d9e8fba13b72141f0a2e860e8db5174aaea797c754722ab4738ade195fdb6ca313c7a3ca23d546381120eb8206503792aeeb813b5271d07b75bb4d5dcdc75403a9ac0e80ea5882c3f810942e5aad64ae497d457b129417faac1973a1aff7ea2398aa73f87026c0879edebb854edd30165a6d462003ae3028e1f830063f33694c28d7eabc7966b24ba78743aa6758f6a29def7a940b27a212c3d52015b9f04c40f7d35409d04eace810ac93ed1b97349543d065ad13b93a5d66d34cd6920a653285712cbab8de7ba14f154e00b9d46a2df7ed034ce83495bfe88642c3be7606124ce6954d5c6127220ece6aca40a4a68588dd285ed8c862c4997659c465179b01267899c1f3650f3e1f80e641963206039012181aed3bb01f7149addd840b164e52ab8b652f0cfc2c6fcf1925173e17e0a46ac53f9a2fb07152cb20d29e556f6ddbf4e4f02aaaba2e152ef87629525d4dfee6685c2bc3b969f6cf59b1553a38157167cc49b5953682edbd4e3e3bf9cfb14baa35fce94471dfe7ae839fcd449e6eaa8d3b18598ff5058f753764907ce969947c7525dc8eb98400e6cb0a845b3209a7bb129b3c4cc9207b240c917779d3b202e48256887a194aec43058221d8396bd9cdf576da28d1a6928dfe54833a912424207497ae594ec2c6290c41aa05cb93795f48cf7a244316bbb8a99ca6aae7336116651ece69c47a0e93372744b694115bc6d254a4521fc830b0fe9b0a34b26ea9ec35142599e918bdcf0324be52215790ac1c6bd3b0b4d4314d9dc4980a4c8b15052df465aa5f72d41c0706090d93989d9571304602c4ac25ff162ec956b08ada52a4229566f2f1dfec69d689241f97826a71e51871e2b86d8d4286ceda0a93c4621f9e3d5994bd5b9d586450aefd2f943b71c468da4af5a5ad644298e3bd6eecf352770b470ca9b6ed9f0237258a4520ade1417ce54b3c94f35391fc70e7caeb9f7cfbda022f90dd1f9fde3ff8fa8d62afe3b83aaa7df759deab64f7abbb97d9c3bbc506c4f1fba38f9f7c93f5d3e7ef6cff0aece068117938ee41707094a16c7e7d98438c909fde3cf20e674548e1d87de9d3c359060d428e1fbdfff68359a1d8383efc7eefc12c2b2b8e9d9f519097d00477fcd79fbf146b6503f6df7a52ac88b9c9dca38f7fdbaaf1ed1b84e4f70dbcf59ce182525976da937f01d36959fa0001 DMLOG START_BLOCK 5 -DMLOG TRX_OP CREATE onblock 48f08a7ea5ea23750c7f9b142fb4bb947bb3fc73d42fc59f26339f2487773087 
0000000000000000000000000000010000000000ea305500000000221acfa4010000000000ea305500000000a8ed3232b905033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b000000 DMLOG CREATION_OP ROOT 0 DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"eosio","net_usage":{"last_ordinal":1262304004,"value_ex":100468,"consumed":1},"cpu_usage":{"last_ordinal":1262304004,"value_ex":256961,"consumed":101},"ram_usage":199629} +DMLOG TRX_OP CREATE onblock 48f08a7ea5ea23750c7f9b142fb4bb947bb3fc73d42fc59f26339f2487773087 0000000000000000000000000000010000000000ea305500000000221acfa4010000000000ea305500000000a8ed3232b905033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b000000 DMLOG APPLIED_TRANSACTION 5 
48f08a7ea5ea23750c7f9b142fb4bb947bb3fc73d42fc59f26339f248777308705000000043b3d4b010000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e75101006400000000000000000000000000000000000000000001010000010000000000ea3055080cda2e60a87185264b067ea2e9e32dceeca3c6727a538c215223e312a9327d1a000000000000001a00000000000000010000000000ea30551a0000000000000002020000000000ea30550000000000ea305500000000221acfa4010000000000ea305500000000a8ed3232b905033b3d4b0000000000ea30550000000000034b2b890c59dad3c04fd9300057ba6285196dc55f32e988a49d6059cdded9794b7e6d10923376031cef59e6d6d809ee653bcc37297cb644078a507a6bd26c416dc790f2b35815f1d6741bc63e1c235f13e809df666ddea0ba2e5f45c80000000000010000c104121a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b7241ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea994a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0fe0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff52668dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a297428ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c438ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a4052652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c2299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b4476707c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead450715443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b4bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb406bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc35c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b0000000000000000000048f08a7ea5ea23750c7f9b142fb4bb947bb3fc73d42fc59f26339f248777308705000000043b3d4b010000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e7510000000000000000 DMLOG CREATION_OP ROOT 0 DMLOG PERM_OP INS 0 9 {"usage_id":8,"parent":0,"owner":"alice","name":"owner","last_updated":"2020-01-01T00:00:02.000","auth":{"threshold":1,"keys":[{"key":"EOS6JvuLaCqV8qHbSqUBVRPMo9N7V3vgE8YqHmweG568YmTDJ3opq","weight":1}],"accounts":[{"permission":{"actor":"alice","permission":"eosio.code"},"weight":1}],"waits":[]}} @@ -177,4 +177,4 @@ DMLOG RLIMIT_OP ACCOUNT_USAGE UPD {"owner":"alice","net_usage":{"last_ordinal":1 DMLOG APPLIED_TRANSACTION 5 6cdc68c8257e32da2f6cd8c4f6e6bb7798a47e7da6ac14a7cea45da9cd70c81c05000000043b3d4b010000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e7510100d007000012000000000000000090000000000000000001010000010000000000ea3055f3d881d2f7fbf2f7cb6081aff84e7aca1dd3914a0948ef4fc9422e734e8d4d571c000000000000001c00000000000000010000000000855c34010000000000000002020000000000ea30550000000000ea30550040cbdaa86c52d5010000000000855c3400000000a8ed3232310000000000855c34000000008090b1ca00000000a8ed32320100000000010000000000ea305500000000a8ed3232010000000000000000000000006cdc68c8257e32da2f6cd8c4f6e6bb7798a47e7da6ac14a7cea45da9cd70c81c05000000043b3d4b010000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e751010000000000855c34400100000000000000000000000000 DMLOG RLIMIT_OP STATE UPD 
{"average_block_net_usage":{"last_ordinal":4,"value_ex":144011111,"consumed":7999},"average_block_cpu_usage":{"last_ordinal":4,"value_ex":366368114,"consumed":4433},"pending_net_usage":376,"pending_cpu_usage":4100,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1051726,"virtual_cpu_limit":200600} DMLOG RLIMIT_OP STATE UPD {"average_block_net_usage":{"last_ordinal":5,"value_ex":145944352,"consumed":519},"average_block_cpu_usage":{"last_ordinal":5,"value_ex":397481713,"consumed":4464},"pending_net_usage":0,"pending_cpu_usage":0,"total_net_weight":0,"total_cpu_weight":0,"total_ram_bytes":0,"virtual_net_limit":1052778,"virtual_cpu_limit":200800} -DMLOG ACCEPTED_BLOCK 5 05000000050000000400000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100018b5b706080c8d5ec9456986e611761b17ec82e672f8176e581625f54535c32150400000000000000010000000000ea305505000000010000000000ea305504000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100000000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e751043b3d4b0000000000ea30550000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef08a40bd5b3162bbbe3fb5d540dabc705bada65dd0a276381665c9ed73d4afa451bb28b302f397e60628e8e3a7d3ecb23a8a0bf20f606dd26b93b61511cb87673000000000000001f69207fd55ddaddad96457d633a97f216211989106548974158e1cf70d1f8e5e5262d674a40041040cf394ae3496bb3852b67e15d9c771dfb4a818f6166f590a90000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001130ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b72412652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b447670735c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c25443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b468dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974286bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead45071d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0001043b3d4b0000000000ea30550000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef08a40bd5b3162bbbe3fb5d540dabc705bada65dd0a276381665c9ed73d4afa451bb28b302f397e60628e8e3a7d3ecb23a8a0bf20f606dd26b93b61511cb87673000000000000001f69207fd55ddaddad96457d633a97f216211989106548974158e1cf70d1f8e5e5262d674a40041040cf394ae3496bb3852b67e15d9c771dfb4a818f6166f590a90200d00700001d0101001f55be758d9f4e3d253e069c66875beafbffe405c897ee2da2dd4577b2953a3379758676fd8906c5c3eb7043a30dc8d939af3eef5e73bf7f57f253e854f803dd810000bd0107e10b5e0400a7e16ae000000000010000000000ea305500409e9a2264b89a010000000000ea305500000000a8ed32328a010000000000ea30550000000000855c3401000000010002bb30f6894f29bb6fca635b1df728ad77e48fdd6123ce5e4455b0f71e072e7df80100010000000000855c3400804
a1401ea305501000001000000010003ebcf44b45a71d4f225768f602d1e2e2b25ef779ee9897fe744bf1a16e85423d50100010000000000855c3400804a1401ea30550100000000d0070000120101001f504584a3e50ad7d75a7ffd3254fbd363824a5269d57a1d3443644067e42117515242fb12efe17be14590b398489b951a8823f8b5aed4d7de11be3a971498ddb100006307e10b5e0400a7e16ae000000000010000000000ea30550040cbdaa86c52d5010000000000855c3400000000a8ed3232310000000000855c34000000008090b1ca00000000a8ed32320100000000010000000000ea305500000000a8ed3232010000000000 +DMLOG ACCEPTED_BLOCK 5 05000000050000000400000000000000010000000000ea3055000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100018b5b706080c8d5ec9456986e611761b17ec82e672f8176e581625f54535c32150400000000000000010000000000ea305505000000010000000000ea305504000000000100000001000240e54a7b27e042b80a810153bec1dd166eef95fa69f6c9886ae283363bc2add80100000000000596867aec674ebb58c23ffcafefab5119fdbdbb08736a63fbdc64e751043b3d4b0000000000ea30550000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef08a40bd5b3162bbbe3fb5d540dabc705bada65dd0a276381665c9ed73d4afa451bb28b302f397e60628e8e3a7d3ecb23a8a0bf20f606dd26b93b61511cb87673000000000000001f69207fd55ddaddad96457d633a97f216211989106548974158e1cf70d1f8e5e5262d674a40041040cf394ae3496bb3852b67e15d9c771dfb4a818f6166f590a90000000029807708239aa7de914d3ed61e9009ab2280bfbc50f1d9769f27f8341ef26198000000000001130ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd1a99a59d87e06e09ec5b028a9cbb7749b4a5ad8819004365d02dc4379a8b72412652f5f96006294109b3dd0bbde63693f55324af452b799ee137a81a905eed25299dcb6af692324b899b39f16d5a530a33062804e41f09dc97e9f156b447670735c2186cc36f7bb4aeaf4487b36e57039ccf45a9136aa856a5d569ecca55ef2b4a90c00d55454dc5b059055ca213579c6ea856967712a56017487886a4d4cc0f4e7bf348da00a945489b2a681749eb56f5de00b900014e137ddae39f48f69d674fca8bd82bbd181e714e283f83e1b45d95ca5af40fb89ad3977b653c448f78c25443fcf88330c586bc0e5f3dee10e7f63c76c00249c87fe4fbf7f38c082006b468dcaa34c0517d19666e6b33add67351d8c5f69e999ca1e37931bc410a2974286bcb40a24e49c26d0a60513b6aeb8551d264e4717f306b81a37a5afb3b47cedc8ba52fe7a3956c5cd3a656a3174b931d3bb2abb45578befc59f283ecd816a405ad9e3d8f650687709fd68f4b90b41f7d825a365b02c23a636cef88ac2ac00c43bcd2a26394b36614fd4894241d3c451ab0f6fd110958c3423073621a70826e99c3a6138c5061cf291310887c0b5c71fcaffeab90d5deb50d3b9e687cead45071d528b9f6e9693f45ed277af93474fd473ce7d831dae2180cca35d907bd10cb40e0fb64b1085cc5538970158d05a009c24e276fb94e1a0bf6a528b48fbc4ff526ef43112c6543b88db2283a2e077278c315ae2c84719a8b25f25cc88565fbea99f0af56d2c5a48d60a4a5b5c903edfb7db3a736a94ed589d0b797df33ff9d3e1d0001043b3d4b0000000000ea30550000000000044444a224a7e16ae04e24e6640d6a909fb89ca6e5e8c1e5397e1e03ef08a40bd5b3162bbbe3fb5d540dabc705bada65dd0a276381665c9ed73d4afa451bb28b302f397e60628e8e3a7d3ecb23a8a0bf20f606dd26b93b61511cb87673000000000000001f69207fd55ddaddad96457d633a97f216211989106548974158e1cf70d1f8e5e5262d674a40041040cf394ae3496bb3852b67e15d9c771dfb4a818f6166f590a90200d00700001d0101001f55be758d9f4e3d253e069c66875beafbffe405c897ee2da2dd4577b2953a3379758676fd8906c5c3eb7043a30dc8d939af3eef5e73bf7f57f253e854f803dd810000bd0107e10b5e0400a7e16ae000000000010000000000ea305500409e9a2264b89a010000000000ea305500000000a8ed32328a010000000000ea30550000000000855c3401000000010002bb30f6894f29bb6fca635b1df728ad77e48fdd6123ce5e4455b0f71e072e7df80100010000000000855c3400804a1401ea305501000001000000010003ebcf44b45a71d4f225768f602d1e2e2b25ef779ee9897fe744bf1a16e85423d50100010000000000855c3400804a1401ea30550100000000d0070000120101001f504584a3e50ad7d75a7ffd325
4fbd363824a5269d57a1d3443644067e42117515242fb12efe17be14590b398489b951a8823f8b5aed4d7de11be3a971498ddb100006307e10b5e0400a7e16ae000000000010000000000ea30550040cbdaa86c52d5010000000000855c3400000000a8ed3232310000000000855c34000000008090b1ca00000000a8ed32320100000000010000000000ea305500000000a8ed3232010000000001
diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp
index 64427568a5..4f61553040 100644
--- a/unittests/misc_tests.cpp
+++ b/unittests/misc_tests.cpp
@@ -139,6 +139,27 @@ BOOST_AUTO_TEST_CASE(name_suffix_tests)
    BOOST_CHECK_EQUAL( name{name_suffix("abcdefhij.123"_n)}, name{"123"_n} );
 }
 
+BOOST_AUTO_TEST_CASE(name_prefix_tests)
+{
+   BOOST_CHECK_EQUAL("e"_n.prefix(), "e"_n);
+   BOOST_CHECK_EQUAL(""_n.prefix(), ""_n);
+   BOOST_CHECK_EQUAL("abcdefghijklm"_n.prefix(), "abcdefghijklm"_n);
+   BOOST_CHECK_EQUAL("abcdefghijkl"_n.prefix(), "abcdefghijkl"_n);
+   BOOST_CHECK_EQUAL("abc.xyz"_n.prefix(), "abc"_n);
+   BOOST_CHECK_EQUAL("abc.xyz.qrt"_n.prefix(), "abc.xyz"_n);
+   BOOST_CHECK_EQUAL("."_n.prefix(), ""_n);
+
+   BOOST_CHECK_EQUAL("eosio.any"_n.prefix(), "eosio"_n);
+   BOOST_CHECK_EQUAL("eosio"_n.prefix(), "eosio"_n);
+   BOOST_CHECK_EQUAL("eosio"_n.prefix(), config::system_account_name);
+   BOOST_CHECK_EQUAL("eosio."_n.prefix(), "eosio"_n);
+   BOOST_CHECK_EQUAL("eosio.evm"_n.prefix(), "eosio"_n);
+   BOOST_CHECK_EQUAL(".eosio"_n.prefix(), ""_n);
+   BOOST_CHECK_NE("eosi"_n.prefix(), "eosio"_n);
+   BOOST_CHECK_NE("eosioeosio"_n.prefix(), "eosio"_n);
+   BOOST_CHECK_NE("eosioe"_n.prefix(), "eosio"_n);
+}
+
 /// Test processing of unbalanced strings
 BOOST_AUTO_TEST_CASE(json_from_string_test)
 {
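The name_prefix_tests above pin down the semantics of name::prefix(): drop the last dot-delimited segment of an Antelope name, and return the name unchanged when it contains no dot. The standalone sketch below mirrors that rule over plain strings; it is illustrative only (the real implementation operates on the packed base-32 uint64_t inside eosio::chain::name, and this prefix() is a stand-in helper, not library code):

    #include <cassert>
    #include <string>

    // Illustrative stand-in for eosio::chain::name::prefix(): keep everything
    // before the last '.', or the whole string when there is no '.'.
    std::string prefix(const std::string& s) {
        auto pos = s.rfind('.');
        return pos == std::string::npos ? s : s.substr(0, pos);
    }

    int main() {
        assert(prefix("abc.xyz.qrt") == "abc.xyz"); // only the last segment is dropped
        assert(prefix("eosio.any")   == "eosio");
        assert(prefix("eosio")       == "eosio");   // no dot: unchanged
        assert(prefix(".eosio")      == "");        // leading dot: empty prefix
        return 0;
    }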
diff --git a/unittests/partitioned_block_log_tests.cpp b/unittests/partitioned_block_log_tests.cpp
index b44872ac61..7e70fc9b86 100644
--- a/unittests/partitioned_block_log_tests.cpp
+++ b/unittests/partitioned_block_log_tests.cpp
@@ -21,6 +21,17 @@ void remove_existing_states(eosio::chain::controller::config& config) {
    std::filesystem::create_directories(state_path);
 }
 
+std::filesystem::path get_retained_dir(const eosio::chain::controller::config& cfg) {
+   std::filesystem::path retained_dir;
+   auto partitioned_config = std::get_if<eosio::chain::partitioned_blocklog_config>(&cfg.blog);
+   if (partitioned_config) {
+      retained_dir = partitioned_config->retained_dir;
+      if (retained_dir.is_relative())
+         retained_dir = cfg.blocks_dir / retained_dir;
+   }
+   return retained_dir;
+}
+
 struct restart_from_block_log_test_fixture {
    eosio::testing::tester chain;
    uint32_t cutoff_block_num;
@@ -47,12 +58,13 @@ struct restart_from_block_log_test_fixture {
    void restart_chain() {
       eosio::chain::controller::config copied_config = chain.get_config();
+      auto genesis = eosio::chain::block_log::extract_genesis_state(copied_config.blocks_dir,
+                                                                    get_retained_dir(copied_config));
+      BOOST_REQUIRE(genesis);
+
       copied_config.blog = eosio::chain::basic_blocklog_config{};
 
-      auto genesis = eosio::chain::block_log::extract_genesis_state(chain.get_config().blocks_dir);
-      BOOST_REQUIRE(genesis);
-
       // remove the state files to make sure we are starting from block log
       remove_existing_states(copied_config);
       eosio::testing::tester from_block_log_chain(copied_config, *genesis);
@@ -173,7 +185,8 @@ BOOST_AUTO_TEST_CASE(test_split_log_util1) {
    uint32_t head_block_num = chain.control->head_block_num();
 
    eosio::chain::controller::config copied_config = chain.get_config();
-   auto genesis = eosio::chain::block_log::extract_genesis_state(chain.get_config().blocks_dir);
+   auto genesis = eosio::chain::block_log::extract_genesis_state(chain.get_config().blocks_dir,
+                                                                 get_retained_dir(chain.get_config()));
    BOOST_REQUIRE(genesis);
    chain.close();
@@ -269,8 +282,9 @@ void split_log_replay(uint32_t replay_max_retained_block_files) {
                                 true);
    chain.produce_blocks(150);
 
-   eosio::chain::controller::config copied_config = chain.get_config();
-   auto genesis = eosio::chain::block_log::extract_genesis_state(chain.get_config().blocks_dir);
+   auto copied_config = chain.get_config();
+   auto genesis =
+      eosio::chain::block_log::extract_genesis_state(copied_config.blocks_dir, get_retained_dir(copied_config));
    BOOST_REQUIRE(genesis);
    chain.close();
@@ -326,7 +340,7 @@ BOOST_AUTO_TEST_CASE(test_restart_without_blocks_log_file) {
    chain.produce_blocks(160);
 
    eosio::chain::controller::config copied_config = chain.get_config();
-   auto genesis = eosio::chain::block_log::extract_genesis_state(chain.get_config().blocks_dir);
+   auto genesis = eosio::chain::block_log::extract_genesis_state(chain.get_config().blocks_dir, get_retained_dir(copied_config));
    BOOST_REQUIRE(genesis);
    chain.close();
 
diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index d8813cbc5a..4f041d8094 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -412,6 +412,14 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try {
    cfg.disable_all_subjective_mitigations = true;
    c.init( cfg );
 
+   transaction_trace_ptr trace;
+   auto h = c.control->applied_transaction.connect( [&](std::tuple<const transaction_trace_ptr&, const packed_transaction_ptr&> x) {
+      auto& t = std::get<0>(x);
+      if( t && !eosio::chain::is_onblock(*t)) {
+         trace = t;
+      }
+   } );
+
    BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( "alice"_n ), alice_ram_usage0 );
 
    c.push_action( "test"_n, "defercall"_n, "alice"_n, fc::mutable_variant_object()
@@ -448,6 +456,8 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try {
    dtrxs = c.get_scheduled_transactions();
    BOOST_CHECK_EQUAL( dtrxs.size(), 0 );
+   // must be equal before builtin_protocol_feature_t::replace_deferred to support replay of blocks before activation
+   BOOST_CHECK( first_dtrx_id.str() == trace->id.str() );
 
    c.produce_block();
 
@@ -507,6 +517,13 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try {
    BOOST_CHECK_EQUAL( dtrxs.size(), 1 );
    BOOST_CHECK_EQUAL( first_dtrx_id2, dtrxs[0] );
 
+   c.produce_block();
+
+   dtrxs = c.get_scheduled_transactions();
+   BOOST_CHECK_EQUAL( dtrxs.size(), 0 );
+   // Not equal after builtin_protocol_feature_t::replace_deferred activated
+   BOOST_CHECK( first_dtrx_id2.str() != trace->id.str() );
+
 } FC_LOG_AND_RETHROW()
 
 BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try {
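The replace_deferred hunks above hinge on one behavioral detail: before the replace_deferred protocol feature activates, replacing a deferred transaction reuses the original transaction id (so pre-activation blocks replay identically), while after activation the replacement receives a fresh id. The test observes this through the controller's applied_transaction signal. A condensed sketch of that capture idiom, with the signal signature taken from the hunk itself:

    // Condensed from the hunk above: keep the last non-onblock trace so the
    // test can compare deferred transaction ids before/after activation.
    transaction_trace_ptr trace;
    auto h = c.control->applied_transaction.connect(
       [&]( std::tuple<const transaction_trace_ptr&, const packed_transaction_ptr&> x ) {
          auto& t = std::get<0>(x);
          if( t && !eosio::chain::is_onblock(*t) )
             trace = t;   // onblock traces would otherwise mask the trx under test
       } );
    // ... push the deferred trx, then:
    // BOOST_CHECK( first_dtrx_id.str()  == trace->id.str() ); // pre-activation: id preserved
    // BOOST_CHECK( first_dtrx_id2.str() != trace->id.str() ); // post-activation: fresh id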
diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp
index f5e466dc53..ef50da4af5 100644
--- a/unittests/snapshot_tests.cpp
+++ b/unittests/snapshot_tests.cpp
@@ -530,31 +530,63 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_restart_with_existing_state_and_truncated_blo
    }
    chain.control->abort_block();
 
+   {
+      // create a new snapshot child
+      auto writer = SNAPSHOT_SUITE::get_writer();
+      chain.control->write_snapshot(writer);
+      auto snapshot = SNAPSHOT_SUITE::finalize(writer);
 
-   // create a new snapshot child
-   auto writer = SNAPSHOT_SUITE::get_writer();
-   chain.control->write_snapshot(writer);
-   auto snapshot = SNAPSHOT_SUITE::finalize(writer);
+      // create a new child at this snapshot
+      int ordinal = 1;
 
-   // create a new child at this snapshot
-   int ordinal = 1;
-   snapshotted_tester snap_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), ordinal++);
-   verify_integrity_hash(*chain.control, *snap_chain.control);
-   auto block = chain.produce_block();
-   chain.control->abort_block();
-   snap_chain.push_block(block);
-   verify_integrity_hash(*chain.control, *snap_chain.control);
+      snapshotted_tester snap_chain(chain.get_config(), SNAPSHOT_SUITE::get_reader(snapshot), ordinal++);
+      verify_integrity_hash(*chain.control, *snap_chain.control);
+      auto block = chain.produce_block();
+      chain.control->abort_block();
+      snap_chain.push_block(block);
+      verify_integrity_hash(*chain.control, *snap_chain.control);
 
-   snap_chain.close();
-   auto cfg = snap_chain.get_config();
-   // restart chain with truncated block log and existing state, but no genesis state (chain_id)
-   snap_chain.open();
-   verify_integrity_hash(*chain.control, *snap_chain.control);
+      snap_chain.close();
+      auto cfg = snap_chain.get_config();
+      // restart chain with truncated block log and existing state, but no genesis state (chain_id)
+      snap_chain.open();
+      verify_integrity_hash(*chain.control, *snap_chain.control);
+
+      block = chain.produce_block();
+      chain.control->abort_block();
+      snap_chain.push_block(block);
+      verify_integrity_hash(*chain.control, *snap_chain.control);
+   }
+   // test with empty block log
+   {
+      // create a new snapshot child
+      auto writer = SNAPSHOT_SUITE::get_writer();
+      chain.control->write_snapshot(writer);
+      auto snapshot = SNAPSHOT_SUITE::finalize(writer);
+
+      // create a new child at this snapshot
+      int ordinal = 2;
+      auto chain_cfg = chain.get_config();
+      chain_cfg.blog = eosio::chain::empty_blocklog_config{}; // use empty block log
+      snapshotted_tester snap_chain(chain_cfg, SNAPSHOT_SUITE::get_reader(snapshot), ordinal++);
+      verify_integrity_hash(*chain.control, *snap_chain.control);
+      auto block = chain.produce_block();
+      chain.control->abort_block();
+      snap_chain.push_block(block);
+      verify_integrity_hash(*chain.control, *snap_chain.control);
+
+      snap_chain.close();
+      auto cfg = snap_chain.get_config();
+      // restart chain with truncated block log and existing state, but no genesis state (chain_id)
+      snap_chain.open();
+      verify_integrity_hash(*chain.control, *snap_chain.control);
+
+      block = chain.produce_block();
+      chain.control->abort_block();
+      snap_chain.push_block(block);
+      verify_integrity_hash(*chain.control, *snap_chain.control);
+   }
 
-   block = chain.produce_block();
-   chain.control->abort_block();
-   snap_chain.push_block(block);
-   verify_integrity_hash(*chain.control, *snap_chain.control);
 }
 
 BOOST_AUTO_TEST_CASE(json_snapshot_validity_test)
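The second block added to the snapshot test above repeats the snapshot round-trip with the block log disabled entirely. The switch is a one-line configuration change: controller::config::blog behaves as a variant over block-log modes, which is also why get_retained_dir() in partitioned_block_log_tests.cpp probes it with std::get_if. A minimal sketch of the three modes this patch touches, assuming the variant alternatives are exactly the config structs named in the diffs:

    // Sketch (assumed variant alternatives; names as used in this patch):
    eosio::chain::controller::config cfg = chain.get_config();
    cfg.blog = eosio::chain::basic_blocklog_config{};   // single monolithic blocks.log
    cfg.blog = eosio::chain::empty_blocklog_config{};   // no block log persisted at all
    eosio::chain::partitioned_blocklog_config pcfg;
    pcfg.retained_dir = "retained";                     // relative paths resolve under blocks_dir
    cfg.blog = pcfg;                                    // split log with retained segments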
diff --git a/unittests/test_utils.hpp b/unittests/test_utils.hpp
new file mode 100644
index 0000000000..974fee3927
--- /dev/null
+++ b/unittests/test_utils.hpp
@@ -0,0 +1,177 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+namespace eosio::test_utils {
+
+using namespace eosio::chain;
+using namespace eosio::chain::literals;
+
+struct testit {
+   uint64_t id;
+   explicit testit(uint64_t id = 0)
+      :id(id){}
+   static account_name get_account() {
+      return chain::config::system_account_name;
+   }
+   static action_name get_name() {
+      return "testit"_n;
+   }
+};
+
+// Corresponds to the reqactivated action of the bios contract.
+// See libraries/testing/contracts/eosio.bios/eosio.bios.hpp
+struct reqactivated {
+   chain::digest_type feature_digest;
+
+   explicit reqactivated(const chain::digest_type& fd)
+      :feature_digest(fd){};
+
+   static account_name get_account() {
+      return chain::config::system_account_name;
+   }
+   static action_name get_name() {
+      return "reqactivated"_n;
+   }
+};
+
+// Create a read-only trx that works with bios reqactivated action
+auto make_bios_ro_trx(eosio::chain::controller& control) {
+   const auto& pfm = control.get_protocol_feature_manager();
+   static auto feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::replace_deferred);
+
+   signed_transaction trx;
+   trx.expiration = fc::time_point_sec{fc::time_point::now() + fc::seconds(30)};
+   vector<permission_level> no_auth{};
+   trx.actions.emplace_back( no_auth, reqactivated{*feature_digest} );
+   return std::make_shared<packed_transaction>( std::move(trx) );
+}
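testit and reqactivated follow the tester's action-wrapper convention: a plain struct carries the action payload, static get_account()/get_name() name the target contract and action, and the FC_REFLECT macros at the end of this header make the payload packable. With that in place, building the action is a single emplace_back, and passing an empty authorization vector is what makes make_bios_ro_trx a read-only transaction. A condensed sketch (the templated action constructor consuming such structs is assumed from the usage above):

    // Condensed from make_bios_ro_trx: an FC_REFLECTed wrapper becomes an action.
    signed_transaction trx;
    trx.actions.emplace_back( std::vector<permission_level>{},   // no authorizations => read-only
                              reqactivated{ *feature_digest } );  // target taken from get_account()/get_name()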
+
+// Push an input transaction to controller and return trx trace
+// If account is eosio then signs with the default private key
+auto push_input_trx(appbase::scoped_app& app, eosio::chain::controller& control, account_name account, signed_transaction& trx) {
+   trx.expiration = fc::time_point_sec{fc::time_point::now() + fc::seconds(30)};
+   trx.set_reference_block( control.head_block_id() );
+   if (account == config::system_account_name) {
+      auto default_priv_key = private_key_type::regenerate(fc::sha256::hash(std::string("nathan")));
+      trx.sign(default_priv_key, control.get_chain_id());
+   } else {
+      trx.sign(testing::tester::get_private_key(account, "active"), control.get_chain_id());
+   }
+   auto ptrx = std::make_shared<packed_transaction>( trx, packed_transaction::compression_type::zlib );
+
+   auto trx_promise = std::make_shared<std::promise<transaction_trace_ptr>>();
+   std::future<transaction_trace_ptr> trx_future = trx_promise->get_future();
+
+   app->executor().post( priority::low, exec_queue::read_write, [&ptrx, &app, trx_promise]() {
+      app->get_method<plugin_interface::incoming::methods::transaction_async>()(ptrx,
+         false, // api_trx
+         transaction_metadata::trx_type::input, // trx_type
+         true, // return_failure_traces
+         [trx_promise](const next_function_variant<transaction_trace_ptr>& result) {
+            if( std::holds_alternative<fc::exception_ptr>( result ) ) {
+               try {
+                  std::get<fc::exception_ptr>(result)->dynamic_rethrow_exception();
+               } catch(...) {
+                  trx_promise->set_exception(std::current_exception());
+               }
+            } else if ( std::get<transaction_trace_ptr>( result )->except ) {
+               try {
+                  std::get<transaction_trace_ptr>(result)->except->dynamic_rethrow_exception();
+               } catch(...) {
+                  trx_promise->set_exception(std::current_exception());
+               }
+            } else {
+               trx_promise->set_value(std::get<transaction_trace_ptr>(result));
+            }
+         });
+   });
+
+   if (trx_future.wait_for(std::chrono::seconds(5)) == std::future_status::timeout)
+      throw std::runtime_error("failed to execute trx: " + ptrx->get_transaction().actions.at(0).name.to_string() + " to account: " + account.to_string());
+
+   return trx_future.get();
+}
+
+// Push setcode trx to controller and return trx trace
+auto set_code(appbase::scoped_app& app, eosio::chain::controller& control, account_name account, const vector<uint8_t>& wasm) {
+   signed_transaction trx;
+   trx.actions.emplace_back(std::vector<permission_level>{{account, config::active_name}},
+                            chain::setcode{
+                               .account = account,
+                               .vmtype = 0,
+                               .vmversion = 0,
+                               .code = bytes(wasm.begin(), wasm.end())
+                            });
+   return push_input_trx(app, control, account, trx);
+}
+
+void activate_protocol_features_set_bios_contract(appbase::scoped_app& app, chain_plugin* chain_plug) {
+   using namespace appbase;
+
+   auto feature_set = std::make_shared<std::atomic<bool>>(false);
+   // has to execute when pending block is not null
+   for (int tries = 0; tries < 100; ++tries) {
+      app->executor().post( priority::high, exec_queue::read_write, [&chain_plug=chain_plug, feature_set](){
+         try {
+            if (!chain_plug->chain().is_building_block() || *feature_set)
+               return;
+            const auto& pfm = chain_plug->chain().get_protocol_feature_manager();
+            auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature);
+            BOOST_CHECK( preactivate_feature_digest );
+            chain_plug->chain().preactivate_feature( *preactivate_feature_digest, false );
+            std::vector<builtin_protocol_feature_t> pfs{
+               builtin_protocol_feature_t::only_link_to_existing_permission,
+               builtin_protocol_feature_t::replace_deferred,
+               builtin_protocol_feature_t::no_duplicate_deferred_id,
+               builtin_protocol_feature_t::fix_linkauth_restriction,
+               builtin_protocol_feature_t::disallow_empty_producer_schedule,
+               builtin_protocol_feature_t::restrict_action_to_self,
+               builtin_protocol_feature_t::only_bill_first_authorizer,
+               builtin_protocol_feature_t::forward_setcode,
+               builtin_protocol_feature_t::get_sender,
+               builtin_protocol_feature_t::ram_restrictions,
+               builtin_protocol_feature_t::webauthn_key,
+               builtin_protocol_feature_t::wtmsig_block_signatures };
+            for (const auto t : pfs) {
+               auto feature_digest = pfm.get_builtin_digest(t);
+               BOOST_CHECK( feature_digest );
+               chain_plug->chain().preactivate_feature( *feature_digest, false );
+            }
+            *feature_set = true;
+            return;
+         } FC_LOG_AND_DROP()
+         BOOST_CHECK(!"exception setting protocol features");
+      });
+      if (*feature_set)
+         break;
+      std::this_thread::sleep_for(std::chrono::milliseconds(50));
+   }
+
+   // Wait for next block
+   std::this_thread::sleep_for( std::chrono::milliseconds(config::block_interval_ms) );
+
+   auto r = set_code(app, chain_plug->chain(), config::system_account_name, testing::contracts::eosio_bios_wasm());
+   BOOST_CHECK(r->receipt && r->receipt->status == transaction_receipt_header::executed);
+}
+
+
+} // namespace eosio::test_utils
+
+FC_REFLECT( eosio::test_utils::testit, (id) )
+FC_REFLECT( eosio::test_utils::reqactivated, (feature_digest) )
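push_input_trx above bridges two threads: the test thread blocks on a std::future while the transaction executes on the application executor, and any exception raised there is surfaced back through the promise. Stripped of the transaction-specific parts, the idiom reduces to the generic sketch below (post_and_wait is a hypothetical helper, not part of this patch, and the executor is assumed to expose a post(callable)-style API with a copyable, non-void-returning work item):

    #include <chrono>
    #include <future>
    #include <memory>
    #include <stdexcept>

    // Hypothetical helper distilling the post-and-wait idiom from push_input_trx:
    // run `work` on another executor, surface its result or exception here, and
    // fail loudly instead of hanging if nothing completes within the timeout.
    template <typename Executor, typename Work>
    auto post_and_wait(Executor& exec, Work work) {
        using result_t = decltype(work());
        auto prom = std::make_shared<std::promise<result_t>>();
        auto fut  = prom->get_future();
        exec.post([prom, work]() {
            try         { prom->set_value(work()); }
            catch (...) { prom->set_exception(std::current_exception()); }
        });
        if (fut.wait_for(std::chrono::seconds(5)) == std::future_status::timeout)
            throw std::runtime_error("posted work timed out");
        return fut.get(); // rethrows any captured exception
    }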
diff --git a/unittests/wasm_tests.cpp b/unittests/wasm_tests.cpp
index f463b229be..b35818b7ad 100644
--- a/unittests/wasm_tests.cpp
+++ b/unittests/wasm_tests.cpp
@@ -1116,7 +1116,10 @@ BOOST_FIXTURE_TEST_CASE(eosio_abi, validating_tester) try {
    // verify to_variant works on eos native contract type: newaccount
    // see abi_serializer::to_abi()
    abi_serializer::to_variant(*result, pretty_output, get_resolver(), abi_serializer::create_yield_function( abi_serializer_max_time ));
+   BOOST_TEST(fc::json::to_string(pretty_output, fc::time_point::now() + abi_serializer_max_time).find("newaccount") != std::string::npos);
+   pretty_output.clear();
+   abi_serializer::to_variant(*result, pretty_output, get_resolver(), abi_serializer_max_time);
    BOOST_TEST(fc::json::to_string(pretty_output, fc::time_point::now() + abi_serializer_max_time).find("newaccount") != std::string::npos);
 
    produce_block();
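The wasm_tests hunk extends the eosio_abi case to exercise both abi_serializer::to_variant overloads: one taking a yield function and one taking a max serialization time directly, asserting that each serializes the native newaccount type. The two call shapes, side by side as they appear in the hunk (illustrative; names come from the test fixture):

    // Both overloads are expected to emit the serialized "newaccount" action.
    fc::variant v1, v2;
    abi_serializer::to_variant( *result, v1, get_resolver(),
                                abi_serializer::create_yield_function( abi_serializer_max_time ) );
    abi_serializer::to_variant( *result, v2, get_resolver(), abi_serializer_max_time );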