diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/404.html b/404.html
new file mode 100644
index 00000000..2490b293
--- /dev/null
+++ b/404.html
@@ -0,0 +1,2623 @@
+ [2623 lines of generated Material for MkDocs markup: the themed "XMU Chenggroup Wiki" 404 page, whose visible body text is "404 - Not found"]
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 00000000..5364f51a --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +wiki.cheng-group.net \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.fe8b6f2b.min.js b/assets/javascripts/bundle.fe8b6f2b.min.js new file mode 100644 index 00000000..cf778d42 --- /dev/null +++ b/assets/javascripts/bundle.fe8b6f2b.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function 
ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! + * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var M=f()(_);return u("cut"),M},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(j,"px"),M.setAttribute("readonly",""),M.value=V,M}var te=function(_,M){var j=A(_);M.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,M):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,M):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(M){return typeof M}:H=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=_.action,j=M===void 0?"copy":M,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(M){return typeof M}:Ie=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var M=0;M<_.length;M++){var j=_[M];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,M){return _&&ro(V.prototype,_),M&&ro(V,M),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(M){return M.__proto__||Object.getPrototypeOf(M)},Wt(V)}function vr(V,_){var M="data-clipboard-".concat(V);if(_.hasAttribute(M))return _.getAttribute(M)}var Ri=function(V){Ci(M,V);var _=Hi(M);function M(j,D){var Y;return _i(this,M),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(M,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return 
J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),M}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var 
r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 
0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var O=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return O}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),G(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return 
C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),v(r=>Go.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),v(t=>en.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(v(t=>t?O:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),b(t=>t.length>0),G(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return 
S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():O))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function ln(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function B(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!B("announce.dismiss")||!e.childElementCount)return O;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:O),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else 
return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of 
Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?O:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):O})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:O)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return S(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup 
rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let 
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),Na(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>B("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(v(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return O;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let o=new 
g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(b(()=>B("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?O:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return O;let r=e.target.closest("a");if(r===null)return O;if(r.target||e.metaKey||e.ctrlKey)return O;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):O}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),O}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return O;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),v(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),O)))),v(Xn),v(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return 
e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>O)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?O:(i.preventDefault(),I(p))}}return O}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),G(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?O:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>O),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>O),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>O),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return O}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return O}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>O),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let 
u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):O})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new 
URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),G(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;B("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),G(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>B("search.highlight")?mi(e,{index$:Mi,location$:jt}):O),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),G(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.fe8b6f2b.min.js.map + diff --git a/assets/javascripts/bundle.fe8b6f2b.min.js.map b/assets/javascripts/bundle.fe8b6f2b.min.js.map new file mode 100644 index 00000000..82635852 --- /dev/null +++ b/assets/javascripts/bundle.fe8b6f2b.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", 
"node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", 
"node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", 
"src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", 
"src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n 
var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * 
------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n 
.map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an + +
+

使用终端登录服务器后,没办法直接显示图形界面。需要在服务器上使用画图软件时,可以通过 X11 Forwarding 功能将图像显示到本地电脑上,只需要在登录命令里加上 -X 或者 -Y 选项:

+
ssh -X -i <key_file> -p <port_number> username@server_ip
+
+

在config文件中配置X11 Forwarding*

+
Host <hostnickname>
+    ForwardX11 yes  # (1)!
+    ForwardX11Trusted yes # (2)!
+
+
  1. equivalent to -X
  2. equivalent to -Y (This option is valid only if ForwardX11 is set to yes!)
+
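配置完成后,直接 ssh <hostnickname> 登录即可自动启用 X11 转发。下面给出一个简单的验证示例(示意:假设服务器上装有 xclock 等任意图形程序,若没有可换成其他 X 程序):

ssh <hostnickname>
xclock    # 若本地弹出时钟窗口,说明 X11 转发工作正常(xclock 仅为示例,需服务器上已安装)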

使用跳板机/代理进行远程登录

+

本组的服务器限制了登录的 IP,即只能在学校 IP 范围内登录。同时由于登录需要密钥,而密钥保存在办公室电脑上,因此登录必须使用办公室电脑;人不在办公室时就很难登录服务器。

+

解决方法就是,先在校园网环境下通过SSH登录到办公室电脑(仅自己的用户名密码即可),再通过办公室电脑登录到服务器。此时办公室电脑是作为跳板来使用的:

+
ssh username@proxy
+ssh -p port_number -i key_file username@cluster191
+
+

在config文件中配置跳板机*

+

打开 ~/.ssh/config,复制以下代码(注意去掉注释,否则可能会报错):

+
# nickname you set for your office computer
+Host proxy
+    # username you set for login
+    User robinzhuang
+    # IP address of your office computer, change the xxx to real one!
+    Hostname 10.24.3.xxx
+
+# nickname for your cluster
+Host myserver
+    # username you set, change to real one!
+    User kmr
+    # IP for cluster, change to real one!
+    Hostname 123.45.67.89
+    # the key file location used in login 
+    IdentityFile ~/.ssh/id_rsa
+    # specify the port number, replace xx with real port!
+    Port xx
+    # use Host proxy as Jump Server
+    ProxyJump proxy
+
+

可以发现,这其实是在直接登录课题组服务器的配置基础上做的一些改进:先配置从本地电脑登录到跳板机,再配置经由跳板机登录到服务器。

+
+

如果上述的 ProxyJump proxy 不起作用,可将其替换为 ProxyCommand ssh -o 'ForwardAgent yes' proxy "ssh-add ~/.ssh/id_rsa && nc %h %p" ,请用你的密钥的路径来代替上述的 ~/.ssh/id_rsa 部分。

+
+

完成以上配置后,可以使用如下命令直接登录:

+
ssh myserver
+
+
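配置生效后,scp 等基于 SSH 的命令同样可以直接使用该昵称,例如(示意,文件名请自行替换):

scp ./local_file myserver:~/    # 经由跳板机将本地文件拷贝到服务器的家目录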

在config文件中转发端口*

+

有时,我们在服务器上部署了 jupyter notebook 等服务时,需要把远程的某个端口 (以下例子中为 8888 端口) 转发到本地的某个端口 (以下例子中为 9999 端口),使得在本地访问 https://localhost:9999 时也能访问远程的 jupyter notebook 服务。

+
Host myserver # (1)!
+    User kmr # (2)!
+    Hostname 123.45.67.89 # (3)!
+    LocalForward 9999 localhost:8888 # (4)!
+
+
  1. 为你的服务器取一个任意的昵称
  2. 请修改为真实的用户名
  3. 请修改为真实的IP
  4. localhost:8888 是相对于远端服务器而言的IP和端口,若不是 localhost,请替换为对应的IP和端口号
+
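若不想修改 config,也可以直接用命令行完成同样的端口转发(示意,昵称与端口号请按实际情况替换),随后在本地浏览器访问 localhost:9999 即可:

ssh -NfL 9999:localhost:8888 myserver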

在使用跳板机的情况下使用X11 Forwarding

+

只需要在 ~/.ssh/config 中加入

+
Host * # (1)!
+    ForwardX11Trusted yes
+
+
  1. 对任意配置生效
+

一份示例配置文件(config)

+

以下为 ~/.ssh/config 的一个示例,需要时可在这份示例文件上进行修改,必要修改的部分已在注释中标出,General config 可以直接照抄。注意须删掉文件中所有的注释。

+
# General config
+Host *
+    ForwardX11Trusted yes
+    ForwardAgent yes
+    AddKeysToAgent yes
+    ServerAliveInterval 60
+    ControlPersist yes
+    ControlMaster auto
+    ControlPath /tmp/%r@%h:%p
+
+# set proxy
+# nickname for your Jump Server
+Host nickname_proxy
+    # IP for Jump Server (REPLACE IT!)
+    Hostname 10.24.3.255
+    # your username for Jump Server (REPLACE IT!)
+    User chenglab
+
+# Host1 and host2
+# nickname for your cluster
+Host nickname_1
+    Hostname 123.45.67.89
+    # your host1 username (REPLACE IT!)
+    User kmr1 
+    LocalForward 8051 localhost:8888
+# nickname for your cluster
+Host nickname_2
+    Hostname 123.45.67.90
+    # your host2 username (REPLACE IT!)
+    User kmr2
+    LocalForward 8052 localhost:8888
+
+# set same parts for host1 and host2
+# use your own nickname
+Host nickname_1 nickname_2
+    Port 7696
+    # use your own nickname
+    ProxyJump nickname_proxy
+
+

超纲的部分*

+

在配置文件中可以实现类似条件选择的功能。以下例子描述的是:当网络环境经常变化、连接同一台机器可能需要走不同 IP 时,可以采取的配置策略。

+
+

此例子不建议初学者直接复制粘贴,其中需要替换的部分请根据具体应用场景来自行斟酌

+
+
Host elements
+    User chenglab
+    Match host elements exec "nc -G 4 -z 10.24.3.144 %p"
+        # Private net IP
+        Hostname 10.24.3.144
+    Match host elements
+        # Public net IP
+        Hostname xxx.xxx.xxx.xxx
+        Port 6000
+
+

常见问题

+

SSH private key permissions are too open

+

The error message is

+
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ WARNING: UNPROTECTED PRIVATE KEY FILE! @
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+Permissions 0644 for '/home/me/.ssh/id_rsa_targethost' are too open.
+It is recommended that your private key files are NOT accessible by others.
+This private key will be ignored.
+bad permissions: ignore key: /home/me/.ssh/id_rsa_targethost
+
+

This arises from the permissions of your private key file (e.g. id_rsa).

+

Use the command ls -l to check the permissions of your id_rsa file. If they are not -rw-------, change them with the following command:

+
chmod 600 ~/.ssh/id_rsa
+
+

No xauth data; using fake authentication data for X11 forwarding.

+

The error message is

+
Warning: No xauth data; using fake authentication data for X11 forwarding.
+
+

This is because ssh cannot find your xauth program. On macOS with XQuartz it is usually located at /opt/X11/bin/xauth. Add the following to your ssh config file:

+
Host *
+    XAuthLocation /opt/X11/bin/xauth
+
+

Remote host identification has changed!

+

If the remote host has just been repaired or reinstalled, an error like the one below might be raised.

+
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!     @
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
+Someone could be eavesdropping on you right now (man-in-the-middle attack)!
+It is also possible that a host key has just been changed.
+The fingerprint for the RSA key sent by the remote host is
+51:82:00:1c:7e:6f:ac:ac:de:f1:53:08:1c:7d:55:68.
+Please contact your system administrator.
+Add correct host key in /Users/isaacalves/.ssh/known_hosts to get rid of this message.
+Offending RSA key in /Users/isaacalves/.ssh/known_hosts:12
+RSA host key for 104.131.16.158 has changed and you have requested strict checking.
+Host key verification failed.
+
+

Don't panic: just edit your ~/.ssh/known_hosts file and remove the line containing the IP address of that remote host. On some systems such as Ubuntu or Debian, the error message suggests the exact command (typically ssh-keygen -R <hostname>) that removes the offending key for you.

+
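For example, the offending entry can be removed in one step (the IP below is just the one from the sample message above; use the hostname or IP reported in your own error):

ssh-keygen -f ~/.ssh/known_hosts -R 104.131.16.158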

However, if no repair or upgrade has taken place, a man-in-the-middle attack is possible. Stop logging in and contact the cluster administrator at once to confirm.

+ + + + + + + + + + + + + + + +

Comments

+ + + + + + + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/cluster_usage/tensorboard/index.html b/en/wiki/cluster_usage/tensorboard/index.html new file mode 100644 index 00000000..5be0416e --- /dev/null +++ b/en/wiki/cluster_usage/tensorboard/index.html @@ -0,0 +1,2889 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 集群 TensorBoard 使用指南 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

集群 TensorBoard 使用指南

+

需求

+

基于 DeepMD-kit 或者 TensorFlow 的代码调试及训练数据实时监控。

+

TensorBoard 是什么

+

DeepMD-kit 官方教程

+

用法

+

DP 官方教程给出了在本地运行程序时的可视化方法;如果程序在服务器上运行,则还需要进行端口转发。

+

在计算节点上运行程序(推荐)

+
+

以在 gpu3 队列运行 DeepMD-kit 训练程序为例,其他程序可对应替换。

+
+
    +
  1. 通过 lsf 脚本提交程序到计算节点 +
    #!/bin/bash
    +#BSUB -q gpu3
    +#BSUB -W 24:00
    +#BSUB -J type_map_0
    +#BSUB -o %J.stdout
    +#BSUB -e %J.stderr
    +#BSUB -n 4
    +#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=yes"
    +#BSUB -R "span[ptile=32]"
    +
    +# add modulefiles
    +module add deepmd/2.0-cuda11.3
    +
    +dp train input.json 1>> train.log 2>> train.err &
    +tensorboard --logdir=log --port=6006
    +
    如果想要实时查看训练过程中的数据,训练指令和 tensorboard 的运行指令需要同时运行,故用 & 将训练指令放到后台执行。
    +

    --logdir指定 tensorboard 的 event 文件所在路径(在 json 文件中指定)。

    +

    --port指定 tensorboard 在服务器上运行的端口号(缺省默认为 6006)。

    +
    +
  2. 查看计算节点 ip 地址:做法类似 jupyter notebook 教程,在登录节点命令行输入下面指令(将 c51-m002 替换为实际运行的节点)。
    cat /etc/hosts | grep c51-m002
    +
  3. 将端口转发到本地(完整示例见本节末尾)
    ssh -NfL localhost:<local_port>:<remote_ip>:<port> <username>@<ip_of_cluster>
    +
+
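以下给出一个端到端的示意(其中计算节点 IP 10.10.1.2 与本地端口 6006 均为假设值,请替换为上一步查到的实际 IP 及自己选定的端口),完成后在本地浏览器访问 http://localhost:6006 即可打开 TensorBoard:

ssh -NfL localhost:6006:10.10.1.2:6006 <username>@<ip_of_cluster>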

在登录节点上运行程序

+
+

Warning

+

仅供短时间测试!长时间运行请使用计算节点!!

+
+

在命令行中运行训练和 tensorboard 程序后,在本地执行

+
ssh -NfL <local_port>:localhost:<port> <username>@<ip_of_cluster>
+
+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/cluster_usage/vscode_remote/index.html b/en/wiki/cluster_usage/vscode_remote/index.html new file mode 100644 index 00000000..f0337f39 --- /dev/null +++ b/en/wiki/cluster_usage/vscode_remote/index.html @@ -0,0 +1,2838 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 在非登陆节点上使用VSCode - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

在非登陆节点上使用VSCode

+

VSCode 通过 Remote 插件提供了强大的远程编辑能力,使得用户可以在远程获得接近本地的编辑体验。VSCode Server 原生基于 Node 和 Electron 技术,有着较高的内存等需求,但鉴于目前登陆节点的资源日渐捉襟见肘,这里提出一个方案,可以让用户较为方便地使用非登陆节点的资源开启 VSCode Remote。

+

本文假设用户已经阅读过 SSH 与 SCP 使用入门(特别是有关 config 文件的部分),并知晓集群的基础概况和调度系统使用方法。如未阅读,请先参阅上述两篇文字。

+

MacOS 和 Linux 用户

+

由于笔者目前使用的设备是 MacOS 操作系统(Linux情况类似),这里给出较完整的图文说明。

+

首先用自己最顺手的方式打开并编辑 ~/.ssh/config 文件,参照这里的说明,增加登陆节点的配置信息:

+
.ssh/config
Host <nickname>
+    HostName <ip_of_zeus>
+    Port <port>
+    User <username>
+
+

请将 <ip_of_zeus>、<port>、<username> 替换为实际的 IP 地址、端口号以及用户名。<nickname> 请替换为任意自己喜欢的昵称,但请注意,不要使用 c5* 的形式!否则会和下文冲突。

+

然后增加以下几行:

+
.ssh/config
Host c5*
+    User <username>
+    ProxyCommand ssh -o ForwardAgent=yes <username>@<nickname> "nc -w 120 %h %p"
+
+

这里采用 c5* 作为前缀,是为了能从登陆节点快速登陆到对应的计算节点。Zeus 集群上所有计算节点(含 CPU、GPU、胖节点)均以 c5 开头,具有类似 c5*-* 的形式,故这里采用如此写法。请根据集群的情况对应调整。

+

然后在集群上,运行以下命令,开启一个虚拟终端:

+
user@login01$ bsub -q fat -n 1 -Is bash
+Job <xxx> is submitted to queue <fat>.
+<<Waiting for dispatch ...>>
+<<Starting on c51-s001>>
+user@c51-s001:~$ 
+
+

注意 bsub 的附加参数请参照集群使用说明,Walltime 及队列情况仍需参照相应说明进行设置。

+
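在上述虚拟终端保持运行的情况下,可以先在本地终端测试能否经由跳板直接连到该计算节点(示意,节点名请以实际分配到的为准):

ssh c51-s001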

然后,请打开一个VSCode窗口,并点击左下角的按钮,选择“Connect to Host”:

+

+

输入虚拟终端所在的节点,例如上文中的输出 c51-s001:

+

+

如果提示输入密码等信息,请按回车以继续

+

+

等待安装 VSCode Server 即可。若以前曾配置过远程,会自动调用之前的服务。

+

Windows 用户

+

对于Windows用户,由于笔者暂时没有Windows设备,请参照此教程尝试,思路比较接近。本文即参考了该文章的实现。

+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/deprecated/deepmd-kit_installation_104/index.html b/en/wiki/deprecated/deepmd-kit_installation_104/index.html new file mode 100644 index 00000000..9b8738c7 --- /dev/null +++ b/en/wiki/deprecated/deepmd-kit_installation_104/index.html @@ -0,0 +1,3008 @@ + + + + + + + + + + + + + + + + + + + + + DeePMD-kit安装教程1.0 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

DeepMD-kit安装实战:服务器篇(旧版)

+
+

本部分写于2019年11月,基于国重服务器环境进行安装,适用于Tensorflow版本低于1.13的情形。目前针对更高版本已经有新版教程,请移步。

+
+

准备工作

+

首先准备必要的依赖。

+

检查可用的模块,并加载必要的模块:

+
module avail
+module add cuda/9.2
+module add gcc/4.9.4
+# gcc>=4.9 required by dp_ipi, or it won't be built.
+# Since gcc-8.3 is not supported, we select a lower version here.
+
+

本教程推荐使用conda虚拟环境安装,故:

+
module add miniconda/3.7
+conda create -n deepmd python=3.6
+conda activate deepmd
+
+

下载并编译nccl:

+
cd /some/nccl_download_path
+git clone https://github.com/NVIDIA/nccl.git -b v2.4.8-1
+cd nccl
+make -j src.build --prefix="/some/nccl_install_path" NVCC_GENCODE="-gencode=arch=compute_70,code=sm_70"
+
+

由于国重GPU节点不能直接联网,故使用登陆节点进行编译效率较高,但由于缺少必要的依赖 libcuda.so 和 libcuda.so.1(包含在GPU驱动中,登陆节点未安装),故采用 stubs 所带的库编译,并手动加入环境变量。

+
ln -s /share/cuda/9.2/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/share/cuda/9.2/lib64/stubs:/some/local/path
+
+

在某个想要的路径下将tensorflow-1.12版本的源代码下载好:

+
cd /some/workspace
+git clone https://github.com/tensorflow/tensorflow tensorflow -b r1.12 --depth=1
+
+

下载好bazel安装包并运行,将所需的环境加入环境变量:

+
wget https://github.com/bazelbuild/bazel/releases/download/0.15.0/bazel-0.15.0-installer-linux-x86_64.sh
+chmod +x bazel-0.15.0-installer-linux-x86_64.sh
+./bazel-0.15.0-installer-linux-x86_64.sh --user
+export PATH="$PATH:$HOME/bin"
+
+

tensorflow编译

+

首先配置tensorflow的编译选项:

+
cd tensorflow/
+./configure
+
+

根据需要,提供正确的组件和路径:

+
Please specify the location of python. [Default is /xxx]:
+
+Found possible Python library paths:
+  /xxx/python3.6/site-packages
+Please input the desired Python library path to use. Default is [xxx/python3.6/site-packages]
+
+Do you wish to build TensorFlow with Apache Ignite support? [Y/n]: Y
+
+Do you wish to build TensorFlow with XLA JIT support? [Y/n]: Y
+
+Do you wish to build TensorFlow with OpenCL SYCL support? [y/N]: N
+
+Do you wish to build TensorFlow with ROCm support? [y/N]: N
+
+Do you wish to build TensorFlow with CUDA support? [y/N]: Y
+
+Please specify the CUDA SDK version you want to use. [Leave empty to default to CUDA 9.0]: 9.2
+
+Please specify the location where CUDA 9.2 toolkit is installed. Refer to README.md for more details. [Default is /usr/local/cuda]: /share/cuda/9.2
+
+Please specify the cuDNN version you want to use. [Leave empty to default to cuDNN 7]: 7
+
+Please specify the location where cuDNN 7 library is installed. Refer to README.md for more details. [Default is /usr/local/cuda-10.0]: /share/cuda/9.2
+
+Do you wish to build TensorFlow with TensorRT support? [y/N]: N
+
+Please specify the NCCL version you want to use. If NCCL 2.2 is not installed, then you can use version 1.3 that can be fetched automatically but it may have worse performance with multiple GPUs. [Default is 2.2]: 2.4.8
+
+Please specify the location where NCCL 2 library is installed. Refer to README.md for more details. [Default is /usr/local/cuda]:/some/nccl_install_path
+
+Please note that each additional compute capability significantly increases your build time and binary size. [Default is: 3.5,7.0] 6.1
+
+Do you want to use clang as CUDA compiler? [y/N]: N
+
+Please specify which gcc should be used by nvcc as the host compiler. [Default is /xxx/gcc]: 
+
+Do you wish to build TensorFlow with MPI support? [y/N]: N
+
+Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native]: -march=native
+
+Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]:N
+
+
+

注意

+
    +
  1. CUDA需要写清是9.2版本,否则可能会找不到小版本的依赖库。
  2. +
+

然后运行编译,但由于该节点的版本较为非主流,建议自行编译tf的python interface以避免兼容性问题。

+
bazel build --config=opt --copt=-msse4.2 --copt=-mavx --copt=-mavx2 --copt=-mfma --local_resources 2048,.5,1.0 --config=cuda //tensorflow/tools/pip_package:build_pip_package --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}"
+
+

由于目前节点支持主要的几种优化参数,故可以全部打开以加快运行速度。

+

为了他人的正常使用,建议主动限制在登陆节点上编译时的内存和CPU资源使用量。--local_resources 2048,.5,1.0这个设定可能有些保守,但可以保证不会占用过多资源(实测需要11个小时左右,但全程内存占用不超过2G且只使用了一个线程,若觉得太慢可以把中间的参数适当调高)。

+
    +
  1. nccl和gcc的路径对应前面加载和编译的环境。
  2. +
+
+

编译如果通过,则再运行以下命令编译c++ interface(实际上一步已经编译好所需的大部分依赖,这一步只是再封装成c++库):

+
bazel build -c opt --copt=-msse4.2 --copt=-mavx --copt=-mavx2 --copt=-mfma --config=cuda --verbose_failures //tensorflow:libtensorflow_cc.so --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}"
+
+

这里可以先将tensorflow-python安装好。

+
./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
+pip install /tmp/tensorflow_pkg/tensorflow-version-tags.whl # depends on your version info
+
+

然后,将进行一系列依赖的编译和安装。以防万一,建议首先安装依赖,方便起见,这里使用conda安装。

+
conda install automake autoconf libtool
+
+

将cmake切换到新版本:

+
module add cmake/3.7.3
+
+

指定tf-cc的目标路径为变量$tensorflow_root,并依次运行以下命令:

+
mkdir -p $tensorflow_root
+mkdir /tmp/proto
+sed -i 's;PROTOBUF_URL=.*;PROTOBUF_URL=\"https://mirror.bazel.build/github.com/google/protobuf/archive/v3.6.0.tar.gz\";g' tensorflow/contrib/makefile/download_dependencies.sh
+tensorflow/contrib/makefile/download_dependencies.sh
+cd tensorflow/contrib/makefile/downloads/protobuf/
+./autogen.sh
+./configure --prefix=/tmp/proto/
+make
+make install
+mkdir /tmp/eigen
+cd ../eigen
+mkdir build_dir
+cd build_dir
+cmake -DCMAKE_INSTALL_PREFIX=/tmp/eigen/ ../
+make install
+mkdir /tmp/nsync
+cd ../../nsync
+mkdir build_dir
+cd build_dir
+cmake -DCMAKE_INSTALL_PREFIX=/tmp/nsync/ ../
+make
+make install
+cd ../../absl
+bazel build
+mkdir -p $tensorflow_root/include/
+rsync -avzh --include '*/' --include '*.h' --exclude '*' absl $tensorflow_root/include/
+cd ../../../../..
+mkdir $tensorflow_root/lib
+cp bazel-bin/tensorflow/libtensorflow_cc.so $tensorflow_root/lib/
+cp bazel-bin/tensorflow/libtensorflow_framework.so $tensorflow_root/lib/
+cp /tmp/proto/lib/libprotobuf.a $tensorflow_root/lib/
+cp /tmp/nsync/lib64/libnsync.a $tensorflow_root/lib/
+mkdir -p $tensorflow_root/include/tensorflow
+cp -r bazel-genfiles/* $tensorflow_root/include/
+cp -r tensorflow/cc $tensorflow_root/include/tensorflow
+cp -r tensorflow/core $tensorflow_root/include/tensorflow
+cp -r third_party $tensorflow_root/include
+cp -r /tmp/proto/include/* $tensorflow_root/include
+cp -r /tmp/eigen/include/eigen3/* $tensorflow_root/include
+cp -r /tmp/nsync/include/*h $tensorflow_root/include
+cd $tensorflow_root/include
+find . -name "*.cc" -type f -delete
+rm -fr /tmp/proto /tmp/eigen /tmp/nsync
+
+

以完成c++部分的编译。

+

DeePMD-kit安装(1.0+)

+

首先下载DeePMD-kit,并进入:

+
cd /some/workspace
+git clone https://github.com/deepmodeling/deepmd-kit.git
+cd deepmd-kit
+deepmd_source_dir=`pwd`
+
+

如果前面使用了module load gcc/4.9.4提供的高版本gcc(以4.9.4为例)进行编译,需要手动载入对应的环境变量供cmake识别正确的gcc版本。

+
export CC=/share/apps/gcc/4.9.4/bin/gcc
+export CXX=/share/apps/gcc/4.9.4/bin/g++
+
+

然后安装dpmd-py

+
pip install .
+
+
+

如果遇到no module named 'google'或者no module named 'absl'的报错,则可能存在版本bug,需要重新安装依赖。

+
pip install --upgrade protobuf
+pip install --upgrade absl-py
+
+
+

指定DeePMD-kit的目标路径为变量$deepmd_root,随后编译DeePMD-kit C++ Interface:

+
cd $deepmd_source_dir/source
+mkdir build 
+cd build
+cmake -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
+make
+make install
+
+

如果运行:

+
$ ls $deepmd_root/bin
+dp_ipi
+$ ls $deepmd_root/lib
+libdeepmd_ipi.so  libdeepmd_op.so  libdeepmd.so
+
+

得到上述的结果,说明编译成功(若 cmake 时检测到的是 4.8 或更低版本的 gcc,则编译结果会缺少 dp_ipi 和 libdeepmd_ipi.so)。

+

LAMMPS DeePMD-kit 接口编译

+

首先编译接口:

+
cd $deepmd_source_dir/source/build
+make lammps
+
+

然后下载好稳定版的lammps,并解压:

+
cd /some/workspace
+wget -c https://lammps.sandia.gov/tars/lammps-stable.tar.gz
+tar xf lammps-stable.tar.gz
+
+

若解压后得到目录名为lammps-31Mar17,则

+
cd lammps-31Mar17/src/
+cp -r $deepmd_source_dir/source/build/USER-DEEPMD .
+
+

打开deepmd module,并根据需要添加所需的模块,以fep为例:

+
make yes-user-deepmd
+make yes-user-fep 
+
+

载入需要的mpi库,并编译:

+
module load intel/15.0.6
+module load mpi/intel/5.0.3.049
+make mpi -j4
+
+

得到可执行文件:lmp_mpi

+

可将该文件复制到 $PATH 包含的路径中,即可直接输入文件名运行。

+
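例如(示意,假设 ~/bin 已包含在 $PATH 中):

cp lmp_mpi ~/bin/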

注意

+

完成上述安装步骤后,若需要立即测试运行,必须将 stubs 提供的 libcuda.so 和 libcuda.so.1 从环境变量中移除,否则运行时会报错。

+

可以直接退出登陆并重新登陆,以免出现该问题。

+

一些可能的坑

+

尽管上述过程应该已经绕过了大部分的坑,但仍不能保证100%安装运行成功。这里记录几种可能的报错的处理方案。

+

需要conda init

+

这种情况已知可能发生在lsf脚本提交的步骤,来源于conda activate deepmd的步骤。具体原因尚不清楚,解决方案是手动载入所需的环境变量。推荐的做法是利用用户自定义module。

+

首先,启用自定义module:

+
module load use.own
+
+

然后运行module avail查看自定义脚本的文件位置,输出结果可能如下:

+
----------- /share/base/modulefiles/compilers -----------
+............
+
+------------- /usr/share/Modules/modulefiles ------------
+dot         module-git  module-info modules     null        use.own
+
+------------ /data/home/someuser/privatemodules ------------
+null
+
+

显示/data/home/someuser/privatemodules是当前用户自定义模块的存放位置。

+

则创建路径,并进入:

+
mkdir -p /data/home/someuser/privatemodules
+cd /data/home/someuser/privatemodules
+
+

然后根据想要的名字创建文件或目录。

+

比如想以deepmd为模块名,且希望提供不同版本的支持,则可以:

+
mkdir deepmd
+vim 1.0
+
+

编辑1.0文件:

+
# Help message
+proc ModulesHelp { } {
+    set nameversion [module-info name]
+    regsub "/.*" $nameversion "" name
+    regsub ".*/" $nameversion "" version
+    puts stderr "\tLoads the $version $name environment"
+}
+
+# Set variables
+set nameversion [module-info name]
+regsub "/.*" $nameversion "" name
+regsub ".*/" $nameversion "" version
+
+module-whatis "Miniconda, an alternative distribution for python 3.6"
+
+# set environment variables
+
+    setenv        PYTHONROOT    /data/home/someuser/anaconda3/envs/deepmd
+
+    prepend-path    PATH        $env(PYTHONROOT)/bin
+    prepend-path    MANPATH        $env(PYTHONROOT)/share/man
+    prepend-path    PYTHONPATH    $env(PYTHONROOT)/lib/python3.6/site-packages
+
+

注意修改PYTHONROOT为正确的虚拟环境路径(可用conda env list查看),并且python3.6也要与实际使用的python版本一致。

+

这样,便可以通过module调用所需的虚拟环境。

+

使用时提交脚本可以这样写:

+
module load use.own
+module load deepmd/1.0
+
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/deprecated/lsf_usage/index.html b/en/wiki/deprecated/lsf_usage/index.html new file mode 100644 index 00000000..438fa239 --- /dev/null +++ b/en/wiki/deprecated/lsf_usage/index.html @@ -0,0 +1,3144 @@ + + + + + + + + + + + + + + + + + + + + + Lsf usage - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

LSF Usage

+ +

LSF 作业管理系统(新版,作为归档)

+

目前 LSF Suite 10.2 已在 Zeus 上部署测试,该版本包含了新版的 LSF 作业管理系统,因而可对 GPU 提供支持。

+

输入 lsload -gpu 即可查看集群当前可以使用的 GPU 数目:

+
HOST_NAME       status  ngpus  gpu_shared_avg_mut  gpu_shared_avg_ut  ngpus_physical
+c51-g001            ok      4                  1%                 6%               4
+c51-g002            ok      4                  0%                 6%               4
+c51-m002            ok      8                  9%                68%               8
+c51-m004            ok      8                 12%                89%               8
+c51-m003            ok      8                  9%                72%               8
+c51-m001            ok      8                 15%                72%               8
+
+

输入 lsload -gpuload 则可以对 GPU 负载情况进行统计:

+
HOST_NAME       gpuid   gpu_model   gpu_mode  gpu_temp   gpu_ecc  gpu_ut  gpu_mut gpu_mtotal gpu_mused   gpu_pstate   gpu_status   gpu_error
+c51-g001            0 TeslaV100_S        0.0       48C       0.0     26%       7%      31.7G      1.1G            0           ok           -
+                    1 TeslaV100_S        0.0       38C       0.0      0%       0%      31.7G        0M            0           ok           -
+                    2 TeslaV100_S        0.0       36C       0.0      0%       0%      31.7G        0M            0           ok           -
+                    3 TeslaV100_S        0.0       37C       0.0      0%       0%      31.7G        0M            0           ok           -
+c51-g002            0 A10080GBPCI        0.0       44C       0.0      8%       0%      79.3G     1020M            0           ok           -
+                    1 A10080GBPCI        0.0       49C       0.0      8%       0%      79.3G     1020M            0           ok           -
+                    2 A10080GBPCI        0.0       47C       0.0      8%       0%      79.3G     1020M            0           ok           -
+                    3 A10080GBPCI        0.0       44C       0.0      0%       0%      79.3G      434M            0           ok           -
+c51-m004            0 NVIDIAGeFor        0.0       64C       0.0     91%      13%      10.7G      1.5G            2           ok           -
+                    1 NVIDIAGeFor        0.0       65C       0.0     89%      13%      10.7G      1.5G            2           ok           -
+                    2 NVIDIAGeFor        0.0       60C       0.0     88%      12%      10.7G      1.5G            2           ok           -
+                    3 NVIDIAGeFor        0.0       66C       0.0     89%      13%      10.7G      1.5G            2           ok           -
+                    4 NVIDIAGeFor        0.0       69C       0.0     87%      13%      10.7G      1.5G            2           ok           -
+                    5 NVIDIAGeFor        0.0       70C       0.0     91%      13%      10.7G      1.5G            2           ok           -
+                    6 NVIDIAGeFor        0.0       65C       0.0     85%      12%      10.7G      1.5G            2           ok           -
+                    7 NVIDIAGeFor        0.0       64C       0.0     87%      12%      10.7G      1.5G            2           ok           -
+c51-m002            0 NVIDIAGeFor        0.0       58C       0.0     92%      14%      10.7G      1.5G            2           ok           -
+                    1 NVIDIAGeFor        0.0       65C       0.0     86%      13%      10.7G      2.5G            2           ok           -
+                    2 NVIDIAGeFor        0.0       56C       0.0     86%      13%      10.7G      2.5G            2           ok           -
+                    3 NVIDIAGeFor        0.0       55C       0.0     63%       8%      10.7G      768M            2           ok           -
+                    4 NVIDIAGeFor        0.0       51C       0.0     63%       8%      10.7G      768M            2           ok           -
+                    5 NVIDIAGeFor        0.0       52C       0.0     68%       9%      10.7G      768M            2           ok           -
+                    6 NVIDIAGeFor        0.0       54C       0.0     66%       8%      10.7G      768M            2           ok           -
+                    7 NVIDIAGeFor        0.0       52C       0.0     39%       2%      10.7G      1.5G            2           ok           -
+c51-m003            0 NVIDIAGeFor        0.0       55C       0.0     62%       8%      10.7G      768M            2           ok           -
+                    1 NVIDIAGeFor        0.0       53C       0.0     64%       8%      10.7G      768M            2           ok           -
+                    2 NVIDIAGeFor        0.0       51C       0.0     64%       8%      10.7G      768M            2           ok           -
+                    3 NVIDIAGeFor        0.0       55C       0.0     62%       8%      10.7G      768M            2           ok           -
+                    4 NVIDIAGeFor        0.0       55C       0.0     79%      10%      10.7G      768M            2           ok           -
+                    5 NVIDIAGeFor        0.0       57C       0.0     79%      10%      10.7G      768M            2           ok           -
+                    6 NVIDIAGeFor        0.0       54C       0.0     80%      10%      10.7G      768M            2           ok           -
+                    7 NVIDIAGeFor        0.0       55C       0.0     80%      10%      10.7G      768M            2           ok           -
+c51-m001            0 NVIDIAGeFor        0.0       62C       0.0     98%      21%      10.7G      1.7G            2           ok           -
+                    1 NVIDIAGeFor        0.0       64C       0.0     98%      22%      10.7G      1.7G            2           ok           -
+                    2 NVIDIAGeFor        0.0       58C       0.0     97%      21%      10.7G      1.7G            2           ok           -
+                    3 NVIDIAGeFor        0.0       66C       0.0     93%      19%      10.7G      894M            2           ok           -
+                    4 NVIDIAGeFor        0.0       69C       0.0     98%      21%      10.7G      1.7G            2           ok           -
+                    5 NVIDIAGeFor        0.0       62C       0.0     98%      21%      10.7G      1.7G            2           ok           -
+                    6 NVIDIAGeFor        0.0       25C       0.0      0%       0%      10.7G        0M            8           ok           -
+                    7 NVIDIAGeFor        0.0       35C       0.0      0%       0%      10.7G        0M            8           ok           -
+
+

使用 GPU 资源时,需要对提交脚本进行相应修改,用 -gpu 命令申请 GPU 资源。

+
#!/bin/bash
+
+#BSUB -q gpu
+#BSUB -W 24:00
+#BSUB -J train
+#BSUB -o %J.stdout
+#BSUB -e %J.stderr
+#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=no"
+#BSUB -n 4
+#BSUB -R "span[ptile=32]"
+
+module add deepmd/2.0b1
+lmp_mpi -i input.lammps 1>> model_devi.log 2>> model_devi.log
+
+

其中 num=1 表示申请1张GPU卡,j_exclusive=no 表示允许和其他任务共存,-n 表示申请的CPU核数。使用V100时,请设置为不超过8的整数;使用A100时,请设置为不超过8的整数,若为开启MIG的情况,请参考A100拆分实例使用说明;使用2080Ti时,请设置为不超过4的整数,否则均可能会出现资源空闲但无法使用的情况。如希望独占一张卡请使用j_exclusive=yes。

+
+
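例如,若希望独占一张卡,可将提交脚本中的 GPU 申请行改写为:

#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=yes"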

链接

+

使用新版 LSF 提交任务,不需要引入检测脚本或CUDA_VISIBLE_DEVICES控制使用的GPU。

+
+

绑定CPU

+

对某些作业类型(如VASP),当使用GPU时,会希望CPU进程尽可能独立运行在所分配的核上,此时可通过设置 CPU 亲和性来控制所用的核数。示例如下:

+
#!/bin/bash
+#
+#BSUB -q gpu
+#BSUB -W 12:00
+#BSUB -J vasp
+#BSUB -o vasp.%J.stdout
+#BSUB -e vasp.%J.stderr
+#BSUB -n 8
+#BSUB -R "span[ptile=32]"
+#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=no"
+#BSUB -R "affinity[core(1,exclusive=(core,alljobs))]"
+
+# add modulefiles
+module load vasp/6.1.0-openacc
+mpirun -np 1 vasp_gam
+
+

其中,core(1,exclusive=(core,alljobs)) 表示使用1个核,且该核不与其他作业共享。注意这里的核数需要根据实际使用的核数指定,本例中作业里 mpirun -np 的参数是1,故写1。

+

DP-GEN Slurm 系统提交方法

+

以训练步骤为例:

+
{
+  "train": [
+    {
+      "machine": {
+        "machine_type": "slurm",
+        "hostname": "xx.xxx.xxx.xxx",
+        "port": 22,
+        "username": "chenglab",
+        "work_path": "/home/chenglab/ypliu/dprun/train"
+      },
+      "resources": {
+        "numb_gpu": 1,
+        "numb_node": 1,
+        "task_per_node": 2,
+        "partition": "gpu",
+        "exclude_list": [],
+        "source_list": [],
+        "module_list": [
+            "deepmd/1.2"
+        ],
+        "time_limit": "96:0:0",
+        "sleep": 20
+      },
+      "python_path": "/share/apps/deepmd/1.2/bin/python3.6"
+    }
+  ],
+  ...
+}
+
+

若提交任务使用QoS设置,则可以在resources中增加qos项目,示例如下:

+
{
+  "train": [
+    {
+      "machine": {
+        "machine_type": "slurm",
+        "hostname": "xx.xxx.xxx.xxx",
+        "port": 22,
+        "username": "chenglab",
+        "work_path": "/home/chenglab/ypliu/dprun/train"
+      },
+      "resources": {
+        "numb_gpu": 1,
+        "numb_node": 1,
+        "task_per_node": 2,
+        "partition": "gpu",
+        "exclude_list": [],
+        "source_list": [],
+        "module_list": [
+            "deepmd/1.2"
+        ],
+        "time_limit": "96:0:0",
+        "qos": "normal",
+        "sleep": 20
+      },
+      "python_path": "/share/apps/deepmd/1.2/bin/python3.6"
+    }
+  ],
+  ...
+}
+
+

LSF 作业管理系统(旧版)

+
+

目前旧版 LSF 系统(10.1.0.0)已不再适用,此部分仅作归档,不再更新,还请留意。新版说明请移步上文对应章节。

+
+

在GPU节点上,需要通过指定 CUDA_VISIBLE_DEVICES 来对任务进行管理。

+
#!/bin/bash
+
+#BSUB -q gpu
+#BSUB -W 24:00
+#BSUB -J test
+#BSUB -o %J.stdout
+#BSUB -e %J.stderr
+#BSUB -n 4
+
+
+

lsf 提交脚本中需要包含 export CUDA_VISIBLE_DEVICES=X ,其中 X 数值需要根据具体节点的卡的使用情况确定。

+
+

使用者可以用 ssh <host> nvidia-smi 登陆到对应节点(节点名为 <host>)检查 GPU 使用情况。示例如下:

+

$ ssh c51-g001 nvidia-smi
+Wed Mar 10 12:59:01 2021
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 460.32.03    Driver Version: 460.32.03    CUDA Version: 11.2     |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|                               |                      |               MIG M. |
+|===============================+======================+======================|
+|   0  Tesla V100-SXM2...  Off  | 00000000:61:00.0 Off |                    0 |
+| N/A   42C    P0    42W / 300W |      3MiB / 32510MiB |      0%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+|   1  Tesla V100-SXM2...  Off  | 00000000:62:00.0 Off |                    0 |
+| N/A   43C    P0    44W / 300W |  31530MiB / 32510MiB |     62%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+|   2  Tesla V100-SXM2...  Off  | 00000000:89:00.0 Off |                    0 |
+| N/A   43C    P0    45W / 300W |      3MiB / 32510MiB |      0%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+|   3  Tesla V100-SXM2...  Off  | 00000000:8A:00.0 Off |                    0 |
+| N/A   43C    P0    47W / 300W |      3MiB / 32510MiB |      0%      Default |
+|                               |                      |                  N/A |
++-------------------------------+----------------------+----------------------+
+
++-----------------------------------------------------------------------------+
+| Processes:                                                                  |
+|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
+|        ID   ID                                                   Usage      |
+|=============================================================================|
+|    1   N/A  N/A    127004      C   ...pps/deepmd/1.2/bin/python    31527MiB |
++-----------------------------------------------------------------------------+
+
表示目前该节点(c51-g001)上 1 号卡正在被进程号为 127004 的进程 ...pps/deepmd/1.2/bin/python 使用,占用显存为 31527 MB,GPU 利用率为 62%。

+

在 Zeus 集群使用 deepmd 的提交脚本示例如下(目前 large 队列未对用户最大提交任务数设限制,Walltime 也无时间限制):

+
#!/bin/bash
+
+#BSUB -q large
+#BSUB -J train
+#BSUB -o %J.stdout
+#BSUB -e %J.stderr
+#BSUB -n 4
+
+module add cuda/9.2
+module add deepmd/1.0
+export CUDA_VISIBLE_DEVICES=0
+# decided by the specific usage of gpus
+dp train input.json > train.log
+
+

检测脚本

+

Zeus 集群上预置了两个检测脚本,针对不同需要对卡的使用进行划分。

+

可以使用检测脚本/share/base/tools/export_visible_devices来确定 $CUDA_VISIBLE_DEVICES 的值,示例如下:

+
#!/bin/bash
+
+#BSUB -q gpu
+#BSUB -J train
+#BSUB -o %J.stdout
+#BSUB -e %J.stderr
+#BSUB -n 4
+
+module add cuda/9.2
+module add deepmd/1.0
+source /share/base/scripts/export_visible_devices
+
+dp train input.json > train.log
+
+

/share/base/tools/export_visible_devices 可以使用flag -t mem 控制显存识别下限,即使用显存若不超过 mem 的数值,则认为该卡未被使用。根据实际使用情况和经验,默认100 MB以下视为空卡,即可以向该卡提交任务。

+
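例如(示意,阈值大小请按需调整;脚本路径请以集群上实际部署位置为准):

source /share/base/scripts/export_visible_devices -t 200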

也可以使用检测脚本/share/base/tools/avail_gpu.sh来确定 $CUDA_VISIBLE_DEVICES 的值。/share/base/tools/avail_gpu.sh 可以使用flag -t util 控制显卡利用率可用上限,即使用显卡利用率若超过 util 的数值,则认为该卡被使用。目前脚本默认显卡利用率低于5%视为空卡,即可以向该卡提交任务。

+

任务优先级设置(QoS)(不可用)

+

默认情况下提交的任务Qos设置为normal,即填充在整个队列的末尾。如果任务比较紧急,可以向管理员报备申请使用emergency优先级,采用此优先级的任务默认排在队列顶。

+

使用方法如下,即在提交脚本中加入下行:

+
#SBATCH --qos emergency
+
+

DP-GEN

+

以训练步骤为例:

+
{
+  "train": [
+    {
+      "machine": {
+        "machine_type": "lsf",
+        "hostname": "xx.xxx.xxx.xxx",
+        "port": 22,
+        "username": "username",
+        "password": "password",
+        "work_path": "/some/remote/path"
+      },
+      "resources": {
+        "node_cpu": 4,
+        "numb_node": 1,
+        "task_per_node": 4,
+        "partition": "large",
+        "exclude_list": [],
+        "source_list": [
+            "/share/base/scripts/export_visible_devices -t 100"
+        ],
+        "module_list": [
+            "cuda/9.2",
+            "deepmd/1.0"
+                ],
+        "time_limit": "96:0:0",
+        "submit_wait_time": 20
+      },
+      "python_path": "/share/deepmd-1.0/bin/python3.6"
+    }
+  ],
+  ......
+}
+
+

DP-GEN v1.0 API

+
+

注意

+

train 部分使用了对新版 LSF 提供支持的写法,即同时指定 gpu_usage 和 gpu_new_syntax 为 True,从而可在提交脚本中使用新版 LSF 的语法。

model_devi部分使用的是旧版语法,且未指定GPU,但导入了检测脚本。

fp 部分使用的是针对CPU计算使用的语法。注意 mpiexec.hydra 需要写出。

+
+
{
+  "api_version": "1.0",
+  "train": [
+    {
+      "command": "dp",
+      "machine": {
+        "batch_type": "LSF",
+        "context_type": "SSHContext",
+        "local_root": "./",
+        "remote_root": "/data/tom/dprun/train",
+        "remote_profile": {
+            "hostname": "123.45.67.89",
+            "username": "tom"
+        }
+      },
+      "resources": {
+        "number_node": 1,
+        "cpu_per_node": 4,
+        "gpu_per_node": 1,
+        "queue_name": "gpu",
+        "group_size": 1,
+        "kwargs": {
+          "gpu_usage": true,
+          "gpu_new_syntax": true, 
+          "gpu_exclusive": true
+        },
+        "custom_flags": [
+          "#BSUB -J train",
+          "#BSUB -W 24:00"
+        ],
+        "module_list": [
+          "deepmd/2.0"
+        ]
+      }
+    }
+  ],
+  "model_devi":[
+    {
+      "command": "lmp_mpi",
+      "machine":{
+        "batch_type": "LSF",
+        "context_type": "SSHContext",
+        "local_root": "./",
+        "remote_root": "/data/jerry/dprun/md",
+        "remote_profile": {
+          "hostname": "198.76.54.32",
+          "username": "jerry",
+          "port": 6666
+        }
+      },
+      "resources": {
+        "number_node": 1,
+        "cpu_per_node": 8,
+        "gpu_per_node": 0,
+        "queue_name": "gpu",
+        "group_size": 5,
+        "kwargs": {
+          "gpu_usage": false
+        },
+        "custom_flags": [
+          "#BSUB -J md",
+          "#BSUB -W 24:00"
+        ],
+        "strategy": {"if_cuda_multi_devices": false},
+        "para_deg": 2,
+        "module_list": [
+          "deepmd/2.0"
+        ],
+        "source_list": [
+          "/share/base/tools/avail_gpu.sh"
+        ]
+      }
+    }
+  ],
+  "fp":[
+    {
+      "command": "mpiexec.hydra -genvall vasp_gam",
+      "machine":{
+        "batch_type": "LSF",
+        "context_type": "SSHContext",
+        "local_root": "./",
+        "remote_root": "/data/jerry/dprun/fp",
+        "remote_profile": {
+          "hostname": "198.76.54.32",
+          "username": "jerry",
+          "port": 6666
+        }
+      },
+      "resources": {
+        "number_node": 2,
+        "cpu_per_node": 32,
+        "gpu_per_node": 0,
+        "kwargs": {
+          "gpu_usage": false
+        },
+        "custom_flags": [
+          "#BSUB -J label",
+          "#BSUB -W 12:00"
+        ],
+        "queue_name": "medium",
+        "group_size": 10,
+        "module_list": [
+          "intel/17.5.239",
+          "mpi/intel/2017.5.239",
+          "vasp/5.4.4"
+        ]
+      }
+    }
+  ]
+}
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/deprecated/mig_usage/index.html b/en/wiki/deprecated/mig_usage/index.html new file mode 100644 index 00000000..e940341d --- /dev/null +++ b/en/wiki/deprecated/mig_usage/index.html @@ -0,0 +1,2838 @@ + + + + + + + + + + + + + + + + + + + + + 使用A100切分的GPU实例 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

使用集群上的 GPU —— 使用A100切分的GPU实例

+

目前Zeus上已经部署了c51-g002节点,安装有4张Nvidia Tesla A100加速卡。Nvidia官方在A100发布后引入了Multi-Instance GPU(MIG)技术,可以将一张A100拆分为最多7个GPU实例(GPU Instance),在此基础上可以创建计算实例(Computing Instance)。

+

拆分工作需要管理员权限,因而管理员已经事先将其中的3张卡拆分为7个GI并创建CI,因此目前c51-g002节点可以同时使用至多22个GPU实例。

+

受限于现有的调度系统,如果你希望使用Zeus上的A100来进行计算,请仔细阅读以下操作指引。

+

常规使用

+

目前,c51-g002节点上的0号卡尚未开启MIG功能,因此使用上基本与V100一样。为了调度方便,请务必使用 j_exclusive=yes 选项以确保任务可以正确调度到0号卡。如果使用DP-GEN,请设置 gpu_exclusive 为 true。

+
+

注意

+

不要心存侥幸设置j_exclusive=no,你会惊奇地发现任务可能被提交到其他卡上,因而无法尽情地享用80GB大显存。同时这也会使得其他人的任务被提交到0号卡上,从而产生干扰。

+
+

由于A100仅支持CUDA 11.1以上版本,故请注意使用的软件版本。以DeePMD-kit为例,目前集群上只有deepmd/2.0-cuda11.3兼容,因此请务必注意势函数和使用的DeePMD的版本,以免出现报错。

+

以下给出示例提交脚本:

+
#!/bin/bash
+#BSUB -q gpu2
+#BSUB -W 24:00
+#BSUB -J deepmd
+#BSUB -o %J.stdout
+#BSUB -e %J.stderr
+#BSUB -n 11
+#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=yes"
+#BSUB -R "span[ptile=11]"
+
+# add modulefiles
+module add deepmd/2.0-cuda11.3
+
+dp train input.json 1>> train.log 2>> train.err
+
+

请参考/data/share/base/scripts下的实例,可复制粘贴使用。(带有A100标注,不带MIG后缀)

+

这里设置-n 11是考虑到GI调度的要求,我们需要防止出现多于22个任务同时运行在A100上。

+

使用MIG切分的GI

+

受限于现有LSF调度系统,尚且无法直接完成对GI的调度。因此我们需要另辟蹊径,所幸j_exclusive=no的情况下可以让任务正确识别到开启了MIG的卡,但也仅限于此了。我们需要进一步让任务正确分配到空闲的CI上,而非默认的第一个(通常编号为7)。

+
+

注意

+

不要心存侥幸设置j_exclusive=yes,你会惊奇地发现如果有人用了0号卡,你的任务会处于PEND状态,这是因为LSF认为其他卡均非空。

+
+
+

注意

+

也请不要参考LSF官方文档对于这里的说明,我们的版本不兼容MIG选项。

+
+

实际上英伟达官方指导中,若要手动使用CI,需要指定CUDA_VISIBLE_DEVICES为对应的UUID。通过ssh登陆到c51-g002节点上,运行以下命令:

+
nvidia-smi -L
+
+

可以得到以下输出:

+
GPU 0: A100 80GB PCIe (UUID: GPU-558ce120-5b8b-16a1-87d4-ce157bba3e9d)
+GPU 1: A100 80GB PCIe (UUID: GPU-162e30f5-cc45-efb9-1e81-19337f4919ce)
+  MIG 1g.10gb Device 0: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/7/0)
+  MIG 1g.10gb Device 1: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/8/0)
+  MIG 1g.10gb Device 2: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/9/0)
+  MIG 1g.10gb Device 3: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/11/0)
+  MIG 1g.10gb Device 4: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/12/0)
+  MIG 1g.10gb Device 5: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/13/0)
+  MIG 1g.10gb Device 6: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/14/0)
+GPU 2: A100 80GB PCIe (UUID: GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747)
+  MIG 1g.10gb Device 0: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/7/0)
+  MIG 1g.10gb Device 1: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/8/0)
+  MIG 1g.10gb Device 2: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/9/0)
+  MIG 1g.10gb Device 3: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/10/0)
+  MIG 1g.10gb Device 4: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/11/0)
+  MIG 1g.10gb Device 5: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/12/0)
+  MIG 1g.10gb Device 6: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/13/0)
+GPU 3: A100 80GB PCIe (UUID: GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792)
+  MIG 1g.10gb Device 0: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/7/0)
+  MIG 1g.10gb Device 1: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/8/0)
+  MIG 1g.10gb Device 2: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/9/0)
+  MIG 1g.10gb Device 3: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/11/0)
+  MIG 1g.10gb Device 4: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/12/0)
+  MIG 1g.10gb Device 5: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/13/0)
+  MIG 1g.10gb Device 6: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/14/0)
+
+

可以看到,1-3号GPU各自拥有了7个独立的MIG Device,各自的UUID列在括号里。

+
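按照英伟达官方的做法,手动使用某个实例时需将 CUDA_VISIBLE_DEVICES 设为对应实例的 UUID,例如(示意,UUID 请以上面 nvidia-smi -L 的实际输出为准):

export CUDA_VISIBLE_DEVICES=MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/7/0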

但是,如果你试图把任务直接交上去,并且手动指定一个UUID,则会发现很可能你的任务没有跑在想要的卡上,甚至在CPU上运行。这是因为LSF调度下,只有一张卡可见,因此只有该可见卡的UUID才有效。

+

因此,无论怎样,我们都需要一个脚本来监测自己目前位于哪张卡,该卡上有哪几个GI空闲。

+

管理员提供了一个脚本放置在/data/share/base/tools/mig_check.py,可输出当前可用的UUID。该脚本已经设置好执行环境,因而直接运行即可,不要用本地的Python环境来执行。以下给出一个示例提交脚本:

+
#BSUB -e %J.err
+#BSUB -o %J.out
+#BSUB -n 1
+#BSUB -R 'span[ptile=1]'
+#BSUB -q gpu2
+#BSUB -gpu 'num=1:mode=shared:j_exclusive=no'
+#BSUB -J train
+#BSUB -W 24:00
+
+module load deepmd/2.0-cuda11.3
+
+export CUDA_VISIBLE_DEVICES=`/data/share/base/tools/mig_check.py`
+
+dp train input.json 1>> train.log 2>> train.err
+
+

请设置使用1个CPU核以免没有足够多的CPU数供任务提交。

+

如果使用新版DP-GEN或DPDispatcher来调度任务,请加入新的环境变量选项。以下给出一个resources部分的示例:

+
"resources": {
+    "number_node": 1,
+    "cpu_per_node": 1,
+    "gpu_per_node": 1,
+    "queue_name": "gpu2",
+    "group_size": 1,
+    "kwargs": {
+      "gpu_usage": true,
+      "gpu_new_syntax": true,
+      "gpu_exclusive": false
+    },
+    "custom_flags": [
+      "#BSUB -J train",
+      "#BSUB -W 24:00"
+    ],
+    "strategy": {"if_cuda_multi_devices": false},
+    "module_list": ["deepmd/2.0-cuda11.3"],
+    "envs": {"CUDA_VISIBLE_DEVICES": "`/data/share/base/tools/mig_check.py`"},
+    "wait_time": 60
+}
+
+

请务必设置 gpu_exclusive 为 false 以确保任务正确提交到1-3号卡;请务必设置 if_cuda_multi_devices 为 false 以免自动写入 CUDA_VISIBLE_DEVICES。同时经过实践,30 s 的等待时间对于训练任务可能太短,建议设为 60 s。

+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/how_to_edit/howtodo/index.html b/en/wiki/how_to_edit/howtodo/index.html new file mode 100644 index 00000000..2cd2c2d0 --- /dev/null +++ b/en/wiki/how_to_edit/howtodo/index.html @@ -0,0 +1,3842 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 如何使用 Wiki - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

如何使用 Wiki

+

Wiki 书写使用 markdown 格式。本 wiki 使用 python-markdown 作为 markdown 的解释器,支持一些 markdown 的扩展语法。在本地编辑 markdown 文件时,推荐使用 VSCode

+
+

Warning

+

Typora正式版已经收费,且测试版在某些系统环境已不可用。

+
+

有任何问题可以在 https://github.com/chenggroup/chenggroup.github.io/issues 进行反馈。

+
+

文档中带有 * 的部分可以略过。

+
+

对某篇 wiki 内容有疑问

+

请使用页面下方的评论区,登录 GitHub 账号后进行评论。该部分基于giscus构建,可以自动创建一个discussion,从而提供方便的互动。此功能需要创建页面的贡献者手动开启。

+

如何上传 wiki

+

如果还不会 markdown 语法,可以先看 markdown 语法部分,能被识别为 wiki 的 markdown 文件应在文件的开头插入 YAML Front Matter。把自己的 markdown 文档上传到 wiki 上可以有两种方案,本质都是在使用 Github: 1. 上传文件至 Github 仓库 (推荐);2. 由 wiki 网站 导向编辑页面。

+

上传文件至 github 仓库 (推荐)

+

推荐通过 pull requests 的方法来增加或修改 wiki 网站 上的 wiki。

+

1. Fork wiki 文档所在仓库

+

先 fork https://github.com/chenggroup/chenggroup.github.io ,然后进入 fork 成功后的仓库。

+ + +

2. 创建新文件或上传本地文件

+ + +

推荐在本地用 typora 等编辑器写好 markdown 后直接上传文件,文件请上传至 _wiki 目录 (master 分支)。也可以修改 fork 的仓库的 docs/wiki 下的文件,然后再提交 PR。

+

3. 设置导航

+
+

Note

+

新增步骤

+
+

在上传新的文档后,需要手动在仓库首级的 mkdocs.yml 中设置导航。

+

例如在软件使用中增加 VASP 使用教程的话(假设放在 docs/wiki/software_usage/vasp.md),且希望放在 CP2K 和 DP-GEN 之间,请在 nav 中增加如下内容:

+
nav:
+  ...
+  - Wikis:
+      ...
+      - 软件使用:
+          ...
+          - wiki/software_usage/Tips_for_LaTeX.md
+          - CP2K:
+              ...
+          - wiki/software_usage/vasp.md # 新增导航
+          - wiki/software_usage/DP-GEN.md
+          ...
+      ...
+
+

4. 提交 PR

+ + +

如何预览 wiki

+

预览 wiki 也有两种方案:1. 使用 typora 等实时渲染;2. 在本地启动 Mkdocs 服务。

+

通过 typora (注意已经收费)

+

使用 typora 编辑器可以很方便地实时渲染 markdown 文件。如果不使用本 wiki 中标注有 *wiki 扩展语法 ,则可以大体上认为 typora 所渲染出的文档与直接查看 wiki 网站 的文档相差无几,基本仅存在显示风格上的差异。但要注意需更改 typora 的一些设置(见后文),避免和 wiki 所使用的 markdown 扩展功能发生冲突。

+

修改 markdown 拓展语法设置

+

需要关闭上下标、高亮以及图表的功能。

+

Screen Shot 2019-11-08 at 21.21.10

+

修改数学公式设置

+

需要关闭数学公式自动添加序号的功能。

+

Screen Shot 2019-11-08 at 21.23.00

+

修改图像设置

+

需要把默认的无特殊操作改为通过 iPic 上传图片,不过在这之前需要 下载 iPic 。推荐在 iPic 偏好设置中开启压缩上传图片的选项,这样可以使 wiki 网页加载的速度更快。

+

image-20210602152924699

+

通过 Mkdocs 服务*

+

1. 下载网站源码至本地

+
git clone https://github.com/chenggroup/chenggroup.github.io.git
+cd chenggroup.github.io
+
+

2. 安装 mkdocs-material 和 必要的 mkdocs 插件

+

可参考 mkdocs-material 官方安装指南

+
pip install mkdocs-material \
+    mkdocs-macros-plugin \
+    mkdocs-static-i18n[material]
+
+

3. 启动 Mkdocs 服务

+
mkdocs serve
+
+

4. 编辑 wiki

+

把要预览的 wiki 移到 docs/wiki/ 目录下,或是直接编辑 docs/wiki/ 目录下的 markdown 文件。

+

5. 预览 wiki

+

等待片刻,打开浏览器访问 http://127.0.0.1:8000

+

Markdown 语法

+

Markdown 是一种标记语言,和代码一样,可以用纯文本的形式来书写。其使用的常用标记符号不超过十个,可以让人专注于文字而不是排版,并且也可以方便地导出为 HTML、PDF 等格式。

+

基本语法

+

markdown-basic-gramma

+
+

⚠ 插入图片时切勿使用本地路径,否则在 wiki 上无法查看,具体请参考 Typora 插入图片设置

+
+

可参考 markdown 教程练习 来学习基本语法。

+
+

⚠ 要引用同一篇 wiki 中的小标题(二至六级标题)可以通过 [sub title](#sub-title) 来引用。不过需要注意,要把小标题中的空格用 - 代替,所有大写字母改成小写,且忽略 . , & 等特殊符号。比如,用 [1. Fork wiki 文档所在仓库](#1-fork-wiki-文档所在仓库) 来表示 1. Fork wiki 文档所在仓库 。若有多个同名标题,以 title, title-1, title-2 来区分。
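下面给出一个小标题引用的最小示例(标题名称请按实际文中的标题替换):

```markdown
## 2. 创建新文件或上传本地文件

...

具体步骤参见 [2. 创建新文件或上传本地文件](#2-创建新文件或上传本地文件)。
```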

+
+

GFM 扩展语法

+

GFM(GitHub Flavored Markdown) 是 github 所使用的 markdown 扩展语法。

+

清单

+
- [ ] 未完成列表
+- [x] 已完成列表
+
+

表情

+
:eyeglasses: :+1:
+
+

👓 👍

+

Wiki 扩展语法

+

标注 * 的部分可以不去注意

+

YAML Front Matter

+
加入标题
+

只有在 markdown 文件的头部加入 YAML Front Matter 部分,才能使你写的 wiki 展示在网页上。因此最简单的,请在 YAML Front Matter 中加入 title,如下所示:

+
---
+title: getting-started
+---
+
+
添加作者
+

YAML Front Matter 中加入 authors 即可添加作者,多个作者用 yaml 语法的列表表示:

+
---
+title: getting-started
+authors: one author
+---
+
+
---
+title: getting-started
+authors:
+  - author1
+  - author2
+---
+
+
开启评论功能
+

对创建页面的编辑者来说,通常情况下请开启评论功能、以便读者可以快速提交反馈或评论,即在 YAML Front Matter 部分增加一行:

+
---
+...
+comments: true
+---
+
+

数学公式

+

数学公式可以用 LaTeX 语法来书写,两端用 $(一般用于行内公式) 或 $$(会使公式居中显示) 来标记,如 $E=mc^2$ 可表示 \(E=mc^2\)

+
$$
+E[\rho] = T_s[\rho] + \int \mathrm{d}r\ v_{\rm ext}(r)\rho(r) + V_{H}[\rho] + E_{\rm xc}[\rho]
+$$
+
+
+

要表示多行公式,需要使用 aligned,并要在行尾部加 \\

+
$$
+\begin{aligned} \dot{x} &= \sigma(y-x) \\
+\dot{y} &= \rho x - y - xz \\
+\dot{z} &= -\beta z + xy \end{aligned} 
+$$
+
+
+

若实现给公式编号等功能,可参照 LaTeX 的做法。

+

化学式与化学反应式

+

此功能通过 LaTeX 的 mhchem 插件来实现,使用上与数学公式输入相近,都需要通过 $ 或 $$ 来标记。

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| 源码 | 化学式与化学反应式 |
| --- | --- |
| $\ce{Mg(OH)2}$ | \(\ce{Mg(OH)2}\) |
| $\ce{CrO4^2-}$ | \(\ce{CrO4^2-}\) |
| $\ce{[Cu(NH3)4]^2+}$ | \(\ce{[Cu(NH3)4]^2+}\) |
| $\ce{CoCl2.6H2O}$ | \(\ce{CoCl2.6H2O}\) |
| $\ce{^{227}_{90}Th+}$ | \(\ce{^{227}_{90}Th+}\) |
| $\ce{C2H5-OH}$ | \(\ce{C2H5-OH}\) |
| $\ce{CH3CH=CH2}$ | \(\ce{CH3CH=CH2}\) |
| $\ce{HC#CH}$ | \(\ce{HC#CH}\) |
| $\ce{CaCO3 ->[900\,{}^{\circ}\mathrm{C}] CaO + CO2}$ | \(\ce{CaCO3 ->[900\,{}^{\circ}\mathrm{C}] CaO + CO2}\) |
| $\ce{H2PO4- <=>C[OH-][H+] H+ + HPO4^2-}$ | \(\ce{H2PO4- <=>C[OH-][H+] H+ + HPO4^2-}\) |
+

上下标

+

一般情况下可以用 <sup></sup> 表示上标,用 <sub></sub> 表示下标,如 支付宝TM 可用 支付宝<sup>TM</sup> 表示。

+

按钮*

+
[Subscribe to our newsletter](#){ .md-button }
+
+ + +

default +primary

+

提示*

+
!!! tldr "title"
+    TLDR means too long, didn't read
+
+

改变 tldr 即可使用不同的提示类型,比如

+
+

Use tldr for this.

+

TLDR means too long, didn't read

+
+
+

Use tip for this.

+

This is a tip.

+
+
+

Use info for this.

+

This is a piece of information, or you can use todo.

+
+
+

Use question for this.

+

This is a question.

+
+
+

Use warning for this.

+

This is a warning

+
+
+

Use danger for this.

+

This alerts danger!

+
+
+

Use success for this.

+

This alerts success

+
+

流程图

+

流程图可以用来表示工作流或者步骤等:

+
``` mermaid
+graph LR
+  A[Start] --> B{Error?};
+  B -->|Yes| C[Hmm...];
+  C --> D[Debug];
+  D --> B;
+  B ---->|No| E[Yay!];
+```
+
+
+

引用本网站的其他 wiki

+

使用

+
[title](relavent/path/to/file.md)
+
+

即可在 wiki 中引用本网站的其他 wiki 。只需将 relavent/path/to/file.md 改成想要引用的 wiki 相对此文档的 相对路径

+

比如,要想引用 如何使用 wiki 这篇 wiki,则只需把 relavent/path/to/file.md 换成 ../how_to_edit/howtodo.md

+

Screen Shot 2019-12-02 at 11.30.03

+
+

Warning

+

注意这里推荐使用的是相对路径,可不改变同级目录结构。如需修改上级目录结构需要对应更改。

+
+

文档英文翻译

+

目前本 Wiki 采用 mkdocs-static-i18n 实现多语言支持,因而若需要编写翻译版本,仅需要在同一目录下增加一个后缀为 .en 的markdown文件。例如中文文档为 custom.md,则英文文档为 custom.en.md

+

注意请将导言区的 title 内容翻译为英文。

+

若涉及导航栏中自定义栏目的翻译,请在 mkdocs.yml 中增加。以下给出一个实例:

+
nav:
+  - 主页: index.md
+  - 分类1: 
+      - topic1/index.md
+      - topic1/item1.md
+  - 分类2: topic2/index.md
+
+plugins:
+  - i18n:
+    languages:
+      - locale: en
+        default: true
+        name: English
+      - locale: fr
+        name: Français
+        nav_translations:
+          主页: Home
+          分类1: Topic 1
+          分类2: Topic 2
+
+

参考资料*

+

要使用更多功能,请参考mkdocs-material官方文档

+

当然,想要快速获得支持,也可以联系作者或者 Open an issue


如何发布 News (致管理员)

+

因为正在迁移到新的实现,此功能暂不启用。

+

服务器的一些信息或是其他零碎的信息可发布在 News 里。

+

上传文件

+

文件的格式

+

请上传 markdown 格式的文件,当然也支持一些 markdown 的拓展功能

+

文件的命名

+

文件以 YYYY-MM-dd-name.md 来命名,如 2019-11-01-welcome.md

+
+

如果文件前缀的日期是个未来日期,则其不会在 News 页面上显示,不过当到了其日期之后则会自动出现在 News 页面上。

+
+

设置 News 的摘要

+

在一级标题之下, <!--more--> 之上的内容会被当作摘要。进入 read more 之前会显示摘要。

+

设置 News 的分类

+

YAML Front Matter 处添加 tags 可更方便地按照某些标签来检索 News,tags 示例如下所示:

+
---
+tags:
+  - HPCreview
+  - HPCreport
+---
+
+

查看 News

+

进入 https://wiki.cheng-group.net//news 可查看所有 News,https://wiki.cheng-group.net//archive 可查看按时间分类的 News。


Miscellaneous

+

Put temporary or unclassified content here!

+

Run Process when you logout shell

+

Every time you log in to the cluster, you may want some commands to keep running even after you log out of the shell. Unfortunately, these commands will stop as soon as you log out. How can you keep them running? The trick here is to use the command nohup together with &.

+

nohup command &

+

You just need to prepend nohup and append & to your command. Now you can log out and have a nice sleep.
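A minimal sketch (the script name run_md.sh below is just a placeholder for whatever you want to keep running):

```bash
# keep a long-running job alive after logging out, and collect its output
nohup bash run_md.sh > run_md.log 2>&1 &
```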

+

删除 linux 下的符号链接(快捷方式)

+

Linux 系统下的符号链接,又称软链接,基本类似于 Windows 系统下的快捷方式。如果你已经接触过deepmd,你应该已经见到过一些符号链接了。需要注意的一点是,符号链接本质上是一个 独立的文本文件,操作系统会将其解释为另一个文件或者路径(文件夹)。因此符号链接有如下两个性质:

+
    +
  • +

    删除符号链接文件并不会影响原本的文件/路径(文件夹)

    +
  • +
  • +

    删除原始文件/路径后,符号链接仍然存在,但是链接会损坏,成为 “stale symbolic link”(字面意思)。

    +
  • +
+

在整理工作文件夹的时候,我们可能会需要删除符号链接,我们尤其需要注意路径符号链接的删除:

+

一个dp-gen的训练路径结构如下:

+
00.train/
+├── 000
+├── 001
+├── 002
+├── 003
+├── data.init -> /data/rhbi/TiO2-ML/00.cp2k_md
+├── data.iters
+├── graph.000.pb -> 000/frozen_model.pb
+├── graph.001.pb -> 001/frozen_model.pb
+├── graph.002.pb -> 002/frozen_model.pb
+├── graph.003.pb -> 003/frozen_model.pb
+└── jr.json
+
+

假设你想要删除和文件关联的软链接‘graph.000.pb’,输入 rm graph.000.pb,没有任何问题,你成功删除了这个文件。然而如果你想删除和一个文件夹相关的链接,data.init,你可能会不假思索地输入

+
rm data.init/
+
+

这时候你会收到报错:

+
rm: cannot remove ‘data.init/’: Is a directory
+
+

再次强调,符号链接本质上是一个 独立的文本文件。收到报错是因为shell的自动补全功能把‘data.init’识别为了一个路径,因此在最后加入了斜杠‘/’。然而符号链接只是一个文本文件,这时系统认为不能用rm命令删掉一个路径,所以报错。正确的解决方法是去掉斜杠,输入正确的命令即可成功删除链接:

+
rm data.init
+
+

当然shell的自动补全和你使用的 shell 版本有关,有可能你的 shell 不会犯蠢直接加上‘/’,但是在删除链接的时候你需要额外注意,避免你的数据损失。
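另外,也可以使用 unlink 命令来删除符号链接。unlink 只接受单个文件作为参数、不接受目录,因此不会出现上述因斜杠导致的问题(示意如下):

```bash
unlink data.init    # 等价于 rm data.init,但不会作用于目录本身
```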

+
+

danger

+

千万不要运行 'rm -rf data.init/*' ,你会删除掉原路径下的所有文件!!!

+
+

集群使用出错:/bin/sh^M: bad interpreter: No such file or directory

+

错误情况

+

/bin/sh^M: bad interpreter: No such file or directory

+

在集群上使用bsub提交作业后正常显示:

+
Job <1360> is submitted to queue <53-large>
+
+

但是用bjobs查看不到这个作业,(可能先显示在排队PEND)显示No unfinished job found,这个时候使用ls命令会看见提交的.lsf作业的目录下会生成输出和报错文件:1360.stdout,1360.stderr,这说明作业已经运行结束(异常结束)。

+

错误原因

+

使用vim命令查看.stdout和.stderr这两个文件,会发现在作业的换行处出现很多^M符号。经查,原因是 Windows 下编写的文件上传到 Linux 系统时,换行符格式不一致。

+

错误处理

+

方法一:参考linux下运行脚本报读取或^M错误处理 - 知乎 (zhihu.com)

+

方法二:用vim命令在集群上新建一个作业,然后把作业内容复制上去,再bsub提交作业即可
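此外,如果集群上提供 dos2unix 工具,也可以直接转换文件的换行符格式,或者用 sed 去掉行尾的回车符(以下命令中的 job.lsf 仅为示例文件名):

```bash
dos2unix job.lsf           # 若集群装有 dos2unix
# 或者
sed -i 's/\r$//' job.lsf   # 删除每行末尾的 \r(即 ^M)
```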

+

Scrum Group

+

简单介绍

+
    +
  • scrum meeting 即每日例会,在橄榄球运动中 a scrum 意思为一场比赛,scrum meeting 旨在通过每日例会的形式来总结最近所做的工作,进行讨论和反思并对未来短期内的工作进行规划和展望。
  • +
+

基本规则

+
    +
  • 所有的学生根据所研究方向分为若干小组,每个小组由各自的 scrum master 管理,并由 scrum master 带领进行每周的汇报。
  • +
  • scrum meeting 每周进行两次,进行时间根据具体情况而定。
  • +
  • 所有的研究生和本科四年级学生除非有要事均需参加scrum meeting,如果有事不能参加的需向所在组的 scrum master 进行请假和汇报。
  • +
  • 如果当天老师繁忙,各个小组应该自行组织 scrum meeting。
  • +
+

例会内容

+
    +
  • 汇报从上次 scrum meeting 到目前为止所做的工作内容,包括遇到的问题、新的发现或者存在的疑问等。
  • +
+

参考文件

+
    +
  • 请参考以下文件(待更新)
  • +
  • https://www.scrumguides.org/scrum-guide.html
  • +

ASE: 原子建模基础


密度泛函近似,基组与赝势


深度势能生成器: DP-GEN


机器学习: 理论与DeePMD-kit


Linux快速基础入门


如何进行展示/Presentation


Python 和 Numpy


量子化学与密度泛函理论


如何阅读文献


新生入门教程

+

欢迎加入程俊课题组,每个人来到新环境都需要熟悉和学习规则,请各位新生按照以下清单顺序进行入组的准备。

+

个人座位

+

每位入学新生将分到一个座位和一台iMac电脑用于日常的科研。请大家先注册一个Apple ID, 然后寻找课题组的集群管理员,为你开通iMac电脑的账号。

+

集群与集群账号

+

课题组配备有集群(超算)资源供科研使用,而集群是以Linux系统运行的。Linux与Windows类似,是另一种电脑操作系统,但操作以键盘命令为主,因此如果不熟悉Linux系统的同学,请先自己粗略学习一下(视频)Linux入门。

+

要登录集群,同样需要集群账号,请寻找课题组的集群管理员为你开通集群账号。

+

登录集群建议使用iMac的终端(terminal)。这里的iMac指的就是苹果电脑。由于苹果操作系统macOS和Linux都是从Unix系统衍生而来,因此使用苹果系列电脑来登录集群最为方便。Windows系统的电脑则需要额外安装软件。

+

使用iMac登录集群只需要同时按住command+空格,就会跳出搜索框。在搜索框中输入terminal/终端,则会跳出终端应用。使用终端的SSH命令即可。SSH使用具体见下文。

+

为建立账号,需要生成SSH密钥。登录集群需要使用SSH操作。
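以下给出生成密钥并登录集群的最简示意(其中的用户名和集群地址均为占位符,请以管理员提供的信息为准):

```bash
ssh-keygen -t ed25519            # 生成 SSH 密钥对,一路回车即可
ssh username@cluster.address     # 用账号登录集群(用户名与地址仅为示例)
```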

+

使用集群前,请大家熟悉集群的基本知识和操作。如果要使用GPU等资源,还需学习如何使用集群上的GPU

+

如果以上有任何难以理解的内容,请立即汇报给课题组的集群管理员。

+

在iMac上和在集群上使用Python

+

Python是一种非常方便的编程语言,可以帮助我们处理计算数据。但是纯Python的安装和相应的Python库使用是十分烦人的。因此名为Anaconda的软件可以帮助我们解决这个问题。

+

在iMac上,安装Anaconda,直接去搜索引擎搜索Anaconda然后去官网下载对应的安装包即可。

+

在集群上,我们已经提前为大家安装好了Anaconda,使用和设置方法参见集群上的Anaconda
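作为参考,在配置好 Anaconda 后,通常可以像下面这样创建并使用自己的 Python 环境(环境名与版本号仅为示例):

```bash
conda create -n myenv python=3.10   # 创建名为 myenv 的环境
conda activate myenv                # 激活该环境
conda install numpy                 # 在环境中安装所需的库
```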

+

必学项目

+

量子化学(Levine)(前14章)

+

(视频)量子化学与密度泛函理论

+

(视频)密度泛函近似,基组与赝势

+

(视频)Linux入门

+

(视频)如何阅读文献

+

(视频)如何进行展示

+

(视频)Python和Numpy

+

选学[具体项目相关]

+

机器学习

+

(视频)Deep Learning Lecture by Frank Noe *需要科学上网

+

(书籍)Pattern Recognition and Machine Learning

+

(书籍)Deep Learning(花书)

+

(视频)Machine Learning for Physics and the Physics of Learning 2019 *需要科学上网

+

(视频)机器学习: 理论与DeePMD-kit

+

(视频)深度势能生成器: DP-GEN

+

DeePMD-kit 使用入门

+

DP-GEN使用入门

+

工作流

+

(视频)自动化计算与工作流: AiiDA

+

生成模型

+

(视频)Diffusion and Score-Based Generative Models

+

(视频)Dr. Yang Song — Advancements in Diffusion Models for Generative AI

+

(博客)Generative Modeling by Estimating Gradients of the Data Distribution

+

(博客)A Pedagogical Introduction to Score Models

+

(视频)通用分子结构模型Graphormer简介 - 郑书新博士

+

(视频)Beyond AlphaFold2: 从结构预测到分布预测 | 郑书新博士 | 微软研究院 | Distributional Graphormer (DiG)

+

(视频)Materials Project Seminars – Tian Xie "MatterGen: a generative model for inorganic materials design"

+

统计力学

+

(博客)Introduction to Statistical Mechanics

+

(博客)David Tong at DAMTP, Cambridge: Lectures on Theoretical Physics

+

(博客)Lectures on Statistical Physics

+

(博客)Lectures on Quantum Mechanics

+

(博客)Lectures on Solid State Physics


自动化计算与工作流: AiiDA


计算化学踩坑合集

+

有时候,我们会沿用别人测试过的设置进行计算,而不一定会从头进行系统测试。但是,作为计算软件的使用者,我们需要意识到某些可能会出错的地方(或许是很棘手的问题),而不是将这些问题视而不见(sweep the problems under the carpet)。在此文章记录大家在项目中碰到的奇奇怪怪的坑,以供参考。

+
+

有新的内容可以通过 PR 或者评论区提出。可引用置顶issue #131

+
+

Cu pseudopotential

+

涉及 Cu 二价离子的计算可能要采用 19 电子的赝势 (semi-core potential)。

+
+

We found that only the computation of the orbital energy of the empty d-level of aqueous Cu2+ requires the use of a semi-core potential with explicit 3s and 3p electrons. Ref: J. Am. Chem. Soc. 2004, 126, 12, 3928–3938 [link]

+
+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/skills/QS4writing/index.html b/en/wiki/skills/QS4writing/index.html new file mode 100644 index 00000000..d16addbd --- /dev/null +++ b/en/wiki/skills/QS4writing/index.html @@ -0,0 +1,3218 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Quick Start for Paper Writing(科研写作急速上手) - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Quick Start for Writing

+
+

小提示

+
+

中文版可以在英文版之后找到,但是还是鼓励大家先读读英文版~

+

English version

+

I write this blog aiming to share some simple tips about academic writing, and hope it could help the "poor guys" struggling with the writing, more or less.

+

Notice: I am not a master in writing but only a TOTAL FRESHMAN. And all the texts following are based on what I learnt and my understanding, maybe incomplete (I hope no mistakes at least). Nevertheless, I believe that it is the reason why I can make the texts more friendly and achievable for the tyros. If you have any question, please feel free to come and talk with me ;-)

+

Practice! Practice... Practice?

+

I guess some (or even most) of you would say 'duh' when you heard about "Practice! Practice! Practice!" in some books. Sounds 100 percent correct but useless, right? Overall I agree, if you don't have a concrete and reasonable plan. Aimless practice can sometimes not only have no effect, but, even worse, depress you. Hence, I strongly suggest you start with writing YOUR paper, a specific example. For those having no projects, an experiment report can also be an alternative. Then, craft your work step by step!

+

Step ONE: polish up your outline

+

The first and most important step. Seems irrelevant to writing skills, uhm? Yes, but checking the outline with your colleagues and supervisors can largely save your time. Just imagine the time to rewrite the whole section! Generally speaking, the big framework for the project (and then the paper) has been made. However, we need to go a further step, and check the structure between paragraphs and even sentences. Actually it is the nightmare for many students, I believe.

+

For example, here we try to introduce the modelling methods in interface electrochemistry (mainly about EDL modelling), following solution electrochemistry introduced in the last section. Hence, we write down the outline below and discuss it with our partners.

+
the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)
+==>
+EDL is hard to be probed (reason)
+==>
+we can get some info with in situ techniques and ab initio simulations
+==>
+One of the key characteristics of EDL is its capacitance
+==>
+EDL capacitance can be measured by experiment (CV/impedance) and be a benchmark for modelling
+==>
+replace the solute by the electrode (from solution electrochemistry to interface electrochemistry)
+==>
+use similar simulation methods and focus on their performance on EDL modelling
+
+

In this step, you don't need to consider the elegance of your language. Simple but accurate texts can make your life easier.

+

Step TWO: ABT structure

+

“How long would you need to tell a story?” Randy Olson asked this question in his TEDMED talk. (YouTube link here. Sorry I cannot find another source for the guys in China...) In this talk and his book Houston, We Have a Narrative: Why Science Needs Story, Olson introduced a quite simple method to construct a narrative, the ABT structure:

+
+

(...) AND (...), BUT (...), THEREFORE (...)

+
+

Let's try to fill this structure with the outline in the last step!

+
%% start the ABT structure
+% EDL is important (... AND ...)
+the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)
+% BUT it is hard to be probed
+However, EDL is hard to be probed, not only because xxx but xxx
+% THEREFORE, we need some tools
+To address this difficulty, both in situ experimental techniques and modelling are required.
+%% END the ABT structure
+
+

If you don't know how to construct your idea, write down all the points you can think about and try to adapt them to one or more ABT structure(s).

+

If you think the linking somewhere is not smooth enough, rewrite it with an ABT structure.

+

Ahhh! Not bad!

+

Step THREE: repeat your words

+

With the two steps mentioned above, I believe you have worked out a comprehensible outline. Then, we need to strengthen the linking between sentences and make the logic more explicit, by repeating words from the previous sentence. Hence, your texts can be easier to follow! Here is an example:

+
Electric double layers (EDL) at the electrode/electrolyte interfaces are where electrochemical reactions occur, and thus are of paramount importance in electrochemistry.
+% Electric double layers (EDL) <==> EDL
+However, microscopic understanding of the EDL is still lacking due to its complexity and difficulty to probe.
+% microscopic understanding <==> valuable insight
+Thanks to the development of computational methods, modelling has shown great potential in studying the interface of the electrode and the electrolyte in the past few years, and provided valuable insight into EDL structures and dielectric properties.
+
+

Maybe the repetition between the second and the third sentences is slightly implicit, but the idea is there. Nevertheless, I would not recommend a tyro to do so, since you might confuse the readers with rephrasing. If you are not sure, just repeat the words and make your texts clear!

+
+

Albert Einstein: When you are out to describe the truth, leave elegance to the tailor.

+
+

Here we come to another example (cited from DOI: 10.1126/SCIADV.ABB1219). I like this compact structure very much.

+
An electric double layer (EDL) formed at an electrified interface can afford a potential change of a few volts within a very thin layer of 3 to 5 Å, amounting to an extremely large electric field of similar strength to that in a particle accelerator.
+% an extremely large electric field <==> a strong electric field
+Naturally, one would wonder how solvent molecules such as water or any other reactive species inside the EDL would behave in response to such a strong electric field.
+% how ... behave <==> this question
+Answering this question is not only of fundamental interest but also of technological importance in a broad range of research areas in science and technology, to name a few, energy storage in supercapacitors, electrocatalysis of relevance to energy and environmental applications, self-assembly of colloidal particles, ion transport across biological membranes, and mineralization processes in earth science.
+% fundamental interest, technological importance <==> its significance
+Despite its significance, molecular-level understanding of EDL is largely missing, owing to its complexity and difficulty to probe.
+% molecular-level understanding <==> microscopic structures
+Because of the advent of advanced experimental (e.g., synchrotron-based techniques and Raman spectroscopy) and computational methods [e.g., ab intio molecular dynamics (AIMD)], it is not until recently that the microscopic structures of EDL have started to be unveiled.
+
+

Yeah! Finally! I don't want to talk too much to distract you (but I still strongly recommend you to read the book mentioned above for fun!). I think the three tips above are sufficient to work out a readable draft for your big bosses. Don't be afraid of writing! Just have a try!

+
+

I am not a natural in writing. On the contrary, I had really struggled with English writing and thought I was a dunderhead at all, even if I had a pretty nice and patient supervisor who helped me a lotttttttttt in my first paper. Things turned up in a day (shortly after I finished the quasi-final version of my first paper) when I was asked to give a hand to my colleague for a review. When I started to read the review, I knew how to put all I had been taught into practice magically. Just like a spark in my mind. Maybe you know what should be improved only when you need to deal with an "unreadable draft" ? (Just kidding! Don't kill me, Xiaohui!)

+
+

Sincere thanks to Dr. Katharina Doblhoff-Dier in Leiden University

+

中文版

+

笔者写这篇短文的目的是为了分享几个笔者觉得很实用的写作小技巧,希望可以帮到正在(或将要)挣扎在科研写作中的同学们。

+

:笔者并不是科研写作的专家,只是一个刚刚敲完自己第一篇论文的菜鸟。以下的内容都是基于笔者所学的和笔者的理解,可能比较片面(希望没有错误)。尽管如此,笔者希望用一个初学者的视角去讲述,让这篇短文的内容对初学者来说是友好的和可实现的 ;-)

+

练习!练习......练习?

+

笔者相信相当一部分人在一些书里(或者其他地方)听到“练习!练习!练习!”这句话的时候会说一声:“就这?就这?”听起来是个完全正确的废话。总体来说笔者同意你们的观点,如果你们在练习的时候没有一个具体和合理的计划。毫无目的的练习有时候会事倍功半,甚至因打击你的自信而起到反效果。因此,笔者会推荐大家从 自己的文章(一个具体的例子)开始。对于那些还没有文章的同学,不妨试试实验报告之类的?然后,开始一步步打磨你的文章吧!

+

第一步:确定框架

+

这是最重要的一步,虽然看起来和写作没太大关系。一个好的框架可以大大节省后续写作的时间——想想重写一整个段落!通常来说,整个项目(文章)的大框架应该是在项目进行之前就和导师敲定好的,这个不会有大问题。问题在哪呢?下一个尺度:段落间和句子间的连接。就笔者个人经验而言,这步是很多学生(和导师)的噩梦...

+

那我们在写框架的时候应该写到什么程度呢?来看看一个例子。这里,我们希望介绍电化学界面的一些模拟方法(着重在双电层模拟)。并且在上一节里,我们已经介绍过了溶液相的一些模拟方法。根据这些内容,我们可以写个大致如下的框架,然后和我们的合作者或者导师进行下一步讨论。

+
the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)
+==>
+EDL is hard to be probed (reason)
+==>
+we can get some info with in situ techniques and ab initio simulations
+==>
+One of the key characteristics of EDL is its capacitance
+==>
+EDL capacitance can be measured by experiment (CV/impedance) and be a benchmark for modelling
+==>
+replace the solute by the electrode (from solution electrochemistry to interface electrochemistry)
+==>
+use similar simulation methods and focus on their performance on EDL modelling
+
+

在这一步中,你不需要考虑语言的优美。简单而精准的文字在接下来的修改中更方便。

+

第二步:ABT 结构

+

“你需要花多长时间去讲述一个故事?” Randy Olson 在他的TEDMED 演讲中问了这个问题。(这是个油管链接,B 站没找着...)在这个演讲以及他的书Houston, We Have a Narrative: Why Science Needs Story中,Olson 介绍了一种非常简单的叙事方法,ABT 结构:

+
+

(...) AND (...), BUT (...), THEREFORE (...)

+
+

让我们试着把上一节里的框架用 ABT 结构改造一下!

+
%% start the ABT structure
+% EDL is important (... AND ...)
+the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)
+% BUT it is hard to be probed
+However, EDL is hard to be probed, not only because xxx but xxx
+% THEREFORE, we need some tools
+To address this difficulty, both in situ experimental techniques and modelling are required.
+%% END the ABT structure
+
+

如果你不知道怎么下笔,那就先把所有想到的点写下来并把它们往 ABT 结构里套。

+

如果你认为某处的过渡不够自然,也可以考虑用 ABT 结构重写一下 。

+

第三步:重复你的词汇

+

经过上面的两个步骤,我相信你已经获得了一个可理解的大纲。 现在,我们来进行最后一步:尽量使每个句子中都出现上一个句子中的单词。这个方法可以加强句子之间的连接,使逻辑更加清晰,从而让你的文字可以更容易被阅读! 看看这个例子:

+
Electric double layers (EDL) at the electrode/electrolyte interfaces are where electrochemical reactions occur, and thus are of paramount importance in electrochemistry.
+% Electric double layers (EDL) <==> EDL
+However, microscopic understanding of the EDL is still lacking due to its complexity and difficulty to probe.
+% microscopic understanding <==> valuable insight
+Thanks to the development of computational methods, modelling has shown great potential in studying the interface of the electrode and the electrolyte in the past few years, and provided valuable insight into EDL structures and dielectric properties.
+
+

好吧,看上去第二个句子和第三个句子之间的重复有点隐晦?但是那个意思了。尽管如此,同义替换对于新手来说需要特别谨慎,以防出现表述偏差。如果你不是特别确定,那就简单地重复!让你的文章先变得清晰!

+
+

Albert Einstein: When you are out to describe the truth, leave elegance to the tailor.

+
+

这里是另一个例子 (引自 DOI: 10.1126/SCIADV.ABB1219)。笔者个人很喜欢这个简洁紧凑的例子!

+
An electric double layer (EDL) formed at an electrified interface can afford a potential change of a few volts within a very thin layer of 3 to 5 Å, amounting to an extremely large electric field of similar strength to that in a particle accelerator.
+% an extremely large electric field <==> a strong electric field
+Naturally, one would wonder how solvent molecules such as water or any other reactive species inside the EDL would behave in response to such a strong electric field.
+% how ... behave <==> this question
+Answering this question is not only of fundamental interest but also of technological importance in a broad range of research areas in science and technology, to name a few, energy storage in supercapacitors, electrocatalysis of relevance to energy and environmental applications, self-assembly of colloidal particles, ion transport across biological membranes, and mineralization processes in earth science.
+% fundamental interest, technological importance <==> its significance
+Despite its significance, molecular-level understanding of EDL is largely missing, owing to its complexity and difficulty to probe.
+% molecular-level understanding <==> microscopic structures
+Because of the advent of advanced experimental (e.g., synchrotron-based techniques and Raman spectroscopy) and computational methods [e.g., ab intio molecular dynamics (AIMD)], it is not until recently that the microscopic structures of EDL have started to be unveiled.
+
+

文章到这里就结束了!笔者不想写太多点以至于让你们有点抓狂(但是笔者还是非常推荐你们去读读上面提到的书!很有趣!)总的来说,笔者认为上面提及的三点已经足以写出一个清晰的初稿给你们的老板了。

+

最后,向我第一个项目的日常导师,荷兰莱顿大学的 Dr. Katharina Doblhoff-Dier 表示诚挚的感谢。

+

Useful websites for writing

+

vocabulary

+

https://www.vocabulary.com

+

https://www.oxfordlearnersdictionaries.com

+

synonym

+

https://www.wordhippo.com

+

https://www.thesaurus.com

+

collocation

+

https://www.linggle.com

+

https://netspeak.org

+

sentence

+

https://www.phrasebank.manchester.ac.uk

+

rephrase

+

https://quillbot.com

+

translation

+

http://www.onedict.com/index.php

+

https://www.deepl.com/translator

+

年度汇报标准

+

研究生(博士和硕士)前两年每年应写一次汇报。汇报使用 **LaTeX**撰写。模板选用 revtex 的 AIP 模板。以 **英文**撰写

+

硕士生第一年报告的词数应在 4500 左右,第二年报告应在 6000 左右。

+

博士生第一年报告的词数应在 6000 左右,第二年报告应在 7500 左右。
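以下给出一个基于 REVTeX(AIP 选项)的最小文档骨架,仅作示意;宏包选项与格式细节请以课题组提供的模板为准:

```latex
\documentclass[aip,reprint]{revtex4-2}
\begin{document}
\title{Annual Report: Your Topic}
\author{Your Name}
\affiliation{Xiamen University}
\maketitle
% 正文从这里开始
\end{document}
```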

+

使用 Overleaf 写作

+

Overleaf是一个在线的 LaTeX 编辑器,可以直接在浏览器中编辑 LaTeX 文档。使用 Overleaf 可以方便地进行合作写作,同时也可以方便地进行版本控制。现阶段,课题组的科研论文基本都是使用 Overleaf 进行写作。基本操作流程为:在需要写文章的时候联系管理员,请管理员将文章相关人员的邮箱添加到一个空白项目中,然后用个人 Overleaf 账号进行后续编辑。项目相关文件课题组会统一进行归档管理。

+

在需要写文章的时候请将以下信息发给管理员:

+
    +
  • 所需模版(常用的如 ACS 和 AIP,如果有另外需求也可以告知管理员)
  • +
  • 项目名称(按照:作者名-序号-文章名 进行命名,比如:jxzhu-1-pt_oh_100
  • +
  • 需要添加的成员邮箱(除管理员外上限 5 人每项目) + 管理员添加相关人员邮箱后,请所有成员查看邮箱/登录 Overleaf 账号确认邀请。
  • +
+

版本管理

+

Overleaf 可以在修改的时候实现版本记录,也可以添加评论,具体的使用方法可以参考此教程

+
    +
  1. 右上角History,可以查看历史版本,并自行标记版本。
  2. 右上角Menu-Sync,可以进行手动备份。但是现阶段 GitHub 账号绑定仅限于会员(早期已绑定用户同步功能不受影响),故推荐使用 git+本地进行备份(也可在本地自行选择其他的托管平台)。git 相关教程参见此教程。

研究技能入门

+

如何阅读文献

+

阅读文献入门

+

为什么要写作

+ + +

如何写作

+ + +

Whitesides教授的大作

+

Whitesides, G. M. Whitesides’ Group: Writing a Paper. Adv. Mater. 2004, 16 (15 SPEC. ISS.), 1375–1377.

+

如何用英语演讲

+

English for Presentations at International Conferences


在集群安装LAMMPS

+

Zeus 集群

+
# Load the necessary modules
+module load cmake/3.20
+module load intel/17.5.239 mpi/intel/2017.5.239 gcc/7.4.0
+
+# find the ver in https://download.lammps.org/tars/index.html
+wget -c https://download.lammps.org/tars/lammps-23Jun2022.tar.gz
+tar -zxvf lammps-23Jun2022.tar.gz
+cd lammps-23Jun2022
+mkdir -p build
+cd build
+cmake ../cmake -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \
+-DCMAKE_Fortran_COMPILER=gfortran \
+-D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=mpi \
+-D CMAKE_INSTALL_PREFIX=/data/jxzhu/apps/lammps/install/23Jun2022 \
+-D CMAKE_INSTALL_LIBDIR=lib \
+-D CMAKE_INSTALL_FULL_LIBDIR=/data/jxzhu/apps/lammps/install/23Jun2022/lib \
+-C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake \
+-D BUILD_SHARED_LIBS=yes
+make -j 32
+make install
+
+

检查是否安装完成

+
./lmp_mpi -h
+
+

对于个人用户,可以将可执行文件所在路径(如/data/jxzhu/apps/lammps/lammps-23Jun2022/build)写入某个虚拟环境的环境变量,以实现版本控制。
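例如,可以在自己的环境配置中加入类似下面的一行(路径请替换为实际的编译目录,此处仅为示意):

```bash
export PATH=/data/jxzhu/apps/lammps/lammps-23Jun2022/build:$PATH
lmp_mpi -h   # 验证该版本可被正常调用
```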

+

IKKEM 集群

+
module load intel/2021.1
+module load dev/cmake/3.26.3
+module load gcc/9.3
+
+# find the ver in https://download.lammps.org/tars/index.html
+wget -c https://download.lammps.org/tars/lammps-23Jun2022.tar.gz
+tar -zxvf lammps-23Jun2022.tar.gz
+cd lammps-23Jun2022
+mkdir -p build
+cd build
+cmake ../cmake -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \
+      -DCMAKE_Fortran_COMPILER=gfortran \
+      -D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=intel_cpu_intelmpi \
+      -D CMAKE_INSTALL_PREFIX=/public/home/jxzhu/apps/lammps/install/lammps-23Jun2022 \
+      -D CMAKE_INSTALL_LIBDIR=lib \
+      -D CMAKE_INSTALL_FULL_LIBDIR=/public/home/jxzhu/apps/lammps/install/lammps-23Jun2022/lib \
+      -C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake \
+      -D BUILD_SHARED_LIBS=yes 
+make -j 32
+make install
+
+

检查是否安装完成

+
./lmp_intel_cpu_intelmpi -h
+

基于插件模式开发LAMMPS

+ +

一般来说,对代码进行功能添加/修改需要直接在源代码中进行,这样可能对原有代码产生影响。为了解决这个问题,LAMMPS引入了插件模式,使得用户可以在不改动源代码的情况下对LAMMPS进行功能扩展。接下来,我们通过官方的例子对插件的运行方式进行大致的了解:

+
```bash
+cd lammps-23Jun2022/examples/plugins
+```
+
+

make编译:

+
```bash
+make 
+```
+
+

或者cmake

+
```bash
+mkdir -p build
+cd build
+cmake ../
+make
+```
+
+

编译后可以得到多个动态库文件.so。可以通过两种方式调用插件:

+
    +
  1. 在lammps的input中,通过plugin load命令加载插件,即可使用插件中的功能。
     plugin load morse2plugin.so
  2. 将动态库所在路径加入LAMMPS_PLUGIN_PATH,程序会自动加载搜索到的所有插件(环境变量设置示意见下)。
+
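第 2 种方式的环境变量设置大致如下(路径请替换为编译生成的 .so 文件所在目录,此处仅为示意):

```bash
export LAMMPS_PLUGIN_PATH=/data/jxzhu/software/lammps/lammps-23Jun2022/examples/plugins/build
```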

注意:如果移动examples/plugins中例子所在路径,需要修改编译设置。如果采用make编译,需要修改Makefile中的CXXFLAGS

+
```bash
+CXXFLAGS=-I$(LAMMPS_SOURCE_DIR) -Wall -Wextra -O3 -fPIC -I$(LAMMPS_SOURCE_DIR)/OPENMP -fopenmp
+```
+
+

并设置LAMMPS_SOURCE_DIR为lammps源代码所在路径。

+
```bash
+export LAMMPS_SOURCE_DIR=/data/jxzhu/software/lammps/lammps-23Jun2022/src
+make
+```
+
+

如果采用cmake编译,需要将plugins/CMakeLists.txt中22行注释掉(get_filename_component(LAMMPS_SOURCE_DIR ${PROJECT_SOURCE_DIR}/../../src ABSOLUTE)),并在执行cmake时指定lammps源代码所在目录

+
```bash
+mkdir -p build
+cd build
+rm *
+cmake -DLAMMPS_SOURCE_DIR=/data/jxzhu/apps/lammps/lammps-23Jun2022/src ..
+make
+```
+

LAMMPS开发准备

+

为什么要学习LAMMPS开发?

+

作为一个开源的分子动力学模拟软件,LAMMPS在计算化学中有非常广泛的应用。现有的LAMMPS发行版本提供了大量的功能,大多数时候可以满足用户的需求。但是,有时候我们仍需要实现一些新的功能,或者对现有功能进行修改。此时,就需要我们对LAMMPS开发有大致了解。本教程面向已掌握LAMMPS的基本功能的用户,希望通过本教程的学习,读者可以掌握LAMMPS的基本开发方法,为自己的研究工作提供更多的可能性。考虑到现在已经有一些关于LAMMPS开发的教程(贴于下方),本教程将基于chenglab组内情况进行介绍。

+

阅读资料

+
    +
  1. 官方开发指南:非常全面的开发指南,包括了LAMMPS的代码结构、并行算法等,但是篇幅较长。建议优先阅读代码架构和单步中调用的功能。
  2. Extending and Modifying LAMMPS Writing Your Own Source Code: A pragmatic guide to extending LAMMPS as per custom simulation requirements:详细介绍了如何在LAMMPS中添加新的功能,可以根据需求找到对应的案例进行学习。
+

如果你没有任何代码经验,建议先根据基础完成以下的内容学习:

+
    +
  1. LAMMPS基础
  2. Git基础
  3. C++基础(请根据自己的代码基础选择合适的教程,比如C++ Primer Plus)

CP2K 7.1 安装教程

+

这里以 7.1 版本为例介绍如何安装编译 CP2K,其他版本可参照修改。

+

环境准备

+

可参考官方支持编译环境

+
    +
  • 使用 GCC 5.5.0 以上
  • +
  • Intel MPI 环境
  • +
+

一切就绪后,加载上述环境:

+
module load intel/17.5.239 mpi/intel/2017.5.239
+module load gcc/5.5.0
+
+

安装流程

+

首先,在 Release 页面 下载 CP2K 安装包,以 7.1 为例:

+
wget -c https://github.com/cp2k/cp2k/releases/download/v7.1.0/cp2k-7.1.tar.bz2
+
+

拷贝 cp2k-7.1.tar.bz2 到安装路径下并解压。由于需要预编译所需的库等,这里为了防止后续使用时产生额外路径依赖,推荐直接在安装路径下编译。 +以/share/apps/cp2k为例:

+
cp cp2k-7.1.tar.bz2 /share/apps/cp2k
+cd /share/apps/cp2k/
+tar -jxf cp2k-7.1.tar.bz2
+
+

更改目录名为 7.1,为后续添加 module 文件作准备(本步骤可选,也可保留默认名称,后续环境配置时需要相应修改):

+
mv cp2k-7.1 7.1
+
+

进入到 toolchain 目录下,并修改install_mpich.sh, 将其中的check_command mpic++ "mpich"改为check_command mpicxx "mpich"

+
cd 7.1/tools/toolchain
+sed -i 's/check_command mpic++/check_command mpicxx/g' scripts/install_mpich.sh
+
+

(可选) 若需安装 ELPA 包,需要将静态库替换为动态库,否则会报错undefined reference to ...

+
sed -i 's/a libmkl_core.a libmkl_sequential.a/so libmkl_sequential.so libmkl_core.so/g' scripts/install_mkl.sh
+sed -i 's/libmkl_gf_lp64.a/libmkl_gf_lp64.so/g' scripts/install_mkl.sh
+sed -i 's/libmkl_core.a/libmkl_sequential.so/g' scripts/install_mkl.sh
+sed -i 's/libmkl_scalapack_lp64.a/libmkl_scalapack_lp64.so/g' scripts/install_mkl.sh
+sed -i 's/libmkl_blacs_intelmpi_lp64.a/libmkl_blacs_intelmpi_lp64.so/g' scripts/install_mkl.sh
+sed -i 's/libmkl_blacs_openmpi_lp64.a/libmkl_blacs_openmpi_lp64.so/g' scripts/install_mkl.sh
+sed -i 's/libmkl_core.a/libmkl_sequential.so/g' scripts/install_mkl.sh
+
+

ref 1 +ref 2

+

(可选) 为加速安装、防止超时报错,在中国大陆可将 Github 统一替换为镜像。但后续从 cp2k 官方网站下载的包也可能出现超时报错,可能需要借助其他平台下载相应的软件包并放到build目录下。

+
sed -i 's/github.com/hub.fastgit.org/g' scripts/install_*.sh
+
+

随后运行 toolchain 脚本安装依赖软件:

+
./install_cp2k_toolchain.sh --gpu-ver=no   --enable-cuda=no  --with-mpich=system --with-sirius=no --with-openmpi=no  --with-spfft=no --with-hdf5=no
+
+

过程中请注意输出信息和报错等,并相应地予以解决。如果一切顺利,会提示需要拷贝 arch 文件,并 source 所需的环境,按照提示操作即可。注意由于步骤不同这里的命令可能不同,仅供参考:

+
cp install/arch/local* /share/apps/cp2k/7.1/arch/
+source /share/apps/cp2k/7.1/tools/toolchain/install/setup
+
+

之后进行编译安装:

+
cd /share/apps/cp2k/7.1/
+make -j 8 ARCH=local VERSION="popt psmp"
+
+

如果一切顺利,可以得到编译好的二进制可执行文件,创建bin目录,并拷贝exe目录里的文件到bin

+
mkdir bin
+cp ./exe/local/* ./bin
+
+

最后删除bintools之外的所有文件,并删除tools/toolchain里的buildinstall目录。

+

Module 文件生成

+

若集群使用 module 管理环境变量,请在 modulefile 目录下(取决于集群的设置)新建目录cp2k并创建文件.module

+
#%Module
+
+# Help message
+proc ModulesHelp { } {
+    set nameversion [module-info name]
+    regsub "/.*" $nameversion "" name
+    regsub ".*/" $nameversion "" version
+    puts stderr "\tLoads the $version $name environment"
+}
+
+# Set variables
+set nameversion [module-info name]
+regsub "/.*" $nameversion "" name
+regsub ".*/" $nameversion "" version
+module-whatis    "$name $version"
+
+# set environment variables
+set basedir /share/apps/$name/$version
+
+module load intel/17.5.239 mpi/intel/2017.5.239
+module load gcc/5.5.0
+
+prepend-path    PATH            ${basedir}/bin
+
+

然后创建符号链接,提供相应版本号的环境:

+
ln -s .module 7.1
+
+
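配置完成后,用户即可按如下方式加载并检查该环境(模块名取决于上述目录与文件命名,此处仅为示意):

```bash
module load cp2k/7.1
cp2k.popt --version   # 或 cp2k.psmp,确认可执行文件可用
```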

Q&A

+
    +
  1. 如果所有标称为https://www.cp2k.org的压缩包均无法下载,且单独wget该压缩包时提示Issued certificate has expired,可以尝试更新证书服务,CentOS 7 命令如下:
  2. +
+
yum install ca-certificates
+
+
  2. 以上欺骗手段仅适用于 Intel MPI <= 2018 的版本,对高版本 MPI 推荐直接安装更高版本的 CP2K,Toolchain 可提供完整支持。
  3. 如果make过程中频繁报错,还可能是系统没有正确配置地区设置,请使用如下命令加载环境变量:
+
export LANG=en_US.UTF-8
+export LC_ALL=en_US.UTF-8
+export LC_CTYPE="en_US.UTF-8"
+

DeepMD-kit快速安装

+

为减少后续安装的困难,请优先参考最佳实践。本文介绍的方法成型时,DP尚未实现对Lammps的解耦,但仍然可用。

+
+

本部分主体写于2021年,截至目前(2022.08)仍适用,并且随版本升级仍在更新。

+

教程中使用的尚且是CUDA 10.1,但对CUDA 11.x也适用。

+
+

背景:以 Zeus 集群为例,在服务器通过源代码编译安装DeepMD-kit和包含完整接口的LAMMPS。虽然官方已经提供了通过 Conda 一键安装的方法,但由于此法所安装的各个组件均为预编译版本,因而无法做更多拓展和改动,且通过 Conda 安装的 Protobuf 存在版本冲突,无法进一步编译其他接口。这里介绍一种方法,通过 Conda 安装通常不需要较大改动的TensorFlow C++ Interface,其余部分仍手动编译。

+

初始环境说明

+

以下过程以 Zeus 集群为例,操作系统及版本为CentOS 7,管理节点联网,采用module作为环境管理。

+

以下是预先配置好的环境,对于其他集群,可以此要求准备环境,其中 Intel MPI 可以用 MPICH 代替,其余组件请自行安装。注意CUDA 10.1对Nvidia驱动版本有要求,需要预先检查好(可用nvidia-smi快速查看)。

+
    +
  • 通过yum安装
  • +
  • Git >= 1.8.2
  • +
  • 通过module加载
  • +
  • CUDA 10.1
  • +
  • Miniconda 3
  • +
  • GCC >= 7.4.0
  • +
  • Intel MPI 2017 (暂未对其他版本进行测试)
  • +
+
+

版本号仅供参考,实际安装可能会不一样,参考执行即可。

+
+

创建新的环境

+

首先准备必要的依赖。

+

检查可用的模块,并加载必要的模块:

+
module avail
+module add cuda/10.1
+module add gcc/7.4.0
+
+

注意这里导入的是GCC 7.4.0版本,如果采用低于4.9.4的版本(不导入GCC)则dp_ipi不会被编译。

+

然后创建虚拟环境,步骤请参考Anaconda 使用指南

+

假设创建的虚拟环境名称是 deepmd,则请将步骤最后的 <your env name> 替换为 deepmd。若采用该步骤的设置,则虚拟环境将被创建在/data/user/conda/env/deepmd下(假设用户名为user)。

+

注意请务必为创建的虚拟环境安装所需的Python环境。通常不指定Python版本号的情况下(例如文中的步骤conda create -n <your env name> python)会安装conda推荐的最新版本,如需要替代请对应指定,如conda create -n deepmd python=3.8

+

由于Zeus的GPU节点不能联网,故需要将所需的驱动程序库libcuda.solibcuda.so.1的名称手动链接到某个具有权限的路径/some/local/path并分别加入环境变量。

+
ln -s /share/cuda/10.0/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/share/cuda/10.0/lib64/stubs:/some/local/path
+
+
+

提示

+

若在Zeus 集群上安装,管理员已事先把libcuda.so.1 链接在/share/cuda/10.0/lib64/stubs/下,故无需额外创建软链接,同理/some/local/path也无需加入环境变量,但仍需要驱动程序库的符号链接libcuda.so。注意这一步骤执行后,实际运行时需要从环境变量中移除

+
+

安装Tensorflow的C++ 接口

+

以下安装,假设软件包下载路径均为/some/workspace, 以TensorFlow 2.3.0版本、DeePMD-kit 1.3.3 版本为例进行说明,其他版本的步骤请参照修改。

+

首先创建并进入虚拟环境,这里假设命名为deepmd

+
conda create -n deepmd python=3.8
+conda activate deepmd
+
+

搜索仓库,查找可用的TensorFlow的C++ 接口版本。

+
conda search libtensorflow_cc -c https://conda.deepmodeling.com
+
+

结果如下:

+
Loading channels: done
+# Name                       Version           Build  Channel
+libtensorflow_cc              1.14.0  cpu_h9a2eada_0
+libtensorflow_cc              1.14.0  gpu_he292aa2_0
+libtensorflow_cc               2.0.0  cpu_h9a2eada_0
+libtensorflow_cc               2.0.0  gpu_he292aa2_0
+libtensorflow_cc               2.1.0  cpu_cudaNone_0
+libtensorflow_cc               2.1.0  gpu_cuda10.0_0
+libtensorflow_cc               2.1.0  gpu_cuda10.1_0
+libtensorflow_cc               2.1.0   gpu_cuda9.2_0
+libtensorflow_cc               2.3.0  cpu_cudaNone_0
+libtensorflow_cc               2.3.0  gpu_cuda10.1_0
+libtensorflow_cc               2.4.1  gpu_cuda11.0_0
+libtensorflow_cc               2.4.1  gpu_cuda11.1_0
+libtensorflow_cc               2.5.0  cpu_cudaNone_0
+libtensorflow_cc               2.5.0  gpu_cuda10.1_0
+libtensorflow_cc               2.5.0  gpu_cuda11.3_0
+libtensorflow_cc               2.7.0  cpu_h6ddf1b9_0
+libtensorflow_cc               2.7.0 cuda101h50fd26c_0
+libtensorflow_cc               2.7.0 cuda113h3372e5c_0
+libtensorflow_cc               2.7.0 cuda113hbf71e95_1
+libtensorflow_cc               2.9.0  cpu_h681ccd4_0
+libtensorflow_cc               2.9.0 cuda102h929c028_0
+libtensorflow_cc               2.9.0 cuda116h4bf587c_0
+
+

这里所希望安装的版本是2.3.0的GPU版本,CUDA版本为10.1,因此输入以下命令安装:

+
conda install libtensorflow_cc=2.3.0=gpu_cuda10.1_0 -c https://conda.deepmodeling.org
+
+

若所安装的环境没有实际的GPU驱动(比如集群的登录节点)或需要用到Conda安装CudaToolkit,可能需要参照此处说明强制指定GPU环境。比如:

+
CONDA_OVERRIDE_CUDA="11.3" conda install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com
+
+

请注意CONDA_OVERRIDE_CUDA的值需要与GPU支持以及希望用到的CUDA版本相匹配。

+
+

提示

+

注意A100仅支持TF 2.4.0以上、CUDA11.2以上,安装时请对应选择。

+
+
+

提示

+

个别版本在后续编译时可能会提示需要libiomp5.so,请根据实际情况确定是否需要提前载入Intel环境(见下文Lammps编译部分)或者conda install intel-openmp

+
+
+

提示

+

conda命令可能速度较慢,也可以考虑切换为mamba,后者可大幅加速Conda的性能,且完全兼容。只需参照前述链接安装后将conda替换为mamba即可

+
+

若成功安装,则定义环境变量:

+
export tensorflow_root=/data/user/conda/env/deepmd
+
+

即虚拟环境创建的路径。

+

安装DeePMD-kit的Python接口

+

以防万一可以升级下pip的版本:

+
pip install --upgrade pip
+
+

接下来安装Tensorflow的Python接口

+
pip install tensorflow==2.3.0
+
+

若提示已安装,请使用--upgrade选项进行覆盖安装。若提示权限不足,请使用--user选项在当前账号下安装。

+

然后下载DeePMD-kit的源代码(注意把v1.3.3替换为需要安装的版本,如v2.0.3等)

+
cd /some/workspace
+git clone --recursive https://github.com/deepmodeling/deepmd-kit.git deepmd-kit -b v1.3.3
+
+

在运行git clone时记得要--recursive,这样才可以将全部文件正确下载下来,否则在编译过程中会报错。

+
+

提示

+

如果不慎漏了--recursive, 可以采取以下的补救方法: +

git submodule update --init --recursive
+

+
+

若集群上 Cmake 3没有安装,可以用pip进行安装:

+
pip install cmake
+
+

修改环境变量以使得cmake正确指定编译器:

+
export CC=`which gcc`
+export CXX=`which g++`
+export FC=`which gfortran`
+
+

若要启用CUDA编译,请导入环境变量:

+
export DP_VARIANT=cuda
+
+

随后通过pip安装DeePMD-kit:

+
cd deepmd-kit
+pip install .
+
+

安装DeePMD-kit的C++ 接口

+

延续上面的步骤,下面开始编译DeePMD-kit C++接口:

+
deepmd_source_dir=`pwd`
+cd $deepmd_source_dir/source
+mkdir build 
+cd build
+
+

假设DeePMD-kit C++ 接口安装在/some/workspace/deepmd_root下,定义安装路径deepmd_root

+
export deepmd_root=/some/workspace/deepmd_root
+
+

在build目录下运行:

+
cmake -DLAMMPS_VERSION_NUMBER=<value> -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
+
+

请根据自己即将安装的Lammps版本指定-DLAMMPS_VERSION_NUMBER的值,目前最新版本的DeePMD-kit默认为20210929,如需安装Lammps 29Oct2020,请设定为20201029

+

若通过yum同时安装了Cmake 2和Cmake 3,请将以上的cmake切换为cmake3

+

最后编译并安装:

+
make
+make install
+
+

若无报错,通过以下命令执行检查是否有正确输出:

+
$ ls $deepmd_root/bin
+dp_ipi
+$ ls $deepmd_root/lib
+libdeepmd_ipi.so  libdeepmd_op.so  libdeepmd.so
+
+

安装LAMMPS的DeePMD-kit模块

+

接下来安装

+
cd $deepmd_source_dir/source/build
+make lammps
+
+

此时在$deepmd_source_dir/source/build下会出现USER-DEEPMD的LAMMPS拓展包。

+

下载LAMMPS安装包,并把接口代码复制到src目录下:

+
cd /some/workspace
+# Download Lammps latest release
+wget -c https://lammps.sandia.gov/tars/lammps-stable.tar.gz
+tar xf lammps-stable.tar.gz
+cd lammps-*/src/
+cp -r $deepmd_source_dir/source/build/USER-DEEPMD .
+
+

Make命令安装

+

选择需要编译的包(若需要安装其他包,请参考Lammps官方文档):

+
make yes-user-deepmd
+make yes-kspace
+
+

如果没有make yes-kspace 会因缺少pppm.h报错。

+

这里也可以通过以下命令批量安装其他包:

+
make yes-all                        # install all packages
+make no-lib                         # uninstall packages that require extra libraries
+make no-ext                         # uninstall packages that require external libraries
+
+

注意如Plumed、SMD、COLVARS等等需要提前配置或预先编译的插件如需安装请参考Lammps官方文档,同时诸如 Intel、GPU等加速包如果不需要编译可能需要额外手动取消安装。

+
+

目前官方文档改动较大,且未提供历史版本,因而仅适用于官方最新Release版本(目前仅适用于Lammps 29Sep2021以后的版本,但可能随着后续更新适用面进一步缩窄。),使用旧版请注意甄别。

+
+

加载MPI环境,并采用MPI方式编译Lammps可执行文件:

+
module load intel/17.5.239 mpi/intel/2017.5.239
+make mpi -j4
+
+
+

注意

+

此处使用的GCC版本应与之前编译Tensorflow C++接口和DeePMD-kit C++接口一致,否则可能会报错:@GLIBCXX_3.4.XX。如果在前面的安装中已经加载了GCC 7.4.0,请在这里也保持相应环境的加载。

+
+

经过以上过程,Lammps可执行文件lmp_mpi已经编译完成,用户可以执行该程序调用训练的势函数进行MD模拟。

+

Cmake安装

+

也可以直接使用Cmake进行编译,更加干净、快捷。

+

如需要安装Plumed,请首先利用Conda安装GSL环境:

+
conda install gsl
+
+

然后请编辑lammps-stable/cmake/CMakeLists.txt,找到set(STANDARD_PACKAGES这一行,并在末尾括号内增加一项:USER-DEEPMD

+
set(STANDARD_PACKAGES
+  ...  
+  USER-DEEPMD)
+
+

然后在lammps-stable目录下,新建build目录:

+
cd lammps-stable
+mkdir build
+cd build
+
+

进行配置:

+
cmake -C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake \
+-D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=mpi \
+-D WITH_JPEG=no -D WITH_PNG=no -D WITH_FFMPEG=no \
+-D PKG_PLUMED=yes -D PKG_COLVARS=yes -D PKG_USER-DEEPMD=ON \
+-D CMAKE_INSTALL_PREFIX=/data/user/conda/env/deepmd \
+-D CMAKE_CXX_FLAGS="-std=c++14 -DHIGH_PREC -DLAMMPS_VERSION_NUMBER=20220623 -I${deepmd_root}/include -I${tensorflow_root}/include -L${deepmd_root}/lib -L${tensorflow_root}/lib -Wl,--no-as-needed -ldeepmd_cc -ltensorflow_cc -ltensorflow_framework -Wl,-rpath=${deepmd_root}/lib -Wl,-rpath=${tensorflow_root}/lib" \
+../cmake
+
+

注意CMAKE_INSTALL_PREFIX指示的是安装路径,请根据实际情况修改。

+
+

注意

+

这里额外关闭了图形输出模块(JPEG、PNG、FFMPEG),因为Conda自带的图形库会与系统有冲突,暂时没有解决,且使用make默认也不会安装。

+
+
+

注意

+

由于未知原因,有时候CMake会找不到Conda安装的GSL。但若提前编译好Plumed并采用Runtime方式载入,可不需要GSL:-D PLUMED_MODE=runtime

+
+
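
即在前述cmake配置命令中将Plumed相关选项替换为类似如下形式(示意):

+
-D PKG_PLUMED=yes -D PLUMED_MODE=runtime \
+
+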

然后进行编译:

+
make -j 16
+make install
+
+

经过以上过程,Lammps可执行文件lmp_mpi已经编译完成,用户可以执行该程序调用训练的势函数进行MD模拟。

+

DP-CP2K 安装指引

+

首先clone对应的安装包:

+
git clone https://github.com/Cloudac7/cp2k.git -b deepmd_latest --recursive --depth=1
+
+

然后运行相应的Toolchain脚本:

+
cd tools/toolchain/
+./install_cp2k_toolchain.sh --enable-cuda=no --with-deepmd=$deepmd_root --with-tfcc=$tensorflow_root --deepmd-mode=cuda --mpi-mode=no --with-libint=no --with-libxc=no --with-libxsmm=no
+
+

根据脚本运行结尾的提示复制arch文件并source所需的环境变量。最后回到主目录进行编译:

+
make -j 4 ARCH=local VERSION="ssmp sdbg"
+
+

编译正确完成后,可执行文件生成在exe/下,即cp2k.ssmp(调试版本为cp2k.sdbg)

+
+
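
运行方式示意如下(输入、输出文件名仅为示例,exe/下的子目录名对应ARCH设置):

+
./exe/local/cp2k.ssmp -i input.inp -o output.out
+
+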

注意目前DP-CP2K暂未支持MPI,因而请单独编译此Serial版本。且CP2K由于IO问题,性能相比Lammps低50%以上,如非刚需还是建议使用Lammps进行MD模拟,后者可提供更多特性和加速的支持。

+

同时目前开发者遇到一些困难,故提交的PR尚未更新且由于沉默过久已被官方关闭。如读者有在CP2K实现共享状态的开发经验,请联系作者,谢谢。

+

There is currently some difficulty in implementing a shared state in CP2K runs to decrease the I/O at each MD step. However, the developer has not yet found a proper solution, so the PR has gone silent. If you can provide any relevant experience, please contact me. Thanks!

+
+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/index.html b/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/index.html new file mode 100644 index 00000000..40b33d41 --- /dev/null +++ b/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/index.html @@ -0,0 +1,3191 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeepMD-kit安装:旧版 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

DeepMD-kit安装:旧版

+
+

本部分写于2020年,适用于DeePMD-kit 1.x 和 TensorFlow 1.14。对目前较新的版本可能不适用,请移步安装最佳实践快速安装教程

+
+

背景:以 Zeus 集群为例,在服务器安装DeepMD-kit和包含完整接口的LAMMPS。

+

参考:

+

DeepMD-kit

+

TensorFlow

+

初始环境说明

+

以下过程以 Zeus 集群为例,操作系统及版本为CentOS 7,采用module作为环境管理。

+
    +
  • 通过yum安装:
  • +
  • Cmake 3.7
  • +
  • GCC 4.8.5
  • +
  • Git 1.8.2
  • +
  • 通过module加载
  • +
  • CUDA 10.0
  • +
  • Miniconda3 (Python 3.7)
  • +
  • GCC 4.9.4
  • +
  • Intel MPI 2017
  • +
+

创建新的环境

+

首先准备必要的依赖。

+

检查可用的模块,并加载必要的模块:

+
module avail
+module add cuda/10.0
+module add gcc/4.9.4
+
+

注意这里导入的是gcc 4.9.4版本,如果采用更低的版本(不导入gcc)则dp_ipi不会被编译。

+

然后创建虚拟环境,步骤请参考Anaconda 使用指南

+

假设创建的虚拟环境名称是 deepmd,则请将步骤最后的 <your env name> 替换为 deepmd。若采用该步骤的设置,则虚拟环境将被创建在/data/user/conda/env/deepmd下(假设用户名为user)。

+

由于GPU节点不能联网,故我们需要将所需的驱动程序库libcuda.so以libcuda.so.1的名称手动链接到某个路径/some/local/path并加入环境变量。

+
ln -s /share/cuda/10.0/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/share/cuda/10.0/lib64/stubs:/some/local/path
+
+
+

提示

+

若在 Zeus 集群上安装,管理员已事先把libcuda.so.1 链接在/share/cuda/10.0/lib64/stubs/下,故无需额外创建软链接,同理/some/local/path也无需加入环境变量。

+
+

安装Tensorflow的C++ 接口

+

以下安装,假设软件包下载路径均为/some/workspace, 以TensorFlow 1.14.0版本、DeePMD-kit 1.2.0 版本为例进行说明,其他版本的步骤请参照修改。

+

下载对应的bazel安装包

+
cd /some/workspace
+wget https://github.com/bazelbuild/bazel/releases/download/0.24.0/bazel-0.24.0-installer-linux-x86_64.sh
+chmod +x bazel-0.24.0-installer-linux-x86_64.sh
+./bazel-0.24.0-installer-linux-x86_64.sh --user
+export PATH="$HOME/bin:$PATH"
+
+
+

注意

+

注意bazel的兼容性问题,合理的bazel版本设置请参阅Tensorflow官方文档中的说明

+
+

下载TensorFlow源代码

+
cd /some/workspace 
+git clone https://github.com/tensorflow/tensorflow tensorflow -b v1.14.0 --depth=1
+cd tensorflow
+
+

编译TensorFlow C++ Interface

+

tensorflow文件夹下运行configure,设置编译参数。

+
./configure
+Please specify the location of python. [Default is xxx]:
+
+Found possible Python library paths:
+  /xxx/xxx/xxx
+Please input the desired Python library path to use.  Default is [xxx]
+
+Do you wish to build TensorFlow with XLA JIT support? [Y/n]:
+XLA JIT support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with OpenCL SYCL support? [y/N]:
+No OpenCL SYCL support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with ROCm support? [y/N]:
+No ROCm support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with CUDA support? [y/N]: y
+CUDA support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with TensorRT support? [y/N]:
+No TensorRT support will be enabled for TensorFlow.
+
+Found CUDA 10.0 in:
+    /share/cuda/10.0/lib64
+    /share/cuda/10.0/include
+Found cuDNN 7 in:
+    /share/cuda/10.0/lib64
+    /share/cuda/10.0/include
+
+Please specify a list of comma-separated CUDA compute capabilities you want to build with.
+You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
+Please note that each additional compute capability significantly increases your build time and binary size, and that TensorFlow only supports compute capabilities >= 3.5 [Default is: 3.5,7.0]:
+
+Do you want to use clang as CUDA compiler? [y/N]:
+nvcc will be used as CUDA compiler.
+
+Please specify which gcc should be used by nvcc as the host compiler. [Default is /share/apps/gcc/4.9.4/bin/gcc]:
+
+Do you wish to build TensorFlow with MPI support? [y/N]:
+No MPI support will be enabled for TensorFlow.
+
+Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native -Wno-sign-compare]:
+
+Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]:
+Not configuring the WORKSPACE for Android builds.
+
+Preconfigured Bazel build configs. You can use any of the below by adding "--config=<>" to your build command. See .bazelrc for more details.
+    --config=mkl             # Build with MKL support.
+    --config=monolithic      # Config for mostly static monolithic build.
+    --config=gdr             # Build with GDR support.
+    --config=verbs           # Build with libverbs support.
+    --config=ngraph          # Build with Intel nGraph support.
+    --config=numa            # Build with NUMA support.
+    --config=dynamic_kernels    # (Experimental) Build kernels into separate shared objects.
+    --config=v2              # Build TensorFlow 2.x instead of 1.x.
+Preconfigured Bazel build configs to DISABLE default on features:
+    --config=noaws           # Disable AWS S3 filesystem support.
+    --config=nogcp           # Disable GCP support.
+    --config=nohdfs          # Disable HDFS support.
+    --config=noignite        # Disable Apache Ignite support.
+    --config=nokafka         # Disable Apache Kafka support.
+    --config=nonccl          # Disable NVIDIA NCCL support.
+Configuration finished
+
+
+

注意

+

若采用前文导入的GCC 4.9.4版本,请根据which gcc的输出判断GCC的安装路径。但一般情况下安装程序可以直接检测到正确路径。

+
+

随后进行编译,由于时间较长,可以考虑使用screen或者tmux将进程放置在后台。

+
bazel build -c opt --verbose_failures //tensorflow:libtensorflow_cc.so
+
+
+

说明

+

安装高版本Tensorflow(如2.1.0)时,若提示没有git -c的命令,请升级git到最新版。用户可能需要在本地进行编译并加入环境变量。

+
+
+

提示

+

一般情况下,bazel默认在~/.cache/bazel下进行编译。由于编译所需硬盘空间较大,如有需要,请在运行bazel前采用环境变量指定编译用临时文件夹,以/data/user/.bazel为例:

export TEST_TMPDIR=/data/user/.bazel

+
+

整合运行库与头文件

+

假设Tensorflow C++ 接口安装在/some/workspace/tensorflow_root下,则定义环境变量:

+
export tensorflow_root=/some/workspace/tensorflow_root
+
+

创建上述文件夹并从编译结果中抽取运行库和头文件。

+
mkdir -p $tensorflow_root
+
+mkdir $tensorflow_root/lib
+cp -d bazel-bin/tensorflow/libtensorflow_cc.so* $tensorflow_root/lib/
+cp -d bazel-bin/tensorflow/libtensorflow_framework.so* $tensorflow_root/lib/
+cp -d $tensorflow_root/lib/libtensorflow_framework.so.1 $tensorflow_root/lib/libtensorflow_framework.so
+
+mkdir -p $tensorflow_root/include/tensorflow
+cp -r bazel-genfiles/* $tensorflow_root/include/
+cp -r tensorflow/cc $tensorflow_root/include/tensorflow
+cp -r tensorflow/core $tensorflow_root/include/tensorflow
+cp -r third_party $tensorflow_root/include
+cp -r bazel-tensorflow/external/eigen_archive/Eigen/ $tensorflow_root/include
+cp -r bazel-tensorflow/external/eigen_archive/unsupported/ $tensorflow_root/include
+rsync -avzh --include '*/' --include '*.h' --include '*.inc' --exclude '*' bazel-tensorflow/external/protobuf_archive/src/ $tensorflow_root/include/
+rsync -avzh --include '*/' --include '*.h' --include '*.inc' --exclude '*' bazel-tensorflow/external/com_google_absl/absl/ $tensorflow_root/include/absl
+
+

清理目标目录下赘余的源代码文件,保留编译好的接口。

+
cd $tensorflow_root/include
+find . -name "*.cc" -type f -delete
+
+

安装DeePMD-kit的Python接口

+

首先安装Tensorflow的Python接口

+
pip install tensorflow-gpu==1.14.0
+
+

若提示已安装,请使用--upgrade选项进行覆盖安装。若提示权限不足,请使用--user选项在当前账号下安装。

+
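
例如(示意):

+
pip install --upgrade --user tensorflow-gpu==1.14.0
+
+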

然后下载DeePMD-kit的源代码。

+
cd /some/workspace
+git clone --recursive https://github.com/deepmodeling/deepmd-kit.git deepmd-kit
+
+

在运行git clone时记得要--recursive,这样才可以将全部文件正确下载下来,否则在编译过程中会报错。

+
+

提示

+
+

如果不慎漏了--recursive, 可以采取以下的补救方法:

+
git submodule update --init --recursive
+
+

" %}

+

随后通过pip安装DeePMD-kit:

+
cd deepmd-kit
+pip install .
+
+

安装DeePMD-kit的C++ 接口

+

延续上面的步骤,下面开始编译DeePMD-kit C++接口:

+
deepmd_source_dir=`pwd`
+cd $deepmd_source_dir/source
+mkdir build 
+cd build
+
+

假设DeePMD-kit C++ 接口安装在/some/workspace/deepmd_root下,定义安装路径deepmd_root

+
export deepmd_root=/some/workspace/deepmd_root
+
+

修改环境变量以使得cmake正确指定编译器:

+
export CC=`which gcc`
+export CXX=`which g++`
+
+

在build目录下运行:

+
cmake -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
+
+

若通过yum同时安装了Cmake 2和Cmake 3,请将以上的cmake切换为cmake3

+
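
即(示意):

+
cmake3 -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
+
+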

最后编译并安装:

+
make
+make install
+
+

若无报错,通过以下命令执行检查是否有正确输出:

+
$ ls $deepmd_root/bin
+dp_ipi
+$ ls $deepmd_root/lib
+libdeepmd_ipi.so  libdeepmd_op.so  libdeepmd.so
+
+

因为GCC版本差别,可能没有$deepmd_root/bin/dp_ipi

+

安装LAMMPS的DeePMD-kit模块

+

接下来安装

+
cd $deepmd_source_dir/source/build
+make lammps
+
+

此时在$deepmd_source_dir/source/build下会出现USER-DEEPMD的LAMMPS拓展包。

+

下载LAMMPS安装包,按照常规方法编译LAMMPS:

+
cd /some/workspace
+# Download Lammps latest release
+wget -c https://lammps.sandia.gov/tars/lammps-stable.tar.gz
+tar xf lammps-stable.tar.gz
+cd lammps-*/src/
+cp -r $deepmd_source_dir/source/build/USER-DEEPMD .
+
+

选择需要编译的包(若需要安装其他包,请参考Lammps官方文档):

+
make yes-user-deepmd
+make yes-kspace
+
+

如果没有make yes-kspace 会因缺少pppm.h报错。

+

加载MPI环境,并采用MPI方式编译Lammps可执行文件:

+
module load intel/17u5 mpi/intel/17u5
+make mpi -j4
+
+
+

注意

+

此处使用的GCC版本应与之前编译Tensorflow C++接口和DeePMD-kit C++接口一致,否则可能会报错:@GLIBCXX_3.4.XX。如果在前面的安装中已经加载了GCC 4.9.4,请在这里也保持相应环境的加载。

+
+

经过以上过程,Lammps可执行文件lmp_mpi已经编译完成,用户可以执行该程序调用训练的势函数进行MD模拟。

+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_ikkem/index.html b/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_ikkem/index.html new file mode 100644 index 00000000..62e79193 --- /dev/null +++ b/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_ikkem/index.html @@ -0,0 +1,2889 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeePMD-kit安装实战:嘉庚超算 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

DeePMD-kit安装实战:嘉庚超算

+

嘉庚超算中心没有统一安装DeepMD-kit软件,用户使用前需要自行编译。本文参考最佳实践,基于嘉庚超算预装的模块进行。此处以DeepMD-kit v2.2.0版本为例。

+

初次安装

+
    +
  1. +

    创建虚拟环境(此处以deepmd为例)

    +
    module load anaconda/2020.3
    +conda create -n deepmd python=3.9
    +
    +
  2. +
  3. +

    (可选)虚拟环境激活/退出时的环境配置;也可不做此配置,每次手动执行activate.sh中的代码

    +
    # replace your own username here!
    +mkdir -p $CONDA_PREFIX/etc/conda/activate.d
    +touch $CONDA_PREFIX/etc/conda/activate.d/activate.sh
    +mkdir -p $CONDA_PREFIX/etc/conda/deactivate.d
    +touch $CONDA_PREFIX/etc/conda/deactivate.d/deactivate.sh
    +conda env config vars set LD_LIBRARY_PATH=$tensorflow_root/lib:$deepmd_root/lib:$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
    +
    +
      +
    • $CONDA_PREFIX/etc/conda/activate.d/activate.sh
    • +
    +
    module load intel/2018.3
    +module load gcc/9.2
    +module load cmake/3.21
    +module load cuda/11.3
    +module load lammps/2022.6.23
    +
    +export CC=`which gcc`
    +export CXX=`which g++`
    +export FC=`which gfortran`
    +
    +# replace CONDA_PREFIX and deepmd_source_dir!!!
    +export deepmd_source_dir=/public/home/username/apps/deepmd-2.2.0
    +export tensorflow_root=$deepmd_source_dir/_skbuild/tensorflow_root
    +export deepmd_root=$deepmd_source_dir/_skbuild/deepmd_root
    +export LAMMPS_PLUGIN_PATH=$deepmd_root/lib/deepmd_lmp
    +
    +
      +
    • $CONDA_PREFIX/etc/conda/deactivate.d/deactivate.sh
    • +
    +
    module unload intel/2018.3
    +module unload gcc/9.2
    +module unload cmake/3.21
    +module unload cuda/11.3
    +module unload lammps/2022.6.23
    +
    +unset deepmd_source_dir
    +unset tensorflow_root
    +unset deepmd_root
    +unset LAMMPS_PLUGIN_PATH
    +
    +

    设置好后,重启虚拟环境。此后每次激活虚拟环境时,会自动加载相应的模块。
+
  3. 训练代码安装

    +

    pip install tensorflow==2.7 --upgrade
    +pip install scikit-build ninja
    +pip install protobuf==3.20
    +cd $deepmd_source_dir
    +export DP_VARIANT=cuda
    +pip install .
    +
    +4. (可选)第三方接口安装

    +
    mkdir -p $tensorflow_root/lib 
    +cd $tensorflow_root
    +ln -s $CONDA_PREFIX/lib/python3.9/site-packages/tensorflow/include .
    +cd lib
    +ln -s $CONDA_PREFIX/lib/python3.9/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so libtensorflow_cc.so
    +ln -s $CONDA_PREFIX/lib/python3.9/site-packages/tensorflow/libtensorflow_framework.so.2 .
    +ln -s libtensorflow_framework.so.2 libtensorflow_framework.so
    +
    +mkdir -p $deepmd_source_dir/source/build
    +mkdir -p $deepmd_root
    +cd $deepmd_source_dir/source/build
    +cmake -DLAMMPS_SOURCE_ROOT=/public/software/lammps/lammps-2022.6.23-intel -DUSE_TF_PYTHON_LIBS=TRUE -DUSE_CUDA_TOOLKIT=TRUE -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
    +make -j20
    +make install
    +
    +
  4. +
+

代码更新

+
    +
  1. +

    Python代码

    +
    cd $deepmd_source_dir
    +export DP_VARIANT=cuda
    +pip install .
    +
    +
  2. +
  3. +

    C++代码

    +
    cd $deepmd_source_dir/source/build
    +make -j20
    +make install
    +
    +
  4. +
+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/index.html b/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/index.html new file mode 100644 index 00000000..66636d9e --- /dev/null +++ b/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/index.html @@ -0,0 +1,3392 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeepMD-kit安装最佳实践 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

DeepMD-kit安装最佳实践

+

背景:以 Zeus 集群为例,在服务器通过源代码编译安装DeepMD-kit和包含完整接口的LAMMPS。虽然官方已经提供了通过 Conda 一键安装的方法,但由于此法所安装的各个组件均为预编译版本,因而针对课题实际情况无法做更多拓展和改动,且通过 Conda 安装的 Protobuf 存在版本冲突,无法进一步编译其他接口。这里介绍一种方法,通过 Conda 安装通常不需要改动的TensorFlow C++ Interface,其余部分仍手动编译。由于目前新版Lammps已经提供Plugin支持,DeePMD亦支持通过Plugin调用,故可令组件之间相互解耦、减少后续安装的工序。

+

初始环境说明

+

以下过程以 Zeus 集群为例,操作系统及版本为CentOS 7,管理节点联网,采用module作为环境管理。

+

以下是预先配置好的环境,对于其他集群,可以此要求准备环境,其中 Intel MPI 可以用 MPICH 代替,其余组件请自行安装。注意CUDA 11.3对Nvidia驱动版本有要求,需要预先检查好(可用nvidia-smi快速查看)。

+
    +
  • 通过yum安装
  • +
  • Git >= 1.8.2
  • +
  • 通过module加载
  • +
  • CUDA 11.3
  • +
  • Miniconda 3
  • +
  • GCC >= 7.4.0
  • +
  • Intel MPI 2017 (暂未对其他版本进行测试)
  • +
+
+

版本号仅供参考,实际安装因人而异,参考执行即可。

+
+

DeePMD-kit 常用组件关系

+
flowchart TB
+  tfcpp(TensorFlow C++ Interface) -.-> tfpy(TensorFlow Python Interface)
+  tfpy --> dppy(DeePMD Python Interface)
+  dpcpp(DeePMD C++ Interface) -.-> dppy
+  tfcpp --> dpcpp
+  dpcpp --> lmp(DeePMD Lammps API)
+  tfcpp --> lmp
+

如图所示展示了DeePMD-kit各个常用组件之间的联系,需要声明的是,图示并非对代码的严谨解析,仅仅是对组织结构的直观表现。

+

势函数训练过程通常依赖于DeePMD Python Interface,这一部分在用 Pip 安装时即依赖于TensorFlow的Python Interface,因此在图中用实线箭头表示。而用Pip安装的TensorFlow Wheel已经预先编译了底层所需的Tensorflow C++ Interface,这一隐含的依赖用虚线箭头表示。类似地,DeePMD-kit在Pip安装时也会调用CMake来编译一部分所需的C++库,因而也存在类似的关系。

+

当用训练好的势函数来进行MD模拟时,则需要运行Lammps等分子动力学软件调用DeePMD-kit接口。以Lammps为例,现有的两种方式分别是:
+
  • 在Lammps安装时即额外编译DeePMD API(即USER-DEEPMD)
  • 编译DeePMD Plugin,由支持Plugin的Lammps版本调用

+

这两种方式在编译时均需要调用DeePMD-kit和TensorFlow的C++ Interface,故在图中也用实线表示。而TensorFlow C++ Interface实际上可由源代码结合必要的底层依赖(如GCC、CUDA等)独立编译,DeePMD-kit C++ Interface只需在TensorFlow C++ Interface基础上进行编译(图中实线箭头)。

+

因而在实际处理安装关系时,我们也可以采用相对独立的编译方式来最大化解耦组件。下文的思路将按以下步骤展开:

+
    +
  1. 建立独立的Conda环境,用 Pip 安装 TensorFlow 和 DeePMD-kit,提供势函数训练功能;
  2. +
  3. 结合必要的组件、环境等编译Lammps,提供经典分子动力学模拟功能;
  4. +
  5. 编译 DeePMD C++ Interface,在此基础上编译 DeePMD-kit Lammps Plugin供Lammps调用,提供 DeePMD 模拟功能;
  6. +
  7. 编译 DeePMD CP2K API 和对应的CP2K版本(No free lunch.)
  8. +
+

安装DeePMD-kit Python Interface

+

创建新的环境

+

首先准备必要的依赖。

+

检查可用的模块,并加载必要的模块:

+
module avail
+module add cuda/11.3
+module add gcc/7.4.0
+
+

注意这里导入的是GCC 7.4.0版本,如果采用低于4.9.4的版本(不导入GCC)则dp_ipi不会被编译。

+

然后创建虚拟环境,步骤请参考Anaconda 使用指南

+

假设创建的虚拟环境名称是 deepmd,则请将步骤最后的 <your env name> 替换为 deepmd。若采用该步骤的设置,则虚拟环境将被创建在/data/user/conda/env/deepmd下(假设用户名为user)。

+
conda create -n deepmd python=3.9
+conda activate deepmd
+
+

注意请务必为创建的虚拟环境安装所需的Python环境。通常不指定Python版本号的情况下(例如文中的步骤conda create -n <your env name> python)会安装Conda推荐的最新版本,如需要替代请对应指定,如conda create -n deepmd python=3.10

+

对于无法联网的节点,在编译时需要将所需的驱动程序库的符号库libcuda.so以libcuda.so.1的名称手动链接到某个具有权限的路径/some/local/path并分别加入环境变量,以通过编译流程:

+
ln -s /data/share/apps/cuda/11.3/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/share/apps/cuda/11.3/lib64/stubs:/some/local/path
+
+
+

提示

+

若在Zeus 集群上安装,管理员已事先把libcuda.so.1 链接在/data/share/apps/cuda/11.3/lib64/stubs/下,故无需额外创建软链接,同理/some/local/path也无需加入环境变量,但仍需要驱动程序库的符号链接libcuda.so。注意这一步骤执行后,实际运行时需要从环境变量中移除

+
+

安装DeePMD-kit的Python接口

+

以防万一可以升级下pip的版本:

+
pip install --upgrade pip
+
+

接下来安装Tensorflow的Python接口

+
pip install tensorflow
+
+

若提示已安装,请使用--upgrade选项进行覆盖安装。若提示权限不足,请使用--user选项在当前账号下安装。

+

然后下载DeePMD-kit的源代码(注意把v2.1.5替换为需要安装的版本,如v2.0.3等)

+
cd /some/workspace
+git clone --recursive https://github.com/deepmodeling/deepmd-kit.git deepmd-kit -b v2.1.5
+
+

在运行git clone时记得要--recursive,这样才可以将全部文件正确下载下来,否则在编译过程中会报错。

+
+

提示

+

如果不慎漏了--recursive, 可以采取以下的补救方法: +

git submodule update --init --recursive
+

+
+

若集群上 CMake 3没有安装,可以用pip进行安装:

+
pip install cmake
+
+

修改环境变量以使得cmake正确指定编译器:

+
export CC=`which gcc`
+export CXX=`which g++`
+export FC=`which gfortran`
+
+

若要启用CUDA编译,请导入环境变量:

+
export DP_VARIANT=cuda
+
+

随后通过pip安装DeePMD-kit:

+
cd deepmd-kit
+pip install .
+
+

安装Lammps

+

注意这一部分可以从DeePMD安装中解耦出来,因而兼顾对Lammps的不同需求,而不必为DeePMD专门编译一个Lammps可执行文件。

+

环境准备

+

首先加载所需的环境,包括CMake、Intel MPI等。若不需要编译Lammps原生的GPU加速,可不需要加载CUDA环境。注意需要把Intel MPI提供的头文件(mpi.h等)所在路径加入C_INCLUDE_PATH中。

+

仍以Zeus为例,如下所示。注意这里使用的是全局的CMake,如果与上一部分采用同一个环境,可不需重复加载。

+
module load cmake/3.20
+module load intel/17.5.239 mpi/intel/2017.5.239 gcc/7.4.0
+# if not included
+export C_INCLUDE_PATH=<intel_installation_dir>/impi/2017.4.239/include64:$C_INCLUDE_PATH
+
+

若需要编译对应的Lammps组件(如Plumed、NetCDF等),请对应加载所需的环境:

+
module load netcdf/4.9.0_intel17
+module load plumed
+
+

如需编译Lammps原生的GPU加速,可加载CUDA环境,注意这会使得编译得到的Lammps无法在不包括GPU的节点上运行。

+
# gpu acceleration support
+module load cuda/11.3
+
+
+

Warning

+

若编译Lammps原生的GPU加速,请注意原生默认采用半精度。Lammps在开启GPU加速时速度可有较大提升,但精度问题已知可能导致DeePMD势函数模拟误差上升(体现为Model Deviation相比不开启GPU加速显著上升),请针对体系做测试确认误差是否符合预期。DeePMD接口官方未提供Lammps的GPU加速支持,且默认编译的是双精度版本,请务必注意。

+
+

配置编译

+

创建文件夹

+
cd <lammps_source_code>
+mkdir build
+cd build
+
+

进行编译

+
cmake  -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \
+-DCMAKE_Fortran_COMPILER=gfortran \
+-D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=mpi \
+-D BUILD_SHARED_LIBS=yes \
+-D CMAKE_INSTALL_PREFIX=<lammps_installation_dir> \
+-D CMAKE_INSTALL_LIBDIR=lib \
+-D CMAKE_INSTALL_FULL_LIBDIR=<lammps_installation_dir>/lib \
+-C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake ../cmake
+
+

CMAKE_INSTALL_PREFIX 可以根据安装实际路径修改,但这一方法得到的是共享库( *.so ),所以包括Lammps源代码在内都不要移动。

+

若开启对应插件,请注意在 ../cmake 前插入对应选项,如:

+
-D PKG_PLUMED=yes -D PLUMED_MODE=shared \
+-D PKG_H5MD=yes -D PKG_NETCDF=yes \
+-D NETCDF_INCLUDE_DIR=<netcdf_installation_dir>/include 
+
+

若希望开启GPU加速,请增加选项:

+
-D PKG_GPU=on -D GPU_API=cuda
+
+

开始编译

+

运行

+
make
+make install
+
+

编译DeePMD-kit Lammps Plugin

+

方法一:静态编译

+

安装Tensorflow的C++ 接口

+

以下安装,假设软件包下载路径均为 /some/workspace, 以 TensorFlow 2.7.0版本、DeePMD-kit 2.1.5 版本为例进行说明,其他版本的步骤请参照修改。注意为保证模型兼容性,版本号最好与 Python Interface对应。

+

本步骤需要使用 Conda,因此在前文基础上进行。

+

搜索仓库,查找可用的 TensorFlow 的 C++ 接口版本。

+
conda search libtensorflow_cc -c https://conda.deepmodeling.com
+
+

结果如下:

+
Loading channels: done
+# Name                       Version           Build  Channel
+libtensorflow_cc              1.14.0  cpu_h9a2eada_0
+libtensorflow_cc              1.14.0  gpu_he292aa2_0
+libtensorflow_cc               2.0.0  cpu_h9a2eada_0
+libtensorflow_cc               2.0.0  gpu_he292aa2_0
+libtensorflow_cc               2.1.0  cpu_cudaNone_0
+libtensorflow_cc               2.1.0  gpu_cuda10.0_0
+libtensorflow_cc               2.1.0  gpu_cuda10.1_0
+libtensorflow_cc               2.1.0   gpu_cuda9.2_0
+libtensorflow_cc               2.3.0  cpu_cudaNone_0
+libtensorflow_cc               2.3.0  gpu_cuda10.1_0
+libtensorflow_cc               2.4.1  gpu_cuda11.0_0
+libtensorflow_cc               2.4.1  gpu_cuda11.1_0
+libtensorflow_cc               2.5.0  cpu_cudaNone_0
+libtensorflow_cc               2.5.0  gpu_cuda10.1_0
+libtensorflow_cc               2.5.0  gpu_cuda11.3_0
+libtensorflow_cc               2.7.0  cpu_h6ddf1b9_0
+libtensorflow_cc               2.7.0 cuda101h50fd26c_0
+libtensorflow_cc               2.7.0 cuda113h3372e5c_0
+libtensorflow_cc               2.7.0 cuda113hbf71e95_1
+libtensorflow_cc               2.9.0  cpu_h681ccd4_0
+libtensorflow_cc               2.9.0 cuda102h929c028_0
+libtensorflow_cc               2.9.0 cuda116h4bf587c_0
+
+

这里所希望安装的版本是2.7.0的GPU版本,CUDA版本为11.3,因此输入以下命令安装:

+
conda install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com
+
+

若所安装的环境没有实际的GPU驱动(比如集群的登录节点)或需要用到Conda安装CudaToolkit,可能需要参照此处说明强制指定GPU环境。比如:

+
CONDA_OVERRIDE_CUDA="11.3" conda install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com
+
+

请注意 CONDA_OVERRIDE_CUDA 的值需要与GPU支持以及希望用到的CUDA版本相匹配。

+
+

提示

+

注意A100仅支持TF 2.4.0以上、CUDA11.2以上,安装时请对应选择。

+
+
+

提示

+

个别版本在后续编译时可能会提示需要libiomp5.so,请根据实际情况确定是否需要载入Intel环境或者conda install intel-openmp

+
+
+

提示

+

conda命令可能速度较慢,也可以考虑切换为mamba,后者可大幅加速Conda的性能,且完全兼容。只需参照前述链接安装后将conda替换为mamba即可

+
+
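
例如,上文的安装命令改用mamba执行即为(示意):

+
mamba install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com
+
+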

若成功安装,则定义环境变量:

+
export tensorflow_root=/data/user/conda/env/deepmd
+
+

即虚拟环境创建的路径。后文将使用 $tensorflow_root 来指定该路径。

+

安装DeePMD-kit的C++ 接口

+

下面开始编译DeePMD-kit C++接口:

+
deepmd_source_dir=`pwd`
+cd $deepmd_source_dir/source
+mkdir build 
+cd build
+
+

假设DeePMD-kit C++ 接口安装在 /some/workspace/deepmd_root 下,定义安装路径 deepmd_root

+
export deepmd_root=/some/workspace/deepmd_root
+
+

在build目录下运行:

+
cmake -DLAMMPS_SOURCE_ROOT=<lammps_source_code> \
+-DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root \
+-DUSE_CUDA_TOOLKIT=TRUE ..
+
+

注意这里的 <lammps_source_code> 对应前文中Lammps的源码路径。

+

最后编译并安装:

+
make
+make install
+
+

若无报错,通过以下命令执行检查是否有正确输出:

+
$ ls $deepmd_root/lib
+deepmd_lmp/           libdeepmd_cc_low.so   libdeepmd_gromacs.so  libdeepmd_ipi.so      libdeepmd_lmp.so      libdeepmd_op.so
+deepmd_lmp_low/       libdeepmd_cc.so       libdeepmd_ipi_low.so  libdeepmd_lmp_low.so  libdeepmd_op_cuda.so  libdeepmd.so
+
+

注意应当包含deepmd_lmp/libdeepmd_lmp.so,后两者即为Lammps插件的位置。

+

方法二:采用TensorFlow Python 版本的库

+

从 DeePMD-kit v2.2 起,cmake 支持设置 -DUSE_TF_PYTHON_LIBS=TRUE的方式,从而免去了安装 libtensorflow_cc 的麻烦。

+
cmake -DLAMMPS_SOURCE_ROOT=<lammps_source_code> \
+-DUSE_TF_PYTHON_LIBS=TRUE -DUSE_CUDA_TOOLKIT=TRUE \
+-DCMAKE_INSTALL_PREFIX=$deepmd_root ..
+
+
+

Tip

+

请注意,这种方法采用Python Wheel提供的 libtensorflow_framework.so.2_pywrap_tensorflow_internal.so (作为 libtensorflow_cc.so的替代)进行编译。 +后者依赖 Python 库 libpython3.*.so.*(因版本不同而异),请注意基于上述库的编译应保证后者路径也在 LD_LIBRARY_PATH 中。

+
+

为使得编译好的库文件可以更容易找到上述依赖,请执行以下操作,建立一个伪 tensorflow_root 目录,假设该路径位于 /some/workspace/tensorflow_root 下,同时假设 Conda 环境仍位于 /data/user/conda/env/deepmd 下:

+
export tensorflow_root=/some/workspace/tensorflow_root
+mkdir -p $tensorflow_root/lib 
+cd $tensorflow_root
+ln -s /data/user/conda/env/deepmd/lib/python3.10/site-packages/tensorflow/include .
+cd lib
+ln -s /data/user/conda/env/deepmd/lib/python3.10/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so libtensorflow_cc.so
+ln -s /data/user/conda/env/deepmd/lib/python3.10/site-packages/tensorflow/libtensorflow_framework.so.2 .
+ln -s libtensorflow_framework.so.2 libtensorflow_framework.so
+
+

于是,我们便构建了一个伪 tensorflow_root 目录。注意后文的 $tensorflow_root 此时应指向该路径。

+

调用方法

+

使用前请加载好环境变量。注意若未定义 $deepmd_root$tensorflow_root,请补全为完整路径。这里的 /data/user/conda/env/deepmd 仍是 Conda 环境的路径,请相应替换。

+
export LD_LIBRARY_PATH=$tensorflow_root/lib:$deepmd_root/lib:/data/user/conda/env/deepmd/lib:$LD_LIBRARY_PATH
+export LAMMPS_PLUGIN_PATH=$deepmd_root/lib/deepmd_lmp
+
+

Lammps便会自动寻找插件并加载,从而可以实现DeePMD的支持。

+
pair_style      deepmd ../graph.pb
+pair_coeff      * *
+
+

若无法自动找到,也可以手动在 输入文件 中加载,写在 pair_style 上一行即可,注意 $deepmd_root$tensorflow_root 须替换为完整路径

+
plugin load     $deepmd_root/lib/libdeepmd_lmp.so
+pair_style      deepmd ../graph.pb
+pair_coeff      * *
+
+

运行命令仍然是 lmp_mpi -i <input_file>

+
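
例如,一个典型的并行运行命令示意如下(核数与输入文件名请按实际情况替换):

+
mpirun -np 4 lmp_mpi -i input.lammps
+
+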

DP-CP2K 安装指引

+

首先clone对应的安装包:

+
git clone https://github.com/cp2k/cp2k.git --recursive --depth=1
+
+

然后运行相应的Toolchain脚本:

+
module unload mpi/intel/2017.5.239 # (1)!
+module load mpi/openmpi/4.1.6-gcc # (2)!
+cd tools/toolchain/
+./install_cp2k_toolchain.sh --with-gcc=system --mpi-mode=openmpi --with-deepmd=$deepmd_root
+
+
    +
  1. 新版CP2K会自动检测 Intel MPI 且无视强制使用其他环境如 OpenMPI 的设定,旧版 Intel MPI不被兼容
  2. +
  3. 由于 --with-openmpi=install 在 Zeus 上无法正确安装,这里预先安装好了 OpenMPI。
  4. +
+

如不需要 MPI 和 DFT 相关功能,可以如下设置以减少步骤(注意后续编译移除掉 psmp pdbg 选项):

+
cd tools/toolchain/
+module unload mpi/intel/2017.5.239 # (1)!
+./install_cp2k_toolchain.sh --with-deepmd=$deepmd_root --mpi-mode=no --with-libint=no --with-libxc=no --with-libxsmm=no
+
+
    +
  1. 新版CP2K会自动检测 Intel MPI 且无视强制使用其他环境如 OpenMPI 的设定,旧版 Intel MPI不被兼容
  2. +
+

根据脚本运行结尾的提示复制arch文件并source所需的环境变量。

+

这里的目的是让编译时可以正确链接 libpython3.*.so.*,因而 /data/user/conda/env/deepmd/ 仍旧是 Conda 环境路径。

+
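
对应操作通常类似如下(在tools/toolchain/目录下执行,具体命令请以Toolchain脚本结尾的实际提示为准):

+
cp install/arch/local.* ../../arch/
+source ./install/setup
+
+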

最后回到主目录进行编译:

+
make -j 4 ARCH=local VERSION="psmp pdbg ssmp sdbg" # (1)!
+
+
    +
  1. 如不需要 MPI ,请移除掉 psmp pdbg
  2. +
+

编译正确完成后,可执行文件生成在 exe/ 下,即 cp2k.ssmpcp2k.psmp

+

关于 DP-CP2K 的使用,请参考 CP2K: DeePMD插件

+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/index.html b/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/index.html new file mode 100644 index 00000000..5a04a382 --- /dev/null +++ b/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/index.html @@ -0,0 +1,3076 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeePMD-kit安装实战:PC篇 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

DeePMD-kit安装实战:PC篇

+

背景

+

需要对DeePMD-kit的源码进行一些修改,针对新的物理量构建模型。对代码的调试需要GPU,但是不需要很好的性能,所以在PC端进行可以节省在集群上的排队时间。

+

安装系统:Ubuntu 20.04

+

DeePMD-kit代码结构

+

在记录安装过程之前先简单描述一下DeePMD-kit的代码结构。

+

DeePMD-kit在训练部分的代码是在.py文件中调用 TensorFlow 实现的(TF自带OP/自定义OP)。但是TF的底层是用 C++ 构建的,所以在使用 DeePMD-kit 时需要安装 TF/python 接口。

+

进入到修改过代码的文件夹,执行:

+
pip install .
+
+

此时会基于已修改的代码生成新的可执行文件。

+

如果想基于DeePMD-kit生成的模型和lammps/CP2K等软件的对接,需要另外安装C++接口。这部分可以参考之前的教程(编译/修改代码后重新编译)。

+

conda安装

+

如果不需要对源码进行修改,可以利用官方教程 easy installation 中的 conda 安装

+
#(base)
+conda create -n deepmd deepmd-kit=*=*gpu libdeepmd=*=*gpu lammps-dp cudatoolkit=11.3 horovod -c https://conda.deepmodeling.org
+
+

此命令新建了一个名为deepmd的虚拟环境,并将deepmd-kit安装在这个环境中。 +Conda 安装会一并安装 CUDA Toolkit,因此只要保证电脑的驱动支持即可。可通过以下指令查看驱动版本及其支持的cuda版本:

+
nvidia-smi
+
+
+

目前通过conda默认安装的是10.1版本的CUDA Toolkit,由于CUDA向下兼容,故版本高于10.1即可。如果驱动支持的CUDA版本过低,可以在Ubuntu的Software&Updates/Additional Drivers里选择新版的驱动进行升级。

+
+

利用 Conda 便捷安装时,DeePMD-kit的C++底层文件全部都已经编译成可执行文件.so,在本地只能查看到可执行文件.so.py文件,无法对底层进行修改。所以如果需要对源码进行修改,需要手动安装编译。

+

Conda安装包括了预编译的 TF/C++ 接口,可通过定义环境变量省去以前教程中提到的编译的步骤。(见下文)

+

手动编译

+

上一节的 Conda 安装是在deepmd虚拟环境下安装的,手动安装我们新建一个环境dp-tf

+
conda info -e
+# if you have been in `deepmd`, deactivate first
+conda deactivate
+# create a new environment
+conda create -n dp-tf
+# if you want to specify the version of python in dp-tf
+#conda create -n dp-tf python=3.9
+
+
+

tip

+

建议在新建环境dp-tf 时设置python版本和deepmd保持一致,否则后续安装tensorflow时可能因为python版本不兼容报错No matching distribution found for tensorflow。

+
+

下载源码&设置环境变量

+

下载源码(注意一定要有--recursive,具体参见 DeepMD-kit安装:旧版 一节)

+
#(tf-dp)
+git clone --recursive https://github.com/deepmodeling/DeePMD-kit.git DeePMD-kit
+
+

设置环境变量

+
#(tf-dp)
+cd DeePMD-kit
+# set $deepmd_source_dir as the directory of the deepmd source code
+deepmd_source_dir=$(pwd)
+# set $tensorflow_root as the directory of the TF/C++ interface
+# the dir of the environment with conda DP
+tensorflow_root=/dir/for/env/with/condaDP
+
+
+

可以用conda env list指令查看环境deepmd的地址(/dir/for/env/with/condaDP)

+
+

如果担心安装过程中需要退出,可以临时加到~/.bashrc文件中并source ~/.bashrc

+
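
即在~/.bashrc末尾追加类似如下内容后执行source ~/.bashrc(路径仅为示意,请按实际情况替换):

+
export deepmd_source_dir=/path/to/DeePMD-kit
+export tensorflow_root=/dir/for/env/with/condaDP
+
+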

TF/Python 接口

+

首先可以更新一下pip,并安装新版TensorFlow:

+
#(tf-dp)
+pip install --upgrade pip
+pip install --upgrade tensorflow==2.5.0
+
+
+

tip

+

利用conda便捷安装可以省去后面TF/C++接口的安装,所以这里的TF安装和conda安装中的TF保持一致。(具体版本可在conda安装过DeePMD-kit的环境(deepmd)下查看已安装的tensorflow-base版本。)

+
+

例如: +

# assume you have been in dp-tf env
+#(tf-dp)
+conda deactivate
+#(base)
+conda activate deepmd
+#(deepmd)
+conda list
+>>> tensorflow-base           2.5.0           gpu_py39h7c1560b_0    https://conda.deepmodeling.org
+#(deepmd)
+conda deactivate
+#(base)
+conda activate dp-tf
+#(tf-dp)
+pip install --upgrade tensorflow==2.5.0
+

+

DeePMD-kit/Python 接口

+
#(tf-dp)
+cd $deepmd_source_dir
+export DP_VARIANT=cuda
+pip install .
+
+

这一步的pip installdeepmd_source_dir下的文件进行编译。

+
+

warning

+

环境变量DP_VARIANT的默认值是cpu,要记得根据需要进行修改!

+
+
+

info

+

如果对源码进行了修改,需要重新编译。

+
+

这一步中报错可能的应对措施:

+
    +
  • 网络问题1
  • +
+

修改镜像源(具体可参考使用帮助

+
pip install pip -U
+pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
+
+
    +
  • 网络问题2(...timed out...
  • +
+

多试几次...

+
    +
  • 升级setuptools
  • +
+
pip install --upgrade setuptools --no-build-isolation
+
+
    +
  • 缺各种包
  • +
+

如果直接pip install会发现所有都是已安装的,需要pip uninstallpip install

+

conda list检查发现应该是没有安装到这个环境里。

+

如果有报错而无法直接卸载:

+
It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.
+
+

可以考虑强制覆盖安装:

+
pip install some_package --ignore-installed
+
+
    +
  • GCC版本问题
  • +
+
    138 | #error -- unsupported GNU version! gcc versions later than 8 are not supported!
+
+

Ubuntu 20.04默认的GCC版本是9.3.0(gcc --version查看),需要卸载再重装低版本(比如7.5)

+
sudo apt remove gcc
+sudo apt-get install gcc-7 g++-7 -y
+sudo ln -s /usr/bin/gcc-7 /usr/bin/gcc
+sudo ln -s /usr/bin/g++-7 /usr/bin/g++
+sudo ln -s /usr/bin/gcc-7 /usr/bin/cc
+sudo ln -s /usr/bin/g++-7 /usr/bin/c++
+gcc --version
+
+

DeePMD-kit/C++ 接口

+

官方教程(可能需要apt-get安装cmake,如果没有足够权限也可以通过pip安装)。

+

和其他计算软件(如lammps)的接口

+

官方教程这里

+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_installation/gcc/index.html b/en/wiki/software_installation/gcc/index.html new file mode 100644 index 00000000..275c985d --- /dev/null +++ b/en/wiki/software_installation/gcc/index.html @@ -0,0 +1,2758 @@ + + + + + + + + + + + + + + + + + + + + + + + + + GCC 安装教程 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

GCC 安装教程

+

这里以 5.5.0 版本为例,其他版本可以参考,只需将版本号替换即可。

+

首先下载 gcc 安装包,国内直接访问 gnu 官网较慢,可以通过 tuna 等镜像安装

+
wget https://mirrors.tuna.tsinghua.edu.cn/gnu/gcc/gcc-5.5.0/gcc-5.5.0.tar.gz
+
+

解压并下载编译所需环境:

+
tar -zxvf gcc-5.5.0.tar.gz
+cd gcc-5.5.0
+./contrib/download_prerequisites
+cd ..
+
+

创建编译目录,并在其中进行编译:

+
mkdir objdir
+cd objdir
+../gcc-5.5.0/configure --prefix=/share/apps/gcc/5.5.0 --enable-languages=c,c++,fortran,go --disable-multilib
+make
+make install
+
+

编写 modulefile ,修改环境变量:

+
#%Module1.0#####################################################################
+##
+## GCC modulefile
+##
+proc ModulesHelp { } {
+        global version
+
+        puts stderr "\tSets up environment for GCC v$version"
+}
+
+module-whatis   "sets up environment for GCC v5.5.0"
+
+# for Tcl script use only
+set     version 5.5.0
+set     root    /share/apps/gcc/$version
+
+prepend-path    INFOPATH        $root/share/info
+prepend-path    LD_LIBRARY_PATH $root/lib64:$root/lib:$root/libexec
+prepend-path    INCLUDE         $root/include
+prepend-path    MANPATH         $root/share/man
+prepend-path    PATH            $root/bin
+
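
将上述modulefile保存至集群的modulefiles目录下(例如/share/apps/modulefiles/gcc/5.5.0,路径仅为示意)后,即可按如下方式加载并验证:

+
module load gcc/5.5.0
+gcc --version
+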
+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_installation/install_from_src_in_conda/index.html b/en/wiki/software_installation/install_from_src_in_conda/index.html new file mode 100644 index 00000000..5fc3cdcc --- /dev/null +++ b/en/wiki/software_installation/install_from_src_in_conda/index.html @@ -0,0 +1,2766 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 虚拟环境下源码安装教程 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

虚拟环境下源码安装 C/C++程序:以 valgrind 为例

+

源码安装一般由 3 个步骤组成:配置(configure)、编译(make)、安装(make install)。默认情况下进入源码所在文件夹下顺序执行./configure && make && make install 会将文件安装在/usr/local下。但是,这种做法有两个不足:

+
    +
  • 某些软件(版本)仅应用于特定工作任务中,不同任务中的软件(版本)可能会有冲突
  • +
  • 集群上普通用户没有权限修改/usr/local进行安装
  • +
+

是否可以采用类似将 python 包安装到特定虚拟环境下的做法,把 C/C++程序通过源码安装到特定虚拟环境中呢?答案是:可以!接下来,以 Valgrind 为例说明如何将 C/C++软件包安装到特定虚拟环境下。

+
+
+

虚拟环境地址(根据自己情况修改):/new_data/jxzhu/envs/test_env

+
+
    +
  1. 下载源码并解压
  2. +
+
# download source code from official website
+wget -c https://sourceware.org/pub/valgrind/valgrind-3.19.0.tar.bz2
+# decompress
+tar -jxvf valgrind-3.19.0.tar.bz2
+
+
    +
  1. 进入文件夹并执行安装前序工作(此处根据需安装软件的指引进行)
  2. +
+
# enter the source code folder
+cd valgrind-3.19.0
+# NOTE: This is not a general procedure
+# Please check the installation guide for your package
+./autogen.sh
+
+
    +
  1. 通过--prefix将安装地址指定为虚拟环境所在地址
  2. +
+
# configure with given installation path
+./configure --prefix=/new_data/jxzhu/envs/test_env/
+
+
    +
  1. 编译及安装
  2. +
+
# make in parallel
+make -j20
+# install software
+make install
+
+

快速测试

+
(base) [jxzhu@login01:] /data/jxzhu/software/valgrind-3.19.0 $ which valgrind
+/usr/bin/which: no valgrind in (...)
+(base) [jxzhu@login01:] /data/jxzhu/software/valgrind-3.19.0 $ conda activate /new_data/jxzhu/envs/test_env/
+(test_env) [jxzhu@login01:] /data/jxzhu/software/valgrind-3.19.0 $ which valgrind
+/new_data/jxzhu/envs/test_env/bin/valgrind
+
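
安装完成后即可在该虚拟环境中直接调用,例如对某个可执行文件进行内存检查(示意):

+
valgrind --leak-check=full ./a.out
+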
+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_installation/softwares/index.html b/en/wiki/software_installation/softwares/index.html new file mode 100644 index 00000000..53475856 --- /dev/null +++ b/en/wiki/software_installation/softwares/index.html @@ -0,0 +1,4453 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Guidances for installation of codes - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Installation Guide for Codes and Libraries

+

First of all! Load the environments!

+

Before you install anything, especially when you need to compile code, make sure you know the type and version of the compiler you have. Usually, on your personal computer, you can use compiler commands directly, for instance, gcc, gfortran, ifort, mpic++. On a remote cluster (High Performance Cluster), the compilers are managed by module. You cannot use them unless you load them in advance. Therefore, check which compilers are available via module, and use a command such as module load gcc/4.9.4 to load the required compilers.

+

General Protocol for Installation:

+
    +
  1. Compile the Code
  2. +
  3. Quick test the code at server node
  4. +
  5. Write module files to code (we recommend to manage codes by module)
  6. +
  7. Test the code in the client node
  8. +
  9. write example lsf file in /share/base/scripts
  10. +
+

Where to Install?

+

Install in the /share/ directory. /share/ directory is the one synchronized to all the nodes by nfs.

+
    +
  1. Libraries: /share/apps/lib/<library name>/<version>
  2. +
  3. Codes, Pacakges, Softwares: /share/apps/<packages name>/<version>
  4. +
+

Standard in Writing Module file

+
    +
  1. module name: <package name>/<version>, like cp2k/6.1
  2. +
+

Standard in Writing lsf file

+
    +
  1. export necessary environmental variable
  2. +
  3. load prerequisite module
  4. +
+

Anaconda Installation Guide

+

Short Introduction

+

The open-source Anaconda Distribution is the easiest way to perform Python/R data science and machine learning on Linux, Windows, and Mac OS X. Choose the version suitable for your usage. If you'd like to use Anaconda on a cluster, ask the cluster administrator whether Anaconda has already been installed, which avoids wasting storage on the cluster.

+
+

Tip

+

A minimal version of Conda has been installed on cluster51 by Yunpei Liu. Use it via the module command

+
+

Installation Guide

+
    +
  • Go to this website, choose the right version for you. Personally, I recommend command line Installer for Linux and Mac OS System, while the Graphical Installer for Windows System
  • +
  • Follow the instruction in this page
  • +
+

QUIP Installation Guide

+

Short Introduction

+

The QUIP package is a collection of software tools to carry out molecular dynamics simulations. It implements a variety of interatomic potentials and tight binding quantum mechanics, and is also able to call external packages, and serve as plugins to other software such as LAMMPS, CP2K and also the python framework ASE. Various hybrid combinations are also supported in the style of QM/MM, with a particular focus on materials systems such as metals and semiconductors.

+
+

Tip

+

The tested compiler version: and for your information.

+
+

Use QUIP and quippy in cluster 51

+

If you need to use QUIP/GAP on cluster 51, please use the commands:

+
module load gcc/6.3.0 mpi/openmpi/3.0.0
+module load QUIP/GAP
+
+

If you want to use quippy:

+
module load miniconda/3
+source activate /share/apps/QUIP/quippy-py3/
+
+

Install Guide

+
    +
  • Git clone from repository
  • +
+
git clone --recursive https://github.com/libAtoms/QUIP.git
+
+
    +
  • Go to the package root and export variable
  • +
+
export QUIP_ARCH=linux_x86_64_gfortran
+
+
    +
  • Make configuration
  • +
+
make config
+#if everything fine
+make
+
+

Packages and Extra Interfaces of QUIP

+

Add GAP Packages

+
    +
  • Download GAP file from here, then you obtain a tar file named GAP.tar, unzip it
  • +
+
tar -xvf GAP.tar
+
+
    +
  • You will obtain a directory named GAP/, copy this directory into QUIP root/src.
  • +
+
cp -r GAP <QUIP root>/src/
+
+
    +
  • Reconfig your make by choose install GAP as y
  • +
+
#recompile this code again
+make
+
+

Build QUIPPY, A QUIP Python Interface

+
    +
  • Export another environmental variable
  • +
+
#install for your self
+export QUIPPY_INSTALL_OPTS=--user
+#choose the location for installation of quippy
+export QUIPPY_INSTALL_OPTS=--prefix=<directory>
+
+
    +
  • Go to <QUIP root>/src/f90wrap, and install f90wrap by:
  • +
+
pip install .
+
+
    +
  • Back to <QUIP root>
  • +
+
make install-quippy
+
+
    +
  • Test whether installed successfully.
  • +
+
make test
+
+

Trouble Shooting

+
ImportError: dynamic module does not define module export function
+
Example:
+Traceback (most recent call last):
+  File "<stdin>", line 1, in <module>
+  File "/share/apps/QUIP/quippy-py3/lib/python3.8/site-packages/quippy-https_github.com_libAtoms_QUIP.git_ec1ed34_dirty-py3.8-linux-x86_64.egg/quippy/__init__.py", line 2, in <module>
+    import _quippy
+ImportError: dynamic module does not define module export function (PyInit__quippy)
+
+

Solution: add <QUIP root>/build/${QUIP_ARCH} to your PYTHONPATH environment variable, for example:

+
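
A sketch of the corresponding command (replace <QUIP root> with the actual path of your QUIP source):

+
export PYTHONPATH=<QUIP root>/build/${QUIP_ARCH}:$PYTHONPATH
+
+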

VASP

+

Short Introduction

+

(TODO)

+

Install Guide

+
    +
  1. +

    Get the VASP source code and pseudopotentials.

    +
  2. +
  3. +

    Load environment +

    module load intel
    +

    +
  4. +
  5. +

    Choose makefile.include according to the platform and make +

    cd vasp.5.4.4
    +make std
    +make gam
    +

    +
  6. +
  7. +

    If everything is right, you will find vasp_std in vasp.5.4.4/build/std and you can run it with mpirun -np 24 vasp_std.

    +
  8. +
+

Plugins

+

Wannier90

+
    +
  1. +

    Download Wannier90 from http://www.wannier.org/download/ . Notice: currently VASP only supports Wannier90-1.2

    +
  2. +
  3. +

    Modify compile file for Wannier90 make.sys.intel. Here we use the MKL. +

    #LIBDIR = /opt/intel/mkl721/lib/32
    +#LIBS = -L$(LIBDIR) -lmkl_lapack -lmkl_ia32 -lguide -lpthread
    +LIBDIR = $(MKLROOT)/lib/intel64
    +LIBS = -L$(LIBDIR) -mkl -lpthread
    +

    +
  4. +
  5. +

    Compile and test +

    cp ./config/make.inc.ifort make.inc
    +make 
    +make lib # compile to get the libary: libwannier.a 
    +make tests # test whether the compilation is success
    +

    +
  6. +
  7. +

    Copy the libwannier.a library file to the VASP library path and modify the VASP makefile.include.

    +
  8. +
+
#Precompiler options
+CPP_OPTIONS= -DHOST=\"LinuxIFC\"\
+             -DMPI -DMPI_BLOCK=8000 \
+             -Duse_collective \
+             -DscaLAPACK \
+             -DCACHE_SIZE=4000 \
+             -Davoidalloc \
+             -Duse_bse_te \
+             -Dtbdyn \
+             -Duse_shmem \
+             -DVASP2WANNIER90   ## modify this line for Wannier90
+
+LLIBS += ../../libwannier.a  ## change here to the location of libwannier.a
+
+

Compilation optimization

+

    If you use an Intel Xeon Silver/Gold/Platinum CPU, using the following compilation parameters will give a 2× speedup! (Already tested on the 205 server) +

OFLAG      = -O3 -xCORE-AVX512
+

+

TODO in the future

+
    +
  1. Install vasp_gpu version
  2. +
  3. Benchmark different libary (FFTW/MKL)
  4. +
  5. other plugins: VASP-neb, vasp-beef
  6. +
  7. vasp6
  8. +
+

LAMMPS Installation Guide

+

Short Introduction

+

LAMMPS is a classical molecular dynamics code with a focus on materials modeling. It's an acronym for Large-scale Atomic/Molecular Massively Parallel Simulator.

+
+

Tip

+

I have installed one in cluster51, in directory /share/apps/lammps-7Aug19/. The compiler version: and for your information.

+
+

Install Guide

+
    +
  • Git clone or download package from website
  • +
+
# command for git
+git clone -b stable https://github.com/lammps/lammps.git mylammps
+
+
    +
  • We assume the package root path is <lammps-root>. Go into the source directory and build:
  • +
+
cd <lammps-root>/src
+#choose one of the following or both
+# build a serial LAMMPS executable
+make serial 
+# build a parallel LAMMPS executable with MPI
+make mpi        
+
+
    +
  • You will see the executable binary in src/lmp_serial or src/lmp_mpi
  • +
+

Packages and Extra Interfaces of LAMMPS

+
+

Tip

+

Contact Cluster Administrator if you need any uninstalled packages

+
+

General for Installing Package

+
    +
  • To install package of LAMMPS, just type make yes-<package name> for example, make yes-user-intel
  • +
+

Building USER-ATC Package

+
    +
  • Before you install this package by make yes-user-atc, you should install lib-atc which is a library for atc package
  • +
  • Go to the directory <LAMMPS root>/lib/atc, you can follow the instruction in the README. Remember to load module gcc and open mpi
  • +
+
cd <LAMMPS root>/lib/atc
+
+
    +
  • lib-atc needs the lapack and blas libraries installed. Check whether these libraries are installed with the commands:
  • +
+
#check for lapack library
+ldconfig -p | grep lapack
+#check for blas library
+ldconfig -p | grep blas
+
+
    +
  • If lapack and blas are installed, change the value of the EXTRAMAKE variable to Makefile.lammps.installed in the file Makefile.mpi.
  • +
+
EXTRAMAKE= Makefile.lammps.installed
+
+
    +
  • Make library by following command
  • +
+
make -f Makefile.mpi
+
+
    +
  • Make sure you have libatc.a and Makefile.lammps in your current directory
  • +
  • Back to directory <LAMMPS root>/src/ and type make mpi to compile mpi version of LAMMPS
  • +
+

Building Inteface with n2p2

+
    +
  • make sure you have shared library libnnpif-shared in your <path to n2p2>/lib/
  • +
  • export the following in your environmental variable(optional)
  • +
+
#export this if you use shared library, skip if you are using static library
+export LD_LIBRARY_PATH=<path to n2p2>/lib:${LD_LIBRARY_PATH}
+
+
    +
  • Go to LAMMPS root
  • +
+
cd <LAMMPS root>/
+ln -s <path to n2p2> lib/nnp
+cp -r <path to n2p2>/src/interface/LAMMPS/src/USER-NNP <LAMMPS root>/src
+cd <LAMMPS root>/src
+make yes-user-nnp
+make mpi
+
+

Building with Plumed

+
    +
  • Before you install, make sure the Plumed has installed
  • +
  • To directory <LAMMPS root>/src/
  • +
+
make lib-plumed args="-p <path to plumed directory>"
+make yes-user-plumed
+make mpi
+
+

DeePMD Installation Guide

+

Short Introduction

+

DeePMD-kit is a package written in Python/C++, designed to minimize the effort required to build deep learning based model of interatomic potential energy and force field and to perform molecular dynamics (MD). This brings new hopes to addressing the accuracy-versus-efficiency dilemma in molecular simulations. Applications of DeePMD-kit span from finite molecules to extended systems and from metallic systems to chemically bonded systems. Ref. Paper

+

Install Guide

+
    +
  • Here, we display the most easiest way to install DeePMD Code.
  • +
  • Make sure you have a GPU installed in your computer. Usually, you can check this via the GPU driver.
  • +
  • Install the anaconda3 from website. After you installed anaconda3, you can use conda command.
  • +
  • Install DeePMD with cpu or gpu version. Installation by this way will install lammps as well.
  • +
+
#install of cpu version
+conda install deepmd-kit=*=*cpu lammps-dp=*=*cpu -c deepmodeling
+#install of gpu version
+conda install deepmd-kit=*=*gpu lammps-dp=*=*gpu -c deepmodeling
+
+
    +
  • That's all for installation. Check the install package use command:
  • +
+
conda list | grep deep
+
+
    +
  • You will find four packages related with DeePMD code. You can now directly use command dp , lmp.
  • +
  • To test DeePMD Code. Download DeePMD code from github by:
  • +
+
git clone https://github.com/deepmodeling/deepmd-kit.git
+
+
    +
  • Go to the directory examples/water/train/
  • +
  • Test training by
  • +
+
dp train water_se_a.json
+
+

Install Guide of DeePMD

+

快速安装

+

n2p2 Installation Guide

+

Short Introduction

+

n2p2 is a machine learning code for training machine learning potentials. Its original paper is J. Behler and M. Parrinello, Phys. Rev. Lett. 98, 146401 (2007)

+

Install Guide

+
    +
  • Before Installation, make sure you have installed the Eigen Library and the GSL Library.
  • +
  • Make sure you have the gcc compiler (including gfortran); I haven't compiled it successfully with the Intel compiler. Make sure you have Open MPI (i.e. the mpic++ command).
  • +
  • Download the n2p2 code from github: https://github.com/CompPhysVienna/n2p2. For example, using the following command.
  • +
+
git clone https://github.com/CompPhysVienna/n2p2.git
+
+
    +
  • You can see a directory named n2p2, now go into that by:
  • +
+
cd n2p2/src
+
+
    +
  • Modify the configure file makefile.gnu
  • +
+
#modify this file, I just pick out the part you need to modify
+# Enter here paths to GSL or EIGEN if they are not in your standard include
+# path. DO NOT completely remove the entry, leave at least "./".
+PROJECT_GSL=<path to gsllib>/gsl/include/ # substitute <path> with real path
+PROJECT_EIGEN=<path to eigen>/eigen-eigen-323c052e1731 # substitute <path> with real path
+
+ ###############################################################################
+ # COMPILERS AND FLAGS
+ ###############################################################################
+PROJECT_CFLAGS=-O3 -march=native -std=c++11 -fopenmp -L<path to gsllib>/gsl/lib
+PROJECT_LDFLAGS_BLAS=-lblas -lgslcblas
+
+
    +
  • Save and quit this file, use the following command to compile code:
  • +
+
#choose one of the following command
+make MODE=shared # compile a binary with shared library
+make MODE=static # compile a binary with static library, I use this one
+
+
    +
  • After compiling successfully, you will have all the executable binaries in the n2p2/bin/ directory
  • +
  • Add n2p2/bin/ to your PATH environmental variable, you can easily use this. The most important binary is nnp-train, this is used for training.
  • +
  • Add n2p2 library to your LD_LIBRARY_PATH in .bashrc
  • +
+
export LD_LIBRARY_PATH=<Path to n2p2>/lib/:$LD_LIBRARY_PATH
+
+

Plumed Installation Guide

+

Short Introduction

+

PLUMED is an open-source, community-developed library that provides a wide range of different methods, which include:

+
    +
  • enhanced-sampling algorithms
  • +
  • free-energy methods
  • +
  • tools to analyze the vast amounts of data produced by molecular dynamics (MD) simulations.
  • +
+

These techniques can be used in combination with a large toolbox of collective variables that describe complex processes in physics, chemistry, material science, and biology.

+
+

Tip

+

I have installed one in cluster51. Use module load plumed/2.6.0 to use this library. The compiler version: for your information

+
+

Install Guide

+
    +
  • Download package from here.
  • +
  • Basic Configure
  • +
+
./configure --prefix=<path you want to install> LDFLAGS=-L'/share/apps/lib/fftw/3.3.8/lib' CPPFLAGS=-I'/share/apps/lib/fftw/3.3.8/include'
+
+
    +
  • Compile
  • +
+
make -j 32
+make install
+
+

Eigen Library Installation Guide

+

Short Introduction

+

Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms.

+

Install Guide

+
    +
  • Download the package from wiki:http://eigen.tuxfamily.org/index.php?title=Main_Page#Overview. For me, I choose the Eigen 3.3.7 released version.
  • +
+
wget http://bitbucket.org/eigen/eigen/get/3.3.7.tar.bz2
+
+
    +
  • Unpack this tar file by
  • +
+
tar -jxvf 3.3.7.tar.bz2
+
+
    +
  • You will have eigen-eigen-* directory in your computer
  • +
  • These are all steps you need to install eigen library
  • +
+

GSL Library Installation Guide

+

Short Introduction

+

The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. It is a free open source library under the GNU General Public License.

+

This guide is from: website tutorial

+
+

Tip

+

I have installed one in cluster51, in directory /share/apps/lib/gsl-2.6. The compiler version: for your information

+
+

Install Guide

+ +
wget ftp://ftp.gnu.org/gnu/gsl/gsl-latest.tar.gz
+
+
    +
  • Place the file in whatever directory you want to install and unpack the file with the following command:
  • +
+
tar -zxvf gsl-latest.tar.gz
+
+
    +
  • This will create a directory called gsl-*.* in your home directory. Change to this directory.
  • +
+
cd gsl-*.*
+
+
    +
  • The next step is to configure the installation and tell the system where to install the files. Create a directory to install your gsl package, say <Path to libgsl>/gsl with the following command
  • +
+
mkdir <Path to libgsl>/gsl
+
+
    +
  • Now configure the installation and tell it to use your new directory. This step may take a few minutes.
  • +
+
./configure --prefix=<Path to libgsl>/gsl
+
+
    +
  • If there are no errors, compile the library. This step will take several minutes.
  • +
+
make
+
+
    +
  • Now it is necessary to check and test the library before actually installing it. This step will take some time.
  • +
+
make check
+
+
    +
  • If there are no errors, go ahead and install the library with:
  • +
+
make install
+
+
    +
  • Now we can write a test program to see if the library works. Create the following program and name it example.c
  • +
+
#include <stdio.h>
+#include <gsl/gsl_sf_bessel.h>
+
+int
+main (void)
+{
+    double x = 15.0;
+    double y = gsl_sf_bessel_J0 (x);
+    printf ("J0(%g) = %.18e\n", x, y);
+    return 0;
+}
+
+
    +
  • Compile and link the program with the following commands (but use the correct path for your username):
  • +
+
gcc -Wall -I<Path to libgsl>/gsl/include -c example.c
+gcc -L<Path to libgsl>/gsl/lib example.o -lgsl -lgslcblas -lm
+
+
    +
  • Now run your program!
  • +
+
./a.out
+
+
    +
  • If it is successfully installed, it will print a number on your screen.
  • +
  • add libray path to LD_LIBRARY_PATH in .bashrc
  • +
+
export LD_LIBRARY_PATH=<path to libgsl>/lib:$LD_LIBRARY_PATH
+
+

Libxc Library Installation Guide

+
    +
  • Download the latest stable version of libxc from official website:
  • +
+
wget http://www.tddft.org/programs/libxc/down.php?file=4.3.4/libxc-4.3.4.tar.gz
+
+

FFTW Library Installation Guide

+

Short Introduction

+

FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data (as well as of even/odd data, i.e. the discrete cosine/sine transforms or DCT/DST).

+
+

Tip

+

I have installed one in cluster51, in directory /share/apps/lib/fftw/3.3.8. Use module load fftw/3.3.8 to use this library. The compiler version: for your information

+
+

Install Guide

+
    +
  • Download the release version from official website using wget
  • +
+
wget http://www.fftw.org/fftw-3.3.8.tar.gz
+
+
    +
  • Unzip the package
  • +
+
tar -xvf fftw-3.3.8.tar.gz
+
+
    +
  • Go to the directory fftw-3.3.8
  • +
+
./configure --prefix=<path to you want to install>    \
+            --enable-shared  \
+            --enable-threads \
+            --enable-sse2    \
+            --enable-avx     
+
+
    +
  • If configure is finished
  • +
+
make
+#check if you install finished
+make check
+#install to the final directory which you have set in --prefix
+make install
+
+

CP2K Installation Guide

+
    +
  • Download the release version from official website using wget like
  • +
+
wget https://github.com/cp2k/cp2k/releases/download/v6.1.0/cp2k-6.1.tar.bz2
+
+
    +
  • Unzip the cp2k package
  • +
+
tar -xvf cp2k-6.1.tar.bz2
+
+
    +
  • Go into directory cp2k-6.1/tools/toolchains/
  • +
  • Stop here! You should check your compiler version. If you are on a High Performance Cluster, please load the modules for the compiler and MPI/Open MPI
  • +
  • Note: for gcc version, gcc <= 7.4.0
  • +
  • Execute the following script to see the help message
  • +
+
./install_cp2k_toolchain.sh -h
+
+
    +
  • Choose which package you want to install before cp2k.
  • +
+

Some packages are essential for cp2k, please check this in the official web site

+
    +
  • the minimum required option is --with-openblas=install, if you want to compile successfully.
  • +
+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/DP-GEN/index.html b/en/wiki/software_usage/DP-GEN/index.html new file mode 100644 index 00000000..6ca37c9a --- /dev/null +++ b/en/wiki/software_usage/DP-GEN/index.html @@ -0,0 +1,3647 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DP-GEN使用入门 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

DP-GEN使用入门

+

简介

+

Deep Potential Generator (DP-GEN) 是一个将神经网络势能(machine learning potential)和主动学习(active learning)结合起来的工作流。该包主要由张林峰(普林斯顿大学),王涵(北京应用物理与计算数学研究所)开发。如有问题,可以向他们询问。

+
+

提示

+

考虑到 DP-GEN 在集群运行可能存在一定的性能问题,推荐尝试 ai2-kit 运行势函数训练的 Close Loop Learning (CLL) 任务。

+
+

以下为参考信息:

+ +
+

Warning

+

此页面仅限提供贡献者对于该软件的理解,如有任何问题请联系贡献者。建议在阅读此篇前先对DeePMD-kit有一定了解。
+指路:DeePMD-kit

+
+

DP-GEN的工作流是由以下三步组成的循环:

+
    +
  • 训练:DeePMD-kit同时训练 多条(一般是4条)参数初始化不同的势函数(GPU)。
  • +
  • 采样和筛选:基于训练得到的势函数和指定的初始结构利用LAMMPS进行classical MD,扩展构型空间。然后对MD中得到的构型依照特定指标(对某个构型用不同的势函数预测所得的原子力的标准差)进行筛选(GPU)。
  • +
  • 标记:将筛选所得的构型进行DFTMD单点能计算,得到力和能量,加入训练集进行新一轮的训练(51或52)。
  • +
+

输入文件

+

为了使dpgen运行起来,我们需要准备如下的文件:

+
    +
  • param.json
  • +
+

三步计算中所用的参数,具体指神经网络训练的参数,lammps中MD的参数和DFTMD计算单点能的参数。

+
    +
  • machine.json
  • +
+

制定上述三个步骤分别在哪个服务器计算。

+
+

Tip

+

在 Zeus 集群上配置 machine.json,请参阅GPU使用说明。

+
+
    +
  • 初始训练集数据
  • +
+

放在提交dpgen所在的服务器上,用于训练势函数,参照DeePMD-kit中方法生成。

+
    +
  • MD采样的初始结构
  • +
+

放在提交dpgen所在的服务器上,必须使用vasp5.x的POSCAR,把.xyz文件转化为POSCAR的脚本可见文末

+

输出文件

+

在提交dpgen的文件夹下会出现以下输出文件,用于指示任务运行的状况:

+
    +
  • dpgen.log
  • +
+

包括了运行轮数,单个任务提交的情况,采样准确度等详细的信息。

+
    +
  • record.dpgen
  • +
+

由多行 x y 组成,记录任务进程。其中x为运行的轮数(iteration),从0开始;y取0-8,其中0-2指代训练,3-5指代采样和筛选,6-8指代标记。

+

dpgen通过读取这个文件来决定从哪里重启计算,所以我们可以通过手动修改这个文件来决定重启的点。例如,在第x轮中我们发现采样的准确度过低,需要增加初始结构的数量重新跑MD,我们就可以把record.dpgen文件在x 2之后的内容删除,重新提交dpgen任务。

+
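
例如,一个已完成第 0 轮、第 1 轮训练正在进行的 record.dpgen 大致如下(数值仅为示意):

+
0 0
+0 1
+0 2
+0 3
+0 4
+0 5
+0 6
+0 7
+0 8
+1 0
+1 1
+
+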
    +
  • nohup.out
  • +
+

这个并不是必要输出,但是建议使用nohup命令把dpgen挂在后台运行。这个文件中输出的信息和dpgen.log的基本一致。

+

例子

+

接下来,把铂水界面势函数训练所用的param.json分解成几个部分进行解释,在实际使用中需要把几段放在一起。

+
+

comment

+

文件中的注释用_comment标注。

+
+

基本参数设置: params.json

+
param.json
{ 
+    "type_map": [        
+        "O", 
+        "H",
+        "Pt"
+    ], 
+    "mass_map": [ 
+        15.999,
+        1.0079,
+        195.08
+    ], 
+    "_comment": " atoms in your systems ",
+    "init_data_prefix": "/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh", 
+    "init_data_sys": [
+        "init/system-000","init/system-001"
+    ], 
+    "_comment": " path of training set ",
+    "init_batch_size": [
+        1,1
+    ], 
+    "sys_configs": [
+        ["/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh/init/configs/POSCAR_0[0-9]"],
+        ["/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh/init/configs/POSCAR_1[0-9]"]
+    ], 
+    "_comment": " path of initial structure for sampling ",
+    "sys_batch_size": [
+        1,1
+    ], 
+
+    ......
+}
+
+
    +
  • 势函数训练(DPMD)
  • +
+
param.json
  {
+      ......
+      "numb_models": 4, 
+      "_comment": " number of NNP for model deviation ",
+      "train_param": "input.json", 
+      "_comment": " name of automatically generated input file for DPMD ",
+      "default_training_param": {
+          "model": {
+          "descriptor": {
+          "type": "se_a",
+    "_comment": "could be bigger than the number of atoms of the very element",
+          "sel": [68, 136, 64], 
+          "rcut_smth": 0.50, 
+          "rcut": 5.00, 
+          "neuron": [25, 50, 100], 
+          "resnet_dt": false, 
+          "axis_neuron": 16,
+          "seed": 1
+          },
+          "fitting_net": {
+          "n_neuron": [240, 240, 240], 
+          "resnet_dt": true, 
+          "seed": 1
+          }},
+          "learning_rate": {
+          "type": "exp",
+          "start_lr": 0.005, 
+          "decay_steps": 2000,
+          "_comment": "last 20000 or 400000", 
+          "decay_rate": 0.95
+          },
+          "loss": {
+          "start_pref_e": 0.02, 
+          "limit_pref_e": 1, 
+          "start_pref_f": 1000, 
+          "limit_pref_f": 1, 
+          "start_pref_v": 0, 
+          "limit_pref_v": 0
+          },
+          "training": {
+          "systems": [ ], 
+          "set_prefix": "set", 
+          "stop_batch": 400000, 
+          "batch_size": 1, 
+          "seed": 1,
+          "disp_file": "lcurve.out", 
+          "disp_freq": 100, 
+          "numb_test": 4, 
+          "save_freq": 1000, 
+          "save_ckpt": "model.ckpt", 
+          "load_ckpt": "model.ckpt", 
+          "disp_training": true, 
+          "time_training": true, 
+          "profiling": false, 
+          "profiling_file": "timeline.json"
+          }},
+      "_comment": "modify according your systems!", 
+      ......
+  }
+
+
    +
  • 采样和筛选(Lammps)
  • +
+
param.json
{  
+    "model_devi_dt":            0.0005,
+    "_comment": "model_devi_dt: Timesteps for MD. Consistent with DFTMD!",
+    "model_devi_skip":          0,
+    "_comment": "model_devi_skip: the first x frames of the recorded frames",
+    "model_devi_f_trust_lo":    0.075,
+    "model_devi_f_trust_hi":    0.10,
+    "_comment": "modify according to the error distribution of system",
+    "model_devi_e_trust_lo":    1e10,
+    "model_devi_e_trust_hi":    1e10,
+    "model_devi_clean_traj":    false,
+    "model_devi_jobs": [
+    {"temps": [300,400],"sys_idx": [0,1],"trj_freq": 10,"nsteps":  2000,"ensemble": "nvt","_idx": 0},
+    {"temps": [300,400],"sys_idx": [0,1],"trj_freq": 10,"nsteps":  2000,"ensemble": "nvt","_idx": 1}
+    ],
+    "_comment": "sys_idx should correspond to sys_configs in the beginning",
+    "_comment": "add the _idx step by step",
+    "_comment": "modify nsteps and sys_idx based on model deviation accuracy",
+    ......
+}
+
+
    +
  • 标记(计算单点能,此处以CP2K为例,VASP的设置可在官方文档中查看)
  • +
+
param.json
{
+    ......
+    "fp_style":     "cp2k",
+    "shuffle_poscar":   false,
+    "fp_task_max":  200,
+    "_comment":         "the maximum number of stcs to calc.",
+    "fp_task_min":  5,
+    "fp_pp_path":   ".",
+    "fp_pp_files":  [],
+    "_comment":"the maximum number of stcs to calc.",
+     "_comment": "fp_params: modify according your systems!",
+    "fp_params": {
+        "FORCE_EVAL":{
+            "DFT":{
+                "BASIS_SET_FILE_NAME": "/data/kmr/BASIC_SET/BASIS_MOLOPT",
+                "POTENTIAL_FILE_NAME": "/data/kmr/BASIC_SET/GTH_POTENTIALS",
+                "MGRID":{
+                    "CUTOFF": 400
+                },
+                "QS":{
+                    "EPS_DEFAULT": 1.0E-13
+                },
+                "SCF":{
+                    "SCF_GUESS": "ATOMIC",
+                    "EPS_SCF": 1.0E-6,
+                    "MAX_SCF": 500,
+                    "ADDED_MOS": 500,
+                    "CHOLESKY": "INVERSE",
+                    "SMEAR":{"_": "ON",
+                        "METHOD": "FERMI_DIRAC",
+                        "ELECTRONIC_TEMPERATURE": 300
+                    },
+                    "DIAGONALIZATION":{
+                        "ALGORITHM": "STANDARD"
+                    },
+                    "MIXING":{
+                               "METHOD": "BROYDEN_MIXING",
+                               "ALPHA":   0.3,
+                               "BETA":    1.5,
+                               "NBROYDEN":  14
+                    }
+                },
+                "XC":{
+                        "XC_FUNCTIONAL":{"_": "PBE"},
+                        "XC_GRID":{
+                                "XC_SMOOTH_RHO": "NN50",
+                                "XC_DERIV": "NN50_SMOOTH"
+                        },
+                        "vdW_POTENTIAL":{
+                                "DISPERSION_FUNCTIONAL": "PAIR_POTENTIAL",
+                                "PAIR_POTENTIAL":{
+                                        "TYPE": "DFTD3",
+                                        "PARAMETER_FILE_NAME": "/data/kmr/BASIC_SET/dftd3.dat",
+                                        "REFERENCE_FUNCTIONAL": "PBE"
+                                }
+                        }
+                }
+           },
+            "SUBSYS":{
+                        "KIND":{
+                                "_": ["O", "H","Pt"],
+                                "POTENTIAL": ["GTH-PBE-q6", "GTH-PBE-q1","GTH-PBE-q10"],
+                                "BASIS_SET": ["DZVP-MOLOPT-SR-GTH", "DZVP-MOLOPT-SR-GTH","DZVP-A5-Q10-323-MOL-T1-DERIVED_SET-1"]
+                        }
+            }
+        }
+    }
+}
+
+
+

计算设置

+

CP2K的input中部分参数有默认设置写入,具体可参照cp2k.py。

+
+

指路:cp2k.py

+
+

计算设置

+

金属体系OT section需要手动关闭,具体见上方的设置。

+
+

任务提交设置: machine.json

+
+

从 DP-GEN 0.10.0 版本开始,官方引入了对 DPDispatcher 的支持,并计划将 machine.json 迁移到 DPDispatcher 上。DPDispatcher 相比原本 DP-GEN 自带的 Dispatcher,在接口和语法上有较大变化,需要额外指定 api_version 大于或等于 1.0。

+
+

关于 DPDispatcher 项目的说明,请参阅这里

+

DPDispatcher 相比旧版,基于配置字典而非文件Flag来管理所提交的任务,稳定性更优,且对作业管理系统的支持更加灵活多样,内置接口可支持多任务并行提交。但新版在操作习惯上有较大改变,需要适应和调整。

+

以 LSF 为例,对 machine.json 的写法举例如下,请留意以下的注意事项。

+
+

注意

+

train 部分和 model_devi 部分使用了对新版 LSF 提供支持的写法,即同时指定 gpu_usage 和 gpu_new_syntax 为 True,从而可在提交脚本中使用新版 LSF 的语法。

para_deg表示在同一张卡上同时运行的任务数,通常可不写出,此时默认值为1。这里给出的例子表示在同一张卡上同时运行两个Lammps任务。

fp 部分使用的是针对CPU计算使用的语法。

+
+
+
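
下面给出一个 LSF 下 model_devi 部分 resources 字段的片段示意(仅为示例,字段名与取值请以 DPDispatcher 官方文档和集群实际情况为准):

+
"resources": {
+    "number_node": 1,
+    "cpu_per_node": 4,
+    "gpu_per_node": 1,
+    "queue_name": "gpu",
+    "group_size": 1,
+    "kwargs": {
+        "gpu_usage": true,
+        "gpu_new_syntax": true
+    },
+    "para_deg": 2
+}
+
+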

注意

+

注意在fp部分,mpiexec.hydra需要明确写出以确保任务是并行执行的,可参考以下例子中的写法:mpiexec.hydra -genvall vasp_gam。若你不知道这部分该如何书写,请参考集群上的提交脚本说明(/data/share/base/scripts)。

+
+

若在191上向191上提交任务,可以考虑使用LocalContext,可以减少文件压缩传输的额外IO开销。

+
machine.json
{
+  "api_version": "1.0",
+  "train": [
+    {
+      "command": "dp",
+      "machine": {
+        "batch_type": "Slurm",
+        "context_type": "LocalContext",
+        "local_root": "./",
+        "remote_root": "/data/tom/dprun/train"
+      },
+      "resources": {
+        "number_node": 1,
+        "cpu_per_node": 1,
+        "gpu_per_node": 1,
+        "queue_name": "gpu3",
+        "group_size": 1,
+        "module_list": [
+          "deepmd/2.0"
+        ]
+      }
+    }
+  ],
+  "model_devi":[
+    {
+      "command": "lmp_mpi",
+      "machine":{
+        "batch_type": "Slurm",
+        "context_type": "SSHContext",
+        "local_root": "./",
+        "remote_root": "/data/jerry/dprun/md",
+        "remote_profile": {
+          "hostname": "198.76.54.32",
+          "username": "jerry",
+          "port": 6666
+        }
+      },
+      "resources": {
+        "number_node": 1,
+        "cpu_per_node": 1,
+        "gpu_per_node": 1,
+        "queue_name": "gpu2",
+        "group_size": 5,
+        "kwargs": {
+          "custom_gpu_line": [
+            "#SBATCH --gres=gpu:1g.10gb:1"
+          ]
+        },
+        "strategy": {"if_cuda_multi_devices": false},
+        "para_deg": 2,
+        "module_list": [
+          "deepmd/2.1"
+        ],
+        "source_list": []
+      }
+    }
+  ],
+  "fp":[
+    {
+      "command": "mpiexec.hydra -genvall cp2k.popt input.inp",
+      "machine":{
+        "batch_type": "Slurm",
+        "context_type": "SSHContext",
+        "local_root": "./",
+        "remote_root": "/data/jerry/dprun/fp",
+        "remote_profile": {
+          "hostname": "198.76.54.32",
+          "username": "jerry",
+          "port": 6666
+        }
+      },
+      "resources": {
+        "number_node": 2,
+        "cpu_per_node": 32,
+        "gpu_per_node": 0,
+        "queue_name": "c53-medium",
+        "group_size": 10,
+        "module_list": [
+          "intel/17.5.239",
+          "mpi/intel/2017.5.239",
+          "gcc/5.5.0",
+          "cp2k/7.1"
+        ]
+      }
+    }
+  ]
+}
+
+

相关参数含义,详情请参阅官方文档 +machine 和 +resources 部分的说明。

+

以下是部分参数含义:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
参数描述
machine指定远程服务器的配置信息。
batch_type提交作业系统的类型,可指定 LSF, Slurm, Shell 等。
context_type连接到远程服务器的方式,常用可选参数SSHContext, LocalContext, LazyLocalContext等。详见官方文档说明。
SSHContext通过SSH连接到远程主机,通常情况下从一个服务器提交到另一个时可使用。
LocalContext若需要在当前服务器上提交任务,可选择此选项,则不必通过SSH连接。此时 remote_profile 部分可不写。
remote_root任务在目标主机上提交的绝对路径。
remote_profile远程主机设置,若context_type为LocalContext或LazyLocalContext则可不写。
hostname远程主机IP。
username远程主机用户名。
password远程主机密码。若通过密钥登陆可不写。
portSSH连接的端口,默认为22。
key_filenameSSH密钥存放的路径。默认放在~/.ssh下,此时可不写。
passphrase密钥安全口令,通常在创建密钥时设置。若为空可不写。
resource作业提交相关配置信息。
number_node作业使用的节点数。
cpu_per_node每个节点上使用CPU核数。
gpu_per_node每个节点上使用GPU卡数。
kwargs可选参数,依据各作业系统支持的配置而定。详见官方文档。
custom_gpu_line自定义GPU提交命令,可根据语法自定义。根据作业管理系统不同,以 #BSUB (LSF) 或 #SBATCH (Slurm) 开头。文中的例子即在gpu2上使用MIG实例(1g.10gb)。
custom_flags其他需要使用的Flag,例如Walltime、作业名等设置。
queue_name任务提交的队列名。
group_size每个作业绑定的任务个数。
if_cuda_multi_devices是否允许任务运行在多卡上,默认为 True。在Zeus上建议写成 False
para_deg同一卡上同时运行的任务数。默认为1。
module_list需要load的module。可不写。
module_unload_list需要unload的module。可不写。
source_list需要source的脚本路径。可不写。
envs需要引入的环境变量。可不写。
+
+

登录设置

+

如果服务器是密码登录,在username之后加上关键词password并写上密码。输入的内容要用引号括起!

+
+

准备好所有的输入文件后,就可以用以下指令提交dpgen任务啦!

+

dpgen run param.json machine.json

+
+

提交任务

+

如果在191提交,需要在服务器上自行安装dpgen。具体做法见官方GitHub。一般来说运行如下命令即可:

+
pip install --user dpgen
+
+
+
+

Slurm获取状态异常问题的解决

+

若遇到以下报错,很大可能是因为Slurm暂时无法获取任务状态。由于旧版本DPDispatcher对这类波动导致的报错没有充分考虑,会直接退出:

+
RuntimeError: status command squeue fails to execute.job_id:13544 
+error message:squeue: error: Invalid user for SlurmUser slurm, ignored
+squeue: fatal: Unable to process configuration file
+
+

新版这一部分已经做了调整,但由于之前的版本空文件夹复制过程存在严重bug,请务必保证DPDispatcher版本在0.5.6以上。

+
pip install --upgrade --user dpdispatcher
+
+
+
+

支持

+

目前DP-GEN 0.11以上版本已经移除了旧版 dispatcher 的支持,推荐迁移到 DPDispatcher 上。为防止兼容性问题,这里仍保留了旧版的输入,请注意甄别。

+
machine_old.json
{
+  "train": [
+    {
+      "machine": {
+        "machine_type": "slurm",
+        "hostname": "123.45.67.89",
+        "port": 22,
+        "username": "kmr",
+        "work_path": "/home/kmr/pt-oh/train"
+      },
+      "resources": {
+        "node_gpu": 1,
+        "numb_node": 1,
+        "task_per_node": 1,
+        "partition": "large",
+        "exclude_list": [],
+        "source_list": [],
+        "module_list": [
+            "deepmd/2.1"
+        ],
+        "time_limit": "23:0:0"
+      },
+      "python_path": "/share/apps/deepmd/2.1/bin/python"
+    }
+  ],
+  "model_devi": [
+    {
+      "machine": {
+        "machine_type": "slurm",
+        "hostname": "123.45.67.89",
+        "port": 22,
+        "username": "kmr",
+        "work_path": "/home/kmr/pt-oh/dpmd"
+      },
+      "resources": {
+        "node_gpu": 1,
+        "numb_node": 1,
+        "task_per_node": 1,
+        "partition": "large",
+        "exclude_list": [],
+        "source_list": [],
+        "module_list": [
+            "deepmd/2.1"
+        ],
+        "time_limit": "23:0:0"
+      },
+      "command": "lmp_mpi",
+      "group_size": 80
+    }
+  ],
+  "fp": [
+    {
+      "machine": {
+        "machine_type": "slurm",
+        "hostname": "123.45.67.90",
+        "port": 6666,
+        "username": "kmr",
+        "work_path": "/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh/labelling"
+      },
+      "resources": {
+        "cvasp": false,
+        "task_per_node": 28,
+        "numb_node": 1,
+        "node_cpu": 28,
+        "exclude_list": [],
+        "with_mpi": true,
+        "source_list": [
+        ],
+        "module_list": [
+            "intel/17.5.239",
+            "mpi/intel/17.5.239",
+            "cp2k/6.1"
+        ],
+        "time_limit": "12:00:00",
+        "partition": "medium",
+        "_comment": "that's Bel"
+      },
+      "command": "cp2k.popt input.inp",
+      "group_size": 50 
+    }
+  ]
+}
+
+
+

训练集收集

+

DP-GEN代码迭代生成的训练集是分散储存的。可以用DP-GEN自带的collect函数进行数据收集。

+

首先可以使用dpgen collect -h 查看使用说明

+

常用用法是

+
dpgen collect JOB_DIR OUTPUT_DIR -p param.json
+
+

JOB_DIR就是DP-GEN的输出目录,包含有iter.0000*一系列的目录。OUTPUT_DIR就是收集的数据准备放到哪。param.json就是运行DP-GEN跑的param文件。

+

例如:

+
dpgen collect ./ ./collect -p param-ruo2.json
+
+

以上命令会把当前文件夹的DP-GEN数据收集好放入collect目录里。

+
init.000  init.001  sys.000  sys.001
+
+

init.*是初始训练集,sys.*是后来DP-GEN生成的训练集,按照param的sys分类。

+

Bonus!

+

常见报错问题(欢迎补充&修正)

+
    +
  • ... expecting value ...
  • +
+

可能是数组或者字典末尾多写了逗号

+
    +
  • ERROR: lost atoms ...
  • +
+

可能是Lammps算model_devi的时候因为势函数太差导致有原子重合而报错。可以手动在对应的单条轨迹的input.lammps中加入

+
  thermo_modify   lost ignore flush yes
+
+

然后在上一级文件夹下面手动提交任务

+

  bsub<*.sub
+
  • AssertionError

+

某个单点能计算中断后重新开始,导致cp2k的output中有重叠。可以在02.fp文件夹下用以下脚本进行检查:

import dpdata
+import glob
+l = glob.glob("task.002*")
+l.sort()
+stc = dpdata.LabeledSystem(l[0]+'/output',fmt='cp2k/output')
+for i in l[1:]:
+    print(i)
+    stc += dpdata.LabeledSystem(i+'/output',fmt='cp2k/output')
+

+

其中task.002.*代表遍历002system中的被标记的结构。如果不同系统的原子数相同,也可以直接用task.00*一次性检查所有的结构。

+
    +
  • 如果你发现 model deviation 从一开始就非常大,并且测试集的结构被打乱,有可能是在 param 文件中设置了"shuffle_poscar": true。该选项会随机打乱测试集原始 POSCAR 中的行,并用打乱后的结构进行 model deviation 测试。该选项主要用于打乱合金体系的结构;然而对于界面或者共价键连接的体系(如半导体),随机打乱原子将会使界面结构或者半导体结构变成混乱的一锅粥,没有任何化学含义,因此我们不需要(也不可以)进行 shuffle。请在 param 文件中设置:
    ...
    +"shuffle_poscar": false
    +...
    +
  • +
+

script from xyz to POSCAR

+

from ase.io import iread, write
+import ase.build
+
+for j in range(2):
+    i=0
+    for atoms in iread('./traj_'+str(j)+'.xyz', format='xyz'):
+        atoms.set_cell([11.246, 11.246, 35.94,90,90,90])
+        i=i+1
+        if i%20==0:
+            atoms=ase.build.sort(atoms)
+            ase.io.write('POSCAR_'+str(j)+'_'+str(int(i/20)-1), atoms, format='vasp',vasp5=True)
+
或者调用 ase.io.vasp 里的 write:

+
def write_vasp(filename, atoms, label=None, direct=False, sort=None,
+symbol_count=None, long_format=True, vasp5=False,
+ignore_constraints=False):
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/DeePMD-kit/index.html b/en/wiki/software_usage/DeePMD-kit/index.html new file mode 100644 index 00000000..1901a267 --- /dev/null +++ b/en/wiki/software_usage/DeePMD-kit/index.html @@ -0,0 +1,3720 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeePMD-kit 使用入门 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

DeePMD-kit 2.x 使用入门

+

简介

+

DeePMD-kit是一个训练神经网络势能(Machine Learning Potential)的代码包。该包主要由张林峰(普林斯顿大学),王涵(北京应用物理与计算数学研究所)开发。黄剑兴和庄永斌曾经短时间参与开发。如有问题,可以向他们询问。

+
+

Danger

+

我们已经舍弃了1.x版本的教程。

+
+

以下为参考信息:

+ +
+

Warning

+

此页面仅限提供贡献者对于该软件的理解,如有任何问题请联系贡献者

+
+

第一次尝试

+

运行第一次机器学习

+

如果你正在使用 Zeus 集群,请使用 slurm 脚本来提交 DeePMD-kit 任务。

+

请从 Github 下载 DeePMD-kit 的代码,我们将会使用里面的水模型做为例子。

+
git clone https://github.com/deepmodeling/deepmd-kit.git
+
+

首先进入含有水模型的例子的目录

+
cd <deepmd repositoy>/examples/water/se_e2_a/
+
+

你会看到input.json文件,这是DeePMD-kit使用的输入文件。现在复制/data/share/base/script/deepmd.lsf到当前文件夹,并且修改它。

+
cp /data/share/base/script/deepmd.lsf ./
+vim deepmd.lsf
+
+
+

Warning

+

如果调用的是1.0的版本,需要在learning_rate下加入decay_rate关键词,一般设为0.95.

+
+

你现在仅需要修改脚本中的输入文件名称,使其与当前例子中的输入文件 input.json 保持一致即可。

+
#!/bin/bash
+
+#BSUB -q gpu
+#BSUB -W 24:00
+#BSUB -J train
+#BSUB -o %J.stdout
+#BSUB -e %J.stderr
+#BSUB -n 8
+#BSUB -R "span[ptile=8]"
+# ============================================
+# modify the number of cores to use
+# according to the number of GPU you select
+# for example, 8 cores for one GPU card
+# while there are 32 cores in total
+# ============================================
+
+# add modulefiles
+module add deepmd/2.2.7
+
+# automatic select the gpu
+source /data/share/base/script/find_gpu.sh
+
+dp train input.json -l train.log
+
+

使用如下命令提交任务:

+
#submit your job
+bsub < deepmd.lsf
+#check your job by
+bjobs 
+
+

当任务执行中,当前目录会生成以下文件:

+
    +
  • train.log: 训练的记录文件
  • +
  • lcurve.out: 机器学习的学习曲线
  • +
  • model.ckpt.data-00000-of-00001, model.ckpt.index, checkpoint, model.ckpt.meta: 以上三个为训练存档点
  • +
+

非常好!已经成功开始第一次机器学习训练了!

+

浏览输出文件

+

使用 less 命令来浏览输出文件

+
less train.log
+
+

你将会看到如下内容

+
# DEEPMD: initialize model from scratch
+# DEEPMD: start training at lr 1.00e-03 (== 1.00e-03), final lr will be 3.51e-08
+2019-12-07 00:03:49.659876: I tensorflow/stream_executor/platform/default/dso_loader.cc:42] Successfully opened dynamic library libcublas.so.10.0
+# DEEPMD: batch     100 training time 5.95 s, testing time 0.18 s
+# DEEPMD: batch     200 training time 4.58 s, testing time 0.20 s
+# DEEPMD: batch     300 training time 4.56 s, testing time 0.14 s
+# DEEPMD: batch     400 training time 4.49 s, testing time 0.13 s
+# DEEPMD: batch     500 training time 4.60 s, testing time 0.14 s
+# DEEPMD: batch     600 training time 4.61 s, testing time 0.15 s
+# DEEPMD: batch     700 training time 4.43 s, testing time 0.18 s
+# DEEPMD: batch     800 training time 4.59 s, testing time 0.13 s
+# DEEPMD: batch     900 training time 4.41 s, testing time 0.17 s
+# DEEPMD: batch    1000 training time 4.66 s, testing time 0.11 s
+# DEEPMD: saved checkpoint model.ckpt
+# DEEPMD: batch    1100 training time 4.45 s, testing time 0.15 s
+# DEEPMD: batch    1200 training time 4.37 s, testing time 0.14 s
+
+

batch后面的数字表明程序已经放入了多少数据进行训练。这个数字的显示间隔,即100,是在输入文件的"disp_freq": 100 设置的。

+

现在来看看你的学习曲线 lcurve.out

+
less lcurve.out
+
+

你将会看到:

+
#  step      rmse_val    rmse_trn    rmse_e_val  rmse_e_trn    rmse_f_val  rmse_f_trn         lr
+      0      1.69e+01    1.58e+01      1.52e+00    5.69e-01      5.35e-01    5.00e-01    1.0e-03
+   1000      4.74e+00    4.68e+00      3.88e-02    4.02e-01      1.50e-01    1.48e-01    1.0e-03
+   2000      5.06e+00    3.93e+00      1.86e-01    1.54e-01      1.60e-01    1.24e-01    1.0e-03
+   3000      4.73e+00    4.34e+00      9.08e-02    3.90e-01      1.49e-01    1.37e-01    1.0e-03
+   4000      4.65e+00    6.09e+00      2.24e-01    1.92e-01      1.47e-01    1.93e-01    1.0e-03
+   5000      3.84e+00    3.25e+00      5.26e-02    2.40e-02      1.25e-01    1.06e-01    9.4e-04
+   6000      4.17e+00    2.78e+00      6.35e-02    3.89e-02      1.36e-01    9.03e-02    9.4e-04
+   7000      3.24e+00    3.00e+00      5.55e-02    8.58e-03      1.05e-01    9.76e-02    9.4e-04
+   8000      2.97e+00    2.83e+00      2.97e-02    2.46e-02      9.68e-02    9.22e-02    9.4e-04
+   9000      1.01e+01    6.92e+00      1.36e-01    1.89e-01      3.28e-01    2.25e-01    9.4e-04
+  10000      3.73e+00    3.39e+00      4.38e-02    3.23e-02      1.25e-01    1.14e-01    8.9e-04
+  11000      3.51e+00    2.76e+00      1.31e-01    3.47e-01      1.17e-01    8.98e-02    8.9e-04
+  12000      2.59e+00    2.89e+00      1.35e-01    1.18e-01      8.57e-02    9.65e-02    8.9e-04
+  13000      5.65e+00    4.68e+00      3.08e-01    3.28e-01      1.88e-01    1.55e-01    8.9e-04
+
+

这些数字展示了当前机器学习模型对于数据预测的误差有多大。rmse_e_trn 表示模型在训练集上预测能量的误差,rmse_e_val 表示模型在验证(测试)集上预测能量的误差;rmse_f_trn 和 rmse_f_val 含义相同,不过对应的是力的预测。你可以使用 Matplotlib Python 包进行作图。

+

使用进阶

+

准备训练数据

+

前半部分仅仅是让你运行DeePMD-kit进行训练。为了训练一个针对你的体系的模型,你需要自己来准备数据。这些数据都是第一性原理计算得到的数据。这些数据可以是单点能计算得到的数据,或者是分子动力学模拟得到的数据。作为数据集需要的数据有:

+
    +
  • 体系的结构文件:coord.npy
  • +
  • 体系的结构文件对应的元素标记:type.raw
  • +
  • 体系的结构文件对应的能量:energy.npy
  • +
  • 体系的结构文件对应的力:force.npy
  • +
  • 体系的结构文件对应的晶胞大小,如果是非周期性体系,请在训练文件里准备一个超大周期边界条件:box.npy
  • +
+

代码块里的文件名为DeePMD-kit使用的命名。npy后缀为Python的numpy代码包生成的文件,请在此之前学习numpy。如果你使用cp2k得到数据,你会有 *pos-1.xyz 和 *frc-1.xyz 文件,可以使用文末 Extra Support 部分提供的脚本将其转化成DeePMD-kit的数据集格式。

+

现在我们来看看DeePMD-kit的训练数据格式。之前我们训练的水模型的数据集储存在 <deepmd repository>/examples/water/data/data_0. 让我们来看看数据集的目录结构:

+
# directory structre for training data
+.
+├── data_0
+│   ├── set.000
+│      ├── box.npy
+│      ├── coord.npy
+│      ├── energy.npy
+│      └── force.npy
+│   ├── type.raw
+│   └── type_map.raw
+├── data_1
+│   ├── set.000
+│      ├── box.npy
+│      ├── coord.npy
+│      ├── energy.npy
+│      └── force.npy
+│   ├── set.001
+│      ├── box.npy
+│      ├── coord.npy
+│      ├── energy.npy
+│      └── force.npy
+│   ├── type.raw
+│   └── type_map.raw
+├── data_2
+│   ├── set.000
+│      ├── box.npy
+│      ├── coord.npy
+│      ├── energy.npy
+│      └── force.npy
+│   ├── type.raw
+│   └── type_map.raw
+└── data_3
+    ├── set.000
+       ├── box.npy
+       ├── coord.npy
+       ├── energy.npy
+       └── force.npy
+    ├── type.raw
+    └── type_map.raw
+
+

显然,我们会看到type.raw文件和一堆以set开头的目录。type.raw文件记录了体系的元素信息。如果你打开你会发现它仅仅记录了一堆数字。这些数字对应着你在water_se_a.json"type_map":["O","H"]的信息。此时0代表O,1代表H。对应着["O","H"]中的位置,其中第一位为0。

+
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+
+

box.npy, coord.npy, energy.npyforce.npy 储存的信息在上文已经说过。唯一需要注意的是这些文件都储存着一个超大的矩阵。如果我们有Y个结构,每个结构有X个原子。box.npy, coord.npy, energy.npyforce.npy 对应的矩阵形状分别是 (Y, 9), (Y, X*3), (Y, 1), (Y, X*3)。

+
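
可以用 numpy 简单检查准备好的数据形状是否符合上述要求(路径仅为示意):

+
import numpy as np
+
+coord  = np.load("data_0/set.000/coord.npy")
+box    = np.load("data_0/set.000/box.npy")
+energy = np.load("data_0/set.000/energy.npy")
+force  = np.load("data_0/set.000/force.npy")
+# 对于 Y 个结构、每个结构 X 个原子,预期形状依次为 (Y, X*3), (Y, 9), (Y,) 或 (Y, 1), (Y, X*3)
+print(coord.shape, box.shape, energy.shape, force.shape)
+
+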

设置你的输入文件

+

输入文件是json文件。你可以使用之前我们的json文件进行细微改动就投入到自己体系的训练中。这些需要修改的关键词如下:

+
    +
  • "type": "se_a": 设置描述符(descriptor)类型。一般使用se_a。
  • +
  • "sel": [46, 92]: 设置每个原子的截断半径内所拥有的最大原子数。注意这里的两个数字46,92分别对应的是O原子和H原子。与你在type_map里设置的元素类型是相对应的。
  • +
+

"descriptor" :{
+         "type":     "se_a",
+         "sel":      [46, 92],
+         "rcut_smth":    0.50,
+         "rcut":     6.00,
+         "neuron":       [25, 50, 100],
+         "resnet_dt":    false,
+         "axis_neuron":  16,
+         "seed":     1,
+         "_comment":     " that's all"
+     },
+
在"training"的"training_data"下:
- "systems": ["../data/data_0/", "../data/data_1/", "../data/data_2/"]: 设置包含训练数据的目录。
- "batch_size": "auto": 这个会根据体系原子数自动分配;不过我们自己通常设置为1,因为体系原子数有400-800个左右。

+

    "training_data": {
+        "systems":      ["../data/data_0/", "../data/data_1/", "../data/data_2/"],
+        "batch_size":   "auto",
+        "_comment":     "that's all"
+    }
+
在"training"的"validation_data"下:
- "systems": ["../data/data_3"]: 设置包含验证(测试)数据的目录。
- "batch_size": 1: 这个会根据体系原子数进行分配;不过我们自己通常设置为1,因为体系原子数有400-800个左右。
- "numb_btch": 3: 每次迭代中,测试的结构数量为 batch_size 乘以 numb_btch。
- 更多参数说明,请参考官方文档:https://deepmd.readthedocs.io/en/latest/train-input.html

+
+

Warning

+

记住在集群上训练,请使用lsf脚本。

+
+

开始你的训练

+

使用如下命令开始:

+
dp train input.json
+
+
+

Warning

+

记住在集群上训练,请使用 Slurm 脚本。

+
+

重启你的训练

+

使用以下命令重启:

+
dp train input.json --restart model.ckpt
+
+
+

Warning

+

记住在集群上训练,请使用 Slurm 脚本。

+
+

使用生成的势能函数进行分子动力学(MD)模拟

+

当我们完成训练之后,我们需要根据节点文件(model.ckpt*)冻结(Freeze)出一个模型来。

+

利用如下命令,可以冻结模型:

+
dp freeze
+
+

你将会得到一个*.pb文件。利用此文件可以使用LAMMPS, ASE, CP2K 等软件进行分子动力学模拟。

+

利用压缩模型进行产出(Production)

+

机器学习势能*.pb文件进行MD模拟虽然已经非常迅速了。但是还有提升的空间。首先我们需要用2.0以上版本的deepmd进行训练势能函数,并得到*.pb文件。利用1.2/1.3版本的deepmd训练得到势能函数也不用担心,可以利用以下命令对旧版本的势能函数进行转换。例如想要从1.2转换的话:

+
dp convert-from 1.2 -i old_frozen_model.pb -o new_frozen_model.pb
+
+
+

关于兼容性的说明

+

关于目前势函数的兼容性,请参考官方文档。目前DeePMD-kit支持从 v0.12, v1.0, v1.1, v1.2, v1.3 版本到新版本的转换。

+
+

建议将原训练文件夹备份后复制,我们利用如下命令进行压缩(文件夹下应该含有对应的input.json文件和checkpoint文件):

+
module load deepmd/2.0-cuda11.3
+dp compress -i normal-model.pb -o compressed-model.pb -l compress.log
+
+
+

适用范围

+

注意模型压缩仅适用于部分模型,如 se_e2_a, se_e3, se_e2_r 和上述模型的 Hybrid 模型。

+

若使用其他模型,如 se_atten 描述符 (DPA-1),模型压缩尚未被支持,可能会报错。

+

另外请注意,压缩模型是通过使用 5 次多项式拟合 Embedding-net 从而换取性能提升,这一改动 几乎 不会对预测精度产生影响,但实际上部分牺牲了精度。因而使用时请务必注意观察默认参数是否适用于当前体系,如是否出现误差漂移,并针对性地修改参数,如拟合时采用的步数 --step。

+
+

压缩模型与原始模型对比

+

测试2080Ti, 显存11G

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
体系 | 原子数 | 提速前 (ns/day) | 提速后 (ns/day) | 提升倍率
LiGePS | 5000 | 0.806 | 3.569 | 4.42
SnO2/water interface | 6021 | 0.059 | 0.355 | 6.01
SnO2/water interface | 5352 | 0.067 | 0.382 | 5.70
SnO2/water interface | 2676 | 0.132 | 0.738 | 5.59
SnO2/water interface | 1338 | 0.261 | 1.367 | 5.23
SnO2/water interface | 669 | 0.501 | 2.236 | 4.46
LiGePS | 400 | 7.461 | 23.992 | 3.21
Cu | 1313 | 51.268 | 65.944 | 1.28
+

SnO2/water interface: 原始模型Maximum 6021 ——> 压缩模型Maximum 54189个原子

+

Trouble Shooting

+

warning: loc idx out of lower bound

+

Solution: https://github.com/deepmodeling/deepmd-kit/issues/21

+

ValueError: NodeDef missing attr 'T' from ...

+

当一个模型使用 deepmd/1.2 训练,但是用更高版本的 deepmd-kit (> v1.3) 进行 lammps 任务的时候经常会报这个错,例子:

+ +

但是,现在发现这个报错在压缩 v1.3 版本模型的时候也会出现。使用下列命令:

+
dp compress ${input} --checkpoint-folder ${ckpt} 1.3-model.pb -o compressed-model.pb -l compress.log
+
+

其中${input}${ckpt}分别是对应模型的输入脚本所在路径和检查点目录。在这个例子里,我们仅把需要压缩的模型复制到了工作文件夹下,输入脚本所在路径和检查点目录人工指认。至于为什么这样会报错 ‘ValueError’,目前还没有找到原因。

+

因此,我们建议 备份之前的训练文件夹,在训练文件夹的一个 copy 下进行压缩任务

+

Extra Support

+

Script for conversion from cp2k xyz to numpy set

+
from ase.io import read
+import numpy as np
+import os, sys
+import glob
+import shutil
+
+
+#############################
+# USER INPUT PARAMETER HERE #
+#############################
+
+# input data path here, string, this directory should contains
+#   ./data/*frc-1.xyz ./data/*pos-1.xyz
+data_path = "./data"
+
+#input the number of atom in system
+atom_num = 189
+
+#input cell paramter here
+cell = [[10.0,0,0],[0,10.0,0],[0,0,10.0]]
+
+# conversion unit here, modify if you need
+au2eV = 2.72113838565563E+01
+au2A = 5.29177208590000E-01
+
+
+####################
+# START OF PROGRAM #
+####################
+
+def xyz2npy(pos, atom_num, output, unit_convertion=1.0):
+    total = np.empty((0,atom_num*3), float)
+    for single_pos in pos:
+        tmp=single_pos.get_positions()
+        tmp=np.reshape(tmp,(1,atom_num*3))
+        total = np.concatenate((total,tmp), axis=0)
+    total = total * unit_convertion
+    np.save(output, total)
+
+def energy2npy(pos, output, unit_convertion=1.0):
+     total = np.empty((0), float)
+     for single_pos in pos:
+         tmp=single_pos.info.pop('E')
+         tmp=np.array(tmp,dtype="float")
+         tmp=np.reshape(tmp,1)
+         total = np.concatenate((total,tmp), axis=0)
+     total = total * unit_convertion
+     np.save(output,total)
+
+def cell2npy(pos, output, cell, unit_convertion=1.0):
+    total = np.empty((0,9),float)
+    frame_num = len(pos)
+    cell = np.array(cell, dtype="float")
+    cell = np.reshape(cell, (1,9))
+    for frame in range(frame_num):
+        total = np.concatenate((total,cell),axis=0)
+    total = total * unit_convertion
+    np.save(output,total)
+
+def type_raw(single_pos, output):
+    element = single_pos.get_chemical_symbols()
+    element = np.array(element)
+    tmp, indice = np.unique(element, return_inverse=True)
+    np.savetxt(output, indice, fmt='%s',newline=' ')
+
+
+# read the pos and frc
+data_path = os.path.abspath(data_path)
+pos_path = os.path.join(data_path, "*pos-1.xyz")
+frc_path = os.path.join(data_path, "*frc-1.xyz")
+#print(data_path)
+pos_path = glob.glob(pos_path)[0]
+frc_path = glob.glob(frc_path)[0]
+#print(pos_path)
+#print(frc_path)
+pos = read(pos_path, index = ":" )
+frc = read(frc_path, index = ":" )
+
+# numpy path
+set_path = os.path.join(data_path, "set.000")
+if os.path.isdir(set_path):
+    print("detect directory exists\n now remove it")
+    shutil.rmtree(set_path)
+    os.mkdir(set_path)
+else:
+    print("detect directory doesn't exist\n now create it")
+    os.mkdir(set_path)
+type_path = os.path.join(data_path, "type.raw")
+coord_path = os.path.join(set_path, "coord.npy")
+force_path = os.path.join(set_path, "force.npy")
+box_path = os.path.join(set_path, "box.npy")
+energy_path = os.path.join(set_path, "energy.npy")
+
+
+#tranforrmation
+xyz2npy(pos, atom_num, coord_path)
+xyz2npy(frc, atom_num, force_path, au2eV/au2A)
+energy2npy(pos, energy_path, au2eV)
+cell2npy(pos, box_path, cell)
+type_raw(pos[0], type_path)
+
+

升级到DeePMD-kit 2.0

+

目前 DeePMD-kit 2.0 正式版已经发布,相比旧版已有众多提升,且压缩模型为正式版特性。目前我们集群上已安装 DeePMD-kit 2.0.3。

+

输入文件

+

DeePMD-kit 2.0 相比 1.x 在输入文件上做了一定改动,以下给出一个 DeePMD-kit 2.0 输入文件的例子:

+
{
+    "_comment": " model parameters",
+    "model": {
+        "type_map": [
+            "O",
+            "H"
+        ],
+        "descriptor": {
+            "type": "se_e2_a",
+            "sel": [
+                46,
+                92
+            ],
+            "rcut_smth": 0.50,
+            "rcut": 6.00,
+            "neuron": [
+                25,
+                50,
+                100
+            ],
+            "resnet_dt": false,
+            "axis_neuron": 16,
+            "seed": 1,
+            "_comment": " that's all"
+        },
+        "fitting_net": {
+            "neuron": [
+                240,
+                240,
+                240
+            ],
+            "resnet_dt": true,
+            "seed": 1,
+            "_comment": " that's all"
+        },
+        "_comment": " that's all"
+    },
+    "learning_rate": {
+        "type": "exp",
+        "decay_steps": 5000,
+        "start_lr": 0.001,
+        "stop_lr": 3.51e-8,
+        "_comment": "that's all"
+    },
+    "loss": {
+        "type": "ener",
+        "start_pref_e": 0.02,
+        "limit_pref_e": 1,
+        "start_pref_f": 1000,
+        "limit_pref_f": 1,
+        "start_pref_v": 0,
+        "limit_pref_v": 0,
+        "_comment": " that's all"
+    },
+    "training": {
+        "training_data": {
+            "systems": [
+                "../data/data_0/",
+                "../data/data_1/",
+                "../data/data_2/"
+            ],
+            "batch_size": "auto",
+            "_comment": "that's all"
+        },
+        "validation_data": {
+            "systems": [
+                "../data/data_3"
+            ],
+            "batch_size": 1,
+            "numb_btch": 3,
+            "_comment": "that's all"
+        },
+        "numb_steps": 1000000,
+        "seed": 10,
+        "disp_file": "lcurve.out",
+        "disp_freq": 100,
+        "save_freq": 1000,
+        "_comment": "that's all"
+    },
+    "_comment": "that's all"
+}
+
+

DeePMD-kit 2.0 提供了对验证集(Validation Set)的支持,因而用户可指定某一数据集作为验证集,并输出模型在该数据集上的误差。相比旧版而言,新版输入文件参数的具体含义变化不大,除了对数据集的定义外,大部分参数含义保持一致。

+

以下列出一些需要注意的事项:

+
    +
  1. 训练数据集不再直接写在 training 下,而是写在 training 的子键 training_data 下,格式如下所示: +
    "training_data": {
    +         "systems": [
    +             "../data/data_0/",
    +             "../data/data_1/",
    +             "../data/data_2/"
    +         ],
    +         "batch_size": "auto"
    +     }
    +
    + 默认情况下,每一训练步骤中,DeePMD-kit随机从数据集中挑选结构加入本轮训练,这一步骤加入数据的多少取决于 batch_size 的大小,此时,各 system 中数据被使用的概率是均等的。 + 若希望控制各 system 数据的权重,可使用 auto_prob 来控制,其参数选项如下所示
      +
    • prob_uniform: 各 system 数据权重均等。
    • +
    • prob_sys_size: 各 system 数据的权重取决于其各自的大小。
    • +
    • prob_sys_size: 写法示例如下:sidx_0:eidx_0:w_0; sidx_1:eidx_1:w_1;...。 该参数中,sidx_ieidx_i 表示第 i 组数据的起止点,规则同 Python 语法中的切片,w_i 则表示该组数据的权重。在同一组中,各 system 数据的权重取决于各自的大小。 + batch_size 的值可手动设定,根据经验一般根据“乘以原子数≤32”的规则设定。新版则支持自动设定,若设定为"auto"则表示按照此规则自动设置,若设定为"auto:N"则根据“乘以原子数≤N”的规则设定。
    • +
    +
  2. +
  3. save_ckpt, load_ckpt, decay_rate 等为过时参数,若由 1.x 迁移,请删除这些参数,否则会导致报错。
  4. +
  5. n_neuron 更名为 neuronstop_batch 更名为 numb_steps,请注意更改。对应地,decay rate 由 start_lrstop_lr 决定。
  6. +
  7. lcurve.out 中删除了测试数据的 RMSE 值,因此旧版作图脚本需要对应修改,减少列数(能量在第3列,力在第4列)。若指定了验证集,则会输出模型在验证集上的 RMSE。
  8. +
+

更多详细说明,请参见官方文档

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/ECINT Tutorial/user/index.html b/en/wiki/software_usage/ECINT Tutorial/user/index.html new file mode 100644 index 00000000..e3bda767 --- /dev/null +++ b/en/wiki/software_usage/ECINT Tutorial/user/index.html @@ -0,0 +1,3176 @@ + + + + + + + + + + + + + + + + + + + + + + + ECINT 的使用 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

ECINT 的使用

+

安装与配置

+

在使用 ECINT 前,需安装并配置好 aiida-core 与 aiida 插件,不过也可以在 hydrogen 上体验已配置好的环境

+

如何进入 hydrogen

+
    +
  1. +

    联系集群管理员,将你的公钥放到 hydrogen 上

    +
  2. +
  3. +

    在海洋楼网络环境下,通过以下命令可进入 hydrogen

    +
  4. +
+
ssh -p 8099 chenglab@10.24.3.144
+
+
+

建议在用工作流时,先在 ~/users 下建立一个以自己名字命名的工作目录,users/public.data 为 51/52 的 /public.data

+
+

输入文件

+

在想要运行工作流的工作路径下准备一个 .json 输入文件,示例如下 (要用 ",而不是 '):

+
{
+  "workflow": "NebWorkChain",
+  "webhook": "https://oapi.dingtalk.com/robot/send?access_token=xxxxxx",
+  "resdir": "results",
+  "structure": ["ethane_1_opt.xyz", "ethane_s1.xyz", "ethane_ts.xyz", "ethane_s2.xyz"],
+  "cell": [12, 12, 12],
+  "metadata": {
+    "kind_section": {
+      "BASIS_SET": "TZV2P-GTH",
+      "POTENTIAL": "GTH-PBE"
+    }
+  }
+}
+
+

或者也可以用 .yaml 输入文件,示例如下(- 与 ethane_1_opt.xyz 之间不要漏了空格):

+
workflow: NebWorkChain
+webhook: https://oapi.dingtalk.com/robot/send?access_token=xxxxxx
+resdir: results
+structure:
+  - ethane_1_opt.xyz
+  - ethane_s1.xyz
+  - ethane_ts.xyz
+  - ethane_s2.xyz
+cell: [12, 12, 12]
+metadata:
+  kind_section:
+    BASIS_SET: TZV2P-GTH
+    POTENTIAL: GTH-PBE
+
+
+

更多输入的例子在 https://github.com/chenggroup/ecint/tree/develop/example

+
+

各关键词解释

+
    +
  • +

    workflow (必填): workflow 的名字,具体可选的请见可选用的 workflow 部分

    +
  • +
  • +

    webhook (选填): 钉钉机器人 webhook,当工作流完成时想要即时收到钉钉提醒时可设置,否则可不用

    +
  • +
  • +

    resdir (选填, default: 当前所在路径): 结果文件的储存路径

    +
  • +
  • +

    structure/structures_folder (必填其中之一): 仅输入一个结构时,structure 为结构文件的路径 (非列表),对于 neb 这种需要多个输入结构的,structure 为结构文件路径的列表。如果批量进行计算,则把批量的结构所在文件夹加入 structures_folder (暂不支持 neb)

    +
  • +
  • +

    cell (选填): 设置了 cell 后会改变那些结构中不包含 cell 信息的 cell。如果用的是 .xyz 格式,一般需要设置 cell (因为 .xyz 一般不包含 cell 的信息),.cif or POSCAR(.vasp) 则不需要设置。cell 的格式与 ase 中的 cell 格式保持一致,如 [12, 12, 12] 或 [[12, 0, 0], [0, 12, 0], [0, 0, 12]] 或 [12, 12, 12, 90, 90, 90]

    +
  • +
  • +

    metadata (选填):

    +
  • +
+
+

以下参数可不填,对于不同的 workflow 均有不同的默认值

+
+
    +
  • +

    config: 可以为 dict, .json, .yaml,表示 cp2k 输入参数的基本设置,以 dict 的形式来表示 cp2k 输入,一些细致的设置,如计算精度,可在此处修改,也可通过 cp2k 输入文件进行转化。无特殊需求可不更改。config 的示例如下:

    +
  • +
  • +

    kind_section: 配置 BASIS_SET 和 POTENTIAL 的基本信息,可以有四种输入形式

    +
    +

若设置了 kind_section 的话,需同时设置 BASIS_SET 和 POTENTIAL。如果按元素来指定了 BASIS_SET 和 POTENTIAL 的话,需要指定所有元素的设置。设置比较复杂的话推荐以文件的方式 (下面的第四种方法) 来引用 kind_section

    +
    +
      +
    • ```python + # .json + "kind_section": {"BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}
    • +
    +

    # or .yaml + kind_section: + BASIS_SET: TZV2P-GTH + POTENTIAL: GTH-PBE + ```

    +
      +
    • ```python + # .json + "kind_section": {"H": {"BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}, "O": {"BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}, ...}
    • +
    +

    # or .yaml + kind_section: + H: + BASIS_SET: TZV2P-GTH + POTENTIAL: GTH-PBE + O: + BASIS_SET: TZV2P-GTH + POTENTIAL: GTH-PBE + ... + ```

    +
      +
    • ```python + # .json + "kind_section": [{"": "H", "BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}, {"": "O", "BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}, ...]
    • +
    +

    # or .yaml + kind_section: + - _: H + BASIS_SET: TZV2P-GTH + POTENTIAL: GTH-PBE + - _: O + BASIS_SET: TZV2P-GTH + POTENTIAL: GTH-PBE + ... + ```

    +
      +
    • ```python + # <> example + kind_section: + H: + BASIS_SET: TZV2P-GTH + POTENTIAL: GTH-PBE + O: + BASIS_SET: TZV2P-GTH + POTENTIAL: GTH-PBE + ...
    • +
    +

    # .json + "kind_section": "<>" # YOUR_KIND_SECTION_FILE can be .json or .yaml

    +

    # or .yaml + kind_section: <> # .json or .yaml + ```

    +
  • +
  • +

    machine: 选择配置好的服务器 (目前仅支持 cp2k@aiida_test) 以及配置资源的使用情况

    +
    // example
    +{
    +    "code@computer": "cp2k@aiida_test",
    +    "nnode": 2,
    +    "queue": "medium"
    +}
    +
    +
      +
    • code@computer: 配置好的 aiida 服务器 (目前仅支持 cp2k@aiida_test)
    • +
    • nnode/nprocs/n (选填其中之一): 使用服务器节点数/使用服务器核数/使用服务器核数
    • +
    • walltime/max_wallclock_seconds/w (选填其中之一): 强制终止计算时间,单位 s
    • +
    • queue/queue_name/q (选填其中之一): 服务器队列名
    • +
    • ptile: 每节点至少需使用的核数,默认值为每节点的核数
    • +
    +
  • +
  • +

    ...: some parameters for special workflow

    +
  • +
  • +

    subdata (选填):

    +
  • +
+
+

用于修改多步工作流中,每步工作流的 config, kind_section, machine, 其设置会覆盖掉 metada 中的相关设置。

+

e.g. NebWorkChain 由三部分组成: geoopt, neb, frequency. 若输入如下:

+
workflow: NebWorkChain
+webhook: https://oapi.dingtalk.com/robot/send?access_token=xxx  # your own webhook
+resdir: results_yaml
+structure:
+  - ethane_1_opt.xyz
+  - ethane_s1.xyz
+  - ethane_ts.xyz
+  - ethane_s2.xyz
+cell:
+  - [12, 0, 0]
+  - [0, 12, 0]
+  - [0, 0, 12]
+metadata:
+  kind_section:
+    BASIS_SET: DZVP-MOLOPT-SR-GTH
+    POTENTIAL: GTH-PBE
+subdata:
+  geoopt:
+    kind_section:
+      BASIS_SET: TZV2P-MOLOPT-GTH
+      POTENTIAL: GTH-PBE
+
+

geoopt 部分的 kind_section 会被更新为 {"BASIS_SET": "TZV2P-MOLOPT-GTH", "POTENTIAL": "GTH-PBE"} ,而 nebfrequency 部分的 kind_section 则与 metadata 中的保持一致。

+
+
    +
  • <>:
      +
    • config: 见 metadata
    • +
    • kind_section: 见 metadata
    • +
    • machine: 见 metadata
    • +
    +
  • +
  • <>:
      +
    • config
    • +
    • kind_section
    • +
    • machine
    • +
    +
  • +
  • ...
  • +
+

可选用的 workflow

+

输出的基本信息在 results.dat 中,以下 workflow 中仅说明除了 results.dat 外的输出文件

+

EnergySingleWorkChain

+
+

Just single point energy

+
+
    +
  • 输入默认值:
  • +
  • config: energy.json
  • +
  • kind_section: {"BASIS_SET": "DZVP-MOLOPT-SR-GTH", "POTENTIAL": "GTH-PBE"}
  • +
  • machine: {"code@computer": "cp2k@aiida_test", "nnode": 1, "walltime": 12 * 60 * 60, "queue": "medium"}
  • +
  • 其他输出:
  • +
  • 包含能量信息的结构: coords.xyz
  • +
+

GeooptSingleWorkChain

+
+

Just geometry optimization

+
+
    +
  • 输入默认值:
  • +
  • config: geoopt.json
  • +
  • kind_section: {"BASIS_SET": "DZVP-MOLOPT-SR-GTH", "POTENTIAL": "GTH-PBE"}
  • +
  • machine: {"code@computer": "cp2k@aiida_test", "nnode": 1, "walltime": 12 * 60 * 60, "queue": "medium"}
  • +
  • 其他输出:
  • +
  • 结构优化完后的结构: structure_geoopt.xyz
  • +
+

NebSingleWorkChain

+
+

Just CI-NEB

+
+
    +
  • 输入默认值:
  • +
  • config: neb.json
  • +
  • kind_section: {"BASIS_SET": "DZVP-MOLOPT-SR-GTH", "POTENTIAL": "GTH-PBE"}
  • +
  • machine: {"code@computer": "cp2k@aiida_test", "nnode": number_of_replica, "queue": "large"}
  • +
  • 其他输出:
  • +
  • 包含始终态及中间态的 trajectory: images_traj.xyz
  • +
  • 势能曲线: potential_energy_curve.png
  • +
  • 过渡态结构: transition_state.xyz
  • +
+

FrequencySingleWorkChain

+
+

Just vibrational analysis

+
+
    +
  • 输入默认值:
  • +
  • config: frequency.json
  • +
  • kind_section: {"BASIS_SET": "DZVP-MOLOPT-SR-GTH", "POTENTIAL": "GTH-PBE"}
  • +
  • machine: {"code@computer": "cp2k@aiida_test", "nnode": 4, "queue": "large"}
  • +
  • 其他输出:
  • +
  • 振动频率的值: frequency.txt
  • +
+

NebWorkChain

+
+

Geoopt for initial and final state → NEB → Vibrational analysis

+
+
    +
  • 输入默认值:
  • +
  • geoopt: {default value in GeooptSingleWorkChain}
  • +
  • neb: {default value in NebSingleWorkChain}
  • +
  • frequency: {default value in FrequencySingleWorkChain}
  • +
  • 其他输出:
  • +
  • all outputs of GeooptSingleWorkChain, NebSingleWorkChain and FrequencySingleWorkChain
  • +
+

CP2K input 转 config

+

使用工具 inp2config 可将 cp2k 输入文件转成 config 所需的形式, <<CP2K_INP>> 为 cp2k 输入文件路径 <<CONFIG>> 为输出的 config 文件路径,后缀为 .json/.yaml:

+
inp2config <<CP2K_INP>> <<CONFIG>>
+# e.g.
+inp2config input.inp config.yaml
+
+

要根据 cp2k 输入文件一并生成 kind_section 的输入设置, <<KIND_SECTION>> 为输出的 kind_section 路径,后缀为 .json/.yaml:

+
inp2config <<CP2K_INP>> <<CONFIG>> -k <<KIND_SECTION>>
+# e.g.
+inp2config input.inp config.yaml -k kind_section.yaml
+
+

提交任务

+

运行以下命令即可提交工作流,<<YOUR_INPUT_FILE>> 为 .json 或 .yaml 输入文件的路径,缺省值为当前路径下的 ecint.json:

+
ecrun <<YOUR_INPUT_FILE>>
+
+

推送

+

计算完成的截图如下:

+

image-20200804224518088

+

计算出错的截图如下:

+

image-20200805150759298

+

常见错误

+

读取结构文件错误

+
  File "xxx/lib/python3.7/site-packages/ase/io/formats.py", line 599, in read
+    io = ioformats[format]
+KeyError: 'coord'
+
+

错误原因: 无法识别扩展名

+

解决方案: 注意扩展名,使用正确的扩展名,如 .xyz, .cif, POSCAR 可用 POSCAR.vasp

+

读取 xyz 错误

+
ase.io.extxyz.XYZError: ase.io.extxyz: Expected xyz header but got: invalid literal for int() with base 10: ...
+
+

错误原因: xyz 文件格式错误,xyz 文件第一行是所有原子个数,第二行是注释行(可空着),第三行开始才是坐标

+

解决方案: 如果第一行开始就是坐标的话,需要在前面加上原子个数 (如 180) 的行以及一个空行

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/MDAnalysis/index.html b/en/wiki/software_usage/MDAnalysis/index.html new file mode 100644 index 00000000..4a15c54a --- /dev/null +++ b/en/wiki/software_usage/MDAnalysis/index.html @@ -0,0 +1,3160 @@ + + + + + + + + + + + + + + + + + + + + + + + + + MDAnalysis 软件包的使用 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

MDAnalysis 软件包的使用

+

我是否需要使用MDAnalysis

+

MDAnalysis是一个处理分子动力学模拟轨迹的python软件包。它最为突出的优点是全面的轨迹io方法,可以处理常见分子动力学模拟的输出轨迹格式。同时,MDAnalysis的io理念使其更加适合作为对大轨迹文件逐帧进行统计分析的工具。该软件内置了很多分子动力学模拟分析方法,所以你可以用它轻松地实现一些例行分析,比如径向分布函数(RDF)、水密度(number density)和氢键分析等。除了内置方法,用户也可以用MDAnalysis自定义分析方法。

+

内置分析:https://docs.mdanalysis.org/stable/documentation_pages/analysis_modules.html

+

如何DIY你自己的分析:https://userguide.mdanalysis.org/stable/examples/analysis/custom_trajectory_analysis.html

+

如果你需要作如下的分析,MDAnalysis就非常适合你:

+
    +
  • +

    MD统计分析:需要对MD轨迹中每一个单帧进行相同操作,并且需要循环整条轨迹的统计。例如,你需要统计A原子和B原子间的距离

    +
  • +
  • +

    周期性体系的距离计算:高效快速的距离计算库函数,提供[a, b, c, alpha, beta, gamma] cell parameter就可以考虑PBC下的距离。

    +
  • +
+

IO 理念

+

1. 初始化

+

MDAnalysis将轨迹文件,topology信息等抽象为一个Universe class. 例如一条xyz轨迹可以如下初始化,

+
from MDAnalysis import Universe
+xyzfile = "./tio2-water.xyz"
+u = Universe(xyzfile)
+u.dimensions = np.array([10, 10, 10, 90, 90, 90])    # assign cell parameter
+
+

这样初始化一个u实例其实并不会读取整个文件,在此阶段,用户可以使用u进行选择部分原子,得到一个atomgroup对象。例如,使用

+
ag      = u.atoms        # select all atoms
+xyz     = ag.positions   # get the coordinates for these atoms
+element = ag.elements    # the element labels for theses atoms
+
+

可以将所有原子选取成一个atomgroup对象。其实MDAnalysis支持一些更fancy的选择语法,类似于VMD的语法,详见MDAnalysis选择语法。但是,根据笔者的经验,这种选择语法对我们研究的体系来说不好用,使用ASE进行这些选择会更加方便。

+

2. 轨迹的读取

+

在初始化一个Universe后,你可以通过如下方法手动激活轨迹的读取:

+
print(u.trajectory)                 # reading the trajectory
+n_frames = u.trajectory.n_frames    # get the number of frames of your traj
+u.trajectory.ts.dt = 0.0005         # set dt to 0.0005 ps, otherwise you will get a warning 
+
+

否则,在运行分析之前,MDAnalysis不会自动读取文件。

+

实际上,就算在上面的读取过程中,MDAnalysis也不会把轨迹读入内存,而是读取了一条轨迹的开头在文件的位置。以我们比较熟悉的xyz文件为例,

+
100                                 <- 帧开头
+TIMESTEP: 0
+*.*****    *.*****    *.*****
+*.*****    *.*****    *.*****
+              ·
+              ·
+              ·
+              ·
+*.*****    *.*****    *.*****
+100                                 <- 帧开头
+TIMESTEP: 2
+*.*****    *.*****    *.***** 。    
+
+

MDAnalysis会遍历整个文件流,将轨迹开头在文件流中的位置保存在u.trajectory._offsets中。

+
    |+----------------+----------------+----------------+--···············--+----------------|
+    |*             ️   *            ️    *            ️    *              ️     *                |
+    |*             ️   *            ️    *            ️    *              ️     *                |
+    |*             ️   *            ️    *            ️    *              ️     *                |
+    |*             ️   *            ️    *            ️    *              ️     *                |
+    |v                v                v                v                   v                |
+    ------------------------------------------------------------------------------------------
+    |0                1                2                3                   N                |
+array(
+    [<_offsets(0)>,   <_offsets(1)>,   <_offsets(2)>,   <_offsets(3)>, ..., <_offsets(N)>    ]
+)  ---> u.trajectory._offsets
+
+

有了这些帧开头在文件中的位置,MDAnalysis就可以随机读取任意一帧轨迹文件的数据。例如,如果你需要读取第70帧的坐标,你就可以

+
>>> print(u.trajectory)
+>>> ag = u.atoms
+>>> print(u.trajectory.ts)
+< Timestep 0 with unit cell dimensions None >
+>>> for ii in range(69):
+...     u.trajectory.next()
+>>> print(u.trajectory.ts) 
+< Timestep 69 with unit cell dimensions None >
+>>> xyz70 = ag.positions
+>>> u.trajectory.rewind()                       
+< Timestep 0 with unit cell dimensions None >
+
+

可以看到,u.trajectory其实是一个迭代器,你可以通过u.trajectory.next()方法得到下一帧的trajectory。同时,这一帧的坐标也会更新至atomgroup.positions。实际上,在使用MDAnalysis进行分析时你不需要执行这些底层的next和rewind方法,这些繁琐的步骤已经包装好了。

+

实际上可以直接通过索引的方式对第70帧的结果进行提取:

>>> print(u.trajectory[70])
+< Timestep 70 with unit cell dimensions None >
+

+

同时,正因为u.trajectory是一个迭代器,对于其父类ProtoReader,其中定义了__iter__方法来返回一个迭代器对象,同时定义了__next__方法来回应对应的迭代过程。而在__next__方法返回的即为next()函数中的内容。因此如果想对一条轨迹进行切片分析而不是逐帧分析,我们就可以使用切片后的轨迹来进行迭代:

+
>>> for ts in u.trajectory[10:10000:20]:    # 从第10帧到10000帧每20步取一帧
+...     print(ts.frame)
+10 30 50 70 90 110 130 150 ...
+
+

综上所述,MDAnalysis的轨迹读取方式有如下优点:

+
    +
  • +

    因为实际读取的是offsets,也就是帧开头的位置,仅仅读了N个整数。不像隔壁ASE,会实例化N个Atoms(包括了整条轨迹的坐标),于是会非常占用内存。MDAnalysis的io方法内存占用小,loop也更快。

    +
  • +
  • +

    读取offsets后你可以将Universe对象保存下来见下文,读取后不需要再遍历整个轨迹文件。这样,假如你又有了新的分析,你就可以省下遍历文件的时间。

    +
  • +
+

保存一个Universe实例

+

假如说你现在有一条轨迹文件traj.xyz,你可以通过如下方法将其保存下来,节省二次分析时读取帧开头的时间。

+
import pickle
+from MDAnalysis import Universe
+
+>>> xyzfile = "/path/to/traj.xyz"     # !!! Use absolute path. It's more robust.     
+>>> outuni  = "./traj.uni"
+>>> u = Universe(xyzfile)
+>>> print(u.trajectory)               # This will take some time
+<XYZReader /path/to/traj.xyz with 100 frames of 3240 atoms>
+>>> with open(outuni, 'wb') as f:
+...    pickle.dump(u, f)
+
+

建议初始化Universe时使用绝对路径,这样你可以将复制到traj.uni复制到任意路径对轨迹分析。在二次分析时,你可以直接这样读取一个Universe:

+
>>> with open(outuni, 'rb') as f:
+...     v = pickle.load(f) 
+>>> print(v.trajectory)
+<XYZReader /path/to/traj.xyz with 100 frames of 3240 atoms>
+
+

笔者的经验是,在我们的<fat>节点上,遍历一个 6G 大小的xyz轨迹文件的帧开头需要 3 min。

+

距离计算库函数

+

MDAnalysis有优秀的底层距离计算函数库MDAnalysis.lib.distances,其底层方法由开发者用C语言编写,再用python包装成库,详见lib.distances API。它的长处在于计算周期性边界条件(PBC)下的原子间距离,并且文档翔实。而且它与MDAnalysis的Universe、Analysis等类相独立,你只需要提供原子坐标、盒子大小、cutoff大小,就可以得到距离、角度等数据。

+

下面是笔者用该函数库里capped_distance方法包装的的一个配位数计算器。

+
def count_cn(atoms1, atoms2, cutoff_hi, cutoff_lo=None, cell=None):
+    """count the coordination number(CN) for atoms1 (center atoms), where atoms2 are coordinate atom. This function will calculate CN within range cutoff_lo < d < cutoff_lo, where d is the distance between atoms1 and atoms2. Minimum image convention is applied if cell is not None
+
+    Args:
+        atoms1 (numpy.ndarray): Array with shape (N, 3), where N is the number of center atoms. 'atoms1' are the position of center atoms. 
+        atoms2 (numpy.ndarray): Array with shape (M, 3), where M is the number of coordination atoms. 'atoms2' are the positions of coordination atoms.
+        cutoff_hi (float): Max cutoff for calculating coordination number. 
+        cutoff_lo (float or None, optional): Min cutoff for calculating coordination number. This function will calculate CN within range cutoff_lo < d < cutoff_lo, where d is the distance between atoms1 and atoms2. Defaults to None.
+        cell (numpy.ndarray, optional): Array with shape (6,), Array([a, b, c, alpha, beta, gamma]). Simulation cell parameters. If it's not None, the CN calculation will use minimum image convention. Defaults to None.
+
+    Returns:
+        results: Array with shape (N,), CN of each atoms atoms1
+    """
+    pairs, _ = capped_distance(reference=atoms1,
+                               configuration=atoms2,
+                               max_cutoff=cutoff_hi,
+                               min_cutoff=cutoff_lo,
+                               box=cell)
+    _minlength = atoms1.shape[0]
+    results = np.bincount(pairs[:, 0], minlength=_minlength)
+    return results
+
+

其实隔壁ASE.geometry下也有类似的底层方法,但是笔者认为使用体验确实不如MDAnalysis.lib.distances(计算速度慢,文档少)。

+

下面对两组原子距离矩阵进行benchmark,每组100个原子,结果是一个100x100的numpy.array,可以发现MDAnalysis.lib.distances会快15倍。所以当你有上万个这样计算的时候,使用ASE的函数库会影响你的效率。

+
>>> import numpy as np
+>>> from ase.geometry import get_distances
+>>> from MDAnalysis.lib.distances import distance_array
+                       ·
+                       ·
+                       ·
+>>> print(xyz1.shape, xyz2.shape)
+(100, 3) (100, 3)
+>>> print(cell)
+[[50.5123      0.          0.        ]
+ [ 5.05820546 13.34921731  0.        ]
+ [ 0.          0.         47.8433    ]]
+>>> print(cellpar)
+[50.5123 14.2754 47.8433 90.     90.     69.2476]
+
+In[1]: %%timeit
+...    dmatrix_mda = distance_array(xyz1, xyz2, box=cellpar)
+1.03 ms ± 5.11 µs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)
+
+In[2]: %%timeit
+...    vec, dmatrix_ase = get_distances(xyz1, xyz2, cell=cell, pbc=True)
+16.6 ms ± 133 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+
+

注意:如果你在处理非正交的模拟盒子

+

我们注意到,在上述距离计算的例子里,我们需要通过cell parameter,[a, b, c, alpha, beta, gamma],给MDAnalysis.lib.distances提供模拟盒子的信息。而实际上,计算距离的时候cell parameter会先通过内部方法转化成3x3的盒子矩阵。如果你的盒子并不是正交的,应该先检查你提供的cell parameter能否正确得到3x3的矩阵,再使用这个函数库,否则你可能会得到错误的结果。这里是他们使用的python转换脚本

+

复杂的轨迹分析——简要介绍面向对象编程的方法

+

在实际的催化自由能计算当中,在分析过程中实际涉及的变量往往不止于一个简单的分子动力学轨迹文件。如在增强采样的轨迹当中,就会有涉及bias偏置势的COLVAR文件,其中的*bias是获得不同结构在实际的相空间中的权重的重要参数。

+

同时,面向过程的编程方法在遇见大量的重复过程的时候往往会面临变量繁多、过程复杂的问题。比如对同样的化学体系在不同的温度下进行分子动力学采样,使用程序对结果进行分析的时候,如果是面向过程编程,要么使用大量的变量来对数据进行提取,要么增加一个变量中数据的维度,因此在遇到复杂的数据处理时很容易理不清过程,找不到问题所在。因此就需要引入面向对象编程的思路,在这种模式下我们解决问题的方法从过程导向变成了对象导向。这个对象可以是任何东西,比如一个文件、一个路径等等,而我们想获得这个对象的信息来解决我们想知道的问题,就需要定义一些方法来处理。

+

下面我们结合一个例子来大致说明面向对象编程的简要思路:

+

问题描述

+

在一条增强采样的轨迹当中,我们想要提取结构中一种原子(A)周围另一种原子(B)的配位数分布以及不同配位数下的结构分布特征,并结合增强采样的权重来对结果进行更为严谨的分析。首先我们需要引入轨迹文件中的分子结构信息,再计算给定环境的配位数,提取配位环境的原子坐标,并在后续计算局部结构的特征信息(如键角、二面角分布等)。对于这些结构,我们需要同时提取COLVAR文件中对应的bias信息,用于在后续过程中的加权。

+

在使用MDAnalysis库对这样的文件进行分析的时候,得益于强大的工具支持,处理这类复杂问题也可以更为迅速。参考本文开始时给出的网页中的相关段落,我们首先引入相应的坐标信息文件以及COLVAR文件来初始化类(在问题的处理过程当中,这一步相当于引入原始信息;当然也可以在对应的方法当中再引入,但如果有多个方法需要用到的统一信息,在__init__方法中引入是最为直观和方便的):

import numpy as np
+from MDAnalysis import Universe
+from MDAnalysis.analysis.base import AnalysisBase
+from MDAnalysis.lib.distances import capped_distance, distance_array, calc_angles
+
+class A_center_analysis(AnalysisBase):
+    def __init__(self, lmpfile, COLVAR_file, verbose=True):
+        u = Universe(lmpfile, topology_format="LAMMPSDUMP")
+        u.select_atoms("type 1").masses = 114
+        u.select_atoms("type 2").masses = 514
+        u.trajectory.ts.dt = 0.0005
+        self.cell = u.dimensions
+        self.bias = np.loadtxt(COLVAR_file, skiprows=1, usecols=-1)
+
+        assert u.trajectory.n_frames == len(self.bias) 
+
+        self.atomgroup = u.select_atoms("all")
+        super(A_center_analysis, self).__init__(self.atomgroup.universe.trajectory, verbose=verbose)
+
其中super()函数用于调用父类AnalysisBase当中的初始化函数,以便于后续变量的使用。

+

此后我们定义准备好的结果变量。在原例当中只使用了一个self.result变量来包含所有的结果,在后续的调用当中不是很明朗,因此在此处可以多定义一些。

    def _prepare(self):
+        self.cn_2 = np.array([])        
+        self.angle_2 = np.array([])
+        self.bias_2 = np.array([])
+
此后,我们可以定义我们对应的分析方法,并将关心的结果放到合适的类属性当中(这一步就是将原始信息进行处理,获得对象的一系列属性的过程):
    def _append(self, cn, data):
+        assert data.shape == (cn+1, 3)
+        if cn == 2:
+            self.cn_2 = np.append(self.cn_2, data)
+            self.cn_2 = self.cn_2.reshape(-1,3,3)
+
+            BAB_angle = calc_angles(data[1],data[0],data[2], box=self.cell)
+            self.angle_2 = np.append(self.angle_2, BAB_angle)
+            self.bias_2 = np.append(self.bias_2, self.bias[self.atomgroup.ts.frame])    # 此处对结构对应的bias信息进行提取
+        else:
+            pass
+
+    def _single_frame(self):
+        A_coord = self.atomgroup.select_atoms("type 1").ts[:]
+        B_coord = self.atomgroup.select_atoms("type 2").ts[:]
+
+        pairs, _ = capped_distance(reference=A_coord, configuration=B_coord,
+                                   max_cutoff=2.6,min_cutoff=None,
+                                   box=self.cell)
+        _minlength = A_coord.shape[0]
+        cn_results = np.bincount(pairs[:, 0], minlength=_minlength)
+
+        for A_cn in range(2,5):
+            A_centers = A_coord[cn_results == A_cn]
+            for A_center in A_centers:
+                A_B_map = distance_array(A_center, B_coord, box=self.cell)[0]
+                coordinated_B_coord = B_coord[A_B_map.argsort() < A_cn]
+                self._append(A_cn, np.vstack((A_center, coordinated_B_coord)))
+
其中,_single_frame()方法是后续运行过程当中循环迭代的主要程序,其中的self.atomgroup可以看作单一一帧的数据,后面的处理方法也是针对这一帧的。父类当中的run()方法会自动进行迭代,只需要规定好迭代的范围以及步长即可。

+

在此处多定义一个_append()方法的目的是为了将添加到结果变量中的程序和主要的分析程序分开,以便于后续的功能拓展(如对配位数等于3的坐标信息进行二次处理就可以直接在_append()函数当中添加功能而不用再动提取信息的相关程序)。

+

在后续的调用过程中,我们只需要初始化相关分析类并调用run即可:

>>> lmpfile = "300.lammpstrj"
+>>> a_300 = A_center_analysis(lmpfile, "COLVAR")
+>>> a_300.run(start=200000, stop=-1, step=100)
+>>> print(a_300.bias_2.shape, a_300.angle_2.shape, a_300.cn_2.shape)
+(20472,) (20472,) (20472, 3, 3)
+
从输出的数据尺寸可以看出结果符合我们的预期,能够做到结构和对应的偏置势一一对应。在后续的分布处理过程当中,我们可以对其进行直方图概率密度估计(当然更严谨的方法是使用高斯核概率密度估计):
>>> plt.hist(a_300.angle_2*180/np.pi, weights=np.exp(beta*a_300.bias_2), bins=100, density=True)
+
同样地,可以将程序再向上封装一层,即把相同体系在不同温度下的结果作为一个类的对象来进行分析,这样输出的结果更为清晰。这样的封装可能就需要统一文件命名方式,以及格式化的路径命名方式等等,在实际的工作当中带来的效率提升往往很可观。

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/Tips_for_LaTeX/index.html b/en/wiki/software_usage/Tips_for_LaTeX/index.html new file mode 100644 index 00000000..72145b6c --- /dev/null +++ b/en/wiki/software_usage/Tips_for_LaTeX/index.html @@ -0,0 +1,2795 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Tips for paper writing with LaTeX - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Tips for paper writing with LaTeX

+

Cross reference

+

What should we do if we want to cite the figures or tables in the supplementary material? Use the xr package!

+

Firstly, put the following into the preamble of the SI:

+
%Number supplementary material with an S
+\renewcommand{\thepage}{S\arabic{page}}
+\renewcommand{\thesection}{S\arabic{section}} 
+\renewcommand{\thetable}{S\arabic{table}} 
+\renewcommand{\thefigure}{S\arabic{figure}}
+\renewcommand{\theequation}{S\arabic{equation}}
+
+

Then, you can refer to the Figures with Figure Sxxx in your SI file. To cite them in your main text, you can use \ref, by adding the following to the main file:

+
%%Crossreferencing to the SI
+\usepackage{xr}
+\externaldocument[SI-]{<path to folder in which you have the SI>}
+
+

Now you can reference figures in the SI as

+
\ref{SI-<label you gave the figure in the SI>}
+
+

Be cautious: You need to recompile both the paper and the SI after doing so.

+

For overleaf users, please refer to here.

+

Thanks for the suggestion from Dr. Katharina Doblhoff-Dier in Leiden University.

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-constrainedmd/index.html b/en/wiki/software_usage/cp2k/cp2k-constrainedmd/index.html new file mode 100644 index 00000000..e093eadc --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-constrainedmd/index.html @@ -0,0 +1,2925 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:ConstrainedMD - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CP2K: Constrained MD

+

学习目标

+
    +
  • CP2K Constrained MD 设置
  • +
  • Potential of Mean Force 方法计算反应自由能
  • +
+

学习资料

+ +

CP2K Constrained MD 设置

+

CP2K 提供了将施加 Constraint 过程中的拉格朗日乘子输出的能力,其统计平均即该反应坐标下的Potential of Mean Force (PMF)。 +PMF对反应坐标积分即反应自由能。MLMD 可实现高精度长时间尺度模拟,因而适用于计算化学反应体系的自由能。 +这里我们可结合 DeePMD 势进行 Constrained MD 模拟。

+
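下面用一个简化的 Python 片段示意"拉格朗日乘子统计平均 → PMF → 对反应坐标积分得自由能"的处理思路。其中反应坐标取点与文件名 pmf_*.dat 均为假设,积分的符号约定取决于乘子的定义,请以实际输出和文献为准:

```python
import numpy as np

# 假设:已把每个约束距离下输出的拉格朗日乘子整理成一列数值,存为 pmf_<r>.dat
cv_values = np.array([2.0, 2.2, 2.4, 2.6, 2.8])                 # 反应坐标取点(示例数值)
mean_lambda = np.array([np.loadtxt(f"pmf_{r:.1f}.dat").mean()   # 各点乘子的统计平均,即 PMF
                        for r in cv_values])
delta_F = np.trapz(mean_lambda, cv_values)                      # 沿反应坐标积分得到自由能差
print(delta_F)
```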

首先定义 Collective Variable (CV),这里我们选择两原子间距离进行控制:

+
&FORCE_EVAL
+   ...
+   &SUBSYS
+      ...
+      &COLVAR
+         &DISTANCE
+            ATOMS 225 226
+         &END DISTANCE
+      &END COLVAR
+      ...
+   &END SUBSYS
+   ...
+&END FORCE_EVAL
+
+

其中 225226 即为所需控制键长的原子序号。注意 CP2K 中原子序号从 1 开始。

+

然后定义所需控制的键长:

+
&MOTION
+   &CONSTRAINT
+      &COLLECTIVE
+         COLVAR 1
+         INTERMOLECULAR .TRUE.
+         TARGET 3.4015070391941524 # (1)!
+      &END COLLECTIVE
+      &LAGRANGE_MULTIPLIERS ON
+         COMMON_ITERATION_LEVELS 10000000 # (2)!
+      &END LAGRANGE_MULTIPLIERS
+   &END CONSTRAINT
+   ...
+&END MOTION
+
+
    +
  1. 设置两原子距离的目标值,注意这里的单位是 a.u.
  2. 缺省值为1,为防止输出过长的日志文件,请设置为一个大于总步数的值
+

注意这里 TARGET 的单位是 a.u.,请把常用的单位(如 Å )转换为原子单位。
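例如上面例子中的 TARGET 3.4015070391941524 即 1.80 Å 换算成原子单位(Bohr)的结果,可以用下方附录中的换算因子快速验证:

```python
ANGSTROM_TO_BOHR = 1.88972613288564      # 见附录中 [Angstrom] -> [Bohr] 的换算因子
target = 1.80 * ANGSTROM_TO_BOHR         # 例:1.80 Å 的目标键长
print(f"TARGET {target:.16f}")           # ≈ 3.4015070391941524
```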

+

附录:物理常数和单位换算

+
*** Fundamental physical constants (SI units) ***
+
+ *** Literature: B. J. Mohr and B. N. Taylor,
+ ***             CODATA recommended values of the fundamental physical
+ ***             constants: 2006, Web Version 5.1
+ ***             http://physics.nist.gov/constants
+
+ Speed of light in vacuum [m/s]                             2.99792458000000E+08
+ Magnetic constant or permeability of vacuum [N/A**2]       1.25663706143592E-06
+ Electric constant or permittivity of vacuum [F/m]          8.85418781762039E-12
+ Planck constant (h) [J*s]                                  6.62606896000000E-34
+ Planck constant (h-bar) [J*s]                              1.05457162825177E-34
+ Elementary charge [C]                                      1.60217648700000E-19
+ Electron mass [kg]                                         9.10938215000000E-31
+ Electron g factor [ ]                                     -2.00231930436220E+00
+ Proton mass [kg]                                           1.67262163700000E-27
+ Fine-structure constant                                    7.29735253760000E-03
+ Rydberg constant [1/m]                                     1.09737315685270E+07
+ Avogadro constant [1/mol]                                  6.02214179000000E+23
+ Boltzmann constant [J/K]                                   1.38065040000000E-23
+ Atomic mass unit [kg]                                      1.66053878200000E-27
+ Bohr radius [m]                                            5.29177208590000E-11
+
+ *** Conversion factors ***
+
+ [u] -> [a.u.]                                              1.82288848426455E+03
+ [Angstrom] -> [Bohr] = [a.u.]                              1.88972613288564E+00
+ [a.u.] = [Bohr] -> [Angstrom]                              5.29177208590000E-01
+ [a.u.] -> [s]                                              2.41888432650478E-17
+ [a.u.] -> [fs]                                             2.41888432650478E-02
+ [a.u.] -> [J]                                              4.35974393937059E-18
+ [a.u.] -> [N]                                              8.23872205491840E-08
+ [a.u.] -> [K]                                              3.15774647902944E+05
+ [a.u.] -> [kJ/mol]                                         2.62549961709828E+03
+ [a.u.] -> [kcal/mol]                                       6.27509468713739E+02
+ [a.u.] -> [Pa]                                             2.94210107994716E+13
+ [a.u.] -> [bar]                                            2.94210107994716E+08
+ [a.u.] -> [atm]                                            2.90362800883016E+08
+ [a.u.] -> [eV]                                             2.72113838565563E+01
+ [a.u.] -> [Hz]                                             6.57968392072181E+15
+ [a.u.] -> [1/cm] (wave numbers)                            2.19474631370540E+05
+ [a.u./Bohr**2] -> [1/cm]                                   5.14048714338585E+03
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-deepmd/index.html b/en/wiki/software_usage/cp2k/cp2k-deepmd/index.html new file mode 100644 index 00000000..7ab1b27a --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-deepmd/index.html @@ -0,0 +1,2942 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:DeePMD-kit插件 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CP2K: DeePMD-kit插件

+

学习目标

+
    +
  • 用 CP2K 调用 DeePMD-kit 以进行 MLMD 模拟
  • +
  • Constrained MD 的参数设置
  • +
+

学习资料

+

CP2K官方手册:

+ +

适用版本

+

以下教程适用于最新版本加入 DeePMD 支持的 CP2K。 +Zeus集群上的 cp2k/2024.2-devdeepmd/2.2.7 (未编译MPI和DFT支持) 可以运行以下教程。

+

注意 cp2k/2024.2-dev 的作业脚本写法如下:

+
module load gcc/9.3.0
+module load intel/17.5.239
+module load cuda/11.8
+module load mpi/openmpi/4.1.6-gcc
+module load cp2k/2024.2-dev
+
+

CP2K MD Section 的输入文件

+

请先了解CP2K的输入文件语法,指路:

+ +

由于 MLMD 通常会需要纳秒甚至更长时间尺度的模拟,若未进行适当配置,可能会产生过长的输出文件,因此我们在 GLOBAL 下做以下调整:

+
&GLOBAL
+   PROJECT pmf # (1)!
+   RUN_TYPE MD
+   PRINT_LEVEL SILENT # (2)!
+   WALLTIME 95:00:00 # (3)!
+&END GLOBAL
+
+
    +
  1. 根据自己的项目名修改,决定输出文件的名称
  2. 如果跑DeePMD, 请务必设置为 SILENT, 防止输出文件过大
  3. 推荐稍短于作业的 Walltime 以免截断轨迹
+

然后我们配置如下的力场参数:

+
&FORCE_EVAL
+   METHOD FIST
+   &MM
+      &FORCEFIELD
+         &NONBONDED
+            &DEEPMD
+               ATOMS C O Pt
+               ATOMS_DEEPMD_TYPE 0 1 2 # (1)!
+               POT_FILE_NAME ../graph.000.pb
+            &END DEEPMD
+         &END NONBONDED
+         IGNORE_MISSING_CRITICAL_PARAMS .TRUE. # (2)!
+      &END FORCEFIELD
+      &POISSON
+         &EWALD
+            EWALD_TYPE none
+         &END EWALD
+      &END POISSON
+   &END MM
+   ...
+&END FORCE_EVAL
+
+
    +
  1. 与元素列表对应,即各元素在 type_map 中的索引顺序(type_map 的查询方法见下方示例)
  2. ⚠ 请保留这一行以忽略未定义参数
+
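如果不确定势函数的元素顺序,可以用 DeePMD-kit 的 Python 接口查询模型的 type_map,从而确定 ATOMS_DEEPMD_TYPE 应填写的索引。下面只是一个思路示意,假设环境中已安装 deepmd-kit,模型文件为 graph.000.pb:

```python
from deepmd.infer import DeepPot

dp = DeepPot("graph.000.pb")                   # 载入冻结后的模型
type_map = dp.get_type_map()                   # 例如 ['C', 'O', 'Pt']
print({elem: idx for idx, elem in enumerate(type_map)})   # 元素 -> ATOMS_DEEPMD_TYPE 索引
```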

通常 MLMD 轨迹文件不需要每步都输出,因而通过以下方式设置输出间隔:

+
&MOTION
+   ...
+   &MD
+      ...
+      &PRINT
+         &ENERGY
+            &EACH
+               MD 100 # (1)!
+            &END EACH
+         &END ENERGY
+      &END PRINT
+   &END MD
+   &PRINT
+      &CELL
+         &EACH
+            MD 100 # (2)!
+         &END EACH
+      &END CELL
+      &FORCES
+         &EACH
+            MD 100 # (3)!
+         &END EACH
+      &END FORCES
+      &RESTART_HISTORY
+         &EACH
+            MD 200000 # (4)!
+         &END EACH
+      &END RESTART_HISTORY
+      &TRAJECTORY
+         &EACH
+            MD 100 # (5)!
+         &END EACH
+      &END TRAJECTORY
+   &END PRINT
+&END MOTION
+
+
    +
  1. 此处修改ener的输出频率,通常与结构轨迹保持一致
  2. 此处修改晶胞参数的输出频率,注意如果晶胞参数不变可不写这一部分
  3. 此处修改力轨迹的输出频率,通常与结构轨迹保持一致
  4. 此处修改restart文件的输出频率,可根据 Walltime 和总步数进行估计
  5. 此处修改结构轨迹的输出频率
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-dft+u/index.html b/en/wiki/software_usage/cp2k/cp2k-dft+u/index.html new file mode 100644 index 00000000..58af98db --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-dft+u/index.html @@ -0,0 +1,2903 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:DFT+U - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CP2K: DFT+U

+

学习目标

+
    +
  • +

    学习资料

    +
  • +
  • +

    DFT+U基本原理

    +
  • +
  • +

    CP2K DFT+U设置

    +
  • +
  • +

    DFT+U 查看电子占据态

    +
  • +
+

学习资料

+

Dudarev, S. L., Manh, D. N., & Sutton, A. P. (1997). Effect of Mott-Hubbard correlations on the electronic structure and structural stability of uranium dioxide. Philosophical Magazine B: Physics of Condensed Matter; Statistical Mechanics, Electronic, Optical and Magnetic Properties, 75(5), 613–628..

+

Dudarev, S. L., Botton, G. A., Savrasov, S. Y., Humphreys, C. J., & Sutton, A. P. (1998). Electron-energy-loss spectra and the structural stability of nickel oxide: An LSDA+U study. Physical Review B, 57(3), 1505–1509. .

+

Himmetoglu, B.; Floris, A.; de Gironcoli, S.; Cococcioni, M. Hubbard-Corrected DFT Energy Functionals: The LDA+U Description of Correlated Systems. International Journal of Quantum Chemistry 2013, 114 (1), 14–49..

+

DFT+U基本原理

+

DFT对于电子的描述是偏向离域化的,因此DFT可以较好地描述金属态固体。对于过渡金属系列的氧化物,例如Fe2O3,CoO,Co3O4,NiO等。过渡金属中仍然含有d电子。在固体中,d电子较为局域,且局域在过渡金属离子周围。此时单单使用DFT并不能很好的描述局域化的电子。我们可以通过加大d电子之间的静电排斥(U)来达到目的。

+

CP2K DFT+U设置

+

CP2K_INPUT / FORCE_EVAL / DFT

+

PLUS_U_METHOD MULLIKEN
+
+其中MULLIKEN_CHARGES不推荐, LOWDIN方法好像更准但是不能够算FORCES,cp2k v8.2版本后可以算FORCES,详细参考:https://groups.google.com/g/cp2k/c/BuIOSWDqJTc/m/fSL89NZaAgAJ

+

CP2K_INPUT / FORCE_EVAL / SUBSYS / KIND / DFT_PLUS_U

+

对想要+U的元素的对应KIND设置

+
&DFT_PLUS_U
+    # 轨道角动量 0 s轨道 1 p轨道 2 d轨道 3 f轨道
+    L 2 
+    # 有效U值,记得写[eV],不然默认为原子单位
+    U_MINUS_J [eV]  3 
+&END DFT_PLUS_U
+
+

DFT+U 查看电子占据态

+

如果我们想知道+U之后对应原子中,例如d轨道的电子,的占据情况。我们可以利用如下设置将其print在output中。

+

CP2K_INPUT / FORCE_EVAL / DFT / PRINT / PLUS_U下,

+
&PLUS_U MEDIUM
+    ADD_LAST NUMERIC
+&END PLUS_U
+
+

你会在output中得到如下输出

+

  DFT+U occupations of spin 1 for the atoms of atomic kind 3: Fe1
+
+    Atom   Shell       d-2     d-1      d0     d+1     d+2   Trace
+      37       1     1.068   1.088   1.047   1.093   1.069   5.365
+      37       2     0.008   0.008   0.011   0.007   0.009   0.043
+           Total     1.076   1.096   1.058   1.100   1.077   5.408
+
+      38       1     1.064   1.102   1.047   1.089   1.086   5.388
+      38       2     0.009   0.007   0.011   0.009   0.008   0.044
+           Total     1.073   1.109   1.058   1.097   1.094   5.432
+
+如果想看不加U的原子的占据情况,那可以给对应原子加一个非常小的U值,比如1e-20。
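如果希望把输出中各原子 d 轨道的占据数(Trace 一列)批量提取出来做比较,可以用类似下面的 Python 片段。仅为示意,假设输出文件名为 output,且占据信息的格式与上面一致:

```python
traces = []
with open("output") as f:
    for line in f:
        cols = line.split()
        # 匹配形如 "Total  1.076  1.096  1.058  1.100  1.077  5.408" 的行,最后一列即 Trace
        if cols and cols[0] == "Total" and len(cols) == 7:
            try:
                values = [float(x) for x in cols[1:]]
            except ValueError:
                continue                  # 跳过 SCF 输出里其他以 Total 开头的行
            traces.append(values[-1])
print(traces)                             # 按出现顺序排列的各原子 d 电子占据数
```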

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-e-f/index.html b/en/wiki/software_usage/cp2k/cp2k-e-f/index.html new file mode 100644 index 00000000..ef5f2790 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-e-f/index.html @@ -0,0 +1,3080 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:能量与力的计算 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + +

CP2K能量与力的计算

+

学习目标

+
    +
  • +

    认识CP2K的基础输入文件

    +
  • +
  • +

    认识CP2K输入文件的主要部分

    +
  • +
  • +

    运行计算

    +
  • +
+

计算文件下载

+

本教程改编自CP2K原教程,以半导体和OT为例子,更符合组内需求。

+

认识CP2K的基础输入文件

+

原则上来说CP2K的输入文件只需要三个,一个是输入参数的设置文件input.inp,一个是赝势的参数文件GTH_POTENTIALS,一个是基组的参数文件BASIS_SET

+

在集群上,管理员已经把GTH_POTENTIALSBASIS_SET放置在特定文件夹,并且使用特殊的链接方法可以让CP2K程序自动寻找到。因此在后文中涉及到赝势和基组的部分可以直接填写对应的文件名称。

+

认识CP2K输入文件的主要部分

+

现在让我们打开input.inp

+

CP2K的输入文件主要包含两个SECTION.

+
    +
  • "GLOBAL": 一些让CP2K跑起来的通用选项,比如任务名称,任务类型。
  • +
  • "FORCE_EVAL": 包含了所有跟求解原子的力有关的参数设置,也包括了原子的坐标信息
  • +
+

现在我们先看GLOBAL

+
 &GLOBAL
+   PROJECT Universality
+   RUN_TYPE ENERGY_FORCE
+   PRINT_LEVEL MEDIUM
+ &END GLOBAL
+
+

当要计算体系的力和能量时,我们必须在RUN_TYPE中对计算的类型进行指定。比如RUN_TYPE ENERGY_FORCE就是对当前的体系进行力和能量的计算。其他类型的计算可以在CP2K手册里找到。

+

PROJECT定义了这个计算的项目名称,通常被用来命名一些输出文件。

+

PRINT_LEVEL定义了CP2K output文件里输出信息量的大小。

+

现在我们接着看FORCE_EVAL

+
METHOD Quickstep
+
+

METHOD Quickstep表明选择了使用密度泛函理论(Density Functional Theory)中的GPW方法进行计算原子受力。

+
   &SUBSYS
+     &CELL
+       ABC [angstrom]    4.593 4.593 2.959
+     &END CELL
+     &COORD
+ @include rutile.xyz
+     &END COORD
+     &KIND O
+       BASIS_SET DZVP-MOLOPT-SR-GTH
+       POTENTIAL GTH-PBE-q6
+     &END KIND
+     &KIND Ti
+       BASIS_SET DZVP-MOLOPT-SR-GTH
+       POTENTIAL GTH-PBE-q12
+     &END KIND
+   &END SUBSYS
+
+

Subsection SUBSYS定义了模拟的晶胞大小(ABC晶胞长度角度等)和原子坐标的初始结构. 有关于@include的用法,请参考这里

+

Subsection KIND 定义了计算中出现的元素。对于每一种元素必须要有一个对应的KIND Section. 然后在KIND里面定义它的基组(BASIS_SET)和赝势(POTENTIAL)。

+

BASIS_SET和POTENTIAL的名称一定要对应到基组文件里和赝势文件里存在的条目。

+
 O GTH-PBE-q6 GTH-PBE
+     2    4
+      0.24455430    2   -16.66721480     2.48731132
+     2
+      0.22095592    1    18.33745811
+      0.21133247    0
+
+

Subsection CELL 定义了模拟中的晶胞大小。 此例子中,ABC指的是晶胞的边长。如不额外指定角度,默认为90, 90, 90度。[angstrom]是指定长度单位。

+

Subsection COORD定义初始的原子坐标。 原子位置的默认格式为

+
<ATOM_KIND> X Y Z
+
+

X Y Z 为笛卡尔坐标,单位为Angstrom。如果添加SCALED .TRUE.,便是分数坐标。
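如果初始结构来自其他格式(如 cif、POSCAR 等),可以借助 ASE 把坐标整理成上述 "元素 X Y Z" 的格式再 @include 进来。下面是一个简单示意,其中 structure.cif 为假设的文件名:

```python
from ase.io import read

atoms = read("structure.cif")                 # 任意 ASE 支持的结构文件
with open("rutile.xyz", "w") as f:            # 供 &COORD 中 @include 使用
    for atom in atoms:
        x, y, z = atom.position
        f.write(f"{atom.symbol} {x:.6f} {y:.6f} {z:.6f}\n")
```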

+

Subsection DFT 控制了所有跟DFT计算有关的细节。该Subsection只有当你把method选择为quickstep时才会起作用。

+
BASIS_SET_FILE_NAME  BASIS_SET
+POTENTIAL_FILE_NAME  GTH_POTENTIALS
+
+

BASIS_SET_FILE_NAMEPOTENTIAL_FILE_NAME定义了基组和赝势的文件路径。由于管理员已经在集群上设置好了路径,用户直接填写这两个文件名即可。

+
&QS
+  EPS_DEFAULT 1.0E-13
+&END QS
+
+

SubsectionQS包含了一些通用的控制参数。EPS_DEFAULT设置了所有quickstep会用到的默认容忍度。

+
     &MGRID
+       CUTOFF 400
+       REL_CUTOFF 60
+     &END MGRID
+
+

Subsection MGRID 定义了如何使用quickstep中的积分网格。quickstep使用了多网格方法来表示高斯函数。比较窄和尖的高斯函数会被投影到更精细的网格,而宽和顺滑的高斯函数则相反。在这个例子中,我们告诉代码需要设置最精细的网格为400Ry,并且REL_CUTOFF为60Ry。关于CUTOFF和REL_CUTOFF方面请阅读

+

Subsection XC

+
     &XC
+       &XC_FUNCTIONAL PBE
+       &END XC_FUNCTIONAL
+     &END XC
+
+

这里定义了我们想使用的交换-关联密度泛函,在这个例子中我们选择了PBE泛函。泛函要与基组和赝势的选择一致。

+
     &SCF
+       SCF_GUESS ATOMIC
+       EPS_SCF 3.0E-7
+       MAX_SCF 50
+       &OUTER_SCF
+         EPS_SCF 3.0E-7
+         MAX_SCF 10
+       &END OUTER_SCF
+       &OT
+         MINIMIZER DIIS
+         PRECONDITIONER FULL_SINGLE_INVERSE
+       &END OT
+     &END SCF
+
+

SCF_GUESS设置了应该如何生成初始的尝试电子密度。在这个例子中,初始密度是由原子电荷密度重叠生成的。一个好的电子密度可以帮助CP2K快速得到收敛结果。EPS_SCF设置了电子密度差异的容忍度(收敛精度要求)。这个会覆盖EPS_DEFAULT设置的值。MAX_SCF指最多会迭代多少次。

+

Subsection OUTER_SCF这里暂时先不多介绍,但一般其精度设置要跟以上的EPS_SCF一致。以上的SCF为INNER_SCF,OUTER_SCF的MAX_SCF设为10。计算中实际最多迭代的次数是INNER_SCF乘以OUTER_SCF,即 50×10 = 500 次。

+

Subsection OT是利用Orbital Transformation的方法来优化波函数。

+
&PRINT
+  &FORCES ON
+  &END FORCES
+&END PRINT
+
+

这个subsection可以在output里打印出体系的原子受力。

+

运行计算

+

正常运行CP2K的方法为

+
mpirun -n 32 cp2k.popt input.inp > output & 
+
+

在集群上,我们使用lsf脚本文件提交,这行命令已经写在了脚本文件里,请直接提交。

+

输出结果

+

在任务结束后,你会得到如下文件

+
    +
  • output
  • +
  • Universality-RESTART.wfn
  • +
  • Universality-RESTART.wfn.bak-1
  • +
  • Universality-RESTART.wfn.bak-2
  • +
  • Universality-RESTART.wfn.bak-3
  • +
+

文件output包含了计算的主要输出。Universality-RESTART.wfn是计算最后得到的波函数。Universality-RESTART.wfn.bak-<n>记录了最后第 <n> 步前SCF得到的波函数。此例中,Universality-RESTART.wfn.bak-1是SCF最后一步的波函数。

+

当你想要利用波函数重启计算时,可以改为SCF_GUESS RESTART

+

他会自动从<PROJECT_NAME>-RESTART.wfn文件开始重启计算。

+

我们现在详细看一下output文件里的部分

+
 SCF WAVEFUNCTION OPTIMIZATION
+
+  ----------------------------------- OT ---------------------------------------
+  Minimizer      : DIIS                : direct inversion
+                                         in the iterative subspace
+                                         using   7 DIIS vectors
+                                         safer DIIS on
+  Preconditioner : FULL_SINGLE_INVERSE : inversion of
+                                         H + eS - 2*(Sc)(c^T*H*c+const)(Sc)^T
+  Precond_solver : DEFAULT
+  stepsize       :    0.08000000                  energy_gap     :    0.08000000
+  eps_taylor     :   0.10000E-15                  max_taylor     :             4
+  ----------------------------------- OT ---------------------------------------
+
+  Step     Update method      Time    Convergence         Total energy    Change
+  ------------------------------------------------------------------------------
+     1 OT DIIS     0.80E-01    0.5     0.15753643      -176.9839582002 -1.77E+02
+     2 OT DIIS     0.80E-01    0.8     0.09878604      -178.9306891883 -1.95E+00
+     3 OT DIIS     0.80E-01    0.8     0.04863529      -179.6564913758 -7.26E-01
+     4 OT DIIS     0.80E-01    0.8     0.03582212      -179.9871432342 -3.31E-01
+     5 OT DIIS     0.80E-01    0.8     0.02520552      -180.2247770848 -2.38E-01
+     6 OT DIIS     0.80E-01    0.8     0.01876959      -180.4037691134 -1.79E-01
+     7 OT DIIS     0.80E-01    0.8     0.01356216      -180.5257615047 -1.22E-01
+     8 OT DIIS     0.80E-01    0.8     0.01016476      -180.5867232155 -6.10E-02
+     9 OT DIIS     0.80E-01    0.8     0.00712662      -180.6348174041 -4.81E-02
+    10 OT DIIS     0.80E-01    0.8     0.00528671      -180.6543176954 -1.95E-02
+    11 OT DIIS     0.80E-01    0.8     0.00401555      -180.6682811925 -1.40E-02
+    12 OT DIIS     0.80E-01    0.8     0.00331228      -180.6769383021 -8.66E-03
+    13 OT DIIS     0.80E-01    0.8     0.00273633      -180.6824801501 -5.54E-03
+    14 OT DIIS     0.80E-01    0.8     0.00227705      -180.6858569326 -3.38E-03
+    15 OT DIIS     0.80E-01    0.8     0.00189452      -180.6891762522 -3.32E-03
+    16 OT DIIS     0.80E-01    0.8     0.00163117      -180.6913433711 -2.17E-03
+    17 OT DIIS     0.80E-01    0.8     0.00137647      -180.6931734207 -1.83E-03
+    18 OT DIIS     0.80E-01    0.8     0.00119961      -180.6942368984 -1.06E-03
+    19 OT DIIS     0.80E-01    0.9     0.00100873      -180.6952066209 -9.70E-04
+    20 OT DIIS     0.80E-01    0.8     0.00084472      -180.6960712607 -8.65E-04
+    21 OT DIIS     0.80E-01    0.9     0.00073811      -180.6966143834 -5.43E-04
+    22 OT DIIS     0.80E-01    0.8     0.00062100      -180.6969845494 -3.70E-04
+    23 OT DIIS     0.80E-01    0.8     0.00052079      -180.6972986282 -3.14E-04
+    24 OT DIIS     0.80E-01    0.8     0.00044814      -180.6975096788 -2.11E-04
+    25 OT DIIS     0.80E-01    0.8     0.00038815      -180.6976499085 -1.40E-04
+    26 OT DIIS     0.80E-01    0.8     0.00034010      -180.6977592686 -1.09E-04
+    27 OT DIIS     0.80E-01    0.8     0.00029429      -180.6978276824 -6.84E-05
+    28 OT DIIS     0.80E-01    0.8     0.00025218      -180.6979007896 -7.31E-05
+    29 OT DIIS     0.80E-01    0.8     0.00022927      -180.6979456455 -4.49E-05
+    30 OT DIIS     0.80E-01    0.8     0.00020201      -180.6979830729 -3.74E-05
+    31 OT DIIS     0.80E-01    0.8     0.00017896      -180.6980145219 -3.14E-05
+    32 OT DIIS     0.80E-01    0.8     0.00016066      -180.6980416001 -2.71E-05
+    33 OT DIIS     0.80E-01    0.8     0.00014606      -180.6980603801 -1.88E-05
+    34 OT DIIS     0.80E-01    0.8     0.00012970      -180.6980811127 -2.07E-05
+    35 OT DIIS     0.80E-01    0.8     0.00011431      -180.6980956614 -1.45E-05
+    36 OT DIIS     0.80E-01    0.8     0.00009560      -180.6981114298 -1.58E-05
+    37 OT DIIS     0.80E-01    0.8     0.00008482      -180.6981210277 -9.60E-06
+    38 OT DIIS     0.80E-01    0.8     0.00007281      -180.6981278770 -6.85E-06
+    39 OT DIIS     0.80E-01    0.8     0.00006188      -180.6981329264 -5.05E-06
+    40 OT DIIS     0.80E-01    0.8     0.00005294      -180.6981368983 -3.97E-06
+    41 OT DIIS     0.80E-01    0.8     0.00004688      -180.6981391197 -2.22E-06
+    42 OT DIIS     0.80E-01    0.8     0.00004055      -180.6981410282 -1.91E-06
+    43 OT DIIS     0.80E-01    0.8     0.00003559      -180.6981421977 -1.17E-06
+    44 OT DIIS     0.80E-01    0.8     0.00003040      -180.6981432648 -1.07E-06
+    45 OT DIIS     0.80E-01    0.8     0.00002734      -180.6981439881 -7.23E-07
+    46 OT DIIS     0.80E-01    0.8     0.00002451      -180.6981445033 -5.15E-07
+    47 OT DIIS     0.80E-01    0.8     0.00002178      -180.6981449169 -4.14E-07
+    48 OT DIIS     0.80E-01    0.8     0.00001953      -180.6981452985 -3.82E-07
+    49 OT DIIS     0.80E-01    0.8     0.00001795      -180.6981455598 -2.61E-07
+    50 OT DIIS     0.80E-01    0.8     0.00001622      -180.6981458123 -2.52E-07
+
+  Leaving inner SCF loop after reaching    50 steps.
+
+
+  Electronic density on regular grids:        -47.9999999967        0.0000000033
+  Core density on regular grids:               48.0000000000       -0.0000000000
+  Total charge density on r-space grids:        0.0000000033
+  Total charge density g-space grids:           0.0000000033
+
+  Overlap energy of the core charge distribution:               0.00000000000007
+  Self energy of the core charge distribution:               -379.90298629198736
+  Core Hamiltonian energy:                                    102.12467948924306
+  Hartree energy:                                             125.99881317904760
+  Exchange-correlation energy:                                -28.91865218857406
+
+  Total energy:                                              -180.69814581227070
+
+  outer SCF iter =    1 RMS gradient =   0.16E-04 energy =       -180.6981458123
+
+

以上显示了我们使用OT DIIS方法进行计算。现在计算已经进行了50个SCF迭代,当然现在还未达到收敛限。我们可以看到最后一行 outer SCF iter = 1,也就是说一个outer SCF包含了一个完整的inner SCF。

+
 ATOMIC FORCES in [a.u.]
+
+ # Atom   Kind   Element          X              Y              Z
+      1      1      Ti          0.00000026    -0.00000079     0.00000063
+      2      1      Ti          0.00000026    -0.00000027     0.00000004
+      3      2      O          -0.07002277     0.07002168    -0.00000018
+      4      2      O           0.07002184    -0.07002056     0.00000006
+      5      2      O           0.07002270     0.07002086    -0.00000083
+      6      2      O          -0.07002229    -0.07002093     0.00000028
+ SUM OF ATOMIC FORCES           0.00000000    -0.00000000     0.00000000     0.00000000
+
+

以上显示了原子受力的情况,我们发现有些原子的受力不接近于0,说明这个系统还没处在最佳的结构位置。
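如果想快速检查输出中原子受力的大小(例如最大受力分量),可以用类似下面的 Python 片段解析 output 中的 ATOMIC FORCES 块。仅为示意,假设输出格式与上面一致:

```python
import numpy as np

forces = []
lines = open("output").readlines()
for i, line in enumerate(lines):
    if "ATOMIC FORCES in [a.u.]" in line:
        j = i + 3                                   # 跳过空行和表头
        while "SUM OF ATOMIC FORCES" not in lines[j]:
            forces.append([float(x) for x in lines[j].split()[3:6]])
            j += 1
forces = np.array(forces)
print("最大受力分量 (a.u.):", np.abs(forces).max())
```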

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-geoopt/index.html b/en/wiki/software_usage/cp2k/cp2k-geoopt/index.html new file mode 100644 index 00000000..25f13901 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-geoopt/index.html @@ -0,0 +1,2892 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:结构和晶胞优化 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CP2K: 结构和晶胞优化

+

学习目标

+
    +
  • +

    学习资料

    +
  • +
  • +

    基本原理

    +
  • +
  • +

    CP2K 结构优化设置

    +
  • +
  • +

    CP2K 结构优化问题

    +
  • +
+

学习资料

+

Slides

+

基本原理

+

建设中, 参考官网

+

CP2K 结构优化设置

+

结构优化

+
&GLOBAL
+RUN_TYPE GEO_OPT
+&END GLOBAL
+
+

晶胞优化

+
&GLOBAL
+RUN_TYPE CELL_OPT
+&END GLOBAL
+
+

同时,在MOTION下设置OPTIMIZER和一些CONSTRAIN

+
&MOTION
+  &CELL_OPT
+    OPTIMIZER LBFGS 
+    KEEP_ANGLES
+    TYPE DIRECT_CELL_OPT
+  &END CELL_OPT
+&END MOTION
+
+

LBFGS是对大体系常用的,BFGS针对小体系,更为Robust的是CG。

+

KEEP_ANGLES是指保持晶胞的角度不变。

+

TYPE默认是DIRECT_CELL_OPT,即同时优化晶胞和里面的位置,是最快的优化方法。

+

CP2K 结构优化问题

+

晶胞优化需要计算STRESS TENSOR。通常采用ANALYTICAL方法计算即可,也是最快的方法。但是一些泛函并没有实现相应的STRESS TENSOR的计算,可以采用NUMERICAL的方法进行计算,比如SCAN。在cp2k v8.2后加入了METAGGA(包括SCAN)的STRESS TENSOR,但是仅实现了 kinetic energy density 的部分,优化会出问题,原因不明。

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-hf/index.html b/en/wiki/software_usage/cp2k/cp2k-hf/index.html new file mode 100644 index 00000000..c88abbf5 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-hf/index.html @@ -0,0 +1,3207 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:杂化泛函 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

CP2K: 杂化泛函

+

学习目标

+
    +
  • 学习资料
  • +
  • 杂化泛函基本原理
  • +
  • 杂化泛函辅助基组
  • +
  • CP2K杂化泛函设置
  • +
  • 参数的测试和收敛
  • +
  • 一些元素推荐的ADMM
  • +
+

学习资料

+

Slides: UCL DFT with Hybrid Functionals

+

Slides: Hybrid Functional and ADMM

+

官方练习

+

杂化泛函基本原理

+

建设中

+

杂化泛函辅助基组

+

建设中

+

CP2K杂化泛函设置

+
    # BASIS Purification
+    BASIS_SET_FILE_NAME BASIS_ADMM_MOLOPT
+    BASIS_SET_FILE_NAME BASIS_ADMM
+    &AUXILIARY_DENSITY_MATRIX_METHOD
+      METHOD BASIS_PROJECTION
+      ADMM_PURIFICATION_METHOD MO_DIAG
+    &END AUXILIARY_DENSITY_MATRIX_METHOD
+    # KIND 设置例子
+    &KIND O
+      BASIS_SET DZVP-MOLOPT-SR-GTH
+      POTENTIAL GTH-PBE-q6
+      BASIS_SET AUX_FIT cFIT3
+    &END KIND
+
+
# HSE06泛函部分
+      &XC_FUNCTIONAL
+        &PBE
+          SCALE_X 0.0
+          SCALE_C 1.0
+        &END PBE
+        &XWPBE
+          SCALE_X -0.25
+          SCALE_X0 1.0
+          OMEGA 0.11
+        &END XWPBE
+      &END XC_FUNCTIONAL
+      &HF
+        &SCREENING
+          EPS_SCHWARZ 1.0E-6
+          SCREEN_ON_INITIAL_P FALSE
+        &END SCREENING
+        &INTERACTION_POTENTIAL
+          POTENTIAL_TYPE SHORTRANGE
+          OMEGA 0.11
+          T_C_G_DATA t_c_g.dat
+        &END INTERACTION_POTENTIAL
+        &MEMORY
+          MAX_MEMORY 10000
+          EPS_STORAGE_SCALING 0.1
+        &END MEMORY
+        # this depends on user
+        &PERIODIC
+          NUMBER_OF_SHELLS 0
+        &END PERIODIC
+        FRACTION 0.25
+      &END HF
+
+

参数的测试和收敛

+

RESTART波函数

+

务必使用相同原子结构的PBE泛函优化后的波函数进行重启,可以省下大量机时,除非你很有钱。

+

在测试参数收敛前**务必**把SCF步数调成1。只要计算的数值收敛即可。

+
&SCF
+      EPS_SCF 3.0E-7
+      MAX_SCF 1
+&END SCF
+
+

EPS_PGF_ORB的收敛

+

在初次计算中,用户会遇到如下Warning

+
 *** WARNING in hfx_energy_potential.F:605 :: The Kohn Sham matrix is not  ***
+ *** 100% occupied. This may result in incorrect Hartree-Fock results. Try ***
+ *** to decrease EPS_PGF_ORB and EPS_FILTER_MATRIX in the QS section. For  ***
+ *** more information see FAQ: https://www.cp2k.org/faq:hfx_eps_warning    ***
+
+

这是因为CP2K会根据某些设定的值,来筛选出不需要计算的四电子积分。可以有效降低Hartree-Fock矩阵的计算。如果筛选的积分过多,那么H-F计算出来的结果就会失真。也是此Warning的来源。

+

控制这个筛选标准的有EPS_PGF_ORB这个参数。越小的话筛选的积分越少,H-F结果也就越真实。通常情况下这个Warning是不会消失的,即使用户调到一个非常小的量级,例如1.0E-20。

+

我们可以通过比对不同的EPF_PGF_ORB的能量收敛来选择合适的值。

+ + + + + + + + + + + + + + + + + + + + + + + + + +
| EPS_PGF_ORB | 能量 (a.u.) | 与上一个的误差 |
| --- | --- | --- |
| 1.0E-13 | -8402.872803898026177 | |
| 1.0E-15 | -8402.872803587537419 | -3.1E-07 |
| 1.0E-17 | -8402.872803510470476 | -7.7E-08 |
+

一般的SCF收敛限在3.0E-7,能量基本也在这个量级以下,因此能量收敛需要达到1.0E-7以下最好。所以我们选择1.0E-15作为EPS_PGF_ORB的值。

+

ADMM基组的收敛

+

与EPS_PGF_ORB类似的是ADMM基组的收敛。对于同一种元素, CP2K提供了多种基组,例如cFIT10, cFIT11, cFIT12 等。测试的方法就是逐渐增大ADMM基组。能量误差必须归一到每个原子,通常保证误差在1meV/atom的量级最好。

+

以SrTiO3体系为例

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| ADMM_BASIS For Ti | 能量 (a.u.) | 与上一个的误差 (meV/atom) | 原子数 |
| --- | --- | --- | --- |
| cFIT10 | -9062.291421862293646 | | 368 |
| cFIT11 | -9062.255359275355659 | -2.6 | 368 |
| cFIT12 | -9062.260056088771307 | 0.3 | 368 |
| cFIT13 | -9062.210205928951837 | -3.6 | 368 |
+

这个时候选择**cFIT10**或者**cFIT11**即可
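上表中"与上一个的误差"可以直接由两次计算的总能量差换算得到,再归一到每个原子后与 1 meV/atom 的判据比较,例如(数值取自上表):

```python
HARTREE_TO_MEV = 27.211386245988 * 1000      # 1 Hartree ≈ 27.2114 eV

e_cfit10 = -9062.291421862293646             # 总能量 (a.u.)
e_cfit11 = -9062.255359275355659
natoms = 368
diff = (e_cfit10 - e_cfit11) * HARTREE_TO_MEV / natoms
print(f"{diff:.2f} meV/atom")                # 约 -2.7,与表中 -2.6 meV/atom 的量级一致
```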

+

一些元素推荐的ADMM

+

笔者亲测,通常与体系关系不大。

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| 元素 | ADMM基组 |
| --- | --- |
| O | cFIT3 |
| H | cFIT3 |
| Ti | cFIT11 |
| Cd | cFIT10 |
| Sn | cFIT9 |
| Pb | cFIT9 |
| Sr | cFIT9 |
| Pt | cFIT10 |
| Mg | cpFIT3 |
| Ba | cFIT9 |
| Na | cFIT3 |
| Ta | cFIT10 |
+

其他Warning处理

+

其他的Warning在官方文档中有提过 +杂化泛函计算Warning

+

Cutoff Radius Warning

*** WARNING in hfx_types.F:1287 :: Periodic Hartree Fock calculation ***
*** requested with use of a truncated or shortrange potential. The cutoff ***
*** radius is larger than half the minimal cell dimension. This may lead ***
*** to unphysical total energies. Reduce the cutoff radius in order to ***
*** avoid possible problems. ***

+

这是由于在周期边界条件下, CP2K只取HF exchange短程部分,而长程部分则由DFT exchange来补充,因此需要设定短程部分的截断长度,即 Cutoff Radius。对于该Warning有如下三种处理方式。

+
    +
  • 如果使用HSE06,请忽视,因为这个cutoff由omega确定。
  • +
  • 减少CUTOFF_RADIUS,如果你用的是PBE0-TC
  • +
  • 用更大周期边界盒子
  • +
+

参考

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-neb/index.html b/en/wiki/software_usage/cp2k/cp2k-neb/index.html new file mode 100644 index 00000000..2feba268 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-neb/index.html @@ -0,0 +1,2947 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:NEB - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CP2K: Nudged Elastic Band

+

学习目标

+
    +
  • +

    学习资料

    +
  • +
  • +

    NEB 基本原理

    +
  • +
  • +

    CP2K NEB设置

    +
  • +
+

学习资料

+
    +
  • +

    Henkelman, G. & Jónsson, H. Improved tangent estimate in the nudged elastic band method for finding minimum energy paths and saddle points. J. Chem. Phys. 113, 9978–9985 (2000).

    +
  • +
  • +

    Henkelman, G., Uberuaga, B. P. & Jónsson, H. A climbing image nudged elastic band method for finding saddle points and minimum energy paths. J Chem Phys 113, 9901–9904 (2000).

    +
  • +
+

NEB 基本原理

+

当确定反应物和产物结构后可以找到从反应物到产物的能量最小路径(Minimum Energy Path, MEP). 处于能量最小路径上的任意一个结构中,作用在原子上并垂直于MEP的力分量都为0. NEB是一种寻找MEP的方法。首先NEB在反应物结构和产物结构之间建立一套结构(称为image或者replica)。 这些相邻的image之间用弹簧力连接(spring force),形成一条类橡皮筋(Elastic Band)的构造。其中每个image受到垂直于MEP的真正的力同时受到平行于MEP的弹簧力,通过最小化这个Band的力,即可得到MEP。

+

CP2K NEB设置

+

首先把RUN_TYPE设置为BAND +

&GLOBAL
+    RUN_TYPE BAND
+&END GLOBAL
+

+

其次是MOTION部分 +

&MOTION
+    &BAND
+        # 提交任务时 总cpu数目为NPROC_REP*NUMBER_OF_REPLICA
+        NPROC_REP 24 #一个image要用多少cpu来算
+        NUMBER_OF_REPLICA 8 #创造多少image, 这里是包含初始结构和最终结构的数目。
+        BAND_TYPE CI-NEB #使用Climbing Image NEB方法,具体内容参照文献SEC. IV
+        K_SPRING 0.05 #弹簧振子的强度,理论上弹簧振子强度不会影响优化的结果
+        &CONVERGENCE_CONTROL # 跟结构优化类似
+            MAX_FORCE 0.0030
+            RMS_FORCE 0.0050
+            MAX_DR 0.002
+            RMS_DR 0.005
+        &END CONVERGENCE_CONTROL
+        ROTATE_FRAMES F
+        ALIGN_FRAMES F
+        &CI_NEB 
+            NSTEPS_IT  2 # 在变成CI之前,需要跑正常NEB, 这里设置跑正常NEB的回合数目
+        &END CI_NEB
+        &OPTIMIZE_BAND
+            OPT_TYPE DIIS
+            &DIIS
+                NO_LS T
+                MAX_STEPS 1000
+                N_DIIS 3
+            &END DIIS
+        &END OPTIMIZE_BAND
+        &REPLICA #初始结构的坐标
+            &COORD
+            @include init.xyz # 第一种方法,只包含坐标xyz,不需要元素
+            &END COORD
+        &END REPLICA
+        &REPLICA # 最终结构的坐标
+            &COORD
+            @include fin.xyz # 只包含坐标xyz,不需要元素,
+            &END COORD
+        &END REPLICA
+        &REPLICA # 最终结构的坐标
+            COORD_FILE_NAME ./tr7.xyz # 第二种方法,这个是正常的xyz文件
+        &END REPLICA
+        &PROGRAM_RUN_INFO # 看REPLICA间的距离
+            INITIAL_CONFIGURATION_INFO
+        &END
+    &END BAND
+&END MOTION
+
+注意到如果只定义两个REPLICA section,并且小于你的NUMBER_OF_REPLICA,那么剩余的REPLICA结构将会由CP2K自己生成。 +如果定义的REPLICA section数目等于NUMBER_OF_REPLICA,那么CP2K将不会自动生成REPLICA的结构。

+

重新启动NEB

+

在cp2k input文件里加入EXT_RESTART section。并且将xxx-1.restart改成你的真实的restart文件。 +

&EXT_RESTART
+  RESTART_BAND
+  RESTART_FILE_NAME   xxx-1.restart
+&END
+
+同时,我们可以利用之前的波函数RESTART,只需要在FORCE_EVAL/DFT/SCF下设置 +
SCF_GUESS RESTART
+
+即可。 +假设你的PROJECT NAME 是 water,见GLOBAL/PROJECT,同时你的NUMBER_OF_REPLICA为8, 那么你将会生成如下文件 +
water-BAND01-RESTART.wfn
+water-BAND02-RESTART.wfn
+water-BAND03-RESTART.wfn
+water-BAND04-RESTART.wfn
+water-BAND05-RESTART.wfn
+water-BAND06-RESTART.wfn
+water-BAND07-RESTART.wfn
+water-BAND08-RESTART.wfn
+
+其中BAND后面的数字代表REPLICA的序数。在重新启动时,则会自动读取这些波函数。如果波函数是通过其他方法生成或者提前准备好的,也可以通过更改波函数的名称使其符合上述规则来启动NEB。
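按上述命名规则,可以用一个小脚本把提前准备好的波函数批量复制并重命名给各个 REPLICA。其中 converged-RESTART.wfn 为假设的、已收敛的波函数文件名:

```python
import shutil

project = "water"            # 与 GLOBAL/PROJECT 保持一致
n_replica = 8                # 与 NUMBER_OF_REPLICA 保持一致
for i in range(1, n_replica + 1):
    shutil.copy("converged-RESTART.wfn", f"{project}-BAND{i:02d}-RESTART.wfn")
```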

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-reftraj/index.html b/en/wiki/software_usage/cp2k/cp2k-reftraj/index.html new file mode 100644 index 00000000..f8ffb778 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-reftraj/index.html @@ -0,0 +1,2987 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:REFTRAJ根据已有MD轨迹计算 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + +

根据已有轨迹运行CP2K分子动力学计算

+

学习目标

+
    +
  • +

    CP2K 分子动力学计算的输入文件

    +
  • +
  • +

    如何根据已有的CP2K轨迹进行计算

    +
  • +
+

学习资料

+

CP2K官方手册:Section MD

+

CP2K官方练习:AIMD of bulk liquid water

+

CP2K MD Section 的输入文件

+

请先了解CP2K的输入文件语法,指路:CP2K:能量与力的计算

+

CP2K 的输入文件由不同的 SECTION 组成,而每个 SECTION 下级有可以包含 SUBSECTIONKEYWORDS,这些不同等级的 SECTION 和 KEYWORD 都是大写英文单词。一份输入文件的语法如下:

+
&SECTION
+  &SUSECTION
+  ...
+  &END SUBSECTION
+  KEYWORD1 <value>
+  KEYWORD2 <value>
+  ...
+&END SECTION
+
+

而如果希望用CP2K进行MD计算,需要根据体系的需要,配置CP2K:能量与力的计算中介绍的 GLOBALFORCE_EVAL 这两部分,并且将 SECTION GLOBAL 下的关键字 RUN_TYPE 改为MD

+
&GLOBAL
+  ...
+  RUN_TYPE MD <---- 运行MD任务请将 RUN_TYPE 改为 MD
+&END GLOBAL
+
+

此外,还需要在配置文件 input.inp 中写入 :

+
    +
  • MOTION: 包含如何演变原子核(例如MD),控制输出什么数据
  • +
+
+

SECTION in input.inp. This section defines a set of tool connected with the motion of the nuclei.

+
+
    +
  • MD: 包含了一些分子动力学模拟的基本参数,如选择什么系综(ensemble)、温度、步长和总步数等。
  • +
+
+

SUBSECTION in MOTION. This section defines the whole set of parameters needed perform an MD run.

+
+

一个简单的 MOTION 部分的例子

+
&MOTION 
+  &MD
+    ENSEMBLE NVE
+    STEPS 10
+    TIMESTEP 0.5
+    TEMPERATURE 300.0
+  &END MD
+  &PRINT
+    &CELL
+      &EACH
+        MD 1
+      &END EACH
+    &END CELL
+    &FORCES
+      &EACH
+        MD 1
+      &END EACH
+    &END FORCES
+    &TRAJECTORY
+      &EACH
+        MD 1
+      &END EACH
+    &END TRAJECTORY
+    &VELOCITIES
+      &EACH
+        MD 1
+      &END EACH
+    &END VELOCITIES
+  &END PRINT
+&END MOTION
+
+

以上例子非常直接,一行一行读下来字面意思就是MD的参数设置。值得注意的是在 PRINT 部分中的 &EACH MD 1 &END EACH 控制的是MD打印输出的频率,指的是每一步MD模拟对应一个输出,设置成3就是每三步输出一次。EACH中MD输出频率缺省值是1

+
+

Warning

+

为了方便分析,CELL 的输出频率应该和 TRAJECTORY 的保持一致

+
+

根据已有轨迹进行MD计算

+

有的时候,我们需要对已有的一条MD轨迹进行计算:

+
    +
  • +

    对机器学习势函数生成的MD轨迹进行精确计算

    +
  • +
  • +

    更改FORCE_EVAL 部分的参数,提升已有轨迹能量和力的计算的精度

    +
  • +
  • +

    ……

    +
  • +
+

我们可以在CP2K输入文件的 MD SECTION 下加入REFTRAJ SECTION来实现对已有轨迹的计算。

+

以TiO2为例子,需要在提交任务的目录下准备:

+
tree
+.
+├── cp2k.lsf                    <---- cp2k 任务提交脚本(/data/share/base/scripts/cp2k.lsf) 
+├── input.inp               <---- cp2k 输入文件
+├── reftraj.xyz       <---- 已有的轨迹
+└── rutile.xyz          <---- 可以是轨迹中的一帧结构
+
+0 directories, 4 files
+
+

其中 rutile.xyz 对应的是输入文件input.inpSUBSYS 中指定盒子中的原子坐标文件,可以直接选用已有轨迹中的某一帧数据。

+

针对这一任务,在 MOTION 部分写入

+
&MOTION
+  &MD
+    &REFTRAJ
+      TRAJ_FILE_NAME reftraj.xyz
+      EVAL_ENERGY_FORCES .TRUE.
+      EVAL_FORCES .TRUE.
+      FIRST_SNAPSHOT 1
+      LAST_SNAPSHOT 50
+      STRIDE 1
+    &END REFTRAJ
+    ...
+  &END MD
+  &PRINT
+    ...
+  &END PRINT
+
+

其中 TRAJ_FILE_NAME 关键字指定了当前文件夹下的 reftraj.xyz 做为需要计算的轨迹。

+

值得注意的是,CP2K输入文件中给关键字赋逻辑值时用 .TRUE..FALSE.,而 EVAL_ENERGY_FORCES 和 EVAL_FORCES 的缺省值是 .FALSE.,因此如果要计算能量和力必须要明确指定这两个关键字。

+

FIRST_SNAPSHOT , LAST_SNAPSHOTSTRIDE这一组关键词指定了如何对 reftraj.xyz 的结构进行计算。指的是从已有轨迹的第 FIRST_SNAPSHOT 帧到第 LAST_SNAPSHOT 帧结构,每 STRIDE 帧结构计算一次。而对于本例子,reftraj.xyz中共有50帧结构,因此以上配置文件表明从已有轨迹的第 1 帧到第 50 帧结构,每 1 帧结构计算一次,所以这样设置会计算已有轨迹中的每一个结构的能量和力。
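已有轨迹的总帧数(用于确定 LAST_SNAPSHOT)以及 SUBSYS 中引用的单帧结构文件(如 rutile.xyz),都可以借助 ASE 简单得到,例如:

```python
from ase.io import read, write

frames = read("reftraj.xyz", index=":")   # 读入全部帧
print("共", len(frames), "帧")            # 用于设置 LAST_SNAPSHOT
write("rutile.xyz", frames[0])            # 取第一帧作为 SUBSYS 的坐标文件
```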

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-scan/index.html b/en/wiki/software_usage/cp2k/cp2k-scan/index.html new file mode 100644 index 00000000..096c32f1 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-scan/index.html @@ -0,0 +1,2888 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:SCAN泛函 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CP2K: SCAN泛函

+

学习目标

+
    +
  • +

    学习资料

    +
  • +
  • +

    SCAN基本原理

    +
  • +
  • +

    CP2K SCAN泛函设置

    +
  • +
  • +

    CP2K SCAN泛函的问题

    +
  • +
+

学习资料

+

Sun, J., Remsing, R. C., Zhang, Y., Sun, Z., Ruzsinszky, A., Peng, H., … Perdew, J. P. (2016). Accurate first-principles structures and energies of diversely bonded systems from an efficient density functional. Nature Chemistry, 8(9), 831–836. https://doi.org/10.1038/nchem.2535

+

Sun, J., Remsing, R. C., Zhang, Y., Sun, Z., Ruzsinszky, A., Peng, H., … Perdew, J. P. (2015). SCAN: An Efficient Density Functional Yielding Accurate Structures and Energies of Diversely-Bonded Materials, 1–19. Retrieved from http://arxiv.org/abs/1511.01089

+

SCAN基本原理

+

SCAN泛函属于MetaGGA的一类。加入了密度梯度的二阶导数。近年来,SCAN泛函被用于水的计算研究逐渐增多,同时对于半导体**体相**计算的能带也比较准。

+

CP2K SCAN泛函设置

+

SCAN泛函并不是CP2K源码自带,实际是引用了libxc中的泛函函数。只有CP2K4.1以上版本的libxc库才能够使用SCAN泛函

+
&XC_FUNCTIONAL
+     &LIBXC
+        FUNCTIONAL MGGA_X_SCAN
+     &END LIBXC
+     &LIBXC
+        FUNCTIONAL MGGA_C_SCAN
+     &END LIBXC
+&END XC_FUNCTIONAL
+
+

SCAN泛函有一套自己对应的赝势,放在Hutter的github库中。

+

具体可以参考以下谷歌论坛链接

+

https://github.com/juerghutter/GTH/blob/master/SCAN/POTENTIAL

+

主集群上我已经放置了一份SCAN赝势。名称为GTH-SCAN-POTENTIAL

+

cp2k 输入文件设置为如下即可:

+
POTENTIAL_FILE_NAME GTH-SCAN-POTENTIAL
+
+

CP2K SCAN泛函的问题

+

SCAN泛函对于有大量真空的体系似乎非常难以收敛。笔者至今试用过了Hematite Slab模型和SrTiO3模型,均无法正常收敛。其他意见参考谷歌论坛。如有任何建议建议快速联系笔者。

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-slab/index.html b/en/wiki/software_usage/cp2k/cp2k-slab/index.html new file mode 100644 index 00000000..6f53e796 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-slab/index.html @@ -0,0 +1,2861 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:Slab计算 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CP2K: Slab计算

+

学习目标

+
    +
  • +

    什么是Slab模型

    +
  • +
  • +

    CP2K 偶极矫正

    +
  • +
  • +

    其他去除周期性的方式

    +
  • +
+

什么是Slab模型

+

Slab模型是在三维周期性边界条件下计算固体表面的一种方法。通常选择z方向为表面朝向,即模型的z方向中有一半是真空(无原子)另一半为固体模型。如下图为一个典型的Slab模型:

+

(图:一个典型的 Slab 模型示意图)

+
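建好 Slab 模型后,可以用 ASE 粗略检查 z 方向的真空层厚度是否足够。下面是一个简单示意,slab.cif 为假设的文件名,并假设表面法向沿 z 方向:

```python
from ase.io import read

slab = read("slab.cif")
z = slab.positions[:, 2]
slab_thickness = z.max() - z.min()                 # 固体部分厚度
vacuum = slab.cell[2, 2] - slab_thickness          # 真空层厚度
print(f"slab 厚度 {slab_thickness:.2f} Å, 真空层厚度 {vacuum:.2f} Å")
```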

CP2K 偶极矫正

+

Slab模型虽然是代表表面,但是实际上在z方向是固体-真空-固体-真空-...的交替。如果我们建立的Slab模型在z方向是非对称的,模型就会产生一个沿z方向的偶极。偶极会产生静电势,静电势接着会影响模型的镜像(周期性边界条件)。最后算出来的模型的总能量和力与真实情况是不相符的。因此我们需要方法去矫正这种虚假的静电影响。

+

一种常用的方法就是偶极矫正,在真空部分加入一个超窄的但是方向相反的偶极。这样一来,固体模型产生的偶极和真空中的偶极就会相互抵消。模型和其镜像之间的静电势影响就会抵消。

+

具体的设置如下:

+

在FORCE_EVAL/QS/DFT下开启

+
SURFACE_DIPOLE_CORRECTION .TRUE.
+
+

其他去除周期性的方式

+

表面偶极矫正仅有z方向可以去除,若要去除其他三个方向的周期,可以采用另外的设置

+

在FORCE_EVAL/SUBSYS/CELL下

+
PERIODIC NONE
+
+

在FORCE_EVAL/DFT/POISSON下

+
PERIODIC NONE
+POISSON_SOLVER MT (其他也可以 笔者仅试过MT)
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-tools/index.html b/en/wiki/software_usage/cp2k/cp2k-tools/index.html new file mode 100644 index 00000000..b3c07186 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-tools/index.html @@ -0,0 +1,2784 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:tools - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CP2K

+

cp2k有许多方便的工具。可以帮我们脚本化工作流程,节约时间。

+

PYCP2K: 脚本化输入文件生成工具

+

主要使用Python语言,可以把cp2k输入工作集成为Python +具体使用链接看这里

+

要注意的是,他这里只适用v5.1以前版本的 cp2k。如果我们使用例如v7.1以上的版本,那么可以自己生成对应的包。 +详情见Pycp2k github的 README 中 Manual installation 部分。 +在我们集群,要生成 xml 文件,首先module load cp2k/7.1,然后使用cp2k.popt --xml命令即可得到 xml 文件。 +其他按照 Manual installation 的指示即可。
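下面给出一个非常简化的 PYCP2K 用法示意(基于其 README 的大致思路,属性与方法名以 PYCP2K 实际文档为准,这里仅作思路参考):

```python
from pycp2k import CP2K

calc = CP2K()                                   # 创建 CP2K 输入文件生成器
calc.project_name = "test"
calc.working_directory = "./"

force_eval = calc.CP2K_INPUT.FORCE_EVAL_add()   # 对应 &FORCE_EVAL ... &END FORCE_EVAL
force_eval.Method = "QUICKSTEP"

calc.write_input_file()                         # 写出 CP2K 输入文件
```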

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k-zpe/index.html b/en/wiki/software_usage/cp2k/cp2k-zpe/index.html new file mode 100644 index 00000000..b03fc152 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k-zpe/index.html @@ -0,0 +1,2960 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:ZPE - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

CP2K: ZPE(Zero-point energy)

+

学习目标

+
    +
  • +

    ZPE基本原理

    +
  • +
  • +

    CP2K Frequency 设置

    +
  • +
  • +

    CP2K Frequency计算结果检查

    +
  • +
  • +

    ZPE求解

    +
  • +
  • +

    注意事项

    +
  • +
+

ZPE基本原理

+

零点能(ZPE)是量子力学系统可能具有的最低可能能量,此时系统所处的态称为基态;所有量子力学系统都有零点能。与经典力学不同,量子系统在Heisenberg不确定性原理所描述的最低能量状态下不断波动。

+

我们在计算吉布斯自由能(\(G=E_{DFT}+ZPE-TS^\circ\))时会涉及到零点振动能,零点振动能的计算公式为:

+

\(ZPE=\sum_{i=0}^{3N}\frac{\hbar\omega}{2}\)

+

因此我们需借助CP2K计算得到振动频率\(\omega\)

+

Boyer, T. H. Quantum Energy and Long-Range Forces. Ann. Phys 1970, 56, 474–503.

+

Girod, M.; Grammaticos, B. The Zero-Point Energy Correction and Its Effect on Nuclear Dynamics. Nucl. Physics, Sect. A 1979, 330 (1), 40–52. https://doi.org/10.1016/0375-9474(79)90535-9.

+

CP2K Frequency 设置

+
    +
  1. 设置CP2K INPUT / GLOBAL / RUN_TYPE
  2. +
+

RUN_TYPE  VIBRATIONAL_ANALYSIS
+
+2. 在 CP2K INPUT / VIBRATIONAL_ANALYSIS 下设置:

+
&VIBRATIONAL_ANALYSIS
+  NPROC_REP 192  # 总核数=节点数*核数(通常与提交作业cp2k.lsf文件中的核数一致)
+  DX 0.02
+  FULLY_PERIODIC
+  &PRINT
+    &MOLDEN_VIB
+    &END
+    &CARTESIAN_EIGS
+    &END
+    &PROGRAM_RUN_INFO
+      &EACH
+        REPLICA_EVAL 1
+      &END
+    &END
+  &END PRINT
+&END VIBRATIONAL_ANALYSIS
+
+
    +
  3. 在 CP2K INPUT / MOTION 中固定不需要参与振动分析的原子
+
&MOTION
+  &CONSTRAINT
+    &FIXED_ATOMS
+      LIST 1..320 # 计算时需要固定的无关原子对应的序号
+    &END
+  &END
+&END MOTION
+
+

CP2K Frequency计算结果检查

+

正常计算结束会输出project-VIBRATIONS-1.mol文件,里面[FREQ]模块即为计算得到的frequency(unit:\(cm^{-1}\))

+
[FREQ]
+      204.783042
+      296.784083
+      379.892297
+      414.559665
+      913.554709
+     3650.225071
+
+

在CP2K计算NEB的过程中寻找过渡态时,过渡态的frequency中会有虚频,对应负值:

+
[FREQ]
+     -150.004617
+       76.011787
+       90.652110
+      105.659737
+      114.363774
+      118.342870
+      125.738357
+      ……
+
+

ZPE求解

+

\(ZPE=\sum_{i=0}^{3N}\frac{\hbar\omega_i}{2}\)

+

CP2K计算得到的Frequence是波长的倒数\(\frac{1}{\lambda}\),单位为\(cm^{-1}\),根据\(\frac{1}{\omega}=\frac{\lambda}{c}\)可以计算得到振动频率\(\omega\)

+

N对应计算的原子个数。
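结合上面 [FREQ] 的输出,可以用几行 Python 把波数换算成 ZPE(示例数值取自上文;若存在虚频(负值),应先剔除并检查结构):

```python
import numpy as np

freqs_cm = np.array([204.783042, 296.784083, 379.892297,
                     414.559665, 913.554709, 3650.225071])   # 来自 *-VIBRATIONS-1.mol 的 [FREQ]
freqs_cm = freqs_cm[freqs_cm > 0]            # 剔除虚频(负值)
CM_TO_EV = 1.239841984e-4                    # 1 cm^-1 对应的能量 (eV)
zpe = 0.5 * np.sum(freqs_cm * CM_TO_EV)      # ZPE = sum(hbar*omega/2)
print(f"ZPE = {zpe:.4f} eV")
```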

+

注意事项

+

(1) 由于PBC条件的限制,CP2K的Frequence计算结果中不包含平动频率,是否包含转动频率取决于体系的状态(CONSTRAINT),通常振动频率远大于转动频率。

+

(2) 计算真空中一个分子的Frequence时,要去除盒子所有方向的周期性,通常可以用\(20Å\times20Å\times20Å\)的盒子进行测试。

+

(3) 使用CP2K计算一个稳定结构式的频率时,也常会出现多个虚频。这是CP2K计算使用GTH赝势时存在的一个问题。详细内容请参考(https://groups.google.com/forum/?fromgroups#!topic/cp2k/DVCV0epl7Wo)

+

解决方案有四种:

+

a. 使用NLCC赝势(http://arxiv.org/abs/1212.6011)。不过NLCC赝势很不完整,只有B-Cl的元素有,且只提供了PBE泛函的赝势。

+

b. 增大CUTOFF,使用600 Ry以上的CUTOFF。

+

c. 在XC_GRID部分使用平滑参数SMOOTING,不推荐使用。

+

d. 在XC_GRID部分使用USE_FINER_GRID。加上这个参数后,XC部分的格点的精度提高为4*CUTOFF。

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/cp2k/cp2k/index.html b/en/wiki/software_usage/cp2k/cp2k/index.html new file mode 100644 index 00000000..c7cb8e83 --- /dev/null +++ b/en/wiki/software_usage/cp2k/cp2k/index.html @@ -0,0 +1,3003 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K入门 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+ +
+ + + +
+
+ + + + + + + +

CP2K 入门

+

学习目标

+
    +
  • 设置CP2K环境变量
  • +
  • 书写CP2K的输入文件
  • +
  • 检查CP2K输入文件
  • +
  • 单点能计算
  • +
  • 结构优化
  • +
  • 分子动力学
  • +
+

CP2K的特色

+

CP2K同时使用了平面波基组和高斯基组,因此可以在傅立叶空间里描述长程作用力和实空间里描述局域的波函数。使用CP2K进行分子动力学(MD)运算效率很高。CP2K使用了单k点的计算方式,又称为gamma approximation,因此在早期CP2K版本中没有K点的设置。近年仅在单点能中加入了k点的计算。

+

设置CP2K环境变量

+

哪里获取Basis和PseudoPotential文件

+

Github

+

省略路径

+

CP2K需要用到赝势和基组文件。假设这些文件都存在于目录/somewhere/basis/下。可以通过设置环境变量CP2K_DATA_DIR来让CP2K自己找到文件。

+

打开自己的 ~/.bashrc文件. 添加以下命令

+
export CP2K_DATA_DIR=/somewhere/basis/
+
+

之后在使用赝势和基组时可以直接写文件名字而不需要指出路径。

+

书写CP2K输入文件

+

CP2K输入文件的书写在CP2K官网中有许多例子,请大家自己上网学习。

+

除了简单的SECTION, VALUE的书写形式以外,CP2K还提供了一些简单的变量设置条件判断等设定方式,具体参考CP2K输入参考手册

+

什么是好的输入文件习惯?

+

CP2K的输入文件参数设置繁杂,往往我们是第一次从头到尾写一遍,或者直接拿别人的input修改后使用。但是这样会造成书写错误或者设置错误频繁发生,提交超算之后被退回来的话排队时间就浪费了。在此笔者有几个建议:

+
    +
  1. 使用cp2k.popt -c input.inp 检查输入文件的语法
  2. +
  3. 使用注释(#)来提醒输入文件的设置
  4. +
  5. 使用变量和条件判断来简单的开关CP2K的功能
  6. +
+
#a good example of input file
+#set variable and condition to open/close section in CP2K
+#if variable is 0 in condition, it is false, otherwise it is true
+@SET HSE06 0
+
+########## This part is HSE06 ##########
+@IF ${HSE06}
+            &XC_FUNCTIONAL
+                &PBE
+                    SCALE_X 0.0
+                    SCALE_C 1.0
+                &END PBE
+                &XWPBE
+                    SCALE_X -0.25
+                    SCALE_X0 1.0
+                    OMEGA 0.11
+                &END XWPBE
+            &END XC_FUNCTIONAL
+            &HF
+                &SCREENING
+                    EPS_SCHWARZ 1.0E-6
+                    SCREEN_ON_INITIAL_P FALSE
+                &END SCREENING
+                &INTERACTION_POTENTIAL
+                    POTENTIAL_TYPE SHORTRANGE
+                    OMEGA 0.11
+                    T_C_G_DATA t_c_g.dat
+                &END INTERACTION_POTENTIAL
+                &MEMORY
+                    MAX_MEMORY 10000
+                    EPS_STORAGE_SCALING 0.1
+                &END MEMORY
+                &PERIODIC
+                     NUMBER_OF_SHELLS 0
+                &END PERIODIC
+                FRACTION 0.25
+            &END HF
+@ENDIF
+
+
+

Warning

+

注释要单独占一行,代码和注释混合会导致input读入错误

+
+

检查CP2K输入文件

+

在服务器上,需要通过module load cp2k/版本号 来启动CP2K软件。Load后,可以使用cp2k.popt命令,这是CP2K软件的主要程序。

+

CP2K的计算运行是

+
cp2k.popt input.inp > output
+
+

当然在服务器上需要通过提交脚本来执行命令。

+

由于CP2K输入文件有时较为庞大,经常会有误写或者语法错误的情况发生,为了避免提交之后被退回来,可以先使用命令检查:

+
cp2k.popt -c input.inp
+
+
+

Warning

+

cp2k.popt -c 仅检查是否有语法错误,实际运行的错误不会检查出来

+
+

单点能计算

+

参见官网的例子: CP2K能量和力的计算

+

参见官网的例子: CP2K中CUTOFF和REL_CUTOFF的测试

+

结构优化

+

建设中

+

分子动力学

+

建设中

+

CP2K的一些常用工具

+

CP2K Vim input 插件

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/default_version/index.html b/en/wiki/software_usage/default_version/index.html new file mode 100644 index 00000000..bfb93eb2 --- /dev/null +++ b/en/wiki/software_usage/default_version/index.html @@ -0,0 +1,2756 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 软件默认版本推荐 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

软件默认版本推荐

+

目前集群上很多软件都编译了多个版本,但由于软硬件平台、版本、环境的更新,需要对常用软件的一些版本梳理如下,并给出建议使用版本。

+

Zeus 集群采用 module 对软件环境进行管理,通常使用前需要加载环境,例如 module load vasp/5.4.4即可加载 VASP 5.4.4 版本运行所需环境。因此下文对软件推荐版本的说明,将会列出Zeus上使用的<module name>,具体使用时请自行补全为module load <module name>

+

注意如果在 ~/.bashrc~/.bash_profile 中加载了环境,如果与下述版本用到的环境存在冲突,可在提交脚本中加入module purge行进行卸载,以免产生冲突。

+

注意: CentOS 7 默认使用的 GCC 版本为4.9.4,Python 版本为2.7,Python 3 版本为 3.6,故以下涉及到上述环境若未加载,则表示使用默认环境。

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| 软件名 | 推荐版本 | 命令 | 需要调用环境 | 备注 |
| --- | --- | --- | --- | --- |
| VASP | vasp/5.4.4 | 常规计算:vasp_std;Gamma点:vasp_gam | intel/17.5.239, mpi/intel/2017.5.239 | CPU并行计算 |
| CP2K | cp2k/7.1 | 启用OpenMP:cp2k_psmp;未启用:cp2k_popt | gcc/5.5.0, intel/17.5.239, mpi/intel/2017.5.239 | CPU并行计算 |
| DeePMD-kit | deepmd/2.0-cuda11.3 | 训练:dp;跑MD:lmp_mpi | cuda/11.3, gcc/7.4.0, intel/17.5.239, mpi/intel/2017.5.239 | GPU加速势函数训练,采用的Lammps版本是20201029 |
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/experience_of_dpmd_and_dpgen/index.html b/en/wiki/software_usage/experience_of_dpmd_and_dpgen/index.html new file mode 100644 index 00000000..7c4f675d --- /dev/null +++ b/en/wiki/software_usage/experience_of_dpmd_and_dpgen/index.html @@ -0,0 +1,2963 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DPMD和DPGEN使用经验 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

DPMD和DPGEN使用经验

+

DPMD train.json参数设置和理解:

+

dp-kit 安装

+
    +
  • 如果本地有GPU,推荐使用dp-kit全包下载,总大小1G。 shell执行安装。便于在本地开发测试。
  • +
+

DeepPotential

+
    +
  1. 形象化理解sel_a:某种原子出现的概率越高,对应的sel_a越大;sel_a对应以任意原子为中心(截断半径内)能找到的该种原子的最大数目
  2. +
  3. neuron network和resnet大小一般不修改;同时训练多个势函数需要修改随机种子seed
  4. +
  5. 用于实际使用的势函数需要well-train,需要“长训练”,常用设置为:
  6. +
+
"learning_rate" - "decay_steps"20000,
+"stop_batch": 400000, # 使用200000 步也大致没有问题。
+
+

DPGEN 使用

+
    +
  1. 提交训练后需要跟踪train的情况。有时候由于提交后无法配置GPU资源(被其他程序占用或其他原因),导致训练输出为“nan”,需要重新提交并确保获取GPU资源。
  2. +
  3. V100卡上短训练一般在4~8小时。长训练是短训练10倍时间。理论上dpmd方法训练时间随元素类型数目线性增长。(MN,M原子数,N类型数)。
  4. +
  5. 用于训练的数据要正确的设置type.raw。尤其注意初始数据的处理,保证元素顺序,编号正确。
  6. +
  7. 注意测试k-points,dpgen在vasp的INCAR中使用kspacingkgamma来决定kpoints。一般要求能量收敛到 1 meV/atom ,力分量收敛到 5 meV/A 以下。
  8. +
  9. dpgen 的exploration步骤通过md采样,探索步数一般随着迭代增加到10000~20000即可。一般增加随机的md起点数目比增加探索步数采样更高效。这是最关键的步骤,设计exploration策略时需要考虑实际使用时要探索体系和采样空间相类似。
  10. +
  11. 通过修改machine.json对应配置让dpgen报错停下,用于数据分析和检测。例如设置错误的端口/IP使任务在某步停下。
  12. +
  13. 如果训练了较旧版本的势函数,可以用更新版本从checkpoint开始,再增加2000步后freeze。(版本差异不能过大)
  14. +
  15. 神经网络拟合能力是很强的,不consistent的数据(不同k点)也能拟合出非常小的能量/力误差。所以,要注意使用测试体系检查势函数质量,测试体系取决于所研究的问题。也要注意输入的DFT数据做好充分的计算参数测试。
  16. +
  17. 提交任务后lcurve.out出现NaN;原因可能是内存或gpu没有正确分配。需要重启。
  18. +
  19. dp restart/freeze 要保持在相同的路径下,如果改变了文件夹位置/名称,可以修改checkpoint指明model路径。
  20. +
  21. MD同时使用四个模型评估不影响速度(在显存不占满的情况下)。
  22. +
  23. 使用多个模型MD,在旧版本中是用平均值,新版本>1.0是用第一个势函数值。
  24. +
  25. 注意可视化每轮的训练结果,包括学习曲线(训练误差随batch下降趋势),model_deviation的分布,单点能的收敛和结构正确,对每轮的结果进行分析。学习曲线的绘制示例见下方代码。
  26. +
+
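学习曲线可以直接从训练输出的 lcurve.out 读出并用 matplotlib 画出,例如(具体列名以实际文件的表头为准):

```python
import numpy as np
import matplotlib.pyplot as plt

data = np.genfromtxt("lcurve.out", names=True)      # 表头行("# step ...")会被解析为列名
for name in data.dtype.names[1:]:
    plt.loglog(data["step"], data[name], label=name)
plt.xlabel("step")
plt.legend()
plt.show()
```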

DFT单点能计算经验

+
    +
  • 一般对体系影响最大的是k点,需要测试不同的k点,k点数目和计算成本是对应的
  • +
  • vasp擅长小体系多k点并行;大体系少k点会显著较慢;可以使用kspacing控制,参照
  • +
+
from pymatgen import Structure
+from math import pi
+import numpy as np
+import pandas as pd
+stc = Structure.from_file('POSCAR')
+a,b,c = stc.lattice.abc
+# CASTEP 和 VASP 计算KSPACING不同,差一个常数2pi
+kspacing_range = np.linspace(0.1, 0.6, 21)
+kpoint_a = np.ceil( 2*pi/kspacing_range/a).astype('int')
+kpoint_b = np.ceil( 2*pi/kspacing_range/b).astype('int')
+kpoint_c = np.ceil( 2*pi/kspacing_range/c).astype('int')
+
+df = pd.DataFrame({'kspacing': kspacing_range, 'a': kpoint_a, 'b': kpoint_b, 'c': kpoint_c})
+print(df) # 查看不同kspacing 对应的K点数目
+
+
    +
  • 主要的INCAR计算参数是
      +
    • ENCUT(一般取600/650保证quality,对计算速度影响不明显);
    • +
    • ISMEAR=0(ISMEAR=-5的 Bloch方法需要k不小于4个,有时候不能用,测试表明,二者能量/力误差在1e-3以下,ISMEAR=0计算成本更低)
    • +
    • spin会对体系有非常大影响,一种brute force做法是直接给一个好的初猜(代码辅助),
    • +
    • LASPH可以考虑加入,提高精度,极少量成本。
    • +
    • LWAVE,LCHARG关掉,减少计算时间和储存空间浪费。
    • +
    +
  • +
  • 测试计算的思路应当是:先选一个最贵的,再提高精度,看是否收敛,之后以此为参照,降低不同参数。在保证了精度可靠的基础上,减少计算成本
  • +
+
from ase.io import read
+at = read('OUTCAR')
+ref = read('ref/OUTCAR') # 
+dE = ref.get_potential_energy() - at.get_potential_energy() # 一般dE 小于10meV
+dEperAtom = dE/len(ref) # 要求小于1meV/atom
+dF = ref.get_forces() - at.get_forces()
+print(dF.max(), dF.min()) # 要求在5meV/A以下,尽可能在1meV/A 以下
+
+
    +
  1. LREAL = auto,对于大体系,推荐是real(auto默认会设置),对于GPU,必须要real。由于求积分方法差异,在实空间计算会引入1·2meV/atom的系统误差。
  2. +
  3. VASP输出的结构只要是电子步收敛的,都可以添加到训练集。需要注意添加了错误的结构(能量绝对值极大)会导致训练误差无法下降。
  4. +
  5. 如果VASP计算只有单K点,使用vasp_gam,相对vasp_std可以节省⅙ - ⅓的时间。
  6. +
+

文件空间管理

+

随着模拟时间和模拟体系扩增,储存文件占用的空间非常巨大。在储存文件时候注意: +1. 保留必要的输入和输出文件:包括初始结构(data.lmp),计算设置(input.lammps),计算输出(log),轨迹(traj) +2. 建议用如下方案压缩:

+
zip -9r -y data.zip data/   # 使用最大压缩率;保留文件相对路径压缩
+
+

也可以用npz压缩,相比zip直接压缩提高5%左右。

+
import numpy as np
+data = ...
+data = data.astype('float32') # 保存为32位不损失坐标/力等需要的精度
+np.savez_compressed('data.npz', data=data)
+data = np.load('data.npz')['data']  # 重新载入
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/i-pi/index.html b/en/wiki/software_usage/i-pi/index.html new file mode 100644 index 00000000..0bee3a07 --- /dev/null +++ b/en/wiki/software_usage/i-pi/index.html @@ -0,0 +1,2661 @@ + + + + + + + + + + + + + + + + + + + + + + + + + I pi - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

I pi

+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/n2p2/index.html b/en/wiki/software_usage/n2p2/index.html new file mode 100644 index 00000000..d2aacd0a --- /dev/null +++ b/en/wiki/software_usage/n2p2/index.html @@ -0,0 +1,2967 @@ + + + + + + + + + + + + + + + + + + + + + + + + + n2p2 Usage Guide - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

n2p2 Usage Guide

+

Short Introduction

+

This repository provides ready-to-use software for high-dimensional neural network potentials in computational physics and chemistry.

+

The following link is for your information:

+ +
+

Warning

+

This page reflects only the experience and understanding of the author. If you find any mistakes or unclear parts, please report an issue.

+
+

Basic Principle

+

The n2p2 software is based on neural network fitting. For details on neural networks (NN), please refer to [here].

+

The extra work done by Behler and Parrinello is to build a link between the potential energy surface (PES) and the NN.

+

First, they decompose the total energy into atomic energies (\(E^{atom}\)). \(E^{atom}\) is not the energy of a neutral atom in vacuum as seen in quantum chemistry textbooks; it is simply a decomposition of the total energy into the contribution of every atom, as expressed by the following equation: $$ E_{tot}=\sum_i {E_i^{atom}} $$ where \(i\) runs over the atoms in the system.

+
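As a rough illustration only (this is not the actual n2p2 implementation, and the names below are made up for the sketch), the decomposition can be written in a few lines of Python: a per-atom network maps each atom's descriptor (symmetry-function vector) to an atomic energy, and the total energy is their sum.

# Sketch of the Behler-Parrinello energy decomposition (illustrative only)
def total_energy(symmetry_function_vectors, atomic_nn):
    # atomic_nn(G_i) returns the atomic energy E_i^atom for one atom
    return sum(atomic_nn(G_i) for G_i in symmetry_function_vectors)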

Usage in Cluster

+

n2p2 has been installed on Cluster51. Use the command module load n2p2/2.0.0 to load it. After that, you can use all the executable binaries of n2p2. An LSF script is available at /share/base/script/n2p2.lsf. An explanation of the LSF script is put in here

+

Training Procedure

+

Overview

+

The core training program in n2p2 is nnp-train. This command becomes available after loading the module n2p2/2.0.0. Simply enter the directory containing the prepared files and type nnp-train. To run it with MPI, just type mpirun nnp-train. The input files required by nnp-train include (a small pre-flight sketch follows this list):

+
    +
  • input.nn: input setup for training
  • +
  • input.data: input training set for training procedure.
  • +
  • scaling.data: scaling data from data set (you will obtain this from nnp-scaling)
  • +
+

Example input file is in the github repository <n2p2 root>/examples/nnp-train

+
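Before launching, a short pre-flight check such as the following can catch missing inputs early. This is only a sketch: it assumes the n2p2 module is already loaded so that nnp-train is on the PATH, and it simply wraps the commands described above.

import os
import subprocess

# Sketch: verify the required input files exist, then launch training with MPI
required = ["input.nn", "input.data", "scaling.data"]
missing = [f for f in required if not os.path.isfile(f)]
if missing:
    raise FileNotFoundError(f"Missing input files for nnp-train: {missing}")

subprocess.run(["mpirun", "nnp-train"], check=True)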

File: input.data

+

See input.data format here

+

Python script for conversion from cp2k xyz to input.data

+
 from ase.io import read, write
+ import os, sys
+
+ # data_path: directory contains forces.xyz and coords.xyz
+ data_path = "./test_data"
+ data_path = os.path.abspath(data_path)
+
+ #input cell parameter here, a 3x3 list
+ cell = [[10., 0., 0. ], [0., 10., 0.], [0., 0., 10.]]
+
+ #read coords and forces
+ pos_path= os.path.join(data_path, "coords.xyz")
+ frc_path= os.path.join(data_path, "forces.xyz")
+ pos = read(pos_path, index = ":")
+ frc = read(frc_path, index = ":")
+
+ out_path = os.path.join(data_path, "input.data")
+ fw = open(out_path, "w")
+ for frame_idx in range(len(pos)):
+     fw.write("begin\n")
+     for i in range(3):
+         fw.write("lattice{:10.4f}{:10.4f}{:10.4f}\n".format(cell[i][0], cell[i][1], cell[i][2]))
+     # loop over atoms of this frame; each entry pairs the coordinate Atom with the force "Atom"
+     for atom in zip(pos[frame_idx], frc[frame_idx]):
+         fw.write("atom{:12.5f}{:12.5f}{:12.5f}".format(atom[0].position[0], atom[0].position[1], atom[0].position[2]))
+         fw.write("{:3}".format(atom[0].symbol))
+         fw.write("{:10.4f}{:10.4f}".format(0.0, 0.0))  # two placeholder columns, set to 0.0
+         fw.write("{:12.5f}{:12.5f}{:12.5f}\n".format(atom[1].position[0], atom[1].position[1], atom[1].position[2]))
+     fw.write("energy{:20.4f}\n".format(pos[frame_idx].info['E']))  # total energy stored in the 'E' info field
+     fw.write("charge{:20.4f}\n".format(0.0))
+     fw.write("end\n")
+ fw.close()
+
+

nnp-scaling

+

nnp-scaling should be executed before nnp-train in order to obtain the file scaling.data. There are only two files you need:

+
    +
  • input.nn
  • +
  • input.data
  • +
+

Example input files are in the github repository <n2p2 root>/examples/nnp-scaling. One point is worth noting: the random_seed keyword in input.nn is followed by a number that initializes the pseudo-random number generator. As the name implies, these numbers are not truly random: the sequence is determined entirely by the seed (more precisely, the same seed always reproduces the same sequence). Therefore, if you would like a different random initialization of the NN parameters, set a different number for random_seed.

+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/software_usage/vmd/index.html b/en/wiki/software_usage/vmd/index.html new file mode 100644 index 00000000..a0061521 --- /dev/null +++ b/en/wiki/software_usage/vmd/index.html @@ -0,0 +1,2937 @@ + + + + + + + + + + + + + + + + + + + + + + + + + vmd 使用说明 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

VMD 使用说明

+

VMD介绍

+

VMD是分子可视化软件,主要用于查看分子动力学轨迹。

+

官网: http://www.ks.uiuc.edu/Research/vmd/

+

VMD安装

+

Linux 和 Windows

+

直接查看官网,其他无需特殊注意

+

MacOS Catalina版本以上

+

由于苹果不再支持32位的软件,因此需要64位版本的VMD。

+

已经编译好的软件从这里下载: https://www.ks.uiuc.edu/Research/vmd/mailing_list/vmd-l/31222.html

+

使用集群的VMD进行远程查看

+

现在51和52集群上均安装了VMD/1.9.3

+

使用方法是

+
module load vmd/1.9.3
+
+

然后如同在本地端使用vmd一样使用即可。

+

集群打开vmd报错

+

如果遇到报错

+
XRequest.149: BadMatch (invalid parameter attributes) 0xa00105
+XRequest.149: GLXBadContext 0xa00001
+
+

首先在集群上查看

+
glxinfo
+glxgears
+
+

如果得到报错

+
name of display: localhost:24.0
+libGL error: No matching fbConfigs or visuals found
+libGL error: failed to load driver: swrast
+X Error of failed request:  GLXBadContext
+  Major opcode of failed request:  149 (GLX)
+  Minor opcode of failed request:  6 (X_GLXIsDirect)
+  Serial number of failed request:  23
+  Current serial number in output stream:  22
+
+

+
libGL error: No matching fbConfigs or visuals found
+libGL error: failed to load driver: swrast
+X Error of failed request:  BadValue (integer parameter out of range for operation)
+  Major opcode of failed request:  149 (GLX)
+  Minor opcode of failed request:  3 (X_GLXCreateContext)
+  Value in failed request:  0x0
+  Serial number of failed request:  28
+  Current serial number in output stream:  30
+
+

那么请在本地Mac/iMac的终端上退出XQuartz,然后在本地终端里输入:

+
defaults write org.macosforge.xquartz.X11 enable_iglx -bool true 
+
+

即可解决问题

+

Ref: https://www.ks.uiuc.edu/Research/vmd/mailing_list/vmd-l/28494.html

+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/teamwork/archive_rules/index.html b/en/wiki/teamwork/archive_rules/index.html new file mode 100644 index 00000000..e1dd7fc9 --- /dev/null +++ b/en/wiki/teamwork/archive_rules/index.html @@ -0,0 +1,2944 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 如何整理项目文件 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

如何归档/整理项目文件

+

数据整理的必要性

+

为了让接手项目的人以及组里其他人能够相互参考数据,避免不必要的重复计算和浪费,我与云霈总结了一些简单的整理规则。

+

数据整理的规则

+

规则1:

+

以项目名称命名大文件夹。例:SnO2110面的机器学习

+
SnO2110-ML #项目文件名
+
+

规则2:

+

以 数字 作为目录名前缀,用 下划线命名法 来给目录命名。

+

因为计算必定伴随着 目的,所以目录名以计算的 目的 来命名。

+

数字 可以使目录按照自己的意志来排序,下划线命名法 可以让目录名便于阅读。例:

+
./SnO2110-ML
+├── 00.train_set #放训练集
+├── 01.train_set_test #做训练集测试
+├── 02.DP_Pots #放机器学习势能
+├── 03.dissociation #计算解离度
+├── 04.surface_tension #计算表面张力
+
+

注意:再下一级的目录可不按照以上方法来命名,尽量使用 下划线命名法 即可。

+
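如果想用脚本一次性建立上面这样的目录骨架,可以参考下面的示意(目录名直接取自上文的例子,仅作演示):

import os

# 示意:按规则2批量创建带数字前缀的目录
project = "SnO2110-ML"
dirs = ["00.train_set", "01.train_set_test", "02.DP_Pots",
        "03.dissociation", "04.surface_tension"]
for d in dirs:
    os.makedirs(os.path.join(project, d), exist_ok=True)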

规则3:

+

对于 作图类的目录,要保留作图的 数据、原始脚本 和 作出来的图。例:

+
01.train_set_test
+├── TrainSetEnergy.pdf #作出来的图
+├── TrainSetForce.png #作出来的图
+├── TrainingSetError.py #处理作图的脚本 可以直接运行!
+├── e.out #作图的原始数据
+└── f.out #作图的原始数据
+
+

对于 计算类的目录,要保留 必要的输出文件 和 输入文件。例:

+
02.DP_Pots #放机器学习势能
+├── v1.0 #版本号
+│   ├── graph.000.pb #势能函数,输出文件的一种
+│   ├── graph.001.pb
+│   ├── graph.002.pb
+│   ├── graph.003.pb
+│   ├── input.000.json #对应的输入文件
+│   ├── input.001.json
+│   ├── input.002.json
+│   └── input.003.json
+├── v1.2
+│   ├── graph.000.pb
+│   ├── graph.001.pb
+│   ├── graph.002.pb
+│   ├── graph.003.pb
+│   ├── input.000.json
+│   ├── input.001.json
+│   ├── input.002.json
+│   └── input.003.json
+└── v1.3
+    ├── README
+    ├── graph.000.pb
+    ├── graph.001.pb
+    ├── graph.002.pb
+    └── graph.003.pb
+
+

规则4:

+

在文件夹里放入必要的说明文件,例如 README

+
└── v1.3
+    ├── README #必要的说明文件,推荐使用markdown语言书写
+    ├── graph.000.pb
+    ├── graph.001.pb
+    ├── graph.002.pb
+    └── graph.003.pb
+
+
# README
+ converted from v1.2 pot
+ compress input use that v1.2 training input
+
+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/teamwork/git_usage/index.html b/en/wiki/teamwork/git_usage/index.html new file mode 100644 index 00000000..58fe3b53 --- /dev/null +++ b/en/wiki/teamwork/git_usage/index.html @@ -0,0 +1,3190 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Git 基本使用教程 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Git 基本使用教程

+
+

Git是目前世界上最先进的分布式版本控制系统(没有之一)—— 廖雪峰

+
+

版本控制系统可以帮助用户快速识别、整理项目的修改等,避免出现诸如 "新建文本文件_by浩二-第19版_修改190810-v114.514 - 副本(9).txt" 等令人血压上升、呕吐不止的情况。

+

Git作为开源社区常用的版本控制系统,有着强大的功能,可以帮助用户管理以文本(如代码等)为主的项目。当然对二进制文件,例如docx、pptx等,Git的支持尚不够完善,加上服务器众所周知的原因,因而不建议把Github当成网盘使用。

+

目前组内有关机器学习、自动化的工作逐渐增多,需要代码共享和协同的场合也逐渐增加。另一方面,基于 LaTeX 等标记语言的论文写作,其实质上也是对文本文件的处理。但鉴于 Git 的入门和使用尚有一定门槛,需要先掌握一些基础命令,因而写下这篇文字,整理一些常用的Git操作。限于篇幅和水平,可能会有一些缺漏,还请指正。

+

本文将长期更新,不定期收录一些小故事小Trick。

+

项目创建

+

基于 Github 创建项目

+

首先注册 Github 账号,这里不作赘述。

+
+

提示

+

若正在阅读本文的读者是在校师生,可通过 Github 官方渠道 申请成为校园专业用户(Campus Expert),从而可以免费使用(白嫖)专业版特性,并享受一系列优惠(包括Pycharm专业版等,详见官网介绍)。当然这不影响我们后文的操作,读者可以稍后申请。注意申请的IP需要位于校园网环境内,并且最好保证IP定位在校区范围内以免出现错误识别导致申请失败。例如厦门大学曾呈奎楼不位于Github认可的校区范围内,请最好到化学楼、卢嘉锡楼、图书馆等地申请。申请时可能需要提供学校邮箱、学生卡照片信息等,请按照相应提示操作。

+
+

完成注册到达首页,便可以看到如图的按钮,点击"New"即可创建一个仓库(Repository)。

+

image-20211222110147676

+

随后便出现如下图的界面,可以选择设置该仓库的归属(Owner)、名称(Repository name)、说明(Description)、权限等。需要说明的是,公共仓库(Public)的内容任何人都能看到,但提交(Push)需要设置权限;而私有仓库(Private)的访问权限取决于归属者,若为个人仓库默认仅自己可见,若为组织(Organization)则仅该组织成员可见。

+

截屏2021-12-22 上午11.15.52

+

尽管直接点击“Create repository”我们便可以快速创建一个仓库,这里推荐根据情况选择是否要创建说明文档(README file)、忽略信息(.gitignore)以及开源协议(License)。关于开源协议的说明,请点击"Learn more",这里限于篇幅原因不过多描述。

+

需要说明的是.gitignore,如图所示,可以看到 Github 提供了多种模板供选择,例如需要创建的项目以Python代码为主,则可以选择Python。则仓库创建后,Git将不再追踪文件夹下可能存在的日志文件、预编译文件(如.pyc)、Jupyter Notebook缓存等,这对于保持工作区和修改信息的清晰有很大帮助。当然,这里的模板可能无法包含所有需求,故也可以先创建仓库再添加。

+

为了合作的快捷、防止在提交时把过多无用文件提交到Git仓库中,强烈推荐在项目创建之初就建立.gitignore文件。后文将更加详细地介绍这一文件的用法。

+

截屏2021-12-22 上午11.24.54

+

远程↔︎本地

+

在Github上创建项目后,下一个关心的议题自然是,如何把本地的代码上传到远程。

+

截屏2021-12-22 上午11.32.43

+

相信不少人已经对上图中的按钮 "Add file" 跃跃欲试了,点击即可看到两个选项,即创建文件和上传文件。前者可以提供一个文本框输入你想要建立的文字,后者则提供了一个区域可以通过浏览器拖动文件手动上传或者打开资源管理器选择要上传的文件。但当文件较多、较大时,这两种方法便显得不够便捷。因此这里我们从 Git 命令行出发,介绍更常用的提交方式。

+

实际上 Github 仅仅是世界最大的 Git 远程项目管理平台,Git 本身则不依赖于 Github 存在,因此我们在本地即可追踪文件的修改,进行版本控制。Git在本地的安装非常简单,用户可以参照廖雪峰老师的教程进行。在安装的最后,用户需要设置自己的信息,即用户名和邮箱。为了使远程的用户信息和本地保持一致,通常与Github的用户名和注册邮箱保持一致。

+
git config --global user.name "Your Name"
+git config --global user.email "email@example.com"
+
+

注意git config命令的--global参数,用了这个参数,表示你这台机器上所有的Git仓库都会使用这个配置,当然也可以对某个仓库指定不同的用户名和Email地址,即去掉--global

+

在远程创建仓库后,我们便可以把远程的仓库拉取(Pull)到本地。点击绿色的Code按钮,即可看到如图的对话框,点击文本框右侧的按钮复制链接。

+

截屏2021-12-22 上午11.46.27

+

若在本地某个目录下,输入如下命令:

+
git clone https://github.com/chenggroup/Test.git
+
+

即可将远程仓库拉取到本地,并创建一个Test目录用于存放文件。

+

先别急着输入上面的命令。出于安全性考虑,Github官方从2021年8月起关闭了通过HTTPS协议使用账号密码直接上传提交的功能,因此要想从本地向远程上传提交,需要使用SSH协议并进行额外配置,请参考廖雪峰老师的教程操作。

+

配置完成后,即可用SSH面板里的链接来克隆(Clone)远程仓库到本地:

+

截屏2021-12-22 上午11.52.29

+
git clone git@github.com:chenggroup/Test.git
+
+

注意 git clone 后的链接要修改为你复制的链接。

+

随后 cd Test 进入本地仓库,便可以对本地仓库进行编辑。这里我们用Vim创建一个文件,为演示操作方便,文件名假设是first_commit.txt

+
vim first_commit.txt
+
+

在文件中进行一些编辑,例如输入:

+
test
+2021
+first commit
+
+

如果尚不熟悉 Vim 的操作,请参考Linux快速基础入门

+

保存并退出,输入git status,可以看到已经监测到尚未提交的更改:

+
$ git status
+On branch master
+Your branch is up to date with 'origin/master'.
+
+Untracked files:
+  (use "git add <file>..." to include in what will be committed)
+    first_commit.txt
+
+nothing added to commit but untracked files present (use "git add" to track)
+
+

注意这里提到,我们正处于master分支上,并与远程的origin/master分支保持一致。输入

+
git add .
+
+

即可将当前目录下修改的文件添加到暂存区,可供提交。因此输入:

+
git commit -m "some description"
+
+

即可生成一个提交,包含了上述文件的修改。这里some description可以参照自己的编辑进行修改。

+

但上述步骤仅仅是提交到本地的Git仓库,要想和远程同步,则需要:

+
git push origin
+
+

将本地的更改提交到远程对应的分支,即上述的origin/master,输出如下:

+
$ git push origin
+Enumerating objects: 4, done.
+Counting objects: 100% (4/4), done.
+Delta compression using up to 4 threads
+Compressing objects: 100% (2/2), done.
+Writing objects: 100% (3/3), 309 bytes | 309.00 KiB/s, done.
+Total 3 (delta 0), reused 0 (delta 0)
+To github.com:chenggroup/Test.git
+   26c6605..d964d89  master -> master
+
+

回到远程页面就会发现,我们已经提交成功。

+

截屏2021-12-22 下午1.12.12

+

点击进入,内容和本地一致:

+

截屏2021-12-22 下午1.14.44

+

从而我们可以把本地仓库的修改同步到远程。在git commit之前,实际上任何修改都可以添加到暂存区中,但这里需要注意可以被Track的文件是否是自己想要的,而不要无脑git add .甚至git add *,以免追踪到一些“不速之客”。

+

项目维护

+

分支

+

如果项目本身内容较多,且由多个人维护,将所有提交都放到同一条时间线上,就会形成非常长的修改,不利于每个人追踪自己的修改。并且有时会希望在重构的同时,保持主线完整性。这一需求可由Git轻松解决。

+

Git支持创建分支(Branch),即可以从主线分支出一个独立的Branch,并在该Branch修改,通过后再合并(Merge)到主线上。这样,便可以在不干涉主线的情况对分支进行维护和修改。并且每个人都可以创建自己的独立分支,从而避免各自的修改之间出现冲突,导致混乱。

+

切换分支的命令如下:

+
git checkout -b devel
+
+

若本地之前不存在devel分支,则可由当前分支出发创建一个。这样的实现方式就如同从当前地铁站换乘到另一条地铁线路,再继续乘坐。之后的所有修改便体现在devel分支上。

+

当修改的代码测试完善,我们便可以把支线代码合并到主线上,即在换乘线路的地铁站修建一个换乘站,与主线换乘,并保留之前的所有修改。命令如下:

+
git checkout master
+git merge devel
+
+

关于分支管理,更详细的介绍,可以参考廖雪峰的教程

+

拉取请求(Pull Request)

+

类似于分支的实现,对公开在Github上的远程项目,可以由当前项目出发,建立项目的复刻(Fork)。复刻出的项目可以看作是主项目的分支,并保留了初始项目的相应分支。

+

截屏2021-12-22 下午2.31.01

+

Fork的项目仍是远程项目,因而可以Clone到本地作进一步修改,并可以与本地同步从而更新远程的Fork项目,而原始项目保持不变(并且很可能也没权限改变)

+

此时,要想向原始项目提交自己的修改,则需要创建拉取请求(Pull request,简写为PR)。点击页面上的"Contribute",点击"Open pull request"即可创建PR。

+

截屏2021-12-22 下午2.35.58

+

随后,便可以指定从Fork项目的某个分支提交PR到原始项目的某个分支。例如图中是从自己的devel到原始的master分支。在下方的文本框中可以输入自己的修改及对应的描述,便于原始项目的维护者审核、处理、合并PR。

+

提交PR

+

页面向下翻,可以看到自己的历史提交,以及修改的文件等。注意在创建PR前,请务必查看本次PR相比原始文件修改了哪些,以免一些不希望上传的内容混进来,给审核人员带来困扰,不利于抓住真正核心的修改。

+

提交PR以后,审核人员可能会提出一些建议,甚至是修改意见。若提交到对应的复刻分支,则相应的修改也会同步到PR中,因此不需要额外提交修改请求。

+

创建议题(Issue)

+

当发现代码可能存在BUG或者自己有一些疑问需要维护者回答时,抑或是有一些想要开发者实现的新功能,用户也可以在原始项目中创建议题(Issue),用尽可能简洁的语言描述自己遇到的问题,或自己的需求。一些流行的项目可能会提供Issue模板,请按照模板提示填写,提高解决问题的效率,方便开发者对应修复BUG或者开发特性。

+

DeePMD-kit 项目中的Issue

+

如果你看到相关的Issue,而恰好你的修改可以为之提供帮助,也可以提交PR,并在PR的描述中用#<ID>连接到对应的Issue,便于提问者同步你的修改。

+

.gitignore 文件

+

开发者常常需要在项目文件夹下调试,而论文撰稿人常常需要编译 LaTex 项目产生 PDF 供预览。这些过程,都可能产生一些日志、缓存、输出等文件,一些甚至是二进制文件。在默认情况下,Git 会监测项目目录下的所有文件,如果git add .,则会全部加入到暂存区。若在git commit时仍未发现问题,这些文件就会一并被仓库追踪。当上传到远程仓库,有权限查看这些项目的人便会在Github或者其他地方看到这些文件,血压可能会无法抑制地急速上升……

+

为了避免这种情况,便需要有办法拒绝追踪这些文件。Git提供的解决方案便是创建一个.gitignore文件,记录这些希望被忽略的文件或目录。其格式如下所示,即把希望忽略或者排除的文件加入其中。

+
# 排除特定文件
+text.txt
+
+# 排除tmp下的所有文件
+tmp/*
+
+# 排除所有.开头的隐藏文件
+.*
+
+# 排除所有.class文件
+*.class
+
+# 不排除.gitignore和App.class
+!.gitignore
+!App.class
+
+

可以想像,如果所有规则都手动编写,对于维护者可能会有困扰。因此,Github上亦有维护一个.gitignore文件的仓库(github/gitignore: A collection of useful .gitignore templates),用户只需要根据自己的需求从中选取相应的忽略信息,加入到本地的.gitignore即可。注意,该仓库的根目录下放置的是一些常用语言环境,而一些编辑器或IDE同样会产生缓存文件,这些模板见于global下。实际上,从Github创建的仓库便是从这个仓库中拉取.gitignore的模板。

+

但是,很多意识到自己需要.gitignore的用户往往是经历了血压的上升,想要亡羊补牢的。即已经把诸如日志文件一类的文件提交到远程仓库中,甚至在clone时才发现问题。一个比较快速的解决方案便是,在建立.gitignore后,直接运行:

+
git rm -r --cached .
+git add .
+
+

相当于从头开始,直接将不希望继续track的文件标记为删除,从而在提交上彻底忽略这些文件的存在,但同时不删除原始文件。但这些文件的记录仍存在于远程。

+

另一种思路则是利用git update-index --assume-unchanged <file>命令,忽略掉该文件的更改,但仍保留了文件本身。总之,这两种方法都无法从根本上解决已经提交到远程的文件,因此还是推荐在git init之初就写好.gitignore,或利用 Github 自带的模板。

+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/en/wiki/teamwork/tutorial_rules/index.html b/en/wiki/teamwork/tutorial_rules/index.html new file mode 100644 index 00000000..e1555352 --- /dev/null +++ b/en/wiki/teamwork/tutorial_rules/index.html @@ -0,0 +1,2785 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 如何组织培训 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

如何组织培训

+
    +
  1. +

    一定要在培训和展示前把幻灯片和培训材料发送给学员

    +
  2. +
  3. +

    培训材料请遵循以下格式

    +
  4. +
  5. +

    主题

    +
  6. +
  7. +

    目标和此次培训的收益

    +

    --- 例:1.理解工作流 2.学习如何自动化工作流 3.学习如何通过airflow可视化工作流

    +
  8. +
  9. +

    提前帮参与者/学员准备

    +

    --- 所需的背景知识和提供必要的引导 - 链接,书籍,必读文章等

    +

    --- 其他需要在培训前做好的准备

    +

    --- 例:1.安装PyCharm, Jupyter, python3.9等 2.安装和验证所需的包(airflow)

    +
  10. +
  11. +

    培训内容的时间安排

    +

    --- 例:

    +
      +
    1. 介绍工作流(10分钟)
    2. +
    3. 介绍aiida和airflow(20分钟)
    4. +
    5. 练习工作流和可视化工作流(50分钟)
    6. +
    7. 答疑(10分钟)
    8. +
    +
  12. +
  13. +

    确保你足够早地发送幻灯片和培训材料,留下充足的时间给学员完成准备任务。

    +
  14. +
+

Training/Presentaion Guideline

+
    +
  1. +

    Always send slides and agenda BEFORE presentation and training

    +
  2. +
  3. +

    Follow the agenda format as below:

    +

    a. Topic

    +

    b. Objective and benefit of training

    +

    —— e.g. 1. Understand workflow 2. Learn how to automate workflow 3. Learn how to visualize workflow via the package 'airflow'

    +

    c. Participant's preparation

    +

    —— State the desired background knowledge and provide induction — links, books, must-read papers etc.

    +

    —— State the preparation that the participants need to complete before attending the training

    +

    —— e.g. 1. Install IDE PyCharm, Jupyter, python3.9 etc. 2. Install and validate required packages (airflow)

    +

    d. Items with time slot

    +

    —— e.g.

    +
  4. +
  5. +

    Introduce workflow (10 minutes)

    +
  6. +
  7. +

    Introduce AiiDA and airflow (20 minutes)

    +
  8. +
  9. +

    Practice workflow and visualize via 'airflow' (50 minutes)

    +
  10. +
  11. +

    Q&A (10 minutes)

    +
  12. +
  13. +

    Make sure you send slides and agenda early and leave plenty of time for the participants to complete the preparation tasks.

    +
  14. +
+ + + + + + + + + + + + + + + +

Comments

+ + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/images/favicon.png b/images/favicon.png new file mode 100644 index 00000000..76d17f57 Binary files /dev/null and b/images/favicon.png differ diff --git a/index.html b/index.html new file mode 100644 index 00000000..faef134f --- /dev/null +++ b/index.html @@ -0,0 +1,2692 @@ + + + + + + + + + + + + + + + + + + + + + + + 首页 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

欢迎来到程组Wiki

+

这里是厦门大学程俊课题组的组内wiki。我们欢迎其他访问者一起浏览和提升!如果有其他建议请在github issue提出。

+

Welcome to ChengGroup Wiki

+

This is the group wiki of chenggroup at XMU, an academic research group. Visitors are welcome to have a look around our wiki, and any suggestions you would like to post are appreciated. This wiki is mainly written in Simplified Chinese.

+

本Wiki的使用方法

+

本Wiki储存大家所需要的技术性和原则性知识。技术性知识包括基本知识的学习和代码知识的学习;原则性知识包括文章写作原则、学术研究原则等。以下列出一些常用的入门,供大家浏览和跳转。

+
    +
  1. 新生用: 组内安排和入门教程顺序
  2. +
  3. 组员用: 集群使用基础
  4. +
  5. 组员用: 集群GPU使用
  6. +
  7. 组员用: 如何修改与贡献Wiki
  8. +
  9. 集群管理人员用: 常用的软件安装
  10. +
+

软件入门与使用经验

+
    +
  1. CP2K入门
  2. +
+

如有疑问

+

针对计算化学问题

+

请首先阅读在网上求助计算化学问题时的注意事项

+

如果确实有需要作者帮助答疑的问题,请归纳清楚要点并通过邮件联系作者。

+
+

可参阅:为什么国外把邮件当微信一样发?

+
+

针对代码方面问题

+

请首先阅读提问的智慧(How To Ask Questions The Smart Way) (作者: Eric Steven Raymond, 译者: ryanhanwu)

+

如果确实有需要作者答疑的问题,请归纳清楚要点并在Github提问题(Open Issue in Github),或通过邮件联系作者。

+

Please read How To Ask Questions The Smart Way (written by Eric Steven Raymond) first. If you still have a question that needs our help, please summarize the key points and open an issue on Github, or contact us by e-mail.

+ + + + + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/javascripts/analytic.js b/javascripts/analytic.js new file mode 100644 index 00000000..5c4f1638 --- /dev/null +++ b/javascripts/analytic.js @@ -0,0 +1,7 @@ +var _hmt = _hmt || []; +(function() { + var hm = document.createElement("script"); + hm.src = "https://hm.baidu.com/hm.js?a45ff4ce6afb8b67b2a67377468d5c0f"; + var s = document.getElementsByTagName("script")[0]; + s.parentNode.insertBefore(hm, s); +})(); \ No newline at end of file diff --git a/javascripts/mathjax.js b/javascripts/mathjax.js new file mode 100644 index 00000000..ef8f34c2 --- /dev/null +++ b/javascripts/mathjax.js @@ -0,0 +1,19 @@ +window.MathJax = { + loader: {load: ['[tex]/mhchem']}, + tex: { + inlineMath: [["\\(", "\\)"]], + displayMath: [["\\[", "\\]"]], + processEscapes: true, + processEnvironments: true, + packages: {'[+]': ['mhchem']} + }, + options: { + ignoreHtmlClass: ".*|", + processHtmlClass: "arithmatex" + }, + }; + + document$.subscribe(() => { + MathJax.typesetPromise() + }) + \ No newline at end of file diff --git a/news/index.html b/news/index.html new file mode 100644 index 00000000..a7cb0b72 --- /dev/null +++ b/news/index.html @@ -0,0 +1,2685 @@ + + + + + + + + + + + + + + + + + + + + + 新闻通知 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + 跳转至 + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

新闻通知

+

2022

+

2022-10-01 新版Wiki上线

+

相比于旧版,新版采用了原生的Mkdocs+Github Action实现,支持更多特性。欢迎使用。

+ + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..c6a0e930 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en","zh"],"separator":"[\\s\\-\\.]","pipeline":["stemmer"]},"docs":[{"location":"","title":"\u6b22\u8fce\u6765\u5230\u7a0b\u7ec4Wiki","text":"

\u8fd9\u91cc\u662f\u53a6\u95e8\u5927\u5b66\u7a0b\u4fca\u8bfe\u9898\u7ec4\u7684\u7ec4\u5185wiki\u3002\u6211\u4eec\u6b22\u8fce\u5176\u4ed6\u8bbf\u95ee\u8005\u4e00\u8d77\u6d4f\u89c8\u548c\u63d0\u5347\uff01\u5982\u679c\u6709\u5176\u4ed6\u5efa\u8bae\u8bf7\u5728github issue\u63d0\u51fa\u3002

"},{"location":"#welcome-to-chenggroup-wiki","title":"Welcome to ChengGroup Wiki","text":"

This is group wiki for chenggroup of XMU which is personal academic group. Visitors are welcomed to have look in our wiki. It's better if you would like to post any suggestions for us. This Wiki are mainly written in Simplified Chinese.

"},{"location":"#wiki_1","title":"\u672cWiki\u7684\u4f7f\u7528\u65b9\u6cd5","text":"

\u672cWiki\u50a8\u5b58\u5927\u5bb6\u6240\u9700\u8981\u7684\u6280\u672f\u6027\u548c\u539f\u5219\u6027\u77e5\u8bc6\u3002\u6280\u672f\u6027\u77e5\u8bc6\u5305\u62ec\uff0c\u57fa\u672c\u77e5\u8bc6\u7684\u5b66\u4e60\u548c\u4ee3\u7801\u77e5\u8bc6\u7684\u5b66\u4e60\u3002\u539f\u5219\u6027\u77e5\u8bc6\u5305\u62ec\u6587\u7ae0\u5199\u4f5c\u539f\u5219\uff0c\u5b66\u672f\u7814\u7a76\u539f\u5219\u7b49\u3002\u4ee5\u4e0b\u5217\u51fa\u4e00\u4e9b\u5e38\u7528\u7684\u5165\u95e8\uff0c\u4f9b\u5927\u5bb6\u6d4f\u89c8\u548c\u8df3\u8f6c\u3002

  1. \u65b0\u751f\u7528: \u7ec4\u5185\u5b89\u6392\u548c\u5165\u95e8\u6559\u7a0b\u987a\u5e8f
  2. \u7ec4\u5458\u7528: \u96c6\u7fa4\u4f7f\u7528\u57fa\u7840
  3. \u7ec4\u5458\u7528: \u96c6\u7fa4GPU\u4f7f\u7528
  4. \u7ec4\u5458\u7528: \u5982\u4f55\u4fee\u6539\u4e0e\u8d21\u732eWiki
  5. \u96c6\u7fa4\u7ba1\u7406\u4eba\u5458\u7528: \u5e38\u7528\u7684\u8f6f\u4ef6\u5b89\u88c5
"},{"location":"#_1","title":"\u8f6f\u4ef6\u5165\u95e8\u4e0e\u4f7f\u7528\u7ecf\u9a8c","text":"
  1. CP2K\u5165\u95e8
"},{"location":"#_2","title":"\u5982\u6709\u7591\u95ee","text":""},{"location":"#_3","title":"\u9488\u5bf9\u8ba1\u7b97\u5316\u5b66\u95ee\u9898","text":"

\u8bf7\u9996\u5148\u9605\u8bfb\u5728\u7f51\u4e0a\u6c42\u52a9\u8ba1\u7b97\u5316\u5b66\u95ee\u9898\u65f6\u7684\u6ce8\u610f\u4e8b\u9879

\u5982\u679c\u786e\u5b9e\u6709\u9700\u8981\u4f5c\u8005\u5e2e\u52a9\u7b54\u7591\u7684\u95ee\u9898\uff0c\u8bf7\u5f52\u7eb3\u6e05\u695a\u8981\u70b9\u5e76\u901a\u8fc7\u90ae\u4ef6\u8054\u7cfb\u4f5c\u8005\u3002

\u53ef\u53c2\u9605\uff1a\u4e3a\u4ec0\u4e48\u56fd\u5916\u628a\u90ae\u4ef6\u5f53\u5fae\u4fe1\u4e00\u6837\u53d1\uff1f

"},{"location":"#_4","title":"\u9488\u5bf9\u4ee3\u7801\u65b9\u9762\u95ee\u9898","text":"

\u8bf7\u9996\u5148\u9605\u8bfb\u63d0\u95ee\u7684\u667a\u6167(How To Ask Questions The Smart Way) (\u4f5c\u8005: Eric Steven Raymond, \u8bd1\u8005: ryanhanwu)

\u5982\u679c\u786e\u5b9e\u6709\u9700\u8981\u4f5c\u8005\u7b54\u7591\u7684\u95ee\u9898\uff0c\u8bf7\u5f52\u7eb3\u6e05\u695a\u8981\u70b9\u5e76\u5728Github\u63d0\u95ee\u9898(Open Issue in Github)\uff0c\u6216\u901a\u8fc7\u90ae\u4ef6\u8054\u7cfb\u4f5c\u8005\u3002

Please read How To Ask Questions The Smart Way (Written by Eric Steven Raymond) first. If there is problem in need of us, please open issue in Github or write E-mail to contact us.

"},{"location":"news/","title":"\u65b0\u95fb\u901a\u77e5","text":""},{"location":"news/#2022","title":"2022","text":""},{"location":"news/#2022-10-01-wiki","title":"2022-10-01 \u65b0\u7248Wiki\u4e0a\u7ebf","text":"

\u76f8\u6bd4\u4e8e\u65e7\u7248\uff0c\u65b0\u7248\u91c7\u7528\u4e86\u539f\u751f\u7684Mkdocs+Github Action\u5b9e\u73b0\uff0c\u652f\u6301\u66f4\u591a\u7279\u6027\u3002\u6b22\u8fce\u4f7f\u7528\u3002

"},{"location":"wiki/book_recommendation/","title":"\u63a8\u8350\u4e66\u7c4d\u4e00\u89c8","text":""},{"location":"wiki/book_recommendation/#_2","title":"\u57fa\u7840\u7406\u8bba\u7cfb\u5217","text":"
  1. Quantum Chemistry

    \u9762\u5411\u5316\u5b66\u5b66\u751f\u7684\u91cf\u5b50\u529b\u5b66\u57fa\u7840\uff0c\u63a8\u7b97\u6bd4\u8f83\u8be6\u7ec6\u3002

  2. Modern Quantum Chemistry

    \u7ecf\u5178\u91cf\u5b50\u5316\u5b66\u6559\u6750\uff0cHartree-Fock\u7406\u8bba\u662f\u5168\u4e66\u6700\u51fa\u5f69\u7684\u5730\u65b9\u3002

  3. The Electronic Structure and Chemistry of Solids

    \u5b9a\u6027\u7684\u56fa\u4f53\u7269\u7406\uff0c\u9002\u5408\u5316\u5b66\u5b66\u751f\u9605\u8bfb\uff0c\u5bf9\u56fa\u4f53\u7269\u7406\u6709\u7b80\u5355\u7684\u4e86\u89e3\u3002

  4. Statistical Mechanics

    \u7cfb\u7edf\u800c\u5168\u7684\u7269\u7406\u5411\u7edf\u8ba1\u529b\u5b66\u5165\u95e8\uff0c\u516c\u5f0f\u63a8\u5bfc\u8be6\u7ec6\uff0c\u8ba4\u771f\u770b\u4f1a\u5bf9\u7edf\u8ba1\u529b\u5b66\u6709\u76f8\u5bf9\u6df1\u5ea6\u7684\u7406\u89e3

  5. Second Quantized Approach to Quantum Chemistry

    \u6b63\u5982\u8fd9\u672c\u4e66\u7684\u524d\u8a00\u6240\u8bf4\uff0c\u672c\u4e66\u662f\u4e3a\u4e86\u7b80\u5355\u5f15\u5bfc\u5316\u5b66\u5bb6\uff08\u975e\u7269\u7406\u4e13\u4e1a\u7684\u4eba\u58eb\uff09\u719f\u6089\u4e8c\u6b21\u91cf\u5b50\u5316\u8fd9\u95e8\u8bed\u8a00\u3002\u5982\u679c\u4f60\u5728\u6587\u732e\u9605\u8bfb\u4e2d\u60f3\u8be6\u7ec6\u4e86\u89e3\u4e8c\u6b21\u91cf\u5b50\u5316\uff0c\u4e2a\u4eba\u5341\u5206\u63a8\u8350\u8fd9\u672c\u4e66\u3002\u6b63\u5982\u4f5c\u8005\u6240\u8bf4\uff0c\u4e8c\u6b21\u91cf\u5b50\u5316\u7684\u7f8e\u53ea\u6709\u4f60\u5f00\u59cb\u4f7f\u7528\u4e86\u4f60\u624d\u4f1a\u6b23\u8d4f\u5230\u3002

"},{"location":"wiki/book_recommendation/#_3","title":"\u7f16\u7a0b\u7cfb\u5217","text":"
  1. Linux Command Line and Shell Scripting Bible

    \u4ece\u5165\u95e8\u5230\u4e2d\u7ea7\uff0c\u8bb2\u89e3\u548c\u5168\u9762\u6027\u6765\u770b\u90fd\u662f\u4e0d\u9519\u7684\u4e66\u3002\u4f7f\u7528bash\u548cshell\u8ddfUnix\u5185\u6838\u8fdb\u884c\u4ea4\u4e92\u662f\u6240\u6709\u7f16\u7a0b\u5f00\u59cb\u7684\u57fa\u7840\uff08\u5982\u679c\u4f60\u60f3\u7528Linux\u6216MacOS\u5feb\u4e50\u7684\u7f16\u7a0b\u7684\u8bdd\uff09

  2. Python Crash Course

    Python\u7f16\u7a0b\u7684\u7cfb\u7edf\u5165\u95e8\u4e66\uff0cproject\u7ae0\u8282\u53ea\u9700\u8981\u770b\u6570\u636e\u5c55\u793a\u90e8\u5206\u3002

  3. Numerical Python

    \u6709\u5173\u4e8e\u6570\u636e\u79d1\u5b66\u4e2d\u4f7f\u7528python\u7684text book\uff0cnumpy\uff0cscipy\uff0cmatplotlib\u90fd\u6709\u5305\u62ec

  4. Fortran for Scientists & Engineers

    Fortran\u5165\u95e8+\u57fa\u7840\uff0c\u4f8b\u5b50\u7b80\u5355\u660e\u4e86\uff0c\u5168\u9762\uff0c20\u5929\u5de6\u53f3\u53ef\u505a\u5b8c\u5168\u4e66\uff0c\u6bcf\u4e2a\u7ae0\u8282\u540e\u9762\u7684\u603b\u7ed3\u548c\u4ee3\u7801\u53c2\u8003\u5f88\u5b8c\u5584\u3002\u53e6\u5916\u5173\u4e8e\u548cC/C++\u7684\u63a5\u53e3\u6559\u7a0b\u5728\u9644\u5f55Fortran/C Interoperablity

"},{"location":"wiki/book_recommendation/#_4","title":"\u5199\u4f5c\u7cfb\u5217","text":"
  1. How to Write a Lot

    \u6253\u788e\u4e0d\u5199\u4f5c\u7684\u501f\u53e3

"},{"location":"wiki/miscellaneous/","title":"Miscellaneous","text":"

Put temporary or unclassied content here!

"},{"location":"wiki/miscellaneous/#run-process-when-you-logout-shell","title":"Run Process when you logout shell","text":"

Everytime you login the cluster, you want to run some commands while you have to logout the shell. Unfortunately, these commands will stop as soon as you logout. How to keep commands run? The trick here is use command nohup and &.

bash nohup command &

You just need to prepend nohup and append & in your commands.Now, you can go back and have a nice sleep.

"},{"location":"wiki/miscellaneous/#linux","title":"\u5220\u9664 linux \u4e0b\u7684\u7b26\u53f7\u94fe\u63a5\uff08\u5feb\u6377\u65b9\u5f0f\uff09","text":"

Linux \u7cfb\u7edf\u4e0b\u7684\u7b26\u53f7\u94fe\u63a5\uff0c\u53c8\u79f0\u8f6f\u94fe\u63a5\uff0c\u57fa\u672c\u7c7b\u4f3c\u4e8e Windows \u7cfb\u7edf\u4e0b\u7684\u5feb\u6377\u65b9\u5f0f\u3002\u5982\u679c\u4f60\u5df2\u7ecf\u63a5\u89e6\u8fc7deepmd\uff0c\u4f60\u5e94\u8be5\u5df2\u7ecf\u5bf9\u89c1\u5230\u8fc7\u4e00\u4e9b\u7b26\u53f7\u94fe\u63a5\u4e86\u3002\u9700\u8981\u6ce8\u610f\u7684\u4e00\u70b9\u662f\uff0c\u7b26\u53f7\u94fe\u63a5\u672c\u8d28\u4e0a\u662f\u4e00\u4e2a \u72ec\u7acb\u7684\u6587\u672c\u6587\u4ef6\uff0c\u64cd\u4f5c\u7cfb\u7edf\u4f1a\u5c06\u5176\u89e3\u91ca\u4e3a\u53e6\u4e00\u4e2a\u6587\u4ef6\u6216\u8005\u8def\u5f84\uff08\u6587\u4ef6\u5939\uff09\u3002\u56e0\u6b64\u7b26\u53f7\u94fe\u63a5\u6709\u5982\u4e0b\u4e24\u4e2a\u6027\u8d28\uff1a

  • \u5220\u9664\u7b26\u53f7\u94fe\u63a5\u6587\u4ef6\u5e76\u4e0d\u4f1a\u5f71\u54cd\u539f\u672c\u7684\u6587\u4ef6/\u8def\u5f84\uff08\u6587\u4ef6\u5939\uff09

  • \u5220\u9664\u539f\u59cb\u6587\u4ef6/\u8def\u5f84\u540e\uff0c\u7b26\u53f7\u94fe\u63a5\u4ecd\u7136\u5b58\u5728\uff0c\u4f46\u662f\u94fe\u63a5\u4f1a\u635f\u574f\uff0c\u6210\u4e3a \u201cstale symbolic link\u201d\uff08\u5b57\u9762\u610f\u601d\uff09\u3002

\u5728\u6574\u7406\u5de5\u4f5c\u6587\u4ef6\u5939\u7684\u65f6\u5019\uff0c\u6211\u4eec\u53ef\u80fd\u4f1a\u9700\u8981\u5220\u9664\u7b26\u53f7\u94fe\u63a5\uff0c\u6211\u4eec\u5c24\u5176\u9700\u8981\u6ce8\u610f\u8def\u5f84\u7b26\u53f7\u94fe\u63a5\u7684\u5220\u9664\uff1a

\u4e00\u4e2adp-gen\u7684\u8bad\u7ec3\u8def\u5f84\u7ed3\u6784\u5982\u4e0b\uff1a

00.train/\n\u251c\u2500\u2500 000\n\u251c\u2500\u2500 001\n\u251c\u2500\u2500 002\n\u251c\u2500\u2500 003\n\u251c\u2500\u2500 data.init -> /data/rhbi/TiO2-ML/00.cp2k_md\n\u251c\u2500\u2500 data.iters\n\u251c\u2500\u2500 graph.000.pb -> 000/frozen_model.pb\n\u251c\u2500\u2500 graph.001.pb -> 001/frozen_model.pb\n\u251c\u2500\u2500 graph.002.pb -> 002/frozen_model.pb\n\u251c\u2500\u2500 graph.003.pb -> 003/frozen_model.pb\n\u2514\u2500\u2500 jr.json\n

\u5047\u8bbe\u4f60\u60f3\u8981\u5220\u9664\u548c\u6587\u4ef6\u5173\u8054\u7684\u8f6f\u94fe\u63a5\u2018graph.000.pb\u2019\uff0c\u8f93\u5165 rm graph.000.pb\uff0c\u6ca1\u6709\u4efb\u4f55\u95ee\u9898\uff0c\u4f60\u6210\u529f\u5220\u9664\u4e86\u8fd9\u4e2a\u6587\u4ef6\u3002\u7136\u800c\u5982\u679c\u4f60\u60f3\u5220\u9664\u548c\u4e00\u4e2a\u6587\u4ef6\u5939\u76f8\u5173\u7684\u94fe\u63a5\uff0cdata.init\uff0c\u4f60\u53ef\u80fd\u4f1a\u4e0d\u5047\u601d\u7d22\u5730\u8f93\u5165

rm data.init/\n

\u8fd9\u65f6\u5019\u4f60\u4f1a\u6536\u5230\u62a5\u9519\uff1a

rm: cannot remove \u2018data.init/\u2019: Is a directory\n

\u518d\u6b21\u5f3a\u8c03\uff0c\u7b26\u53f7\u94fe\u63a5\u672c\u8d28\u4e0a\u662f\u4e00\u4e2a \u72ec\u7acb\u7684\u6587\u672c\u6587\u4ef6\u3002\u6536\u5230\u62a5\u9519\u662f\u56e0\u4e3ashell\u7684\u81ea\u52a8\u5168\u529f\u80fd\u628a\u2018data.init\u2019\u8bc6\u522b\u4e3a\u4e86\u4e00\u4e2a\u8def\u5f84\uff0c\u56e0\u6b64\u5728\u6700\u540e\u52a0\u5165\u4e86\u659c\u6760\u2018/\u2019\uff0c\u7136\u800c\u7b26\u53f7\u94fe\u63a5\u53ea\u662f\u4e00\u4e2a\u6587\u672c\u6587\u4ef6\uff0c\u8fd9\u4e2a\u65f6\u5019\u7cfb\u7edf\u8ba4\u4e3a\u4e0d\u80fd\u7528rm\u547d\u4ee4\u5220\u6389\u4e00\u4e2a\u8def\u5f84\uff0c\u6240\u4ee5\u62a5\u9519\u3002\u6b63\u786e\u7684\u89e3\u51b3\u65b9\u6cd5\u662f\u53bb\u6389\u659c\u6760\uff0c\u8f93\u5165\u6b63\u786e\u7684\u547d\u4ee4\u6210\u529f\u5220\u9664\u94fe\u63a5\uff1a

rm data.init\n

\u5f53\u7136shell\u7684\u81ea\u52a8\u8865\u5168\u548c\u4f60\u4f7f\u7528\u7684 shell \u7248\u672c\u6709\u5173\uff0c\u6709\u53ef\u80fd\u4f60\u7684 shell \u4e0d\u4f1a\u72af\u8822\u76f4\u63a5\u52a0\u4e0a\u2018/\u2019\uff0c\u4f46\u662f\u5728\u5220\u9664\u94fe\u63a5\u7684\u65f6\u5019\u4f60\u9700\u8981\u989d\u5916\u6ce8\u610f\uff0c\u907f\u514d\u4f60\u7684\u6570\u636e\u635f\u5931\u3002

danger

\u5343\u4e07\u4e0d\u8981\u8fd0\u884c 'rm -rf data.init/*' \uff0c\u4f60\u4f1a\u5220\u9664\u6389\u539f\u8def\u5f84\u4e0b\u7684\u6240\u6709\u6587\u4ef6\uff01\uff01\uff01'

"},{"location":"wiki/miscellaneous/#binshm-bad-interpreter-no-such-file-or-directory","title":"\u96c6\u7fa4\u4f7f\u7528\u51fa\u9519\uff1a/bin/sh^M: bad interpreter: No such file or directory","text":""},{"location":"wiki/miscellaneous/#_1","title":"\u9519\u8bef\u60c5\u51b5","text":"

/bin/sh^M: bad interpreter: No such file or directory

\u5728\u96c6\u7fa4\u4e0a\u4f7f\u7528bsub\u63d0\u4ea4\u4f5c\u4e1a\u540e\u6b63\u5e38\u663e\u793a\uff1a

Job <1360> is submitted to queue <53-large>\n

\u4f46\u662f\u7528bjobs\u67e5\u770b\u4e0d\u5230\u8fd9\u4e2a\u4f5c\u4e1a\uff0c\uff08\u53ef\u80fd\u5148\u663e\u793a\u5728\u6392\u961fPEND\uff09\u663e\u793aNo unfinished job found\uff0c\u8fd9\u4e2a\u65f6\u5019\u4f7f\u7528ls\u547d\u4ee4\u4f1a\u770b\u89c1\u63d0\u4ea4\u7684.lsf\u4f5c\u4e1a\u7684\u76ee\u5f55\u4e0b\u4f1a\u751f\u6210\u8f93\u51fa\u548c\u62a5\u9519\u6587\u4ef6\uff1a1360.stdout\uff0c1360.stderr\uff0c\u8fd9\u8bf4\u660e\u4f5c\u4e1a\u5df2\u7ecf\u8fd0\u884c\u7ed3\u675f\uff08\u5f02\u5e38\u7ed3\u675f\uff09\u3002

"},{"location":"wiki/miscellaneous/#_2","title":"\u9519\u8bef\u539f\u56e0","text":"

\u4f7f\u7528vim\u547d\u4ee4\u67e5\u770b.stdout\u548c.stderr\u8fd9\u4e24\u4e2a\u6587\u4ef6\uff0c\u4f1a\u53d1\u73b0\u5728\u4f5c\u4e1a\u7684\u6362\u884c\u5904\u51fa\u73b0\u5f88\u591a^M\u7b26\u53f7\uff0c\u67e5\u8be2\u539f\u56e0\u662fwindows\u7684\u6587\u4ef6\u4e0a\u4f20\u5230linux\u7cfb\u7edf\u65f6\u6587\u4ef6\u683c\u5f0f\u53ef\u80fd\u4e0d\u4e00\u81f4

"},{"location":"wiki/miscellaneous/#_3","title":"\u9519\u8bef\u5904\u7406","text":"

\u65b9\u6cd5\u4e00\uff1a\u53c2\u8003linux\u4e0b\u8fd0\u884c\u811a\u672c\u62a5\u8bfb\u53d6\u6216^M\u9519\u8bef\u5904\u7406 - \u77e5\u4e4e (zhihu.com)

\u65b9\u6cd5\u4e8c\uff1a\u7528vim\u547d\u4ee4\u5728\u96c6\u7fa4\u4e0a\u65b0\u5efa\u4e00\u4e2a\u4f5c\u4e1a\uff0c\u7136\u540e\u628a\u4f5c\u4e1a\u5185\u5bb9\u590d\u5236\u4e0a\u53bb\uff0c\u518dbsub\u63d0\u4ea4\u4f5c\u4e1a\u5373\u53ef

"},{"location":"wiki/miscellaneous/#scrum-group","title":"Scrum Group","text":""},{"location":"wiki/miscellaneous/#_4","title":"\u7b80\u5355\u4ecb\u7ecd","text":"
  • scrum meeting \u5373\u6bcf\u65e5\u4f8b\u4f1a\uff0c\u5728\u6a44\u6984\u7403\u8fd0\u52a8\u4e2d a scrum \u610f\u601d\u4e3a\u4e00\u573a\u6bd4\u8d5b\uff0cscrum meeting \u65e8\u5728\u901a\u8fc7\u6bcf\u65e5\u4f8b\u4f1a\u7684\u5f62\u5f0f\u6765\u603b\u7ed3\u6700\u8fd1\u6240\u505a\u7684\u5de5\u4f5c\uff0c\u8fdb\u884c\u8ba8\u8bba\u548c\u53cd\u601d\u5e76\u5bf9\u672a\u6765\u77ed\u671f\u5185\u7684\u5de5\u4f5c\u8fdb\u884c\u89c4\u5212\u548c\u5c55\u671b\u3002
"},{"location":"wiki/miscellaneous/#_5","title":"\u57fa\u672c\u89c4\u5219","text":"
  • \u6240\u6709\u7684\u5b66\u751f\u6839\u636e\u6240\u7814\u7a76\u65b9\u5411\u5206\u4e3a\u82e5\u5e72\u5c0f\u7ec4\uff0c\u6bcf\u4e2a\u5c0f\u7ec4\u7531\u5404\u81ea\u7684 scrum master \u7ba1\u7406\uff0c\u5e76\u7531 scrum master \u5e26\u9886\u8fdb\u884c\u6bcf\u5468\u7684\u6c47\u62a5\u3002
  • scrum meeting \u6bcf\u5468\u8fdb\u884c\u4e24\u6b21\uff0c\u8fdb\u884c\u65f6\u95f4\u6839\u636e\u5177\u4f53\u60c5\u51b5\u800c\u5b9a\u3002
  • \u6240\u6709\u7684\u7814\u7a76\u751f\u548c\u672c\u79d1\u56db\u5e74\u7ea7\u5b66\u751f\u9664\u975e\u6709\u8981\u4e8b\u5747\u9700\u53c2\u52a0scrum meeting\uff0c\u5982\u679c\u6709\u4e8b\u4e0d\u80fd\u53c2\u52a0\u7684\u9700\u5411\u6240\u5728\u7ec4\u7684 scrum master \u8fdb\u884c\u8bf7\u5047\u548c\u6c47\u62a5\u3002
  • \u5982\u679c\u5f53\u5929\u8001\u5e08\u7e41\u5fd9\uff0c\u5404\u4e2a\u5c0f\u7ec4\u5e94\u8be5\u81ea\u884c\u7ec4\u7ec7 scrum meeting\u3002
"},{"location":"wiki/miscellaneous/#_6","title":"\u4f8b\u4f1a\u5185\u5bb9","text":"
  • \u6c47\u62a5\u4ece\u4e0a\u6b21 scrum meeting \u5230\u76ee\u524d\u4e3a\u6b62\u6240\u505a\u7684\u5de5\u4f5c\u5185\u5bb9\uff0c\u5305\u62ec\u9047\u5230\u7684\u95ee\u9898\u3001\u65b0\u7684\u53d1\u73b0\u6216\u8005\u5b58\u5728\u7684\u7591\u95ee\u7b49\u3002
"},{"location":"wiki/miscellaneous/#_7","title":"\u53c2\u8003\u6587\u4ef6","text":"
  • \u8bf7\u53c2\u8003\u4ee5\u4e0b\u6587\u4ef6\uff08\u5f85\u66f4\u65b0\uff09
  • https://www.scrumguides.org/scrum-guide.html
"},{"location":"wiki/cluster_usage/cluster_usage/","title":"\u8ba1\u7b97\u96c6\u7fa4\u4f7f\u7528\u8bf4\u660e","text":""},{"location":"wiki/cluster_usage/cluster_usage/#_2","title":"\u96c6\u7fa4\u7684\u57fa\u672c\u6982\u5ff5","text":""},{"location":"wiki/cluster_usage/cluster_usage/#cpucore","title":"CPU/Core(\u6838)\u7684\u6982\u5ff5","text":"

CPU \u662f Central Processing Unit \u7684\u7f29\u5199\u3002\u6bd4\u8d77\u5168\u79f0\uff0c\u4ed6\u7684\u7f29\u5199\u66f4\u4e3a\u5927\u5bb6\u6240\u719f\u77e5\u3002\u6211\u4eec\u4e70\u7535\u8111\u65f6\u90fd\u4f1a\u770b\u8fd9\u4e2a\u7535\u8111\u62e5\u6709\u51e0\u4e2a CPU\u3002CPU\u53ef\u4ee5\u8ba1\u7b97\u6570\u5b57\u6216\u8005\u6267\u884c\u4f60\u7684\u4ee3\u7801\u7b49\u7b49\u3002\u6bcf\u4e2aCPU\u6709\u591a\u4e2a\u8ba1\u7b97\u6838\u5fc3(Core)\uff0c\u8c03\u5ea6\u7cfb\u7edf\u53ef\u6309\u7167\u6240\u9700\u6838\u5fc3\u6570\u5bf9\u4efb\u52a1\u4f7f\u7528\u7684\u8d44\u6e90\u8fdb\u884c\u5206\u914d\uff0c\u56e0\u800c\u5728\u5b9e\u9645\u4f7f\u7528\u4e2d\uff0c\u6211\u4eec\u5e38\u5e38\u7528\u6838\u4ee3\u66ffCPU\u8fd9\u4e2a\u8868\u8ff0\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#memory","title":"Memory(\u5185\u5b58)\u7684\u6982\u5ff5","text":"

\u5185\u5b58(Memory)\u5c31\u662f\u50a8\u5b58\u6570\u636e\u7684\u5730\u65b9\u3002\u8ddf\u786c\u76d8(disk)\u50a8\u5b58\u7684\u6570\u636e\u4e0d\u540c\uff0c\u5185\u5b58\u91cc\u7684\u6570\u636e\u53ef\u4ee5\u76f4\u63a5\u88ab \u6838 \u8bfb\u53d6\u3002\u8ddf\u4f60\u5728\u786c\u76d8\u91cc\u50a8\u5b58\u7684\u6570\u636e\u7c7b\u4f3c\uff0c\u53ea\u662f\u5b83\u88ab\u6838\u8bfb\u53d6\u7684\u901f\u5ea6\u66f4\u5feb\u3002\u5f53\u6267\u884c\u7a0b\u5e8f\u65f6\uff0c\u6709\u4e00\u4e9b\u6570\u636e\u4f1a\u5148\u88ab\u8bfb\u5165\u5185\u5b58\uff0c\u7136\u540e\u518d\u6267\u884c\u8ba1\u7b97\u3002\u56e0\u6b64\u5185\u5b58\u8d8a\u5927\uff0c\u88ab\u8bfb\u5165\u7684\u6570\u636e\u4e5f\u5c31\u8d8a\u591a\uff0c\u80fd\u591f\u540c\u65f6\u5904\u7406\u7684\u6570\u636e\u4e5f\u5c31\u8d8a\u591a\uff0c\u4ee3\u7801\u8fd0\u884c\u7684\u65f6\u95f4\u4f1a\u66f4\u77ed\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#node","title":"Node(\u8282\u70b9)\u7684\u6982\u5ff5","text":"

\u8282\u70b9(Node)\u6362\u4e2a\u65e5\u5e38\u7684\u8bf4\u6cd5\u5c31\u662f\u4f60\u7684\u7535\u8111\uff0c\u6bd4\u5982\u4e00\u53f0\u53f0\u5f0f\u673a\u6216\u8005\u7b14\u8bb0\u672c\u7535\u8111\u3002\u5b83\u7531\u82e5\u5e72\u4e2a\u6838\u548c\u4e00\u4e2a\u5185\u5b58\u7ec4\u6210\u3002\u56e0\u6b64\u53ef\u4ee5\u628a\u8282\u70b9\u7b80\u5355\u7406\u89e3\u6210\u65e5\u5e38\u89c1\u5230\u7684\u7535\u8111(\u4e3b\u673a)\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#hpc","title":"HPC(\u96c6\u7fa4/\u8d85\u7ea7\u8ba1\u7b97\u673a/\u8d85\u7b97)\u7684\u6982\u5ff5","text":"

HPC\u5c31\u662fHigh Performance Cluster\u7684\u7f29\u5199\uff0c\u53c8\u79f0\u4e3a\u8d85\u7ea7\u8ba1\u7b97\u673a\uff0c\u9ad8\u6027\u80fd\u96c6\u7fa4\u7b49\u3002\u5b83\u7531\u82e5\u5e72\u4e2a\u8282\u70b9\u7ec4\u6210\u3002\u5b9e\u9645\u4f7f\u7528\u4e2d\uff0c\u8fd9\u4e9b\u8282\u70b9\u4f1a\u6709\u4e0d\u540c\u7684\u89d2\u8272\uff0c\u901a\u5e38\u5305\u542b\u767b\u5f55\u8282\u70b9\uff0c\u7ba1\u7406\u8282\u70b9\u548c\u8ba1\u7b97\u8282\u70b9\u7b49\u3002\u767b\u5f55\u8282\u70b9\u987e\u540d\u601d\u4e49\u5c31\u662f\u7528\u6765\u767b\u5f55\u7684\u8282\u70b9\u3002\u7528\u6237\u4ece\u81ea\u5df1\u7535\u8111\u53ef\u4ee5\u767b\u5f55\u5230\u767b\u5f55\u8282\u70b9\u3002\u8ba1\u7b97\u8282\u70b9\u662f\u7528\u6765\u8ba1\u7b97\u7684\u8282\u70b9\uff0c\u4ed6\u4eec\u7684\u552f\u4e00\u4f7f\u547d\u5c31\u662f\u8ba1\u7b97\u3002\u7ba1\u7406\u8282\u70b9\u6bd4\u8f83\u7279\u6b8a\uff0c\u7528\u6765\u7ba1\u7406\u8ba1\u7b97\u8282\u70b9\uff0c\u6bd4\u5982\u5206\u914d\u67d0\u67d0\u8ba1\u7b97\u4efb\u52a1\u7ed9\u67d0\u51e0\u4e2a\u8ba1\u7b97\u8282\u70b9\u6765\u7b97\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#message-passing-interfacempi","title":"Message Passing Interface(MPI)\u5e76\u884c\u8ba1\u7b97\u7684\u6982\u5ff5","text":"

\u5e76\u884c\u8ba1\u7b97\u662f\u82e5\u5e72\u4e2a\u8282\u70b9\u4e00\u8d77\u6267\u884c\u8ba1\u7b97\u7684\u610f\u601d\u3002\u4ece\u8282\u70b9\u7684\u6982\u5ff5\u53ef\u4ee5\u77e5\u9053\uff0c\u4e00\u4e2a\u8282\u70b9\u7684\u5185\u5b58\u548c\u6838\u80af\u5b9a\u662f\u6709\u9650\u3002\u6bd4\u5982\uff0c\u73b0\u6709\u4e00\u4e2a\u8282\u70b9\u670924\u4e2a\u6838\u548c32GB\u7684\u5185\u5b58\uff0c\u6211\u4eec\u60f3\u6267\u884c\u4e00\u4e2a\u8ba1\u7b97\uff0c\u7528\u523048\u4e2a\u6838\uff0c\u81ea\u7136\u9700\u8981\u7528\u5230\u4e24\u4e2a\u8282\u70b9\u3002\u95ee\u9898\u662f\u53e6\u4e00\u4e2a\u8282\u70b9\u768424\u4e2a\u6838\u5982\u4f55\u8bfb\u53d6\u5230\u7b2c\u4e00\u4e2a\u8282\u70b9\u7684\u5185\u5b58\u91cc\u7684\u6570\u636e?\u8fd9\u4e00\u4e2a\u65f6\u5019\u5c31\u8981\u7528\u5230MPI/\u5e76\u884c\u8ba1\u7b97\u4e86\u3002MPI\u662f\u4fe1\u606f\u4f20\u8f93\u754c\u9762\u7684\u7b80\u79f0\u3002\u662f\u4e00\u79cd\u544a\u8bc9\u8282\u70b9\u600e\u4e48\u8de8\u8282\u70b9\u8bfb\u53d6\u5185\u5b58\u7684\u4ee3\u7801\u3002\u4e5f\u5c31\u662f\u8bf4\u8fd9\u662f\u8ba1\u7b97\u673a\u4ee3\u7801\u7684\u4e00\u90e8\u5206\uff0c\u6211\u4eec\u5e38\u7528\u7684\u8ba1\u7b97\u8f6f\u4ef6vasp\u6216cp2k\u90fd\u5df2\u7ecf\u5199\u5165\u4e86\uff0c\u6240\u4ee5\u53ea\u8981\u76f4\u63a5\u4f7f\u7528\u4fbf\u53ef\u4ee5\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#_3","title":"\u7ec4\u5185\u96c6\u7fa4\u77e5\u8bc6","text":"

\u672c\u8bfe\u9898\u7ec4\u4f7f\u7528 Zeus \u8ba1\u7b97\u96c6\u7fa4\u63d0\u4ea4\u8ba1\u7b97\u4efb\u52a1\u8fdb\u884c\u8ba1\u7b97\u6a21\u62df\u3002Zeus \u96c6\u7fa4\u7531\u4e24\u4e2a\u767b\u9646\u8282\u70b9\u3001\u4e00\u4e2a\u7ba1\u7406\u8282\u70b9\u3001\u4e09\u4e2a\u8ba1\u7b97\u96c6\u7fa4\u6784\u6210\uff0c\u6bcf\u4e2a\u8ba1\u7b97\u96c6\u7fa4\u5305\u542b\u591a\u4e2a\u8ba1\u7b97\u8282\u70b9\uff08\u542b\u516d\u4e2a GPU \u8282\u70b9\u548c\u4e00\u4e2a\u5927\u5185\u5b58\u80d6\u8282\u70b9\uff09\uff0c\u5176\u4e2d GPU \u8282\u70b9\u5305\u62ec\u4e00\u4e2a\u5b89\u88c5\u6709 4 \u5f20 V100 \u7684\u8282\u70b9\u3001\u4e00\u4e2a\u5b89\u88c5\u67094\u5f20 A100 \u7684\u8282\u70b9\u548c\u56db\u4e2a\u5b89\u88c5\u6709 8 \u5f20 2080 Ti \u7684\u8282\u70b9\u3002

\u76ee\u524d\uff0c\u6240\u6709 CPU \u8282\u70b9\u53ef\u4ee5\u901a\u8fc7\u540c\u4e00\u767b\u9646\u8282\u70b9\u8fdb\u884c\u63d0\u4ea4\uff0c\u4ee5\u4e0b\u5bf9\u96c6\u7fa4\u4f7f\u7528\u7684\u4e00\u4e9b\u6ce8\u610f\u4e8b\u9879\u8fdb\u884c\u8bf4\u660e\u3002\u5173\u4e8e GPU \u7684\u4f7f\u7528\uff0c\u8bf7\u53c2\u8003\u4f7f\u7528\u96c6\u7fa4\u4e0a\u7684GPU\u3002

\u4f7f\u7528\u4e0a\u8ff0\u96c6\u7fa4\u4e4b\u524d\uff0c\u4f60\u5fc5\u987b\u62e5\u6709\u4e00\u4e2a\u8d26\u53f7\u624d\u80fd\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4\u3002\u7533\u8bf7\u8d26\u53f7\u8bf7\u8054\u7cfb\u96c6\u7fa4\u7ba1\u7406\u5458\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#_4","title":"\u521b\u5efa\u5bc6\u94a5\u5bf9","text":"

Warning

\u65b0\u4eba\u5fc5\u5b66

ssh \u662f\u7528\u6765\u5b89\u5168\u8fdb\u884c\u767b\u5f55\u8fdc\u7a0b\u7535\u8111\u7684\u547d\u4ee4\u3002\u4f7f\u7528\u540e\uff0c\u6709\u4e24\u79cd\u9009\u62e9\u6765\u9a8c\u8bc1\u767b\u5f55

  1. \u4f7f\u7528\u5bc6\u7801
  2. \u4f7f\u7528\u5bc6\u94a5

\u7b2c\u4e00\u79cd\u65b9\u6cd5\u5df2\u7ecf\u4e3a\u5927\u4f17\u6240\u719f\u77e5\uff0c\u4f46\u662f\u4e0d\u5b89\u5168\uff0c\u76ee\u524d\u96c6\u7fa4\u5bf9\u65b0\u5f00\u8d26\u53f7\u539f\u5219\u4e0a\u4e0d\u63d0\u4f9b\u767b\u9646\u5bc6\u7801\u3002\u56e0\u6b64\u6211\u4eec\u91c7\u7528\u5bc6\u94a5\u8fdb\u884c\u767b\u5f55\u3002

\u4f7f\u7528\u5982\u4e0b\u547d\u4ee4\u751f\u6210\u5bc6\u94a5:

ssh-keygen\n

\u6839\u636e\u7ec8\u7aef\u7684\u63d0\u793a\u8fdb\u884c\u64cd\u4f5c\uff08\u5b9e\u9645\u4e0a\u4f60\u53ef\u80fd\u53ea\u9700\u8981\u4e0d\u505c\u6309enter\u952e\uff09\u3002\u9ed8\u8ba4\u60c5\u51b5\u4e0b\u4f60\u4f1a\u5728~/.ssh\u76ee\u5f55\u4e2d\u5f97\u5230id_rsa\u548cid_rsa.pub\u6587\u4ef6\uff0c\u4ed6\u4eec\u5206\u522b\u662f \u79c1\u94a5 \u548c \u516c\u94a5\u3002\u521b\u5efa\u597d\u4e86\u4e4b\u540e\u8bf7\u628a \u516c\u94a5 id_rsa.pub \u6587\u4ef6\u53d1\u7ed9\u670d\u52a1\u5668\u7ba1\u7406\u5458\u3002

Warning

\u79c1\u94a5\u662f\u767b\u5f55\u96c6\u7fa4\u7684\u94a5\u5319\uff0c\u8bf7\u52a1\u5fc5\u4fdd\u7ba1\u597d\u8fd9\u4e2a\u6587\u4ef6\uff0c\u9632\u6b62\u81ea\u5df1\u7684\u7535\u8111\u88ab\u5165\u4fb5

"},{"location":"wiki/cluster_usage/cluster_usage/#_5","title":"\u83b7\u53d6\u8d26\u53f7","text":"

\u96c6\u7fa4\u53ea\u5141\u8bb8\u5df2\u7ecf\u6388\u6743\u7684\u7528\u6237\u8fdb\u884c\u767b\u5f55\u3002\u5728\u4ece\u7ba1\u7406\u5458\u5904\u83b7\u5f97\u4f60\u7684\u8d26\u53f7\u540d\u548c\u521d\u59cb\u5bc6\u7801\u540e\uff0c Linux \u6216 Mac \u7528\u6237\u53ef\u76f4\u63a5\u4ece\u547d\u4ee4\u884c\u767b\u5f55\u96c6\u7fa4\uff0c\u4f7f\u7528 ssh \u547d\u4ee4\u5373\u53ef\u3002

$ ssh -p <port> username@ip_address\n

\u8bf7\u5c06 username \u548c ip_address \u66ff\u6362\u4e3a\u7ba1\u7406\u5458\u63d0\u4f9b\u7684\u8d26\u53f7\u548cIP\u5730\u5740\uff0c<port> \u66ff\u6362\u4e3a\u7aef\u53e3\u53f7\u3002

\u96c6\u7fa4\u5747\u91c7\u7528 Linux \u7cfb\u7edf\uff0c\u56e0\u6b64\u4e0d\u719f\u6089 Linux \u57fa\u672c\u64cd\u4f5c\u7684\u7528\u6237\uff08\u4f8b\u5982\u67e5\u770b\u6587\u4ef6\u3001\u7f16\u8f91\u6587\u672c\u3001\u590d\u5236\u6570\u636e\u7b49\uff09\u53ef\u4ee5\u53c2\u8003Linux\u5feb\u901f\u57fa\u7840\u5165\u95e8\uff0c\u5e76\u719f\u6089\u8fd9\u4e9b\u64cd\u4f5c\u3002\u672c\u6587\u6863\u5047\u8bbe\u7528\u6237\u6709\u4e00\u5b9a\u7684 Linux \u57fa\u7840\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#windows","title":"Windows \u7528\u6237","text":"

\u5bf9 Windows \u7528\u6237\u6765\u8bf4\uff0c\u53ef\u4ee5\u4f7f\u7528\u4ee5\u4e0b\u65b9\u6cd5\u767b\u9646\u96c6\u7fa4\u3002

  1. (Windows 10/11\u7528\u6237\u63a8\u8350)\u4f7f\u7528 WSL(Windows Subsystem for Linux)\u3002WSL \u662f Windows 10 \u65b0\u7248\u7684\u7279\u6027\uff0c\u53ef\u4f7f\u5f97\u7528\u6237\u5728 Windows \u7cfb\u7edf\u4e0b\u8fd0\u884c\u547d\u4ee4\u884c\u6a21\u5f0f\u7684 Ubuntu \u6216 OpenSUSE \u7b49\u5b50\u7cfb\u7edf\u3002\u4f7f\u7528 WSL \u7684\u7528\u6237\u53ef\u76f4\u63a5\u53c2\u8003 Linux \u7684\u4f7f\u7528\u65b9\u6cd5\u8fdb\u884c\u64cd\u4f5c\u3002\u5177\u4f53\u5b89\u88c5\u65b9\u5f0f\u53ef\u4ee5\u53c2\u8003\u5b98\u65b9\u6559\u7a0b\u3002 \u5bf9\u4e8e\u4f7f\u7528\u96c6\u7fa4\u7684\u5927\u591a\u6570\u9700\u6c42\uff0cWSL 1 \u5373\u53ef\u6ee1\u8db3\uff0c\u56e0\u6b64\u4e0d\u4e00\u5b9a\u9700\u8981\u5347\u7ea7\u5230 WSL 2 \u3002
  • \u8fd9\u79cd\u65b9\u6cd5\u5bf9\u4e8e\u56fe\u5f62\u754c\u9762\uff08VMD\u3001GNUPlot\uff09\u7b49\u652f\u6301\u8f83\u5dee\uff0c\u5c1a\u9700\u8981\u989d\u5916\u7684\u6b65\u9aa4\u914d\u7f6e\u56fe\u5f62\u754c\u9762\u8f6c\u53d1\uff0c\u8fd9\u91cc\u9650\u4e8e\u7bc7\u5e45\u539f\u56e0\u6682\u4e0d\u8fdb\u884c\u4ecb\u7ecd\u3002\u5982\u6709\u9700\u8981\u8bf7\u53c2\u8003\u8fd9\u91cc\u3002
  • \u76ee\u524d Windows 11 \u5df2\u7ecf\u63d0\u4f9b\u4e86\u5bf9\u56fe\u5f62\u754c\u9762\u7684\u76f4\u63a5\u652f\u6301\uff08\u8bf7\u53c2\u8003\uff09\uff0c\u4f46\u9700\u8981\u4f7f\u7528 WSL 2\u3002
  • \u6ce8\u610f\uff1a\u7531\u4e8e\u4ee3\u7406\u673a\u5236\u539f\u56e0\uff0cWSL 2 \u65e0\u6cd5\u76f4\u63a5\u4f7f\u7528\u684c\u9762\u7aef\u7684 Easy Connect VPN\u670d\u52a1\uff0c\u987b\u8bbe\u6cd5\u8fdb\u884c\u7aef\u53e3\u8f6c\u53d1\u3002WSL 1 \u53ef\u4ee5\u3002\u4e5f\u53ef\u4ee5\u8003\u8651\u4f7f\u7528 Easy Connect Docker \u955c\u50cf\uff0c\u901a\u8fc7Socks\u4ee3\u7406\u8bbf\u95eeSSH\u3002
  1. \u4f7f\u7528 Git Windows\u5ba2\u6237\u7aef\uff0c\u5176\u81ea\u5e26\u4e00\u4e2a\u57fa\u4e8eZsh\u7684shell\uff0c\u4ea6\u53ef\u4ee5\u63d0\u4f9b\u5bf9SSH\u7684\u652f\u6301\uff0c\u4f53\u9a8c\u66f4\u63a5\u8fd1\u539f\u751fBash\uff0c\u7f3a\u70b9\u662f\u6ca1\u6709SFTP\u7ba1\u7406\u7b49\u529f\u80fd\u3002

  2. \u4f7f\u7528 Xshell\u3001PuTTY \u7b49 SSH \u5ba2\u6237\u7aef\uff0cWindows 10 \u4ee5\u4e0b\u7684\u7528\u6237\u53ef\u4f7f\u7528\u8fd9\u79cd\u65b9\u5f0f\u3002\u8fd9\u7c7b SSH \u5ba2\u6237\u7aef\u53ef\u4ee5\u63d0\u4f9b\u8f83\u5b8c\u6574\u7684 SSH \u529f\u80fd\u3002\u5173\u4e8ePutty\u7684\u4f7f\u7528\u8bf7\u53c2\u8003\u3002

  3. \u4f7f\u7528\u865a\u62df\u673a\u5b89\u88c5 Linux\u3002\u82e5\u4e0d\u60f3\u5b89\u88c5 Linux \u53cc\u7cfb\u7edf\u53ef\u4ee5\u9009\u62e9\u4f7f\u7528\u8fd9\u79cd\u65b9\u5f0f\u3002\u6b63\u786e\u914d\u7f6e\u7684\u865a\u62df\u673a\u548c\u771f\u6b63\u4f7f\u7528 Linux \u51e0\u4e4e\u65e0\u5dee\u522b\u3002\u4f46\u865a\u62df\u673a\u542f\u52a8\u65f6\u95f4\u957f\uff0c\u4e14\u5b8c\u5168\u542f\u52a8\u65f6\u5360\u7528\u7cfb\u7edf\u8d44\u6e90\u8f83\u591a\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#_6","title":"\u76ee\u5f55\u7ed3\u6784","text":"

Zeus \u96c6\u7fa4\u5177\u6709\u5982\u4e0b\u7684\u76ee\u5f55\u7ed3\u6784\uff0c\u4e3a\u4e86\u4fdd\u6301\u7edf\u4e00\u6027\uff0c\u8bf7\u5728/data/username\uff08username\u8bf7\u66ff\u6362\u4e3a\u81ea\u5df1\u7684\u7528\u6237\u540d\uff09\u4e0b\u505a\u8ba1\u7b97\u3002

/data <--\u76ee\u524d\u7684\u6570\u636e\u76d8\uff08432TB\u5927\u5b58\u50a8\uff09\n\u251c\u2500\u2500 51-data <--\u539f51\u5907\u4efd\u540e\u7684\u6570\u636e\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 ...\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 ...\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 username\n\u251c\u2500\u2500 52-data <--\u539f52\u5907\u4efd\u540e\u7684\u6570\u636e\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 ...\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 ...\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 username\n\u251c\u2500\u2500 home <--Zeus(191)\u767b\u9646\u540e\u7684home\u6587\u4ef6\u5939\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 ...\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 ...\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 username\n\u251c\u2500\u2500 ...\n\u251c\u2500\u2500 ...\n\u2514\u2500\u2500 username <--\u5728\u8fd9\u91cc\u89e3\u538b\u6570\u636e\u3001\u63d0\u4ea4\u8ba1\u7b97\n
"},{"location":"wiki/cluster_usage/cluster_usage/#_7","title":"\u4f5c\u4e1a\u63d0\u4ea4","text":""},{"location":"wiki/cluster_usage/cluster_usage/#_8","title":"\u8ba1\u7b97\u8282\u70b9\u3001\u961f\u5217\u548c\u811a\u672c","text":"

\u901a\u8fc7sinfo\u547d\u4ee4\u53ef\u4ee5\u770b\u5230\uff0c\u76ee\u524d\u7684\u96c6\u7fa4\u5305\u62ec51/52/53\u4e09\u4e2a\u7c7b\u522b\uff0c\u5206\u522b\u4e3a51/52/53\u8ba1\u7b97\u96c6\u7fa4\uff0c51/52/53\u96c6\u7fa4\u7684\u8ba1\u7b97\u8282\u70b9\u5206\u522b\u5bf9\u5e94\u7f16\u53f7\u4e3ac51-00x/c52-00x/c53-00x\u3002

PARTITION   AVAIL  TIMELIMIT  NODES  STATE NODELIST\ngpu1           up   infinite      1   idle c51-g001\ngpu2           up   infinite      1   idle c51-g002\ngpu3           up   infinite      4   idle c51-m[001-004]\nc51-small      up      20:00     33   idle c51-[001-011,013-034]\nc51-medium     up   12:00:00     33   idle c51-[001-011,013-034]\nc51-large      up 1-00:00:00     33   idle c51-[001-011,013-034]\nc51-long       up 2-00:00:00     33   idle c51-[001-011,013-034]\nc51-xlong      up 3-00:00:00     33   idle c51-[001-011,013-034]\nc51-xlarge     up 1-00:00:00     33   idle c51-[001-011,013-034]\nc51-exlong     up 7-00:00:00     33   idle c51-[001-011,013-034]\nc52-small      up      20:00     40   idle c52-[001-040]\nc52-medium     up   12:00:00     40   idle c52-[001-040]\nc52-large      up 1-00:00:00     40   idle c52-[001-040]\nc52-long       up 2-00:00:00     40   idle c52-[001-040]\nc52-xlong      up 3-00:00:00     40   idle c52-[001-040]\nc52-xlarge     up 1-00:00:00     40   idle c52-[001-040]\nc52-exlong     up 7-00:00:00     40   idle c52-[001-040]\nc53-small      up      20:00     34   idle c53-[001-034]\nc53-medium     up   12:00:00     34   idle c53-[001-034]\nc53-large      up 1-00:00:00     34   idle c53-[001-034]\nc53-long       up 2-00:00:00     34   idle c53-[001-034]\nc53-xlong      up 3-00:00:00     34   idle c53-[001-034]\nc53-xlarge*    up 1-00:00:00     34   idle c53-[001-034]\n

\u7531\u4e8e\u5904\u7406\u5668\u6838\u6570\u4e0d\u540c\uff0c\u4efb\u52a1\u53ea\u80fd\u5728\u5177\u6709\u76f8\u540c\u6838\u6570\u7684\u8282\u70b9\u95f4\u5e76\u884c\uff0c\u7531\u6b64\u5bf9\u4e0d\u540c\u96c6\u7fa4\u7684\u8282\u70b9\u6309\u7167\u961f\u5217\u8fdb\u884c\u4e86\u5206\u7ec4\uff0c\u961f\u5217\u524d\u7f00\u5206\u522b\u4e3a51-/52-/53-\uff0c\u5176\u5bf9\u5e94\u6bcf\u4e2a\u8282\u70b9\u4e0a\u7684\u6838\u6570\u5206\u522b\u4e3a24/28/32\u3002\u901a\u8fc7sinfo\u547d\u4ee4\u53ef\u4ee5\u770b\u5230\u5f53\u524d\u96c6\u7fa4\u4e0a\u7684\u961f\u5217\u53ca\u5176\u4f7f\u7528\u60c5\u51b5\u3002

\u73b0\u7f16\u53f7\u4e3ac51-00x \u7684\u8282\u70b9\u9700\u901a\u8fc7\u961f\u5217c51-small\u3001c51-medium\u3001c51-large\u7b49\u7b49\u6765\u8fdb\u884c\u63d0\u4ea4\uff0c\u5e76\u8bbe\u7f6e\u6838\u6570\u4e3a24\u7684\u500d\u6570\uff0824\uff0c48\uff0c72\u7b49\uff09\u4ee5\u786e\u5b9a\u8282\u70b9\u6570\uff0c--ntasks-per-node=24\u3002 \u4f7f\u7528\u8282\u70b9\u7684\u6570\u91cf\u901a\u8fc7\u603b\u6838\u6570\u9664\u4ee5\u6bcf\u4e2a\u8282\u70b9\u6838\u6570\u7684\u503c\u6765\u786e\u5b9a\u3002 \u540c\u7406\uff0c\u82e5\u60f3\u4f7f\u7528\u7f16\u53f7\u4e3ac52-00x \u7684\u8282\u70b9\uff0c\u5219\u961f\u5217\u540d\u4e3ac52-small\u3001c52-medium\u3001c52-large\u7b49\u7b49\uff0c\u6838\u6570\u4e3a28\u7684\u500d\u6570\uff0828\uff0c56\uff0c84\u7b49\uff09\uff0c--ntasks-per-node=28\uff1b\u82e5\u60f3\u4f7f\u7528\u7f16\u53f7\u4e3ac53-00x \u7684\u8282\u70b9\uff0c\u5219\u961f\u5217\u540d\u4e3ac53-small\u3001c53-medium\u3001c53-large\u7b49\u7b49\uff0c\u6838\u6570\u4e3a32\u7684\u500d\u6570\uff0832\uff0c64\uff0c96\u7b49\uff09\uff0c--ntasks-per-node=32\u3002

GPU\uff08Tesla V100\u8282\u70b9\uff09\u548c\u80d6\u8282\u70b9\u4ecd\u6309\u716751\u8fdb\u884c\u7f16\u7ec4\uff0c\u7f16\u53f7\u5206\u522b\u4e3ac51-g001\u548cc51-s001\u3002

\u76ee\u524d\u6bcf\u4e2a\u961f\u5217\u4ecd\u9650\u5236\u540c\u65f6\u8fd0\u884c4\u4e2a\u4efb\u52a1\u3001\u961f\u5217\u5185\u4f7f\u7528\u81f3\u591a12\u4e2a\u8282\u70b9\u3002\u65b0\u589e\u5168\u5c40\u4efb\u52a1\u9650\u5236\uff0c\u5373\u4e09\u7ec4\u961f\u5217\u603b\u5171\u4f7f\u7528\u6838\u6570\u4e0d\u8d85\u8fc7556\uff0c\u82e5\u8d85\u51fa\u6b64\u9650\u5236\u5219\u4efb\u52a1\u4f1a\u5904\u4e8ePEND\u72b6\u6001\u3002

\u63d0\u4ea4\u811a\u672c\u793a\u4f8b\u653e\u5728/data/share/base/scripts\u91cc\u9762\uff0c\u8f6f\u4ef6\u7edf\u4e00\u5b89\u88c5\u5728/data/share/apps\u4e0b\uff0c\u76ee\u524d\u5b89\u88c5\u4e86VASP 5.4.4\u3001CP2K 7.1\u3001Gaussian 16\u3001Lammps\u3001Gromacs\u3001DeePMD-kit\u7b49\u3002

\u8fd9\u91cc\u5bf9\u4f5c\u4e1a\u63d0\u4ea4\u811a\u672c\u4e3e\u4f8b\u8bf4\u660e\u5982\u4e0b\uff1a

cp2k.slurm
#!/bin/bash\n\n#SBATCH -J cp2k\n#SBATCH -o cp2k.out.%j\n#SBATCH -e cp2k.err.%j\n#SBATCH -p c53-large\n#SBATCH -N 2\n#SBATCH --ntasks-per-node=32\n#SBATCH --exclusive\n#SBATCH --mem=8G\n\n# add modulefiles\nulimit -s unlimited\nmodule load intel/17.5.239 mpi/intel/2017.5.239\nmodule load gcc/5.5.0\nmodule load cp2k/7.1\n\nmpiexec.hydra cp2k.popt input.inp >& output_$LSB_JOBID\n

\u5176\u4e2d\uff1a

  • #SBATCH -p \u961f\u5217\u540d \u7528\u4e8e\u6307\u5b9a\u4f5c\u4e1a\u63d0\u4ea4\u7684\u961f\u5217\u3002
  • #SBATCH -t hh:mm:ss \u7528\u4e8e\u6307\u5b9a\u4efb\u52a1\u6240\u9700\u7684\u65f6\u95f4\uff08Walltime\uff09\uff0c\u82e5\u8fd0\u884c\u8d85\u8fc7hh:mm:ss\uff0c\u5219\u4efb\u52a1\u4f1a\u88ab\u7ba1\u7406\u7cfb\u7edf\u6740\u6b7b\u3002\u5bf9\u4e8e\u4e0d\u540c\u7c7b\u578b\u7684\u961f\u5217\uff0cWalltime\u4e0a\u9650\u6709\u6240\u4e0d\u540c\u3002\u5bf9small\u961f\u5217\u8981\u6c42\u572820\u5206\u949f\u4ee5\u5185\uff0c\u5bf9medium\u8981\u6c42\u572812\u5c0f\u65f6\u4ee5\u5185\uff0c\u5bf9large\u548cxlarge\u8981\u6c42\u572824\u5c0f\u65f6\u4ee5\u5185\uff0c\u5bf9long\u8981\u6c42\u572848\u5c0f\u65f6\u4ee5\u5185\uff0c\u5bf9xlong\u5219\u572872\u5c0f\u65f6\u4ee5\u5185\u3002
  • #SBATCH --job-name=cp2k\u6307\u5b9a\u4f5c\u4e1a\u540d\u79f0\uff0c\u4e00\u822c\u6309\u7167\u5b9e\u9645\u8ba1\u7b97\u6765\u53d6\u540d\u4ee5\u65b9\u4fbf\u67e5\u770b\u5b8c\u6210\u60c5\u51b5\u3002
  • #SBATCH -N 2\u6307\u5b9a\u4f5c\u4e1a\u63d0\u4ea4\u7684\u603b\u8282\u70b9\u6570\uff0c#SBATCH --ntasks-per-node=32\u6307\u5b9a\u63d0\u4ea4\u961f\u5217\u7684\u6bcf\u4e2a\u8282\u70b9\u4e0a\u7684CPU\u603b\u6838\u6570\uff0c\u4f8b\u5982\u8fd9\u91cc\u572853\u961f\u5217\u4e2d\u9009\u53d62\u4e2a\u8282\u70b9\u8fdb\u884c\u5e76\u884c\u8ba1\u7b97\uff0c\u5373\u4f7f\u7528\u4e8664\u4e2a\u6838\u3002
  • #SBATCH --mem=8G \u6307\u5b9a\u4f5c\u4e1a\u6240\u9700\u6d88\u8017\u7684\u5185\u5b58\uff0c\u4f8b\u5982\u8fd9\u91cc\u9650\u5236\u4f5c\u4e1a\u5360\u7528\u5185\u5b58\u4e3a 8 GB\u3002
  • module load xxx\u7528\u4e8e\u52a0\u8f7d\u73af\u5883\uff0c\u4fdd\u6301/data/share/base/scripts\u793a\u4f8b\u4e2d\u7684\u5199\u6cd5\u5373\u53ef\u3002
  • mpiexec.hydra cp2k.popt input.inp >& output_$SLURM_JOB_ID是实际执行任务的命令，其中 $SLURM_JOB_ID 为 Slurm 提供的作业编号变量。

\u53ef\u4ee5\u770b\u5230\uff0c\u4efb\u52a1\u63d0\u4ea4\u811a\u672c\u5b9e\u9645\u4e0a\u662f\u4e00\u4e2a\u5177\u6709\u7279\u6b8a\u6ce8\u91ca\u683c\u5f0f\u7684 bash \u811a\u672c\u3002\u56e0\u6b64\u5728\u52a0\u8f7d\u73af\u5883\u540e\uff0c\u53ef\u4ee5\u4f7f\u7528 bash \u8bed\u6cd5\u6765\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u3001\u63a7\u5236\u4efb\u52a1\u8fd0\u884c\u7684\u8def\u5f84\u3001\u8fdb\u884c\u6279\u5904\u7406\u7b49\u7b49\u3002

\u6ce8\u610f

\u5bf9\u4e8e fat \u548c gpu* \u961f\u5217\uff0c\u8bf7 \u52a1\u5fc5 \u6307\u5b9a\u4f5c\u4e1a\u6240\u9700\u7684\u5185\u5b58\u4e0d\u5c0f\u4e8e\u81ea\u5df1\u4efb\u52a1\u5b9e\u9645\u9700\u8981\u5185\u5b58\u5927\u5c0f\uff01 \u5426\u5219\u53ef\u80fd\u4f1a\u56e0\u4e3a\u5176\u4ed6\u4eba\u4efb\u52a1\u5c1a\u672a\u7ed3\u675f\u800c\u8fdf\u8fdf\u65e0\u6cd5\u8fd0\u884c\uff0c\u6216\u56e0\u4e3a\u7533\u8bf7\u4e86\u8fc7\u591a\u7684\u5185\u5b58\u8d44\u6e90\u800c\u4f7f\u5f97\u5176\u4ed6\u4efb\u52a1\u65e0\u6cd5\u63d0\u4ea4\u3002 \u4e3a\u4e86\u516c\u5e73\u4f7f\u7528\uff0c\u8bf7\u4e00\u5b9a\u9075\u5b88\u4e0a\u8ff0\u89c4\u5219\u3002 \u7ba1\u7406\u5458\u4f1a\u6839\u636e\u7528\u6237\u53cd\u9988\u76f4\u63a5\u6e05\u9664\u672a\u9075\u5faa\u89c4\u8303\u7684\u4efb\u52a1\u3002

\u76f8\u5e94\u5730\uff0c\u5176\u4ed6\u8ba1\u7b97\u961f\u5217\u901a\u5e38\u4f1a\u4e00\u4e2a\u4efb\u52a1\u72ec\u5360\u82e5\u5e72\u8282\u70b9\uff0c\u56e0\u800c\u4e0d\u9700\u8981\u8bbe\u7f6e\u5185\u5b58\uff0c\u4fdd\u6301\u9ed8\u8ba4\u2014\u2014\u5360\u6ee1\u5373\u53ef\u3002 \u8003\u8651\u5230 DFT \u8ba1\u7b97\u672c\u8eab\u5bf9\u5185\u5b58\u6709\u4e00\u5b9a\u9700\u6c42\uff0c\u8bf7\u52a1\u5fc5\u6ce8\u610f\u81ea\u5df1\u7684\u8bbe\u7f6e\u7b26\u5408\u5b9e\u9645\u60c5\u51b5\uff0c\u4ee5\u514d\u6d6a\u8d39\u5b9d\u8d35\u7684\u65f6\u95f4\u548c\u673a\u65f6\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#_9","title":"\u4f5c\u4e1a\u63d0\u4ea4","text":"

\u82e5\u7528\u6237\u5df2\u7ecf\u51c6\u5907\u597d\u76f8\u5e94\u8ba1\u7b97\u7684\u8f93\u5165\u548c\u63d0\u4ea4\u811a\u672c\uff0c\u5219\u53ef\u4ee5\u5bf9\u4efb\u52a1\u8fdb\u884c\u63d0\u4ea4\u3002\u4f8b\u5982\u63d0\u4ea4\u811a\u672c\u6587\u4ef6\u540d\u4e3acp2k.slurm\uff0c\u5219\u63d0\u4ea4\u547d\u4ee4\u4e3a\uff1a

sbatch cp2k.slurm\n

\u82e5\u63d0\u4ea4\u6210\u529f\uff0c\u53ef\u4ee5\u770b\u5230\u4ee5\u4e0b\u63d0\u793a\uff1a

Submitted batch job 1360\n

\u8868\u793a\u4efb\u52a1\u5df2\u7ecf\u6210\u529f\u63d0\u4ea4\u5230\u8282\u70b9\u4e0a\uff0c\u7f16\u53f7\u4e3a 1360\u3002

\u4efb\u52a1\u63d0\u4ea4\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7squeue -u <username>\u547d\u4ee4\u67e5\u770b\u81ea\u5df1\u4efb\u52a1\u7684\u8fd0\u884c\u60c5\u51b5\uff0c<username>\u5373\u81ea\u5df1\u7684\u7528\u6237\u540d\u3002

JOBID  PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n 1227 c52-medium      CoX     user  R   14:55:03      2 c52-[023,032]\n 1133 c51-medium     Cu13     user  R   16:10:27      2 c51-[024,031]\n 1360  c53-large     cp2k     user PD       0:00      2 (Resources)\n

\u5176\u4e2d JOBID \u5373\u4e3a\u4efb\u52a1\u7f16\u53f7\uff0cSTAT \u8868\u793a\u72b6\u6001\uff0cRUN \u5373\u4e3a\u6b63\u5728\u8fd0\u884c\uff0c\u800c PEND \u8868\u793a\u6b63\u5728\u6392\u961f\uff0c\u53ef\u80fd\u662f\u56e0\u4e3a\u7a7a\u4f59\u8282\u70b9\u6570\u4e0d\u8db3\u3002\u53ef\u4ee5\u770b\u5230\uff0c1227\u548c1133\u53f7\u4efb\u52a1\u6b63\u5728\u8fd0\u884c\uff0c\u5206\u522b\u4f7f\u7528\u4e862\u4e2a\u8282\u70b9\uff0c\u521a\u521a\u63d0\u4ea4\u76841360\u53f7\u4efb\u52a1\u5219\u5728\u6392\u961f\u3002

\u5982\u679c\u60f3\u8981\u505c\u6b62\u6216\u53d6\u6d88\u5df2\u7ecf\u63d0\u4ea4\u7684\u4efb\u52a1\uff0c\u5219\u4f7f\u7528\u547d\u4ee4\uff1a

scancel 1360\n

\u82e5\u770b\u5230 Job <1360> is being terminated \u7684\u63d0\u793a\uff0c\u5219\u8bf4\u660e\u505c\u6b62\u4efb\u52a1\u7684\u8bf7\u6c42\u5df2\u7ecf\u53d1\u51fa\u3002\u4e00\u6bb5\u65f6\u95f4\u540e\uff0c\u8be5\u4efb\u52a1\u5373\u88ab\u6740\u6b7b\u3002

\u94fe\u63a5

  • \u66f4\u591a\u4f7f\u7528\u6559\u7a0b\u548c\u8bf4\u660e\u8bf7\u53c2\u8003\uff1aSlurm\u4f5c\u4e1a\u8c03\u5ea6\u7cfb\u7edf\u4f7f\u7528\u6307\u5357
  • \u5173\u4e8eSlurm\u7cfb\u7edf\u548c\u5176\u4ed6\u8c03\u5ea6\u7cfb\u7edf\u95f4\u7684\u547d\u4ee4\u5bf9\u7167\uff0c\u8bf7\u53c2\u8003\u5b98\u65b9\u63d0\u4f9b\u7684 Rosetta\u77f3\u7891
"},{"location":"wiki/cluster_usage/cluster_usage/#_10","title":"\u767b\u51fa\u96c6\u7fa4","text":"

\u8bf7\u5728\u547d\u4ee4\u884c\u4e2d\u8f93\u5165:

exit\n

\u56de\u8f66\u5373\u53ef\u9000\u51fa\u767b\u9646\u3002

"},{"location":"wiki/cluster_usage/cluster_usage/#_11","title":"\u6821\u5916\u8bbf\u95ee","text":"

\u82e5\u4e3a\u5728\u6821\u5e08\u751f\uff0c\u53ef\u4f7f\u7528\u5b66\u6821\u63d0\u4f9b\u7684 SSLVPN \u767b\u9646\u96c6\u7fa4\u3002

\u8be6\u7ec6\u914d\u7f6e\u65b9\u6cd5\u8bf7\u53c2\u9605\uff1aSSLVPN \u4f7f\u7528\u8bf4\u660e-\u53a6\u95e8\u5927\u5b66VPN\u4e13\u9898\u7f51\u7ad9\u3002

"},{"location":"wiki/cluster_usage/conda/","title":"Anaconda \u4f7f\u7528\u6307\u5357","text":""},{"location":"wiki/cluster_usage/conda/#_1","title":"\u521d\u59cb\u5316\u8bbe\u5b9a","text":"

\u767b\u5f55 HPC

module load miniconda/3\nconda init bash\n
\u8fd9\u4f1a\u81ea\u52a8\u4fee\u9970\u4f60\u7684~/.bashrc\u6587\u4ef6 \u767b\u51faHPC\uff0c\u518d\u6b21\u767b\u9646

\u6253\u5f00\u4f60\u7684~/.condarc\u6587\u4ef6

vim ~/.condarc\n

\u4fee\u6539\u4ee5\u4e0b\u6587\u4ef6\u5e76\u653e\u5165\u4f60\u7684~/.condarc\u91cc

channels:\n  - defaults\nssl_verify: true\nenvs_dirs:\n#modify, this is where your environment file in\n  - /data/ch2_101/conda/env\npkgs_dirs:\n#modify, this is where your package file in\n  - /data/ch2_101/conda/pkgs\n

\u9000\u51fa\u6587\u4ef6

\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u786e\u8ba4\u4f60\u7684\u73af\u5883

conda env list\n

"},{"location":"wiki/cluster_usage/conda/#_2","title":"\u521b\u5efa\u4f60\u81ea\u5df1\u7684\u73af\u5883","text":"

\u521b\u5efa\u4f60\u81ea\u5df1\u7684\u73af\u5883\uff0c\u4e4b\u540e\u90fd\u542f\u7528\u81ea\u5df1\u7684\u73af\u5883\u8fdb\u884c\u4f7f\u7528

conda create -n <your env name> python\n\nconda activate <your env name>\n
"},{"location":"wiki/cluster_usage/conda/#_3","title":"\u4fee\u9970\u4f60\u7684\u7ec8\u7aef\u524d\u7f00","text":"

\u7528\u4e0a\u8ff0\u65b9\u6cd5\u521b\u9020\u7684\u73af\u5883\u4f1a\u5728\u4f60\u7684\u7ec8\u7aef\u547d\u4ee4\u884c\u524d\u52a0\u4e0a\u4e00\u957f\u4e32\u8def\u5f84\uff0c\u4f8b\uff1a

(/Users/USER_NAME/research/data-science/PROJECT_NAME/envs) $\n

\u53ef\u4ee5\u7528\u4ee5\u4e0b\u547d\u4ee4\u7f29\u77ed\u524d\u7f00

conda config --set env_prompt '({name})'\n

\u6b64\u547d\u4ee4\u4f1a\u4fee\u6539\u4f60\u7684.condarc\u6587\u4ef6

\u53c2\u8003\u6587\u732e

\u6ca1\u6709\u4e86\uff0c\u6109\u5feb\u7684\u7528conda\u8fdb\u884c\u6570\u636e\u5904\u7406\u5427!

"},{"location":"wiki/cluster_usage/gpu_usage/","title":"\u4f7f\u7528\u96c6\u7fa4\u4e0a\u7684 GPU","text":""},{"location":"wiki/cluster_usage/gpu_usage/#gpu_1","title":"GPU \u961f\u5217\u6982\u51b5","text":"

GPU\u8282\u70b9\u8c03\u5ea6\u91c7\u7528Slurm\u8c03\u5ea6\u7cfb\u7edf\u8fdb\u884c\u7ba1\u7406\u3002\u7528\u6237\u4f7f\u7528\u65f6\uff0c\u8bf7\u5728**191\u8282\u70b9**\u4e0a\u63d0\u4ea4\u3001\u7ba1\u7406\u4efb\u52a1\u3002

\u76ee\u524d\u8bfe\u9898\u7ec4GPU\u67096\u4e2a\u8282\u70b9\uff1a

  • c51-g001: \u8282\u70b9\u4e0a\u6709 4 \u5f20 Tesla V100\uff0c\u91c7\u7528\u961f\u5217\u540dgpu1\u8fdb\u884c\u63d0\u4ea4\u3002
  • c51-g002: \u8282\u70b9\u4e0a\u6709 4 \u5f20 A100\uff0c\u91c7\u7528\u961f\u5217\u540dgpu2\u8fdb\u884c\u63d0\u4ea4\u3002\u5176\u4e2d\u67092\u5f20\u5361\uff080,1\uff09\u4e3a\u5b8c\u6574\u7684A100 80G PCIe\uff0c\u53e6\u5916\u4e24\u5f20\uff082,3\uff09\u5df2\u5404\u81ea\u5207\u5206\u4e3a 7 \u4e2a MIG \u8ba1\u7b97\u5b9e\u4f8b\u3002
  • c51-m001、c51-m002、c51-m003、c51-m004: 每个节点上有 8 张 2080 Ti，采用队列名gpu3进行提交。

6\u4e2a\u8282\u70b9\u5747\u53ef\u8054\u7cfb\u7ba1\u7406\u5458\u5f00\u901a\u4f7f\u7528\u6743\u9650\u3002

"},{"location":"wiki/cluster_usage/gpu_usage/#_1","title":"\u961f\u5217\u9009\u62e9\u6307\u5bfc\uff08\u4f9b\u53c2\u8003\uff09","text":"

\u4ee5\u4e0b\u90e8\u5206\u662f\u4e00\u4e2a\u7b80\u5355\u7684\u6307\u5bfc\uff0c\u4ec5\u4f9b\u53c2\u8003\uff0c\u8bf7\u6839\u636e\u81ea\u5df1\u5b9e\u9645\u9700\u8981\u9009\u7528\u3002

gpu3 \u961f\u5217\u4e0a\u670932\u5f20 Nvidia 2080Ti \u663e\u5361\uff0c\u6bcf\u5f20\u5361\u63d0\u4f9b\u7ea611 GB\u663e\u5b58\u3002\u57fa\u672c\u4e0a\u5e73\u65f6\u5bf9\u767e\u539f\u5b50\u7ea7\u522b DeePMD \u52bf\u51fd\u6570\u7684\u8bad\u7ec3\u4e43\u81f3MD\u90fd\u53ef\u4ee5\u5b8c\u6210\uff0c\u6545\u5e73\u65f6DP-GEN\u6d41\u7a0b\u4f7f\u7528\u8be5\u961f\u5217\u8fdb\u884c\u8ba1\u7b97\u5373\u53ef\u3002

gpu1 \u961f\u5217\u914d\u7f6e\u67094\u5f20 Nvidia Tesla V100 \u663e\u5361\uff0c\u6bcf\u5f20\u5361\u63d0\u4f9b\u7ea632 GB\u663e\u5b58\uff0c\u4e14\u63d0\u4f9b\u5b8c\u6574\u7684\u53cc\u7cbe\u5ea6\u52a0\u901f\u652f\u6301\uff0c\u6545\u9002\u7528\u4e8e\u66f4\u5927\u4f53\u7cfb DeePMD \u7684\u8bad\u7ec3\u3002\u5bf9\u6a21\u578b\u8fdb\u884c\u957f\u8bad\u7ec3\u65f6\uff0c\u4e5f\u53ef\u4f7f\u7528\u6b64\u961f\u5217\u3002\u540c\u65f6\uff0c\u56e0\u5176\u5b8c\u6574\u7684\u53cc\u7cbe\u5ea6\u8ba1\u7b97\u652f\u6301\u4ee5\u53caNV-LINK\u7684\u5f15\u5165\uff0c\u4e00\u4e9b\u652f\u6301GPU\u52a0\u901f\u7684\u8ba1\u7b97\u8f6f\u4ef6\uff08\u5982VASP 6.1+\uff09\u4e5f\u63a8\u8350\u5728\u6b64\u8282\u70b9\u4e0a\u63d0\u4ea4\uff0c\u5e76\u53ef\u7528\u4e8e\u591a\u5361\u5e76\u884c\u3002

gpu2 \u961f\u5217\u914d\u7f6e\u67094\u5f20 Nvidia A100 \u663e\u5361\u3002\u5176\u4e2d\u4e24\u5f20\u5361\u4e3a\u5b8c\u6574\u5361\uff0c\u6bcf\u5f20\u63d0\u4f9b80 GB\u663e\u5b58\uff0c\u4e14\u63d0\u4f9b\u5b8c\u6574\u7684\u53cc\u7cbe\u5ea6\u52a0\u901f\u652f\u6301\uff0c\u9002\u7528\u4e8e\u9700\u8981\u66f4\u5927\u4f53\u7cfb DeePMD \u8bad\u7ec3\u4ee5\u53ca\u66f4\u5927\u4f53\u7cfb\u7684GPU\u52a0\u901f\u8ba1\u7b97\uff0c\u4e5f\u9002\u7528\u4e8e\u66f4\u5927Batch\u6570\u636e\u96c6\u7684\u52a0\u8f7d\uff0c\u4f8b\u5982\u9700\u8981\u5185\u5b58\u8f83\u591a\u7684 NLP \u6a21\u578b\u3002\u4f46\u6ce8\u610fA100\u672a\u63d0\u4f9bNV-LINK\u548cNV-Switch\uff0c\u6545\u8bf7\u52ff\u8fdb\u884c\u591a\u5361\u5e76\u884c\u8ba1\u7b97\uff0c\u4ee5\u514d\u6548\u7387\u8fbe\u4e0d\u5230\u9884\u671f\u3002

\u540c\u65f6\uff0cA100\u5f15\u5165\u4e86MIG\u529f\u80fd\uff0c\u53ef\u4ee5\u5c06\u5361\u62c6\u5206\u4e3a2-7\u4e2a\u5c0f\u578b\u7684GPU\u5b9e\u4f8b (GI)\uff0c\u6bcf\u4e2aGI\u53ef\u4ee5\u72ec\u7acb\u8fd0\u884cGPU\u8ba1\u7b97\u4efb\u52a1\uff0c\u901f\u5ea6\u76f8\u6bd4\u5728\u540c\u4e00\u5f20\u5361\u4e0a\u76f4\u63a5\u540c\u65f6\u8fd0\u884c\u591a\u4e2a\u4efb\u52a1\u7684\u60c5\u51b5\u4e0b\u6709\u660e\u663e\u63d0\u5347\uff0c\u4f46\u76f8\u6bd4\u5355\u4efb\u52a1\u901f\u5ea6\u4e0b\u964d50%\u4ee5\u5185\u3002\u76ee\u524d\uff0c\u8be5\u8282\u70b9\u914d\u7f6e\u4e3a2\u5f20\u5b8c\u6574\u768480 GB\u5361(0-1\u53f7\u5361)\u548c2\u5f20\u5207\u5206\u4e3a7\u4e2aGI\u7684\u5361(2-3\u53f7\u5361)\uff0c\u6bcf\u4e2aGI\u7684\u901f\u5ea6\u5927\u81f4\u4e0e2080Ti\u76f8\u8fd1\u4e14\u7565\u5f3a\uff0c\u6545\u53ef\u4ee5\u7528\u4e8eDP-GEN\u8bad\u7ec3\u3002\u901a\u8fc7Slurm\u8c03\u5ea6\u7cfb\u7edf\u53ef\u4ee5\u63a7\u5236\u4f7f\u7528\u5b8c\u6574\u7684 A100 \u8fd8\u662f\u5207\u5206\u540e\u7684\u5c0f\u5361\u3002

"},{"location":"wiki/cluster_usage/gpu_usage/#gpu_2","title":"\u63d0\u4ea4\u4efb\u52a1\u81f3 GPU","text":"

\u7531\u4e8e\u5609\u5e9a\u8d85\u7b97\u7684\u6295\u7528\uff0cSlurm\u7cfb\u7edf\u5c06\u5f97\u5230\u5e7f\u6cdb\u5e94\u7528\uff0c\u4e14\u540e\u8005\u53ef\u4ee5\u5b8c\u6574\u652f\u6301MIG\u7b49GPU\u786c\u4ef6\u65b0\u7279\u6027\uff0c\u6545\u76ee\u524d\u8ba1\u5212\u9010\u6b65\u5207\u6362\u81f3Slurm\u8c03\u5ea6\u3002 \u76ee\u524dGPU\u7684\u8c03\u5ea6\u5df2\u7ecf\u5168\u90e8\u5207\u6362\u81f3Slurm\u3002 \u5173\u4e8eSlurm\u4ecb\u7ecd\u7684\u90e8\u5206\u5c06\u5728\u5168\u9762\u8fc1\u79fb\u540e\uff0c\u72ec\u7acb\u6210\u4e00\u7bc7\u6587\u6863\u3002

"},{"location":"wiki/cluster_usage/gpu_usage/#gpu1gpu3","title":"gpu1\u548cgpu3\u961f\u5217","text":"

\u5e38\u89c4\u4f7f\u7528gpu1\u961f\u5217\u548cgpu3\u961f\u5217\u7684\u793a\u4f8b\u811a\u672c\u653e\u5728/data/share/base/scripts\u4e0b\uff0c\u4e3e\u4f8b\u5982\u4e0b\uff1a

deepmd.sub
#!/bin/bash\n#SBATCH -N 1\n#SBATCH --ntasks-per-node=1\n#SBATCH -t 96:00:00\n#SBATCH --partition=gpu3\n#SBATCH --gres=gpu:1\n#SBATCH --mem=8G\n\n# add modulefiles\nmodule add deepmd/2.0-cuda11.3\n\ndp train input.json 1>> train.log 2>> train.err\ndp freeze  1>> train.log 2>> train.log\n

\u5176\u4e2d -N 1\u8868\u793a\u4f7f\u75281\u4e2a\u8282\u70b9\uff0c--ntasks-per-node=1 \u8868\u793a\u6bcf\u4e2a\u8282\u70b9\u4e0a\u4f7f\u75281\u4e2aCPU\u6838\uff0c--partition=gpu3\u5373\u8868\u793a\u63d0\u4ea4\u4efb\u52a1\u5230gpu3\u961f\u5217\u4e0a\uff0c--gres=gpu:1\u5373\u5206\u914d\u5176\u4e2d\u76841\u5f20\u5361\u7ed9\u4efb\u52a1\u3002gpu3\u4e2d\u6bcf\u4e2a\u8282\u70b9\u67098\u5f202080Ti\u5361\uff0c\u56e0\u800c\u4e0a\u8ff0\u547d\u4ee4\u7ec4\u5408\u8d77\u6765\u5373\u8868\u793a\u5206\u914d1\u4e2a\u8282\u70b9\u4e0a\u76841\u4e2aCPU\u6838\u4ee5\u53ca1\u5f202080Ti\u5361\u7528\u4e8e\u8ba1\u7b97\u3002

\u82e5\u9700\u8981\u4f7f\u7528\u5176\u4ed6\u961f\u5217\uff0c\u53ea\u9700\u5c06--partition\u7684\u53c2\u6570\u4fee\u6539\u4e3a\u5bf9\u5e94\u7684\u961f\u5217\uff0c\u5373gpu1\u548cgpu3\u3002

\u5173\u4e8e\u5185\u5b58\u7528\u91cf\u7684\u8bf4\u660e

\u6ce8\u610f --mem=8G \u8868\u793a\u5185\u5b58\u6d88\u8017\u4e3a 8 GB\u3002\u76ee\u524d\u96c6\u7fa4\u8bbe\u7f6e\u4e86\u9ed8\u8ba4\u503c\uff0c\u5373\u5728\u4e0d\u5199\u7684\u60c5\u51b5\u4e0b\uff0c\u6bcf\u5206\u914d 1 \u5f20GPU\u5361\u53ef\u4f7f\u7528 16 GB \u7269\u7406\u5185\u5b58\u3002 \u82e5\u9700\u8981\u66f4\u591a\u7269\u7406\u5185\u5b58\uff0c\u8bf7\u624b\u52a8\u6307\u5b9a\u8be5\u503c\u4e3a\u66f4\u5927\u7684\u6570\u503c\uff0c\u4ee5\u514d\u4efb\u52a1\u7531\u4e8e\u8d85\u51fa\u9ed8\u8ba4\u5185\u5b58\u9650\u5236\u6216\u56e0\u4e3a\u5176\u4ed6\u4efb\u52a1\u6324\u5360\u3001\u8d44\u6e90\u4e0d\u8db3\u800c\u88ab\u7cfb\u7edf\u56e0 OOM (Out of Memory) \u539f\u56e0\u5f3a\u5236\u9000\u51fa\u3002 \u4f8b\u5982\uff1a--mem=24G \u5373\u53ef\u5206\u914d\u6bcf\u4e2a\u4efb\u52a1\u4f7f\u752824GB\u5185\u5b58\u3002 \u76ee\u524d gpu1 \u548c gpu2 \u961f\u5217\u6bcf\u4e2a\u8282\u70b9\u7684\u603b\u5185\u5b58\u4e3a 256 GB\uff0c gpu3 \u961f\u5217\u6bcf\u4e2a\u8282\u70b9\u603b\u5185\u5b58\u4e3a 128 GB\uff0c\u56e0\u800c\u6ce8\u610f\u5982\u679c\u6bcf\u4e2a\u4efb\u52a1\u5206\u914d\u5185\u5b58\u8fc7\u5927\uff0c\u53ef\u80fd\u4f1a\u5bfc\u81f4\u5361\u7a7a\u7f6e\u4f46\u6ca1\u6709\u8db3\u591f\u7684\u5185\u5b58\u5206\u914d\u7684\u95ee\u9898\u3002 \u56e0\u6b64\u8bf7\u52a1\u5fc5\u6839\u636e\u81ea\u5df1\u7684\u5b9e\u9645\u9700\u8981\u6307\u5b9a\u8be5\u53c2\u6570\u4ee5\u4fdd\u8bc1\u516c\u5e73\u4f7f\u7528\uff01

"},{"location":"wiki/cluster_usage/gpu_usage/#gpu2","title":"gpu2\u961f\u5217","text":"

gpu2\u961f\u5217\u63d0\u4f9b\u4e862\u5f20\u5b8c\u6574A100 80G\u5361\u4f9b\u5927\u4efb\u52a1\u4f7f\u7528\uff0c\u4ee5\u53ca2\u5f20\u5206\u5361\u517114\u4e2a\u5b9e\u4f8b\u4f9b\u76f8\u5bf9\u6bd4\u8f83\u96f6\u6563\u7684\u4efb\u52a1\u4f7f\u7528\u3002

\u5b8c\u6574\u5361\u4f7f\u7528\u65f6\uff0c\u53ef\u53c2\u7167gpu1\u548cgpu3\u961f\u5217\uff0c\u5c06--gres\u7684\u53c2\u6570\u6539\u4e3agpu:a100:1\u5373\u53ef\uff0c\u5176\u4e2d1\u4ecd\u8868\u793a\u5206\u914d1\u5f20\u5361\u3002

MIG \u5b9e\u4f8b\uff08\u5373\u4fd7\u79f0\u7684A100\u5206\u5361\u3001\u5c0f\u5361\uff09\u7684\u4f7f\u7528\u811a\u672c\u653e\u5728/data/share/base/scripts\u4e0b\uff0c\u4e3e\u4f8b\u5982\u4e0b\uff1a

cp2k_mig.sub
#!/bin/bash -l\n#SBATCH --parsable\n#SBATCH --nodes 1\n#SBATCH --ntasks-per-node 1\n#SBATCH --partition gpu2\n#SBATCH --gres=gpu:1g.10gb:1\n#SBATCH --time=96:00:00\n#SBATCH --mem=4G\n\nmodule load deepmd/2.1\ncp2k.ssmp -i input.inp 1>>output 2>>err.log\n

\u5176\u4e2d--gres=gpu:1g.10gb:1\u5373\u8868\u793a\u5206\u914d 1 \u4e2aMIG\u5b9e\u4f8b\u7ed9\u4efb\u52a1\u4f7f\u7528\u3002

\u6ce8\u610f

A100\u5206\u914dGPU\u7684\u547d\u4ee4\u9700\u8981\u5199\u660e\u786c\u4ef6\u7c7b\u578b\uff0c\u5426\u5219Slurm\u5728\u5206\u914d\u8d44\u6e90\u65f6\u65e0\u6cd5\u533a\u5206\u3002

"},{"location":"wiki/cluster_usage/gpu_usage/#slurm","title":"\u5173\u4e8eSlurm\u4f5c\u4e1a\u7ba1\u7406\u7cfb\u7edf","text":"

\u82e5\u7528\u6237\u5df2\u7ecf\u51c6\u5907\u597d\u76f8\u5e94\u8ba1\u7b97\u7684\u8f93\u5165\u548c\u63d0\u4ea4\u811a\u672c\uff0c\u5219\u53ef\u4ee5\u5bf9\u4efb\u52a1\u8fdb\u884c\u63d0\u4ea4\u3002\u4f8b\u5982\u63d0\u4ea4\u811a\u672c\u6587\u4ef6\u540d\u4e3adeepmd.sub\uff0c\u5219\u63d0\u4ea4\u547d\u4ee4\u4e3a\uff1a

sbatch deepmd.sub\n

\u82e5\u63d0\u4ea4\u6210\u529f\uff0c\u53ef\u4ee5\u770b\u5230\u4ee5\u4e0b\u63d0\u793a\uff1a

Submitted batch job 630\n

\u8868\u793a\u4efb\u52a1\u5df2\u7ecf\u6210\u529f\u63d0\u4ea4\u5230\u8282\u70b9\u4e0a\uff0c\u7f16\u53f7\u4e3a 630\u3002

\u4efb\u52a1\u63d0\u4ea4\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7squeue\u547d\u4ee4\u67e5\u770b\u96c6\u7fa4\u4e0a\u4efb\u52a1\u7684\u8fd0\u884c\u60c5\u51b5\u3002

JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n  620      gpu2    100-2     user  R    5:47:46      1 c51-g002\n  619      gpu2    150-2     user  R    7:19:49      1 c51-g002\n  630      gpu3 deepmd.s    ypliu PD       0:00      1 (Resources)\n  623      gpu3 deepmd.s     user  R       0:22      1 c51-m001\n  625      gpu3    ec_dp     user  R      55:28      1 c51-m001\n  610      gpu3 deepmd.s     user  R   19:04:13      1 c51-m003\n  609      gpu3 deepmd.s     user  R   19:05:22      1 c51-m002\n

\u5176\u4e2d JOBID \u5373\u4e3a\u4efb\u52a1\u7f16\u53f7\uff0cST \u8868\u793a\u72b6\u6001\uff0cR \u5373\u4e3a\u6b63\u5728\u8fd0\u884c\uff0c\u800c PD \u8868\u793a\u6b63\u5728\u6392\u961f\uff0c\u53ef\u80fd\u662f\u56e0\u4e3a\u7a7a\u4f59\u5361\u6570\u4e0d\u8db3\u3002\u53ef\u4ee5\u770b\u5230\uff0c623\u53f7\u4efb\u52a1\u6b63\u5728\u8fd0\u884c\uff0c\u53ef\u80fd\uff0c\u521a\u521a\u63d0\u4ea4\u7684630\u53f7\u4efb\u52a1\u5219\u5728\u6392\u961f\u3002

\u5982\u679c\u60f3\u8981\u505c\u6b62\u6216\u53d6\u6d88\u5df2\u7ecf\u63d0\u4ea4\u7684\u4efb\u52a1\uff0c\u5219\u4f7f\u7528\u547d\u4ee4\uff1a

scancel 630\n

\u4e00\u6bb5\u65f6\u95f4\u540e\uff0c\u8be5\u4efb\u52a1\u5373\u88ab\u6740\u6b7b\u3002

Slurm \u4e0e LSF \u547d\u4ee4\u5bf9\u7167\u8868\u5982\u4e0b\u6240\u793a\uff1a

LSF Slurm \u63cf\u8ff0 bsub < script_file sbatch script_file \u63d0\u4ea4\u4efb\u52a1\uff0c\u4f5c\u4e1a\u811a\u672c\u540d\u4e3ascript_file bkill 123 scancel 123 \u53d6\u6d88\u4efb\u52a1\uff0c\u4f5c\u4e1a ID \u53f7\u4e3a 123 bjobs squeue \u6d4f\u89c8\u5f53\u524d\u7528\u6237\u63d0\u4ea4\u7684\u4f5c\u4e1a\u4efb\u52a1 bqueues sinfosinfo -s \u6d4f\u89c8\u5f53\u524d\u8282\u70b9\u548c\u961f\u5217\u4fe1\u606f\uff0c'-s'\u547d\u4ee4\u8868\u793a\u7b80\u6613\u8f93\u51fa bhosts sinfo -N \u67e5\u770b\u5f53\u524d\u8282\u70b9\u5217\u8868 bjobs -l 123 scontrol show job 123 \u67e5\u770b 123 \u53f7\u4efb\u52a1\u7684\u8be6\u7ec6\u4fe1\u606f\u3002\u82e5\u4e0d\u6307\u5b9a\u4efb\u52a1\u53f7\u5219\u8f93\u51fa\u5f53\u524d\u6240\u6709\u4efb\u52a1\u4fe1\u606f bqueues -l queue scontrol show partition queue \u67e5\u770b\u961f\u5217\u540d\u4e3aqueue\u7684\u961f\u5217\u7684\u8be6\u7ec6\u4fe1\u606f\u3002\u82e5\u4e0d\u6307\u5b9a\u961f\u5217\u5219\u8fd4\u56de\u5f53\u524d\u6240\u6709\u53ef\u7528\u961f\u5217\u7684\u8be6\u7ec6\u4fe1\u606f\u3002 bhosts -l g001 scontrol show node g001 \u67e5\u770b\u8282\u70b9\u540d\u4e3a g001\u7684\u8282\u70b9\u72b6\u6001\u3002\u82e5\u4e0d\u6307\u5b9a\u8282\u70b9\u5219\u8fd4\u56de\u5f53\u524d\u6240\u6709\u8282\u70b9\u4fe1\u606f\u3002 bpeek 123 speek 123 * \u67e5\u770b 123 \u53f7\u4efb\u52a1\u7684\u6807\u51c6\u8f93\u51fa\u3002

* speek \u547d\u4ee4\u4e0d\u662f Slurm \u6807\u51c6\u547d\u4ee4\uff0c\u4ec5\u9002\u7528\u539f Metal \u96c6\u7fa4\u4f7f\u7528\u3002

\u4f5c\u4e1a\u63d0\u4ea4\u811a\u672c\u5bf9\u7167\u8868\uff1a

LSF Slurm \u63cf\u8ff0 #BSUB #SBATCH \u524d\u7f00 -q queue_name -p queue_name \u6216 --partition=queue_name \u6307\u5b9a\u961f\u5217\u540d\u79f0 -n 64 -n 64 \u6307\u5b9a\u4f7f\u752864\u4e2a\u6838 --- -N 1 \u4f7f\u75281\u4e2a\u8282\u70b9 -W [hh:mm:ss] -t [minutes] \u6216 -t [days-hh:mm:ss] \u6307\u5b9a\u6700\u5927\u4f7f\u7528\u65f6\u95f4 -o file_name -o file_name \u6307\u5b9a\u6807\u51c6\u8f93\u51fa\u6587\u4ef6\u540d -e file_name -e file_name \u6307\u5b9a\u62a5\u9519\u4fe1\u606f\u6587\u4ef6\u540d -J job_name --job-name=job_name \u4f5c\u4e1a\u540d -M 128 -mem-per-cpu=128M \u6216 --mem-per-cpu=1G \u9650\u5236\u5185\u5b58\u4f7f\u7528\u91cf -R \"span[ptile=16]\" --tasks-per-node=16 \u6307\u5b9a\u6bcf\u4e2a\u6838\u4f7f\u7528\u7684\u8282\u70b9\u6570

\u901a\u8fc7 scontrol \u547d\u4ee4\u53ef\u4ee5\u65b9\u4fbf\u5730\u4fee\u6539\u4efb\u52a1\u7684\u961f\u5217\u3001\u622a\u6b62\u65f6\u95f4\u3001\u6392\u9664\u8282\u70b9\u7b49\u4fe1\u606f\uff0c\u4f7f\u7528\u65b9\u6cd5\u7c7b\u4f3c\u4e8e LSF \u7cfb\u7edf\u7684 bmod \u547d\u4ee4\uff0c\u4f46\u4f7f\u7528\u4e0a\u66f4\u52a0\u7b80\u6d01\u3002

\u94fe\u63a5

\u66f4\u591a\u4f7f\u7528\u6559\u7a0b\u548c\u8bf4\u660e\u8bf7\u53c2\u8003\uff1aSlurm\u4f5c\u4e1a\u8c03\u5ea6\u7cfb\u7edf\u4f7f\u7528\u6307\u5357

"},{"location":"wiki/cluster_usage/gpu_usage/#dpgen-gpu","title":"dpgen \u63d0\u4ea4 GPU \u4efb\u52a1\u53c2\u6570\u8bbe\u7f6e","text":"

\u8bf7\u53c2\u8003DP-GEN\u4f7f\u7528\u8bf4\u660e\u3002

"},{"location":"wiki/cluster_usage/jupyter/","title":"Jupyter \u7cfb\u5217\u4f7f\u7528\u6307\u5357","text":""},{"location":"wiki/cluster_usage/jupyter/#jupyter-notebook","title":"Jupyter Notebook","text":""},{"location":"wiki/cluster_usage/jupyter/#jupyter-notebook-python","title":"\u8f6c\u5316 Jupyter Notebook \u4e3a Python \u811a\u672c","text":"
jupyter nbconvert --to script *.ipynb\n
"},{"location":"wiki/cluster_usage/jupyter/#jupyter-notebook_1","title":"\u8fdc\u7a0b\u6253\u5f00 Jupyter Notebook","text":"

Jupyter notebook \u53ef\u4ee5\u901a\u8fc7\u672c\u5730\u7535\u8111\u7684\u6d4f\u89c8\u5668\u6253\u5f00\u3002\u4f46\u5982\u679c\u4f60\u60f3\u5728\u8fdc\u7a0b\u7535\u8111\u4e0a\uff08\u5982\u96c6\u7fa4\uff09\u6253\u5f00\uff0c\u600e\u4e48\u529e\uff1f\u8fdc\u7a0b\u6253\u5f00 Jupyter notebook \u7684\u597d\u5904\u5c31\u662f\u53ef\u4ee5\u4e0d\u7528\u4e0b\u8f7d\u6570\u636e\uff0c\u76f4\u63a5\u8fdc\u7a0b\u5904\u7406\u3002\u4f46\u662f\u7531\u4e8e\u96c6\u7fa4\u5e76\u6ca1\u6709\u663e\u793a/\u8f93\u51fa\u88c5\u7f6e\uff0c\u4f60\u9700\u8981\u901a\u8fc7\u5176\u4ed6\u65b9\u6cd5\u6765\u6253\u5f00 Jupyter notebook\u3002

\u8fdc\u7a0b\u6253\u5f00\u7684\u65b9\u6cd5

  • \u4f7f\u7528\u5982\u4e0b\u547d\u4ee4\u5728\u96c6\u7fa4\u4e0a\u6253\u5f00\u4f60\u7684 jupyter notebook\uff1a
# \u5728\u8fdc\u7a0b\u96c6\u7fa4\u8fd0\u884c\u5982\u4e0b\u547d\u4ee4\n# <port number>\u7531\u4f60\u81ea\u5df1\u51b3\u5b9a\uff0c\u6bd4\u5982 9898\njupyter notebook --no-browser --port=<port number>\n
  • \u5728\u4f60\u7684\u672c\u5730\u7684\u7535\u8111\u4f7f\u7528\u5982\u4e0b\u547d\u4ee4\uff1a
# <port number>\u7531\u4f60\u81ea\u5df1\u51b3\u5b9a\uff0c\u6bd4\u5982 9898\uff0c\u662f\u8ddf\u8fdc\u7a0b\u6253\u5f00\u7684\u7aef\u53e3\u5bf9\u5e94\u3002\nssh -N -f -L localhost:8888:localhost:<port number> username@your_remote_host_name\n

ssh \u767b\u9646\u7684\u547d\u4ee4\u53ef\u4ee5\u67e5\u770b\u8fd9\u91cc\u8fdb\u884c\u7b80\u5316.

  • \u6253\u5f00\u672c\u5730\u7535\u8111\u7684\u6d4f\u89c8\u5668\uff0c\u8f93\u5165localhost:8888 \u3002\u7136\u540e\u4f1a\u5f39\u51fa\u8f93\u5165 password \u6216 token\u7684\u9875\u9762, \u4f60\u53ef\u4ee5\u5728\u96c6\u7fa4\u4e0a\u8f93\u5165\u5982\u4e0b\u547d\u4ee4\u6765\u67e5\u770b\uff1a
#type this command in your remote computer, you can find token to enter remote notebook\njupyter notebook list\n
"},{"location":"wiki/cluster_usage/jupyter/#jupyter_1","title":"\u5229\u7528\u7a7a\u8282\u70b9\u8fd0\u884c Jupyter","text":"

\u7531\u4e8e\u767b\u9646\u8282\u70b9\u8d44\u6e90\u5341\u5206\u6709\u9650\uff0c\u5b9e\u9645\u4e0a\u4e0d\u592a\u5efa\u8bae\u5728\u767b\u9646\u8282\u70b9\u4e0a\u76f4\u63a5\u8fd0\u884c Jupyter \u670d\u52a1\u3002\u8fd9\u91cc\u63d0\u4f9b\u4e00\u79cd\u53ef\u80fd\u7684\u65b9\u6848\uff0c\u901a\u8fc7 LSF \u542f\u52a8 Jupyter \u670d\u52a1\uff0c\u5b9e\u73b0\u5728\u8fdc\u7a0b\u7684\u8c03\u7528\u3002

\u9996\u5148\u5728\u81ea\u5df1\u5e0c\u671b\u4f5c\u4e3a Jupyter \u6839\u76ee\u5f55\u7684\u6587\u4ef6\u5939\u4e0b\u7f16\u8f91\u63d0\u4ea4\u811a\u672c\uff08\u4f8b\u5982jupyter.lsf\uff09\uff1a

#!/bin/bash\n#BSUB -q fat\n#BSUB -J deepmd\n#BSUB -o %J.stdout\n#BSUB -e %J.stderr\n\n# add modulefiles\nsource ~/.bashrc\n\ncat /etc/hosts | grep c51-s001\njupyter-lab --ip=0.0.0.0 --port=<port>\n

\u5982\u56fe\u5373\u4f7f\u7528\u4e86\u80d6\u8282\u70b9\u7684 1 \u4e2a\u6838\u6765\u5f00\u542f\u4efb\u52a1\uff0c\u540c\u65f6\u5728\u4efb\u52a1\u8f93\u51fa\u4e2d\u663e\u793a\u51fa\u80d6\u8282\u70b9\u6240\u5728\u7684 IP \u5730\u5740\uff0c\u8bf7\u5728\u63d0\u4ea4\u540e\u7a0d\u7b49\u7247\u523b\u540e\u901a\u8fc7 bpeek \u547d\u4ee4\u67e5\u770b\uff08\u53ef\u80fd\u4e00\u5f00\u59cb\u662f\u7a7a\u7684\uff0c\u7a0d\u540e\u4f1a\u6709\u8f93\u51fa\uff09\uff1a

123.45.67.89 c51-s001 c51-s001.hpc.xmu\n

\u5047\u8bbe\u8f93\u51fa\u4e3a 123.45.67.89\uff0c\u5219\u53ef\u5728\u672c\u5730\u8fd0\u884c\u547d\u4ee4\uff1a

ssh -L <local_port>:123.45.67.89:<port> <username>@<ip_of_cluster>\n

\u5176\u4e2d<local_port>\u4e3a\u672c\u5730\u4efb\u610f\u7aef\u53e3\uff0c<port>\u4e0e\u4f5c\u4e1a\u811a\u672c\u4fdd\u6301\u4e00\u81f4\uff0c\u5176\u4f59\u90e8\u5206\u4e0e\u5e73\u65f6\u767b\u9646\u547d\u4ee4\u4fdd\u6301\u4e00\u81f4\uff0c\u6ce8\u610f\u4e0d\u8981\u6f0f\u6389-p xxxx\u3002\u6b64\u90e8\u5206\u7684\u8bf4\u660e\u8bf7\u53c2\u8003SSH \u4f7f\u7528\u8bf4\u660e\u3002

\u5728\u672c\u5730\u6d4f\u89c8\u5668\u8f93\u5165\uff1alocalhost:<local_port>\u5373\u53ef\u8bbf\u95ee\u8fd9\u4e00\u8fdc\u7a0b Jupyter \u670d\u52a1\u3002

\u6b64\u9014\u5f84\u6700\u5927\u7684\u597d\u5904\u662f\u53ef\u4ee5\u5728 GPU \u96c6\u7fa4\u4e0a\u8fd0\u884c\uff0c\u4ece\u800c\u53ef\u4ee5\u76f4\u63a5\u8c03\u7528 GPU \u5361\u3002\u4f46\u8bf7\u6ce8\u610f\uff0c\u9700\u8981\u5728\u811a\u672c\u4e2d\u6307\u5b9a\u6240\u9700\u7684 GPU \u5361\u6570\u3002

#!/bin/bash\n#BSUB -q gpu\n#BSUB -W 24:00\n#BSUB -J deepmd\n#BSUB -o %J.stdout\n#BSUB -e %J.stderr\n#BSUB -n 8\n#BSUB -gpu \"num=1:mode=shared:mps=no:j_exclusive=yes\"\n#BSUB -R \"span[ptile=8]\"\n\n# add modulefiles\nsource ~/.bashrc\n\n#dp train input.json 1>> train.log 2>> train.err\ncat /etc/hosts\njupyter-lab --ip=0.0.0.0 --port=8888\n

\u5982\u679c\u60f3\u5728 Jupyter \u4e2d\u8c03\u7528\u865a\u62df\u73af\u5883\uff08\u5982myenv\uff09\uff0c\u9700\u8981\u5728\u5bf9\u5e94\u865a\u62df\u73af\u5883\u4e2d\u5b89\u88c5 ipykernel\u548c\u73af\u5883\u7684 kernel\u3002[\u53c2\u8003\u8d44\u6599]

# \u6fc0\u6d3b\u865a\u62df\u73af\u5883 myenv\n# \u4e5f\u53ef\u7528 conda activate myenv\nsource activate myenv\nconda install pip\nconda install ipykernel\n# \u5b9e\u9645\u4f7f\u7528\u4e2d\u9700\u66ff\u6362 myenv \u548c \"Python (myenv)\"\npython -m ipykernel install --user --name myenv --display-name \"Python (myenv)\"\n
"},{"location":"wiki/cluster_usage/jupyter/#jupyter-lab","title":"Jupyter Lab","text":"

Under construction

"},{"location":"wiki/cluster_usage/jupyter/#jupyter-hub","title":"Jupyter Hub","text":"

Under construction

"},{"location":"wiki/cluster_usage/notification_for_hpc/","title":"\u8ba1\u7b97\u4efb\u52a1\u7684\u63a8\u9001","text":""},{"location":"wiki/cluster_usage/notification_for_hpc/#_2","title":"\u63a8\u9001\u81f3\u9489\u9489","text":"

\u8ba1\u7b97\u8282\u70b9\u53ef\u8bbf\u95ee\u4e92\u8054\u7f51\u7684\u670d\u52a1\u5668\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4efb\u52a1\u5b8c\u6210\u540e\u63a8\u9001\u5230\u9489\u9489\u3002\u6548\u679c\u5982\u4e0b

"},{"location":"wiki/cluster_usage/notification_for_hpc/#_3","title":"\u7533\u8bf7\u9489\u9489\u673a\u5668\u4eba","text":"

\u7533\u8bf7\u6b65\u9aa4\u9700\u8981\u5728PC\u7aef\u9489\u9489\u64cd\u4f5c

\u9996\u5148\u9700\u8981\u7533\u8bf7\u4e00\u4e2a\u9489\u9489\u673a\u5668\u4eba\uff0c\u5e76\u62ff\u5230 webhook \uff0c\u6b65\u9aa4\u5982\u4e0b\uff1a

  1. \u70b9\u51fb\u5934\u50cf\u2192\u673a\u5668\u4eba\u7ba1\u7406

  2. \u6dfb\u52a0 \u81ea\u5b9a\u4e49 \u673a\u5668\u4eba

  1. \u7fa4\u7ec4\u9009\u62e9\u5de5\u4f5c\u901a\u77e5\uff0c\u5b89\u5168\u8bbe\u7f6e\u4e2d\u6dfb\u52a0\u5173\u952e\u8bcd Job, info

  1. \u590d\u5236\u673a\u5668\u4eba\u7684 webhook
"},{"location":"wiki/cluster_usage/notification_for_hpc/#_4","title":"\u670d\u52a1\u5668\u4e0a\u8bbe\u7f6e\u63a8\u9001","text":"

\u5728\u670d\u52a1\u5668\u63d0\u4ea4\u811a\u672c\u4e2d\u52a0\u4e0a module load notification \uff0c\u5e76\u5728\u6700\u540e\u52a0\u4e0a dingtalk_notification WEBHOOK \u5373\u53ef\u5b9e\u73b0\u63a8\u9001\u81f3\u9489\u9489\u3002\u793a\u4f8b\u811a\u672c\u5982\u4e0b:

#!/bin/bash\n#BSUB -J \"test\"\n#BSUB -o %J.txt\n#BSUB -e %J.txt\n#BSUB -q large\n#BSUB -n 2\n#BSUB -W 12:00\n\nmodule load notification\n\nMPIRUN_COMMAND  # your command to run software\n\ndingtalk_notification https://oapi.dingtalk.com/robot/send?access_token=xxxx  # replace it by your webhook\n

\u5176\u4e2d notification \u7684\u793a\u4f8b\u5982\u4e0b\uff0c\u8bf7\u81ea\u884c\u7f16\u8f91modulefile\u6587\u4ef6\uff08\u53ef\u53c2\u8003\u6b64\u5904\uff09\uff0c\u5e76\u66ff\u6362 <YOUR_HPC_NAME> \u4e0e <YOUR_IP> \u7684\u503c:

#%Module\n\nset-alias    dingtalk_notification {\n    curl $1 \\\n        -H 'Content-Type: application/json' \\\n        -d '{\n            \"msgtype\": \"markdown\",\n            \"markdown\": {\n                \"title\":\"Job Info\",\n                \"text\": \"'\"Job Info \\\\n\n\\\\n\nJob $LSB_JOBID is finished in **<YOUR_HPC_NAME>**! \\\\n\n\\\\n\n> Server ip: **<YOUR_IP>** \\\\n\n> \\\\n\n> Job id: **$LSB_JOBID** \\\\n\n> \\\\n\n> Job name: **$LSB_JOBNAME** \\\\n\n> \\\\n\n> Job queue: **$LSB_QUEUE** \\\\n\n> \\\\n\n> Job workdir: **$LS_EXECCWD** \\\\n\"'\"\n            }\n        }'\n}\n
"},{"location":"wiki/cluster_usage/pack_backup/","title":"\u6587\u4ef6\u6574\u7406\u4e0e\u5907\u4efd\u653b\u7565","text":"

\u672c\u6587\u5c06\u6301\u7eed\u66f4\u65b0

\u5728\u5b9e\u9645\u79d1\u7814\u5de5\u4f5c\u4e2d\uff0c\u6211\u4eec\u65f6\u5e38\u4f1a\u9047\u5230\u6587\u4ef6\u6574\u7406\u7684\u95ee\u9898\u3002\u6bd4\u5982\u5229\u7528 CP2K \u901a\u8fc7 Constrained MD \u8ba1\u7b97 Potential Mean Force \u65f6\uff0c\u4f1a\u4ea7\u751f\u5927\u91cf Lagrange Multiplier \u6587\u4ef6\uff1b\u4f7f\u7528 DP-GEN \u8bad\u7ec3\u52bf\u51fd\u6570\u65f6\uff0c\u7531\u4e8e Model Deviation \u8fc7\u7a0b\u4e2d\u4f1a\u751f\u6210\u5927\u91cf\u7ed3\u6784\u6587\u4ef6\uff0c\u6bcf\u4e00\u8f6e\u6bcf\u6761\u8f68\u8ff9\u90fd\u4f1a\u6709\u5f88\u591a\uff0c\u4fbf\u4f1a\u4f7f\u6587\u4ef6\u603b\u6570\u5feb\u901f\u4e0a\u5347\uff1b\u540c\u65f6\uff0c\u8ba1\u7b97\u8fc7\u7a0b\u4e2d\u4f1a\u4ea7\u751f\u6ce2\u51fd\u6570\u3001cube\u7b49\u6587\u4ef6\uff0c\u53ef\u80fd\u4f1a\u5360\u636e\u5927\u91cf\u7684\u7a7a\u95f4\u3002\u5982\u4f55\u9ad8\u6548\u6574\u7406\u8fd9\u4e9b\u6587\u4ef6\u4e5f\u6210\u4e3a\u4e00\u4e2a\u96be\u9898\u3002

\u672c\u6587\u5c06\u7ed9\u51fa\u4e00\u4e9b\u5177\u4f53\u7684\u653b\u7565\uff0c\u4f9b\u5927\u5bb6\u53c2\u8003\u4f7f\u7528\u3002

"},{"location":"wiki/cluster_usage/pack_backup/#_2","title":"\u4e00\u4e9b\u5e38\u8bc6","text":"

Linux\u4e2d\u6587\u4ef6\u50a8\u5b58\u4e0a\u9650\u4e0e\u50a8\u5b58\u7a7a\u95f4\u548c\u50a8\u5b58\u6570\u76ee\u6709\u5173\u7cfb\u3002\u56e0\u6b64\u6211\u4eec\u4e0d\u4ec5\u9700\u8981\u5173\u6ce8\u50a8\u5b58\u7a7a\u95f4\uff08\u6587\u4ef6\uff09\u5927\u5c0f\uff0c\u8fd8\u9700\u8981\u5173\u6ce8\u6587\u4ef6\u7684\u6570\u76ee\u3002\u4f8b\u5982DP-GEN\u4ea7\u751f\u7684\u5927\u91cf\u788e\u7247\u5316\u6587\u4ef6\u548ccp2k\u7684potential mean force \u4ea7\u751f\u7684\u5927\u91cfLagrange Multiplier\u6587\u4ef6\u90fd\u4f1a\u5f71\u54cd\u6587\u4ef6\u50a8\u5b58\u3002

\u4ee5\u4e0b\u547d\u4ee4\u53ef\u4ee5\u67e5\u770b\u81ea\u5df1\u7684\u6587\u4ef6/\u76ee\u5f55\u5927\u5c0f

# \u67e5\u770b\u5f53\u524d\u76ee\u5f55\u7684\u5927\u5c0f\ndu -sch .\n# \u67e5\u770b\u67d0\u6587\u4ef6\u7684\u5927\u5c0f\ndu -sch file_name\n# \u67e5\u770b\u8be5\u76ee\u5f55\u4e0b\u6240\u6709\u6587\u4ef6/\u76ee\u5f55\u5927\u5c0f\ndu -sch ./*\n
"},{"location":"wiki/cluster_usage/pack_backup/#_3","title":"\u6587\u4ef6\u6253\u5305\u4e0e\u538b\u7f29","text":""},{"location":"wiki/cluster_usage/pack_backup/#tar","title":"tar\u547d\u4ee4","text":"

\u5bf9\u4e8e\u7ed3\u6784\u590d\u6742\u7684\u76ee\u5f55\uff0c\u53ef\u4ee5\u4f7f\u7528 tar \u547d\u4ee4\u8fdb\u884c\u6253\u5305\u6216\u538b\u7f29\u3002

tar \u547d\u4ee4\u652f\u6301\u538b\u7f29\u6216\u89e3\u538b\u7f29\uff0c\u5176\u4f7f\u7528\u65b9\u6cd5\u5927\u81f4\u5982\u4e0b\uff1a

\u6253\u5305\u4e0e\u538b\u7f29: tar [-j|-z] [cv] [-f \u521b\u5efa\u7684\u6863\u540d] filename... \n\u89e3\u538b\u7f29: tar [-j|-z] [xv] [-f \u521b\u5efa\u7684\u6863\u540d] [-C \u76ee\u5f55]\n========\n\u9009\u9879\u4e0e\u53c2\u6570\uff1a\n-c  \uff1a\u521b\u5efa\u6253\u5305\u6587\u4ef6\uff0c\u53ef\u642d\u914d -v \u6765\u5bdf\u770b\u8fc7\u7a0b\u4e2d\u88ab\u6253\u5305\u7684\u6587\u4ef6\uff08\u5939\uff09\u540d(filename)\n-x  \uff1a\u89e3\u6253\u5305\u6216\u89e3\u538b\u7f29\u7684\u529f\u80fd\uff0c\u53ef\u4ee5\u642d\u914d -C (\u5927\u5199) \u5728\u7279\u5b9a\u76ee\u5f55\u5b8c\u6210\u89e3\u538b\u7f29\u7684\u64cd\u4f5c\u3002\n-c, -x \u4e0d\u53ef\u540c\u65f6\u4f7f\u7528\uff0c\u8fd8\u8bf7\u6ce8\u610f\uff01\n-z  \uff1a\u901a\u8fc7 gzip \u8fdb\u884c\u538b\u7f29/\u89e3\u538b\u7f29\uff1a\u6b64\u65f6\u538b\u7f29\u6587\u4ef6\u540d\u6700\u597d\u4e3a *.tar.gz \u6216 *.tgz\n-j  \uff1a\u901a\u8fc7 bzip2 \u8fdb\u884c\u538b\u7f29/\u89e3\u538b\u7f29\uff1a\u6b64\u65f6\u538b\u7f29\u6587\u4ef6\u540d\u6700\u597d\u4e3a *.tar.bz2\n-v  \uff1a\u5728\u538b\u7f29/\u89e3\u538b\u7f29\u7684\u8fc7\u7a0b\u4e2d\uff0c\u5c06\u6b63\u5728\u5904\u7406\u7684\u6587\u4ef6\u540d\u663e\u793a\u51fa\u6765\u3002\n-f filename\uff1a-f \u540e\u9762\u8981\u7acb\u523b\u63a5\u8981\u88ab\u5904\u7406\u7684\u6863\u540d\uff01\u5efa\u8bae -f \u5355\u72ec\u5199\u4e00\u4e2a\u9009\u9879\u7f57\uff01\n-p  \uff1a\u4fdd\u7559\u5907\u4efd\u6570\u636e\u7684\u539f\u672c\u6743\u9650\u4e0e\u5c5e\u6027\uff0c\u5e38\u7528\u4e8e\u5907\u4efd(-c)\u91cd\u8981\u7684\u914d\u7f6e\u6863\u3002\n--exclude=FILE\uff1a\u5728\u538b\u7f29\u7684\u8fc7\u7a0b\u4e2d\uff0c\u4e0d\u5c06\u6587\u4ef6\u540d\u4e3a FILE \u7684\u6587\u4ef6\u6253\u5305\u3002\n--remove-files\uff1a\u6253\u5305\u540e\u5220\u9664\u88ab\u6253\u5305\u7684\u6587\u4ef6\u3002\n

\u6bd4\u5982\u5e0c\u671b\u5728 /some/place \u4e0b\u6253\u5305/data/userX \u4e0b\u7684\u6240\u6709\u6587\u4ef6\uff08\u5939\uff09\u4e3a userX_backup.tgz\uff0c\u4fbf\u53ef\u4ee5\u4f7f\u7528\u547d\u4ee4

tar -zcvf /some/place/userX_backup.tgz /data/userX/*\n

\u4f7f\u7528tar\u547d\u4ee4\u5c06\u6587\u4ef6\u6253\u5305\u4e3a *.tgz \u7b49\u538b\u7f29\u6863\u7684\u4f18\u70b9\u662f\u53ef\u4ee5\u4fdd\u7559\u8f6f\u8fde\u63a5\uff0c\u9002\u5408\u7528\u4e8e\u7ed3\u6784\u590d\u6742\u7684\u76ee\u5f55\uff0c\u4f8b\u5982 DP-GEN \u9879\u76ee\u76ee\u5f55\u3002

\u5982\u679c\u7528\u6237\u5e0c\u671b\u5c06\u6253\u5305\u540e\u7684\u6587\u4ef6\u76f4\u63a5\u5220\u9664\u4ee5\u8282\u7701\u7a7a\u95f4\uff0c\u5219\u53ef\u4ee5\u4f7f\u7528\uff1a

tar -zcvf /some/place/userX_backup.tgz /data/userX/* --remove-files\n

\u8fd9\u6837\u5728\u521b\u5efa\u538b\u7f29\u6863\u540e\uff0c\u7a0b\u5e8f\u4f1a\u5220\u9664 /data/userX/*\u3002

\u5bf9\u4e8e DP-GEN \u7b49\u6587\u4ef6\u6570\u91cf\u975e\u5e38\u591a\u7684\u4efb\u52a1\uff0c\u76f4\u63a5\u5b58\u50a8\u4f1a\u5360\u636e\u5927\u91cf\u7684 inode \u7a7a\u95f4\uff0c\u4ece\u800c\u51fa\u73b0\u660e\u660e\u78c1\u76d8\u7a7a\u95f4\u591f\u5374\u65e0\u6cd5\u5199\u5165\u7684\u5c34\u5c2c\u5c40\u9762\uff0c\u56e0\u6b64\u53ef\u4ee5\u5bf9\u5df2\u7ecf\u8dd1\u8fc7\u7684 iteration \u8fdb\u884c\u4e3b\u52a8\u6253\u5305\u4ee5\u51cf\u5c11\u6587\u4ef6\u6570\u91cf\uff0c\u8282\u7ea6 inode \u6570\u3002\u540c\u65f6\u5982\u679c\u9700\u8981\u8fdb\u884c\u78c1\u76d8\u7ea7\u7684\u5907\u4efd\u3001\u8fc1\u79fb\uff0c\u5904\u7406\u5c0f\u6587\u4ef6\u7684\u901f\u5ea6\u4f1a\u5927\u5e45\u653e\u7f13\uff0c\u800c\u5904\u7406\u5927\u6587\u4ef6\u7684\u8bfb\u5199\u901f\u5ea6\u53cd\u800c\u53ef\u4ee5\u8fbe\u5230\u786c\u76d8\u8bfb\u5199\u901f\u7387\u6216\u7f51\u7edc\u4f20\u8f93\u901f\u7387\u7684\u4e0a\u9650\u3002

"},{"location":"wiki/cluster_usage/pack_backup/#_4","title":"\u6587\u4ef6\u5220\u9664","text":""},{"location":"wiki/cluster_usage/pack_backup/#find","title":"find\u547d\u4ee4","text":"

\u5bf9\u4e8e\u5927\u91cf\u5177\u6709\u76f8\u4f3c\u547d\u540d\u7684\u6587\u4ef6\uff0c\u53ef\u4ee5\u5229\u7528 find \u547d\u4ee4\u8fdb\u884c\u7d22\u5f15\u548c\u5220\u9664\u3002

\u4f8b\u5982\u5bf9\u5f53\u524d\u76ee\u5f55\u4e0b\uff08./\uff09\uff0c\u60f3\u8981\u67e5\u627e AuO \u4efb\u52a1\u4ea7\u751f\u7684\u6240\u6709\u7684 cube \u6587\u4ef6\uff08\u5047\u8bbe\u547d\u540d\u5747\u4e3aAuO_*.cube\uff09\uff0c\u53ef\u4ee5\u91c7\u7528\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u5c55\u793a\uff1a

find ./ -name 'AuO_*.cube'\n

\u5982\u679c\u60f3\u8981\u5c06\u8fd9\u4e9b\u6587\u4ef6\u76f4\u63a5\u5220\u9664\uff0c\u8fd8\u53ef\u4ee5\u52a0\u5165 -delete \u547d\u4ee4\uff1a

find ./ -name 'AuO_*.cube' -delete\n

\u6ce8\u610f

\u6ce8\u610f find \u547d\u4ee4\u540e\u7684\u9009\u9879\u4e3a - \u800c\u975e -- \u3002

"},{"location":"wiki/cluster_usage/pack_backup/#rsync","title":"rsync\u547d\u4ee4","text":"

rsync \u4f5c\u4e3a\u5e38\u7528\u7684\u6587\u4ef6\u4f20\u8f93\u4e0e\u540c\u6b65\u547d\u4ee4\uff0c\u5b9e\u9645\u4e0a\u4e5f\u53ef\u4ee5\u7528\u4e8e\u5c06\u67d0\u4e00\u6587\u4ef6\u5939\u6e05\u7a7a\uff0c\u5bf9\u4e8e\u6709\u5927\u91cf\u5c0f\u6587\u4ef6\u7684\u60c5\u51b5\u76f8\u6bd4\u4f20\u7edf\u7684 rm \u547d\u4ee4\u4f1a\u5feb\u5f88\u591a\u3002\u4f8b\u5982\u60f3\u8981\u6e05\u7a7a /some/path \u76ee\u5f55\uff0c\u53ef\u4ee5\u5148\u8fd0\u884c\uff1a

mkdir /tmp/empty\n

\u7136\u540e\u8fd0\u884c\uff1a

rsync --delete -rlptD /tmp/empty/ /some/path\n

"},{"location":"wiki/cluster_usage/pack_backup/#_5","title":"\u5e38\u7528\u8f6f\u4ef6\u7684\u6587\u4ef6\u5904\u7406","text":""},{"location":"wiki/cluster_usage/pack_backup/#cp2k","title":"cp2k","text":"

cp2k\u5728\u8ba1\u7b97\u4e2d\u4f1a\u4ea7\u751f\u5927(\u91cf)\u6587\u4ef6\uff0c\u4ee5\u4e0b\u6587\u4ef6\u53ef\u4ee5\u5220\u9664\u3002

  • \u6ce2\u51fd\u6570\u6587\u4ef6\uff08.wfn\uff09\uff1a\u6ce2\u51fd\u6570\u6587\u4ef6\u50a8\u5b58DFT\u8ba1\u7b97\u7684\u8f68\u9053\u4fe1\u606f\uff0c\u5e38\u7528\u4e8erestart\u3002\u4f46.wfn\u6587\u4ef6\u5f80\u5f80\u968f\u7740\u4f53\u7cfb\u589e\u5927\u800c\u8fc5\u901f\u589e\u5927\u3002\u5982\u65e0\u5fc5\u8981\uff08\u91cd\u8981\u6ce2\u51fd\u6570\uff09\uff0c\u7b97\u5b8c\u4e4b\u540e\u5373\u53ef\u5c06\u5176\u5220\u9664\u3002
  • \u7f51\u683c\u6587\u4ef6\uff08.cube\uff09\uff1a\u8fd9\u7c7b\u6587\u4ef6\u50a8\u5b58\u7740\u4e09\u7ef4\u7a7a\u95f4\u4fe1\u606f\uff0c\u4f8b\u5982\uff1a\u9759\u7535\u52bf\u3001\u5206\u5b50\u8f68\u9053\u3002\u5927\u5c0f\u4e2d\u7b49\uff0810MB\u5de6\u53f3\uff09\u3002\u6309\u666e\u901aAIMD\u957f\u5ea6\uff0860000\u6b65\uff09\uff0c\u6bcf50\u6b65\u8f93\u51fa\u4e00\u4e2a\u4f1a\u67091200\u4e2a.cube\u6587\u4ef6\u3002\u7d2f\u79ef\u4e0b\u6765\u7a7a\u95f4\u4e0d\u5bb9\u5c0f\u89d1\u3002\u5982\u5206\u6790\u5b8c\u6bd5\uff0c\u5373\u53ef\u5220\u9664\uff0c\u6216\u7528\u538b\u7f29\u5de5\u5177\u538b\u7f29\uff0c\u6216\u7528\u4e13\u4e1a\u7684bqbtool\u538b\u7f29\u3002
  • \u8f68\u8ff9\u6587\u4ef6\uff08.xyz\uff09: \u5206\u5b50\u52a8\u529b\u5b66/\u7ed3\u6784\u4f18\u5316\u8f93\u51fa\u7684\u8f68\u8ff9\u6587\u4ef6\uff0c\u5305\u542b\u666e\u901a\u8f68\u8ff9\u6587\u4ef6\uff0c\u901f\u5ea6\u6587\u4ef6\uff0c\u529b\u6587\u4ef6\u3002\u666e\u901aAIMD\u957f\u5ea6\u8f93\u51fa\u7684\u4e09\u4e2a\u6587\u4ef6\u57fa\u672c\u57281\u81f32GB\u5de6\u53f3\u3002\u5982\u4f7f\u7528\u673a\u5668\u5b66\u4e60\u52bf\u51fd\u6570\u4f1a\u50a8\u5b58\u5927\u91cf\u8f68\u8ff9\u6570\u636e\uff0c\u5e38\u5e38\u4f1a\u8fbe\u5230100GB\u5de6\u53f3\u3002\u5982\u5206\u6790\u5b8c\u6bd5\uff0c\u5373\u53ef\u5220\u9664\uff0c\u6216\u7528\u538b\u7f29\u5de5\u5177\u538b\u7f29\uff0c\u6216\u7528\u4e13\u4e1a\u7684bqbtool\u538b\u7f29\u3002
  • \u6001\u5bc6\u5ea6\u6587\u4ef6\uff08.pdos\uff09: \u4f53\u7cfb\u7684\u6001\u5bc6\u5ea6\u6587\u4ef6\uff0c\u5927\u5c0f\u504f\u5c0f\uff0c\u7ea6\u4e3a1\u81f32MB\u5de6\u53f3\u4e00\u4e2a\u6587\u4ef6\uff0c\u4f46\u4e00\u4e2a\u4f53\u7cfb\u4f1a\u8f93\u51fa\u591a\u4e2a\u6587\u4ef6\uff0c\u56e0\u6b64\u5dee\u4e0d\u591a\u57286\u81f38MB\uff0c\u4e0e\u7f51\u683c\u6587\u4ef6\u7c7b\u4f3c\uff0c\u5927\u91cf\u79ef\u7d2f\u540e\u4f1a\u4ea7\u751f\u7a7a\u95f4\u5360\u7528\u3002\u5982\u5206\u6790\u5b8c\u6bd5\uff0c\u5373\u53ef\u5220\u9664\uff0c\u6216\u7528\u538b\u7f29\u5de5\u5177\u538b\u7f29\u3002
"},{"location":"wiki/cluster_usage/pack_backup/#bqbtool","title":"\u538b\u7f29\u5de5\u5177: bqbtool","text":"

cp2k\u8f68\u8ff9\u6587\u4ef6/\u7f51\u683c\u6587\u4ef6\uff0c\u5982\u820d\u4e0d\u5f97\u4e22\u6389\u3002\u53ef\u4ee5\u91c7\u7528bqbtool\u8fdb\u884c\u538b\u7f29\u3002bqbtool\u4e13\u95e8\u9488\u5bf9\u6b64\u7c7b\u578b\u6587\u4ef6\u8fdb\u884c\u538b\u7f29\u5f00\u53d1\u7684\u5de5\u5177\uff0c\u538b\u7f29\u7387\u8fbe\u523010%\u3002

\u4e2a\u4eba\u5b89\u88c5\u53c2\u8003bqb\u624b\u518c\uff0c51\u548c52\u670d\u52a1\u5668\u4e0a\u5df2\u7ecf\u5b89\u88c5\uff0c\u4f7f\u7528\u547d\u4ee4\u5982\u4e0b\uff1a

# \u538b\u7f29\u8f68\u8ff9\u6587\u4ef6\nbqbtool compress postraj xxx.xyz xxx.bqb\n# \u538b\u7f29cube\u6587\u4ef6, \u53ef\u63d0\u524d\u628acube\u6587\u4ef6\u6309\u987a\u5e8fcat\u5230\u4e00\u4e2a\u6587\u4ef6\u4e2d\u3002\nbqbtool compress voltraj xxx.cube xxx.bqb\n

\u5982\u679c\u5c06\u67d0\u4e2a\u6587\u4ef6\u5939\u53ca\u5176\u5b50\u6587\u4ef6\u5939\u4e2d\u7684\u6240\u6709\u6587\u4ef6\u90fd\u538b\u7f29\uff0c\u53ef\u4ee5\u7ed3\u5408\u4f7f\u7528find\u548cbqbtool compress\uff1a

find . -name '*.cube' | while read line; do\n  bqbtool compress voltraj $line $line.bqb\ndone\n

\u6279\u91cf\u538b\u7f29\u6548\u679c\uff1a

.\n\u251c\u2500\u2500 bqbtool.log\n\u251c\u2500\u2500 run.sh\n\u251c\u2500\u2500 test.000\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cp2k-TOTAL_DENSITY-1_0.cube\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cp2k-TOTAL_DENSITY-1_0.cube.bqb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cp2k-v_hartree-1_0.cube\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cp2k-v_hartree-1_0.cube.bqb\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 test.002\n\u2502\u00a0\u00a0     \u251c\u2500\u2500 cp2k-TOTAL_DENSITY-1_0.cube\n\u2502\u00a0\u00a0     \u251c\u2500\u2500 cp2k-TOTAL_DENSITY-1_0.cube.bqb\n\u2502\u00a0\u00a0     \u251c\u2500\u2500 cp2k-v_hartree-1_0.cube\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 cp2k-v_hartree-1_0.cube.bqb\n\u251c\u2500\u2500 test.001\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cp2k-TOTAL_DENSITY-1_0.cube\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cp2k-TOTAL_DENSITY-1_0.cube.bqb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 cp2k-v_hartree-1_0.cube\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 cp2k-v_hartree-1_0.cube.bqb\n\u2514\u2500\u2500 test.002\n    \u251c\u2500\u2500 cp2k-TOTAL_DENSITY-1_0.cube\n    \u251c\u2500\u2500 cp2k-TOTAL_DENSITY-1_0.cube.bqb\n    \u251c\u2500\u2500 cp2k-v_hartree-1_0.cube\n    \u2514\u2500\u2500 cp2k-v_hartree-1_0.cube.bqb\n
"},{"location":"wiki/cluster_usage/pack_backup/#_6","title":"\u96c6\u7fa4\u6253\u5305\u8981\u70b9","text":"

\u672c\u6b2151\u548c52\u5c06\u8fdb\u884c\u8fc1\u79fb\uff0c\u6587\u4ef6\u7684\u6570\u76ee\u5c06\u4f1a\u5f71\u54cd\u8fc1\u79fb\u901f\u5ea6\u3002\u56e0\u6b64\u5c3d\u53ef\u80fd\u5730\u628a\u539f\u672c\u76ee\u5f55\u538b\u7f29\u6210\u51e0\u4e2a\u6587\u4ef6\uff0c\u53ef\u4ee5\u63d0\u5347\u8fc1\u79fb\u901f\u5ea6\uff0c\u4f8b\u5982:

-rw-rw-r-- 1 jyhu jyhu 668M Jan 15 17:58 1-CoO.tar.gz\n-rw-rw-r-- 1 jyhu jyhu 559M Jan 15 15:40 2-ZIS.tar.gz\n-rw-rw-r-- 1 jyhu jyhu 2.6G Jan 15 17:07 3-LiS@TiO2.tar.gz\n-rw-rw-r-- 1 jyhu jyhu 2.8G Jan 15 15:53 4-Graphene.tar.gz\n-rw-rw-r-- 1 jyhu jyhu 3.4M Jan 16 11:05 NEB.tar.gz\n-rw-rw-r-- 1 jyhu jyhu 324M Jan 16 11:07 pKa-jqli.tar.gz\n

\u6253\u5305\u65b9\u6cd5\u53ef\u4ee5\u91c7\u7528tar\u538b\u7f29\uff0c\u53c2\u7167\u4ee5\u4e0a\u90e8\u5206

"},{"location":"wiki/cluster_usage/ssh_note/","title":"SSH \u4f7f\u7528\u5165\u95e8","text":"

\u6b64\u5165\u95e8\u4ec5\u4ecb\u7ecd\u4e00\u4e9b\u4f5c\u8005\u8ba4\u4e3a\u5fc5\u8981\u4e14\u5b9e\u7528\u7684\u529f\u80fd\uff0c\u5b8c\u5584\u7684\u5e2e\u52a9\u624b\u518c\u53ef\u4ee5\u901a\u8fc7\u547d\u4ee4\uff0cman ssh_config, man ssh\u67e5\u770b

\u4e3a\u4fbf\u4e8e\u8bf4\u660e\uff0c\u5047\u8bbe\u9700\u8981\u767b\u9646\u7684\u8fdc\u7a0b\u670d\u52a1\u5668IP\u4e3a123.45.67.89\uff0cSSH \u7aef\u53e3\u4e3a 7696\uff0c\u7528\u6237\u540d\u4e3akmr\u3002

"},{"location":"wiki/cluster_usage/ssh_note/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
  • \u4f7f\u7528SSH\u767b\u5f55\u670d\u52a1\u5668/\u96c6\u7fa4
  • \u4f7f\u7528SCP\u8fdb\u884c\u6587\u4ef6\u4f20\u8f93
"},{"location":"wiki/cluster_usage/ssh_note/#_2","title":"\u53ef\u9009\u76ee\u6807","text":"
  • \u4f7f\u7528ssh config\u6587\u4ef6\u8fdb\u884cSSH\u767b\u5f55\u7ba1\u7406
  • \u5b66\u4f1a\u7528\u8df3\u677f\u673a\u8fdb\u884cSSH\u767b\u5f55
"},{"location":"wiki/cluster_usage/ssh_note/#_3","title":"\u521b\u5efa\u5bc6\u94a5\u5bf9","text":"

Warning

\u65b0\u4eba\u5fc5\u5b66

ssh \u662f\u7528\u6765\u5b89\u5168\u8fdb\u884c\u767b\u5f55\u8fdc\u7a0b\u7535\u8111\u7684\u547d\u4ee4\u3002\u4f7f\u7528\u540e\uff0c\u6709\u4e24\u79cd\u9009\u62e9\u6765\u9a8c\u8bc1\u767b\u5f55

  1. \u4f7f\u7528\u5bc6\u7801
  2. \u4f7f\u7528\u5bc6\u94a5

\u7b2c\u4e00\u79cd\u65b9\u6cd5\u5df2\u7ecf\u4e3a\u5927\u4f17\u6240\u719f\u77e5\uff0c\u4f46\u662f\u4e0d\u5b89\u5168\u3002\u56e0\u6b64\u6211\u4eec\u91c7\u7528\u5bc6\u94a5\u8fdb\u884c\u767b\u5f55\u3002

\u4f7f\u7528\u5982\u4e0b\u547d\u4ee4\u751f\u6210\u5bc6\u94a5:

ssh-keygen\n

\u6839\u636e\u7ec8\u7aef\u7684\u63d0\u793a\u8fdb\u884c\u64cd\u4f5c\uff08\u5b9e\u9645\u4e0a\u4f60\u53ef\u80fd\u53ea\u9700\u8981\u4e0d\u505c\u6309enter\u952e\uff09\u3002\u9ed8\u8ba4\u60c5\u51b5\u4e0b\u4f60\u4f1a\u5728~/.ssh\u76ee\u5f55\u4e2d\u5f97\u5230id_rsa\u548cid_rsa.pub\u6587\u4ef6\uff0c\u4ed6\u4eec\u5206\u522b\u662f\u79c1\u94a5\u548c\u516c\u94a5\u3002\u521b\u5efa\u597d\u4e86\u4e4b\u540e\u8bf7\u628aid_rsa.pub\u6587\u4ef6\u7ed9\u670d\u52a1\u5668\u7ba1\u7406\u5458\u3002

Warning

\u79c1\u94a5\u662f\u767b\u5f55\u96c6\u7fa4\u7684\u94a5\u5319\uff0c\u8bf7\u52a1\u5fc5\u4fdd\u7ba1\u597d\u8fd9\u4e2a\u6587\u4ef6\uff0c\u9632\u6b62\u81ea\u5df1\u7684\u7535\u8111\u88ab\u5165\u4fb5

"},{"location":"wiki/cluster_usage/ssh_note/#ssh_1","title":"\u4f7f\u7528SSH\u767b\u5f55\u670d\u52a1\u5668","text":"

Warning

\u65b0\u4eba\u5fc5\u5b66

\u82e5\u8fdc\u7a0b\u670d\u52a1\u5668\u5df2\u7ecf\u653e\u7f6e\u4e86\u516c\u94a5\uff0c\u5219\u53ef\u8f93\u5165\u4ee5\u4e0b\u547d\u4ee4\u767b\u9646\u670d\u52a1\u5668\uff1a

ssh -i <path to your private key> -p <port number> username@server_ip\n

\u793a\u4f8b\uff0c\u5047\u8bbe\u5bc6\u94a5\u5728\u672c\u5730\u7684\u8def\u5f84\u4e3a ~/.ssh/id_rsa\uff1a

ssh -i ~/.ssh/id_rsa -p 7696 kmr@123.45.67.89\n

-p \u540e\u6307\u5b9a\u7684\u662f\u7aef\u53e3\u3002\u82e5\u7701\u7565\u4e0d\u5199\uff0c\u9ed8\u8ba4\u901a\u8fc7 22 \u7aef\u53e3\u4e0e\u8fdc\u7a0b\u670d\u52a1\u5668\u8fdb\u884c\u8fde\u63a5\u3002

\u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0cid_rsa\u548cid_rsa.pub\u6587\u4ef6\u4f4d\u4e8e~/.ssh\u4e0b\uff0c\u5219-i \u9009\u9879\u53ca\u5176\u5bf9\u5e94\u53c2\u6570\u53ef\u4ee5\u7701\u7565\u3002

Warning

\u8ba1\u7b97\u96c6\u7fa4\u53ea\u5141\u8bb8\u5728\u6821\u56ed\u7f51\u7279\u5b9aIP\u8303\u56f4\u5185\u76f4\u63a5\u767b\u9646\u4f7f\u7528\u3002

"},{"location":"wiki/cluster_usage/ssh_note/#scp","title":"\u4f7f\u7528SCP\u8fdb\u884c\u6587\u4ef6\u4f20\u8f93","text":"

SCP\u5b9e\u9645\u4e0a\u662fSSH+FTP\u7684\u7ed3\u5408\uff0c\u5982\u679c\u914d\u7f6e\u597d\u4e86SSH\u547d\u4ee4\uff0c\u53ef\u4ee5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u6765\u8fdb\u884c\u6587\u4ef6\u4f20\u8f93\uff1a

scp myserver:remote_file local_directory_path\nscp local_file myserver:remote_directory_path\n

\u6bd4\u5982\u9700\u8981\u628a\u4e0a\u6587\u63d0\u5230\u7684\u8fdc\u7a0b\u670d\u52a1\u5668\u7684\u6587\u4ef6/data/home/kmr/file\u4f20\u5230\u672c\u5730 /some/local/place \u76ee\u5f55\u4e0b\uff0c \u5219\u4f7f\u7528\u547d\u4ee4\uff1a

scp -P 7696 kmr@123.45.67.89:/data/home/kmr/file /some/local/place\n

\u4ece\u672c\u5730\u4e0a\u4f20\u5230\u8fdc\u7a0b\u5219\u4ea4\u6362\u987a\u5e8f\uff1a

scp -P 7696 /some/local/place/file kmr@123.45.67.89:/data/home/kmr/\n

Warning

\u6ce8\u610f scp \u6307\u5b9a\u7aef\u53e3\u7684\u547d\u4ee4\u662f\u5927\u5199\u7684-P \u800c\u975e\u5c0f\u5199\u7684 -p\uff0c\u8fd9\u662f\u4e0d\u540c\u4e8e ssh \u547d\u4ee4\u7684\u4e00\u70b9\u3002

\u82e5\u6240\u4f20\u6587\u4ef6\u4e3a\u76ee\u5f55\uff0c\u5219\u9700\u8981\u4f7f\u7528-r\u9009\u9879\uff1a

scp -r -P 7696 kmr@123.45.67.89:/data/home/kmr/directory /some/local/place\n

Tip

\u6ce8\u610f scp \u672c\u8eab\u53ef\u4ee5\u770b\u4f5c\u4e00\u4e2a\u7279\u6b8a\u7684 ssh \u547d\u4ee4\uff0c\u56e0\u6b64\u65e0\u8bba\u4ece\u8fdc\u7a0b\u8fd8\u662f\u672c\u5730\u4f20\u8f93\u6587\u4ef6\u90fd\u5e94\u5728\u672c\u5730\u8fd0\u884c\uff0c\u53ea\u662f\u53c2\u6570\u7684\u987a\u5e8f\u51b3\u5b9a\u4e86\u4f20\u8f93\u7684\u65b9\u5411\u3002\u5982\u679c\u4e24\u4e2a\u53c2\u6570\u5747\u5199\u672c\u5730\u8def\u5f84\uff0c\u5219\u4e0e cp \u547d\u4ee4\u7684\u884c\u4e3a\u76f8\u8fd1\uff0c\u4f46\u4e0d\u53ef\u5747\u5199\u8fdc\u7a0b\u8def\u5f84\u3002

zsh\u4e0b \uff08\u6bd4\u5982macOS >=10.15\u7248\u672c\u7684\u9ed8\u8ba4\u7ec8\u7aef\uff09\uff0c\u4e0d\u80fd\u76f4\u63a5\u4f7f\u7528\u901a\u914d\u7b26*\u6279\u91cf\u4f20\u8f93\u6587\u4ef6\uff0c\u9700\u8981\u5c06\u5305\u542b*\u7684\u5b57\u7b26\u4e32\u7528\u5355\u5f15\u53f7\u62ec\u8d77\u3002

"},{"location":"wiki/cluster_usage/ssh_note/#config-ssh","title":"\u53ef\u9009\uff1a\u901a\u8fc7\u914d\u7f6e config \u4f18\u96c5\u5730\u7684\u4f7f\u7528 SSH","text":"

\u4e3a\u4e86\u907f\u514d\u6bcf\u6b21\u90fd\u8f93\u5165\u4e00\u5927\u4e32\u547d\u4ee4\u3002 \u8bf7\u4f7f\u7528vim\u7f16\u8f91\u5982\u4e0b\u6587\u4ef6\uff1a

vim ~/.ssh/config\n

\u6ce8\u610f

\u8bf7\u6ce8\u610f\u4fee\u6539\u8be5\u6587\u4ef6\u6743\u9650\u4e3a 600 (\u5373 -rw------- )\uff0c\u5426\u5219\u53ef\u80fd\u5bfc\u81f4\u65e0\u6cd5\u5e76\u884c\u3002 \u7c7b\u4f3c\u5730\uff0c\u5982\u53d1\u73b0\u81ea\u5df1\u7684\u4efb\u52a1\u4ea4\u4e0a\u53bb\u53ea\u80fd\u5728\u4e00\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\uff0c\u4e5f\u8bf7\u68c0\u67e5 ~/.ssh \u4e0b\u5404\u4e2a\u6587\u4ef6\u7684\u6743\u9650\uff0c\u6ce8\u610f\u53ea\u6709\u516c\u94a5\u662f 644 \u6743\u9650\u3002

\u6211\u4eec\u53ef\u4ee5\u628aSSH\u547d\u4ee4\u7684\u53c2\u6570\u90fd\u50a8\u5b58\u5728\u8fd9\u4e2a\u6587\u4ef6\u91cc\u3002\u4ee5\u4e0b\u662f\u8bed\u6cd5\u793a\u4f8b\u6587\u4ef6\uff1a

Host myserver # (1)!\n    User kmr # (2)!\n    Hostname 123.45.67.89 # (3)!\n    Port 7696 # (4)!\n    IdentityFile ~/.ssh/id_rsa # (5)!\n
  1. nickname for your cluster
  2. replacement of username in ssh
  3. replace of cluster_ip in ssh
  4. replacement of -p <port number> in ssh
  5. replace of -i <path to your private key> in ssh

\u4fdd\u5b58\u4e0a\u8ff0\u6587\u4ef6\uff0c\u4f60\u5c31\u53ef\u4ee5\u7b80\u5355\u5730\u4f7f\u7528\u5982\u4e0b\u547d\u4ee4\u767b\u5f55:

ssh myserver\n

\u6b64\u547d\u4ee4\u5373\u76f8\u5f53\u4e8e\u4e0a\u6587\u63d0\u5230\u7684ssh -i ~/.ssh/id_rsa -p 7696 kmr@123.45.67.89\u3002

"},{"location":"wiki/cluster_usage/ssh_note/#_4","title":"\u52a0\u6df1\u7406\u89e3","text":"

Warning

\u8be5\u89c6\u9891\u4ec5\u5e2e\u52a9\u7406\u89e3SSH\u539f\u7406\u4ee5\u53ca\u57fa\u672c\u64cd\u4f5c\uff0c\u89c6\u9891\u4e2d\u542b\u6709\u672c\u7b14\u8bb0\u672a\u8981\u6c42\u7684\u5185\u5bb9\uff0c\u4f46\u662f\u5927\u90e8\u5206\u666e\u901a\u7528\u6237\u6ca1\u6709\u6743\u9650\u6267\u884c\u3002

"},{"location":"wiki/cluster_usage/ssh_note/#x11-forwarding","title":"\u5728\u672c\u5730\u7535\u8111\u663e\u793a\u670d\u52a1\u5668\u56fe\u50cf (X11 Forwarding)","text":"

\u4f7f\u7528\u7ec8\u7aef\u767b\u5f55\u670d\u52a1\u5668\u540e\u6ca1\u529e\u6cd5\u76f4\u63a5\u663e\u793a\u56fe\u5f62\u754c\u9762\u3002\u6709\u65f6\u5019\u5728*\u670d\u52a1\u5668*\u4e0a\u4f7f\u7528\u753b\u56fe\u8f6f\u4ef6\u65f6\uff0c\u53ef\u4ee5\u901a\u8fc7X11 Forwarding\u529f\u80fd\u5c06\u56fe\u50cf\u663e\u793a\u5230\u672c\u5730\u7535\u8111\u4e0a\u3002\u53ea\u9700\u8981\u5728\u547d\u4ee4\u91cc\u52a0\u4e0a-X\u6216\u8005-Y\uff1a

ssh -X -i <para.> -p <para.> username@server_ip\n
"},{"location":"wiki/cluster_usage/ssh_note/#configx11-forwarding","title":"\u5728config\u6587\u4ef6\u4e2d\u914d\u7f6eX11 Forwarding*","text":"
Host <hostnickname>\n    ForwardX11 yes  # (1)!\n    ForwardX11Trusted yes # (2)!\n
  1. equivalent to -X
  2. equivalent to -Y (This option valid only if your ForwardX11 is set to yes!)
"},{"location":"wiki/cluster_usage/ssh_note/#_5","title":"\u4f7f\u7528\u8df3\u677f\u673a/\u4ee3\u7406\u8fdb\u884c\u8fdc\u7a0b\u767b\u5f55","text":"

\u672c\u7ec4\u7684\u670d\u52a1\u5668\u9650\u5236\u4e86\u767b\u5f55\u7684ip\uff0c\u5373\u4f60\u53ea\u80fd\u5728\u5b66\u6821ip\u8303\u56f4\u5185\u8fdb\u884c\u767b\u5f55\u3002\u540c\u65f6\u7531\u4e8e\u767b\u5f55\u9700\u8981\u5bc6\u94a5\uff0c\u800c\u5bc6\u94a5\u4fdd\u5b58\u5728\u529e\u516c\u5ba4\u7535\u8111\u4e0a\uff0c\u56e0\u6b64\u767b\u5f55\u5c31\u5fc5\u987b\u4f7f\u7528\u529e\u516c\u5ba4\u7535\u8111\u3002\u56e0\u6b64\uff0c\u4eba\u4e0d\u5728\u529e\u516c\u5ba4\u65f6\u5c31\u5f88\u96be\u767b\u5f55\u670d\u52a1\u5668\u3002

\u89e3\u51b3\u65b9\u6cd5\u5c31\u662f\uff0c\u5148\u5728\u6821\u56ed\u7f51\u73af\u5883\u4e0b\u901a\u8fc7SSH\u767b\u5f55\u5230\u529e\u516c\u5ba4\u7535\u8111\uff08\u4ec5\u81ea\u5df1\u7684\u7528\u6237\u540d\u5bc6\u7801\u5373\u53ef\uff09\uff0c\u518d\u901a\u8fc7\u529e\u516c\u5ba4\u7535\u8111\u767b\u5f55\u5230\u670d\u52a1\u5668\u3002\u6b64\u65f6\u529e\u516c\u5ba4\u7535\u8111\u662f\u4f5c\u4e3a*\u8df3\u677f*\u6765\u4f7f\u7528\u7684\uff1a

ssh username@proxy\nssh -p port_number -i key_file username@cluster191\n
"},{"location":"wiki/cluster_usage/ssh_note/#config","title":"\u5728config\u6587\u4ef6\u4e2d\u914d\u7f6e\u8df3\u677f\u673a*","text":"

\u6253\u5f00 ~/.ssh/config\uff0c\u590d\u5236\u4ee5\u4e0b\u4ee3\u7801\uff08\u6ce8\u610f\u53bb\u6389\u6ce8\u91ca\uff0c\u5426\u5219\u53ef\u80fd\u4f1a\u62a5\u9519\uff09\uff1a

# nickname you set for your office computer\nHost proxy\n    # username you set for login\n    User robinzhuang\n    # IP address of your office computer, change the xxx to real one!\n    Hostname 10.24.3.xxx\n\n# nickname for your cluster\nHost myserver\n    # username you set, change to real one!\n    User kmr\n    # IP for cluster, change to real one!\n    Hostname 123.45.67.89\n    # the key file location used in login \n    IdentityFile ~/.ssh/id_rsa\n    # specify the port number, replace xx with real port!\n    Port xx\n    # use Host proxy as Jump Server\n    ProxyJump proxy\n

\u6211\u4eec\u53ef\u4ee5\u53d1\u73b0\u5176\u5b9e\u662f\u76f4\u63a5\u767b\u5f55\u8bfe\u9898\u7ec4\u670d\u52a1\u5668\u7684\u4e00\u4e9b\u6539\u8fdb\uff0c\u6211\u4eec\u9996\u5148\u914d\u7f6e\u4e86\u4ece\u8fd9\u53f0\u7535\u8111\u767b\u5f55\u5230\u8df3\u677f\u673a\u7684\u547d\u4ee4\uff0c\u7136\u540e\u518d\u914d\u7f6e\u5229\u7528\u8df3\u677f\u673a\u5230\u670d\u52a1\u5668\u7684\u547d\u4ee4\u3002

\u5982\u679c\u4e0a\u8ff0\u7684 ProxyJump proxy \u4e0d\u8d77\u4f5c\u7528\uff0c\u53ef\u5c06\u5176\u66ff\u6362\u4e3a ProxyCommand ssh -o 'ForwardAgent yes' proxy \"ssh-add ~/.ssh/id_rsa && nc %h %p\" \uff0c\u8bf7\u7528\u4f60\u7684\u5bc6\u94a5\u7684\u8def\u5f84\u6765\u4ee3\u66ff\u4e0a\u8ff0\u7684 ~/.ssh/id_rsa \u90e8\u5206\u3002

\u5b8c\u6210\u4ee5\u4e0a\u914d\u7f6e\u540e\u53ef\u4ee5\u4f7f\u7528\u5982\u4e0b\u547d\u4ee4\u76f4\u63a5\u914d\u7f6e\uff1a

ssh myserver\n
"},{"location":"wiki/cluster_usage/ssh_note/#config_1","title":"\u5728config\u6587\u4ef6\u4e2d\u8f6c\u53d1\u7aef\u53e3*","text":"

\u6709\u65f6\uff0c\u6211\u4eec\u5728\u670d\u52a1\u5668\u4e0a\u90e8\u7f72\u4e86 jupyter notebook \u7b49\u670d\u52a1\u65f6\uff0c\u9700\u8981\u628a\u8fdc\u7a0b\u7684\u67d0\u4e2a\u7aef\u53e3 (\u4ee5\u4e0b\u4f8b\u5b50\u4e2d\u4e3a 8888 \u7aef\u53e3) \u8f6c\u53d1\u5230\u672c\u5730\u7684\u67d0\u4e2a\u7aef\u53e3 (\u4ee5\u4e0b\u4f8b\u5b50\u4e2d\u4e3a 9999 \u7aef\u53e3)\uff0c\u4f7f\u5f97\u5728\u672c\u5730\u8bbf\u95ee https://localhost:9999 \u65f6\u4e5f\u80fd\u8bbf\u95ee\u8fdc\u7a0b\u7684 jupyter notebook \u670d\u52a1\u3002

Host myserver # (1)!\n    User kmr # (2)!\n    Hostname 123.45.67.89 # (3)!\n    LocalForward 9999 localhost:8888 # (4)!\n
  1. \u4e3a\u4f60\u7684\u670d\u52a1\u5668\u53d6\u4e00\u4e2a\u4efb\u610f\u7684\u6635\u79f0
  2. \u8bf7\u4fee\u6539\u4e3a\u771f\u5b9e\u7684\u7528\u6237\u540d
  3. \u8bf7\u4fee\u6539\u4e3a\u771f\u5b9e\u7684IP
  4. localhost:8888 \u662f\u76f8\u5bf9\u4e8e\u8fdc\u7aef\u670d\u52a1\u5668\u7684\u771f\u5b9eIP\u548c\u7aef\u53e3\uff0c\u82e5\u4e0d\u662f localhost\uff0c\u8bf7\u66ff\u6362\u4e3a\u5bf9\u5e94\u7684IP\u548c\u7aef\u53e3\u53f7
"},{"location":"wiki/cluster_usage/ssh_note/#x11-forwarding_1","title":"\u5728\u4f7f\u7528\u8df3\u677f\u673a\u7684\u60c5\u51b5\u4e0b\u4f7f\u7528X11 Forwarding","text":"

\u53ea\u9700\u8981\u5728 ~/.ssh/config \u4e2d\u52a0\u5165

Host * # (1)!\n    ForwardX11Trusted yes\n
  1. \u5bf9\u4efb\u610f\u914d\u7f6e\u751f\u6548
"},{"location":"wiki/cluster_usage/ssh_note/#config_2","title":"\u4e00\u4efd\u793a\u4f8b\u914d\u7f6e\u6587\u4ef6\uff08config\uff09","text":"

\u4ee5\u4e0b\u4e3a ~/.ssh/config \u7684\u4e00\u4e2a\u793a\u4f8b\uff0c\u9700\u8981\u65f6\u53ef\u5728\u8fd9\u4efd\u793a\u4f8b\u6587\u4ef6\u4e0a\u8fdb\u884c\u4fee\u6539\uff0c\u5fc5\u8981\u4fee\u6539\u7684\u90e8\u5206\u5df2\u5728\u6ce8\u91ca\u4e2d\u6807\u51fa\uff0cGeneral config \u53ef\u4ee5\u76f4\u63a5\u7167\u6284\u3002\u6ce8\u610f\u987b\u5220\u6389\u6587\u4ef6\u4e2d\u6240\u6709\u7684\u6ce8\u91ca\u3002

# General config\nHost *\n    ForwardX11Trusted yes\n    ForwardAgent yes\n    AddKeysToAgent yes\n    ServerAliveInterval 60\n    ControlPersist yes\n    ControlMaster auto\n    ControlPath /tmp/%r@%h:%p\n\n# set proxy\n# nickname for your Jump Server\nHost nickname_proxy\n    # IP for Jump Server (REPLACE IT!)\n    Hostname 10.24.3.255\n    # your username for Jump Server (REPLACE IT!)\n    User chenglab\n\n# Host1 and host2\n# nickname for your cluster\nHost nickname_1\n    Hostname 123.45.67.89\n    # your host1 username (REPLACE IT!)\n    User kmr1 \n    LocalForward 8051 localhost:8888\n# nickname for your cluster\nHost nickname_2\n    Hostname 123.45.67.90\n    # your host2 username (REPLACE IT!)\n    User kmr2\n    LocalForward 8052 localhost:8888\n\n# set same parts for host1 and host2\n# use your own nickname\nHost nickname_1 nickname_2\n    Port 7696\n    # use your own nickname\n    ProxyJump nickname_proxy\n
"},{"location":"wiki/cluster_usage/ssh_note/#_6","title":"\u8d85\u7eb2\u7684\u90e8\u5206\u200b\u200b*","text":"

\u5728\u914d\u7f6e\u6587\u4ef6\u4e2d\u5b9e\u73b0\u7c7b\u4f3c\u9009\u62e9\u8bed\u53e5\u7684\u529f\u80fd\uff0c\u4ee5\u4e0b\u4f8b\u5b50\u63cf\u8ff0\u7684\u662f\u5f53\u7f51\u7edc\u73af\u5883\u968f\u65f6\u53d8\u66f4\u65f6\uff0c\u8fde\u63a5\u540c\u4e00\u53f0\u673a\u5668\u53ef\u80fd\u4f1a\u9700\u8981\u8bbf\u95ee\u4e0d\u540cIP\u65f6\u6240\u91c7\u53d6\u7684\u7b56\u7565\u3002

\u6b64\u4f8b\u5b50\u4e0d\u5efa\u8bae\u521d\u5b66\u8005\u76f4\u63a5\u590d\u5236\u7c98\u8d34\uff0c\u5176\u4e2d\u9700\u8981\u66ff\u6362\u7684\u90e8\u5206\u8bf7\u6839\u636e\u5177\u4f53\u5e94\u7528\u573a\u666f\u6765\u81ea\u884c\u659f\u914c

Host elements\n    User chenglab\n    Match host elements exec \"nc -G 4 -z 10.24.3.144 %p\"\n        # Private net IP\n        Hostname 10.24.3.144\n    Match host elements\n        # Public net IP\n        Hostname xxx.xxx.xxx.xxx\n        Port 6000\n
"},{"location":"wiki/cluster_usage/ssh_note/#_7","title":"\u5e38\u89c1\u95ee\u9898","text":""},{"location":"wiki/cluster_usage/ssh_note/#ssh-private-key-are-too-open","title":"ssh private key are too open","text":"

The error message is

@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@ WARNING: UNPROTECTED PRIVATE KEY FILE! @\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nPermissions 0644 for '/home/me/.ssh/id_rsa_targethost' are too open.\nIt is recommended that your private key files are NOT accessible by others.\nThis private key will be ignored.\nbad permissions: ignore key: /home/me/.ssh/id_rsa_targethost\n

This arises from the permissions of your private key file (id_rsa).

Use the command ls -l to check the permissions of your id_rsa file. If they are not -rw-------, change them with the following command:

chmod 600 ~/.ssh/id_rsa\n
"},{"location":"wiki/cluster_usage/ssh_note/#no-xauth-data-using-fake-authentication-data-for-x11-forwarding","title":"No xauth data; using fake authentication data for X11 forwarding.","text":"

The error message is

Warning: No xauth data; using fake authentication data for X11 forwarding.\n

This is because ssh cannot find your xauth executable. Usually it is located at /opt/X11/bin/xauth. Add this to your ssh config file:

Host *\n    XAuthLocation /opt/X11/bin/xauth\n
"},{"location":"wiki/cluster_usage/ssh_note/#remote-host-identification-has-changed","title":"Remote host identification has changed!","text":"

If the remote host has just been repaired or reinstalled, an error like the one below may be raised.

@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!     @\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nIT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!\nSomeone could be eavesdropping on you right now (man-in-the-middle attack)!\nIt is also possible that a host key has just been changed.\nThe fingerprint for the RSA key sent by the remote host is\n51:82:00:1c:7e:6f:ac:ac:de:f1:53:08:1c:7d:55:68.\nPlease contact your system administrator.\nAdd correct host key in /Users/isaacalves/.ssh/known_hosts to get rid of this message.\nOffending RSA key in /Users/isaacalves/.ssh/known_hosts:12\nRSA host key for 104.131.16.158 has changed and you have requested strict checking.\nHost key verification failed.\n

Take it easy: just edit the known_hosts file mentioned in the message (here /Users/isaacalves/.ssh/known_hosts) and remove the line containing the IP address of that remote host. On some systems, such as Ubuntu or Debian, running ssh-keygen -R <hostname> may be necessary; the exact command is usually suggested in the error message.

However, if no repair or upgrade has taken place, a man-in-the-middle attack may be underway. Stop logging in and contact the cluster administrator at once to verify.
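
For example, a minimal way to drop the offending entry (using the IP shown in the message above; replace it with your own) is:

ssh-keygen -R 104.131.16.158\n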

"},{"location":"wiki/cluster_usage/tensorboard/","title":"\u96c6\u7fa4 TensorBoard \u4f7f\u7528\u6307\u5357","text":""},{"location":"wiki/cluster_usage/tensorboard/#_1","title":"\u9700\u6c42","text":"

\u57fa\u4e8e DeepMD-kit \u6216\u8005 TensorFlow \u7684\u4ee3\u7801\u8c03\u8bd5\u53ca\u8bad\u7ec3\u6570\u636e\u5b9e\u65f6\u76d1\u63a7\u3002

TensorBoard \u662f\u4ec0\u4e48

DeepMD-kit \u5b98\u65b9\u6559\u7a0b

"},{"location":"wiki/cluster_usage/tensorboard/#_2","title":"\u7528\u6cd5","text":"

DP \u5b98\u65b9\u6559\u7a0b\u7ed9\u51fa\u4e86\u5728\u672c\u5730\u8fd0\u884c\u7a0b\u5e8f\u65f6\u7684\u53ef\u89c6\u5316\uff0c\u5982\u679c\u5728\u670d\u52a1\u5668\u4e0a\u8fd0\u884c\uff0c\u6211\u4eec\u9700\u8981\u8fdb\u884c\u7aef\u53e3\u8f6c\u53d1\u3002

"},{"location":"wiki/cluster_usage/tensorboard/#_3","title":"\u5728\u8ba1\u7b97\u8282\u70b9\u4e0a\u8fd0\u884c\u7a0b\u5e8f\uff08\u63a8\u8350\uff09","text":"

\u4ee5\u5728 gpu3 \u961f\u5217\u8fd0\u884c DeepMD-kit \u8bad\u7ec3\u7a0b\u5e8f\u4e3a\u4f8b\uff0c\u5176\u4ed6\u7a0b\u5e8f\u53ef\u5bf9\u5e94\u66ff\u6362\u3002

  1. \u901a\u8fc7 lsf \u811a\u672c\u63d0\u4ea4\u7a0b\u5e8f\u5230\u8ba1\u7b97\u8282\u70b9
    #!/bin/bash\n#BSUB -q gpu3\n#BSUB -W 24:00\n#BSUB -J type_map_0\n#BSUB -o %J.stdout\n#BSUB -e %J.stderr\n#BSUB -n 4\n#BSUB -gpu \"num=1:mode=shared:mps=no:j_exclusive=yes\"\n#BSUB -R \"span[ptile=32]\"\n\n# add modulefiles\nmodule add deepmd/2.0-cuda11.3\n\ndp train input.json 1>> train.log 2>> train.err &\ntensorboard --logdir=log --port=6006\n
    \u5982\u679c\u60f3\u8981\u5b9e\u65f6\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u6570\u636e\uff0c\u8bad\u7ec3\u6307\u4ee4\u548c tensorboard \u7684\u8fd0\u884c\u6307\u4ee4\u9700\u8981\u540c\u65f6\u8fd0\u884c\uff0c\u6545\u91c7\u7528 & \u5c06\u8bad\u7ec3\u6307\u4ee4\u6302\u8d77\u3002

    --logdir\u6307\u5b9a tensorboard \u7684 event \u6587\u4ef6\u6240\u5728\u8def\u5f84\uff08\u5728 json \u6587\u4ef6\u4e2d\u6307\u5b9a\uff09\u3002

    --port\u6307\u5b9a tensorboard \u5728\u670d\u52a1\u5668\u4e0a\u8fd0\u884c\u7684\u7aef\u53e3\u53f7\uff08\u7f3a\u7701\u9ed8\u8ba4\u4e3a 6006\uff09\u3002

  2. \u67e5\u770b\u8ba1\u7b97\u8282\u70b9 ip \u5730\u5740 \u505a\u6cd5\u7c7b\u4f3cjupyter notebook \u6559\u7a0b\uff0c\u5728\u767b\u5f55\u8282\u70b9\u547d\u4ee4\u884c\u8f93\u5165\u4e0b\u9762\u6307\u4ee4\uff08\u5c06 c51-m002\u66ff\u6362\u4e3a\u5b9e\u9645\u8fd0\u884c\u7684\u8282\u70b9\uff09\u3002
    cat /etc/hosts | grep c51-m002\n
  3. \u5c06\u7aef\u53e3\u8f6c\u53d1\u5230\u672c\u5730
    ssh -NfL localhost:<local_port>:<remote_ip>:<port> <username>@<ip_of_cluster>\n
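
例如，假设上一步查到计算节点的 IP 为 10.0.0.2（示例值，请替换为实际 IP），tensorboard 使用默认的 6006 端口，希望转发到本地的 6006 端口，则可以在本地执行（用户名与集群地址同样需要替换）：

# example values only, replace them with your own\nssh -NfL localhost:6006:10.0.0.2:6006 username@ip_of_cluster\n

随后在本地浏览器打开 http://localhost:6006 即可查看 tensorboard。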
"},{"location":"wiki/cluster_usage/tensorboard/#_4","title":"\u5728\u767b\u5f55\u8282\u70b9\u4e0a\u8fd0\u884c\u7a0b\u5e8f","text":"

Warning

\u4ec5\u4f9b\u77ed\u65f6\u95f4\u6d4b\u8bd5\uff01\u957f\u65f6\u95f4\u8fd0\u884c\u8bf7\u4f7f\u7528\u8ba1\u7b97\u8282\u70b9!!

\u5728\u547d\u4ee4\u884c\u4e2d\u8fd0\u884c\u8bad\u7ec3\u548c tensorboard \u7a0b\u5e8f\u540e\uff0c\u5728\u672c\u5730\u6267\u884c

ssh -NfL <local_port>:localhost:<port> <username>@<ip_of_cluster>\n
"},{"location":"wiki/cluster_usage/vscode_remote/","title":"\u5728\u975e\u767b\u9646\u8282\u70b9\u4e0a\u4f7f\u7528VSCode","text":"

VSCode \u901a\u8fc7 Remote \u63d2\u4ef6\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u8fdc\u7a0b\u7f16\u8f91\u80fd\u529b\uff0c\u4f7f\u5f97\u7528\u6237\u53ef\u4ee5\u5728\u8fdc\u7a0b\u83b7\u5f97\u63a5\u8fd1\u672c\u5730\u7684\u7f16\u8f91\u4f53\u9a8c\u3002 VSCode Server\u539f\u751f\u57fa\u4e8eNode\u548cElectron\u6280\u672f\uff0c\u6709\u7740\u8f83\u9ad8\u7684\u5185\u5b58\u7b49\u9700\u6c42\uff0c \u4f46\u9274\u4e8e\u76ee\u524d\u767b\u9646\u8282\u70b9\u7684\u8d44\u6e90\u65e5\u6e10\u6349\u895f\u89c1\u8098\uff0c\u8fd9\u91cc\u63d0\u51fa\u4e00\u4e2a\u65b9\u6848\uff0c \u53ef\u4ee5\u8ba9\u7528\u6237\u8f83\u4e3a\u65b9\u4fbf\u5730\u4f7f\u7528\u975e\u767b\u9646\u8282\u70b9\u7684\u8d44\u6e90\u5f00\u542fVSCode Remote\u3002

\u672c\u6587\u5047\u8bbe\u7528\u6237\u5df2\u7ecf\u9605\u8bfb\u8fc7SSH \u4e0e SCP \u4f7f\u7528\u5165\u95e8\u7279\u522b\u662f\u6709\u5173 config \u6587\u4ef6\u7684\u90e8\u5206\uff0c \u5e76\u77e5\u6653\u96c6\u7fa4\u7684\u57fa\u7840\u6982\u51b5\u548c\u8c03\u5ea6\u7cfb\u7edf\u4f7f\u7528\u65b9\u6cd5\u3002 \u5982\u672a\u9605\u8bfb\uff0c\u8bf7\u5148\u53c2\u9605\u4e0a\u8ff0\u4e24\u7bc7\u6587\u5b57\u3002

"},{"location":"wiki/cluster_usage/vscode_remote/#macos-linux","title":"MacOS \u548c Linux \u7528\u6237","text":"

\u7531\u4e8e\u7b14\u8005\u76ee\u524d\u4f7f\u7528\u7684\u8bbe\u5907\u662f MacOS \u64cd\u4f5c\u7cfb\u7edf\uff08Linux\u60c5\u51b5\u7c7b\u4f3c\uff09\uff0c\u8fd9\u91cc\u7ed9\u51fa\u8f83\u5b8c\u6574\u7684\u56fe\u6587\u8bf4\u660e\u3002

\u9996\u5148\u7528\u81ea\u5df1\u6700\u987a\u624b\u7684\u65b9\u5f0f\u6253\u5f00\u5e76\u7f16\u8f91 ~/.ssh/config \u6587\u4ef6\uff0c \u53c2\u7167\u8fd9\u91cc\u7684\u8bf4\u660e\uff0c \u589e\u52a0\u767b\u9646\u8282\u70b9\u7684\u914d\u7f6e\u4fe1\u606f:

.ssh/config
Host <nickname>\n    HostName <ip_of_zeus>\n    Port <port>\n    User <username>\n

\u8bf7\u5c06<ip_of_zeus>, <port>, <username>\u66ff\u6362\u4e3a\u5b9e\u9645\u7684IP\u5730\u5740\u3001\u7aef\u53e3\u53f7\u4ee5\u53ca\u7528\u6237\u540d\u3002 <nickname>\u8bf7\u66ff\u6362\u4e3a\u4efb\u610f\u81ea\u5df1\u559c\u6b22\u7684\u6635\u79f0\uff0c\u4f46\u8bf7\u6ce8\u610f\uff0c \u4e0d\u8981\u4f7f\u7528c5*\u7684\u5f62\u5f0f\uff01 \u5426\u5219\u4f1a\u548c\u4e0b\u6587\u51b2\u7a81\u3002

\u7136\u540e\u589e\u52a0\u4ee5\u4e0b\u51e0\u884c\uff1a

.ssh/config
Host c5*\n    User <username>\n    ProxyCommand ssh -o ForwardAgent=yes <username>@<nickname> \"nc -w 120 %h %p\"\n

\u8fd9\u91cc\u91c7\u7528 c5* \u4f5c\u4e3a\u524d\u7f00\u662f\u4e3a\u4e86\u5728\u767b\u9646\u8282\u70b9\u4e0a\u5feb\u901f\u767b\u9646\u5230\u5bf9\u5e94\u7684\u8ba1\u7b97\u8282\u70b9\u3002 Zeus \u96c6\u7fa4\u4e0a\u6240\u6709\u8ba1\u7b97\u8282\u70b9\uff08\u542bCPU\u3001GPU\u3001\u80d6\u8282\u70b9\uff09\u5747\u4ee5 c5* \u5f00\u5934\uff0c\u5177\u6709\u7c7b\u4f3c c5*-* \u7684\u5f62\u5f0f\uff0c \u6545\u8fd9\u91cc\u91c7\u7528\u5982\u6b64\u5199\u6cd5\u3002\u8bf7\u6839\u636e\u96c6\u7fa4\u7684\u60c5\u51b5\u5bf9\u5e94\u8c03\u6574\u3002

\u7136\u540e\u5728\u96c6\u7fa4\u4e0a\uff0c\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff0c\u5f00\u542f\u4e00\u4e2a\u865a\u62df\u7ec8\u7aef\uff1a

user@login01$ bsub -q fat -n 1 -Is bash\nJob <xxx> is submitted to queue <fat>.\n<<Waiting for dispatch ...>>\n<<Starting on c51-s001>>\nuser@c51-s001:~$ \n

\u6ce8\u610f bsub \u7684\u9644\u52a0\u547d\u4ee4\u8bf7\u53c2\u7167\u96c6\u7fa4\u4f7f\u7528\u8bf4\u660e\uff0c Walltime\u53ca\u961f\u5217\u60c5\u51b5\u4ecd\u9700\u8981\u53c2\u7167\u8bbe\u7f6e\u3002

\u7136\u540e\uff0c\u8bf7\u6253\u5f00\u4e00\u4e2aVSCode\u7a97\u53e3\uff0c\u5e76\u70b9\u51fb\u5de6\u4e0b\u89d2\u7684\u6309\u94ae\uff0c\u9009\u62e9\u201cConnect to Host\u201d\uff1a

\u8f93\u5165\u865a\u62df\u7ec8\u7aef\u6240\u5728\u7684\u8282\u70b9\uff0c\u4f8b\u5982\u4e0a\u6587\u4e2d\u7684\u8f93\u51fa c51-s001:

\u5982\u679c\u63d0\u793a\u8f93\u5165\u5bc6\u7801\u7b49\u4fe1\u606f\uff0c\u8bf7\u6309\u56de\u8f66\u4ee5\u7ee7\u7eed

\u7b49\u5f85\u5b89\u88c5 VSCode Server \u5373\u53ef\u3002\u82e5\u4ee5\u524d\u66fe\u914d\u7f6e\u8fc7\u8fdc\u7a0b\uff0c\u4f1a\u81ea\u52a8\u8c03\u7528\u4e4b\u524d\u7684\u670d\u52a1\u3002

"},{"location":"wiki/cluster_usage/vscode_remote/#windows","title":"Windows \u7528\u6237","text":"

\u5bf9\u4e8eWindows\u7528\u6237\uff0c\u7531\u4e8e\u7b14\u8005\u6682\u65f6\u6ca1\u6709Windows\u8bbe\u5907\uff0c\u8bf7\u53c2\u7167\u6b64\u6559\u7a0b\u5c1d\u8bd5\uff0c\u601d\u8def\u6bd4\u8f83\u63a5\u8fd1\u3002\u672c\u6587\u5373\u53c2\u8003\u4e86\u8be5\u6587\u7ae0\u7684\u5b9e\u73b0\u3002

"},{"location":"wiki/deprecated/deepmd-kit_installation_104/","title":"DeepMD-kit\u5b89\u88c5\u5b9e\u6218\uff1a\u670d\u52a1\u5668\u7bc7\uff08\u65e7\u7248\uff09","text":"

\u672c\u90e8\u5206\u5199\u4e8e2019\u5e7411\u6708\uff0c\u57fa\u4e8e\u56fd\u91cd\u670d\u52a1\u5668\u73af\u5883\u8fdb\u884c\u5b89\u88c5\uff0c\u9002\u7528\u4e8eTensorflow\u7248\u672c\u4f4e\u4e8e1.13\u7684\u60c5\u5f62\u3002\u76ee\u524d\u9488\u5bf9\u66f4\u9ad8\u7248\u672c\u5df2\u7ecf\u6709\u65b0\u7248\u6559\u7a0b\uff0c\u8bf7\u79fb\u6b65\u3002

"},{"location":"wiki/deprecated/deepmd-kit_installation_104/#_1","title":"\u51c6\u5907\u5de5\u4f5c","text":"

\u9996\u5148\u51c6\u5907\u5fc5\u8981\u7684\u4f9d\u8d56\u3002

\u68c0\u67e5\u53ef\u7528\u7684\u6a21\u5757\uff0c\u5e76\u52a0\u8f7d\u5fc5\u8981\u7684\u6a21\u5757\uff1a

module avail\nmodule add cuda/9.2\nmodule add gcc/4.9.4\n# gcc>=4.9 required by dp_ipi, or it won't be built.\n# gcc-8.3 is not supported, so a lower version is selected here.\n

\u672c\u6559\u7a0b\u63a8\u8350\u4f7f\u7528conda\u865a\u62df\u73af\u5883\u5b89\u88c5\uff0c\u6545\uff1a

module add miniconda/3.7\nconda create -n deepmd python=3.6\nconda activate deepmd\n

\u4e0b\u8f7d\u5e76\u7f16\u8bd1nccl\uff1a

cd /some/nccl_download_path\ngit clone https://github.com/NVIDIA/nccl.git -b v2.4.8-1\ncd nccl\nmake -j src.build --prefix=\"/some/nccl_install_path\" NVCC_GENCODE=\"-gencode=arch=compute_70,code=sm_70\"\n

\u7531\u4e8e\u56fd\u91cdGPU\u8282\u70b9\u4e0d\u80fd\u76f4\u63a5\u8054\u7f51\uff0c\u6545\u4f7f\u7528\u767b\u9646\u8282\u70b9\u8fdb\u884c\u7f16\u8bd1\u6548\u7387\u8f83\u9ad8\uff0c\u4f46\u7531\u4e8e\u7f3a\u5c11\u5fc5\u8981\u7684\u4f9d\u8d56libcuda.so\u548clibcuda.so.1\uff08\u5305\u542b\u5728GPU\u9a71\u52a8\u4e2d\uff0c\u767b\u9646\u8282\u70b9\u672a\u5b89\u88c5\uff09\uff0c\u6545\u91c7\u7528stubs\u6240\u5e26\u7684\u5e93\u7f16\u8bd1\uff0c\u5e76\u624b\u52a8\u52a0\u5165\u73af\u5883\u53d8\u91cf\u3002

ln -s /share/cuda/9.2/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1\nexport LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/share/cuda/9.2/lib64/stubs:/some/local/path\n

\u5728\u67d0\u4e2a\u60f3\u8981\u7684\u8def\u5f84\u4e0b\u5c06tensorflow-1.12\u7248\u672c\u7684\u6e90\u4ee3\u7801\u4e0b\u8f7d\u597d\uff1a

cd /some/workspace\ngit clone https://github.com/tensorflow/tensorflow tensorflow -b r1.12 --depth=1\n

\u4e0b\u8f7d\u597dbazel\u5b89\u88c5\u5305\u5e76\u8fd0\u884c\uff0c\u5c06\u6240\u9700\u7684\u73af\u5883\u52a0\u5165\u73af\u5883\u53d8\u91cf\uff1a

wget https://github.com/bazelbuild/bazel/releases/download/0.15.0/bazel-0.15.0-installer-linux-x86_64.sh\nchmod +x bazel-0.15.0-installer-linux-x86_64.sh\n./bazel-0.15.0-installer-linux-x86_64.sh --user\nexport PATH=\"$PATH:$HOME/bin\"\n
"},{"location":"wiki/deprecated/deepmd-kit_installation_104/#tensorflow","title":"tensorflow\u7f16\u8bd1","text":"

\u9996\u5148\u914d\u7f6etensorflow\u7684\u7f16\u8bd1\u9009\u9879\uff1a

cd tensorflow/\n./configure\n

\u6839\u636e\u9700\u8981\uff0c\u63d0\u4f9b\u6b63\u786e\u7684\u7ec4\u4ef6\u548c\u8def\u5f84\uff1a

Please specify the location of python. [Default is /xxx]:\n\nFound possible Python library paths:\n  /xxx/python3.6/site-packages\nPlease input the desired Python library path to use. Default is [xxx/python3.6/site-packages]\n\nDo you wish to build TensorFlow with Apache Ignite support? [Y/n]: Y\n\nDo you wish to build TensorFlow with XLA JIT support? [Y/n]: Y\n\nDo you wish to build TensorFlow with OpenCL SYCL support? [y/N]: N\n\nDo you wish to build TensorFlow with ROCm support? [y/N]: N\n\nDo you wish to build TensorFlow with CUDA support? [y/N]: Y\n\nPlease specify the CUDA SDK version you want to use. [Leave empty to default to CUDA 9.0]: 9.2\n\nPlease specify the location where CUDA 9.2 toolkit is installed. Refer to README.md for more details. [Default is /usr/local/cuda]: /share/cuda/9.2\n\nPlease specify the cuDNN version you want to use. [Leave empty to default to cuDNN 7]: 7\n\nPlease specify the location where cuDNN 7 library is installed. Refer to README.md for more details. [Default is /usr/local/cuda-10.0]: /share/cuda/9.2\n\nDo you wish to build TensorFlow with TensorRT support? [y/N]: N\n\nPlease specify the NCCL version you want to use. If NCCL 2.2 is not installed, then you can use version 1.3 that can be fetched automatically but it may have worse performance with multiple GPUs. [Default is 2.2]: 2.4.8\n\nPlease specify the location where NCCL 2 library is installed. Refer to README.md for more details. [Default is /usr/local/cuda]:/some/nccl_install_path\n\nPlease note that each additional compute capability significantly increases your build time and binary size. [Default is: 3.5,7.0] 6.1\n\nDo you want to use clang as CUDA compiler? [y/N]: N\n\nPlease specify which gcc should be used by nvcc as the host compiler. [Default is /xxx/gcc]: \n\nDo you wish to build TensorFlow with MPI support? [y/N]: N\n\nPlease specify optimization flags to use during compilation when bazel option \"--config=opt\" is specified [Default is -march=native]: -march=native\n\nWould you like to interactively configure ./WORKSPACE for Android builds? [y/N]:N\n

\u6ce8\u610f

  1. CUDA\u9700\u8981\u5199\u6e05\u662f9.2\u7248\u672c\uff0c\u5426\u5219\u53ef\u80fd\u4f1a\u627e\u4e0d\u5230\u5c0f\u7248\u672c\u7684\u4f9d\u8d56\u5e93\u3002

\u7136\u540e\u8fd0\u884c\u7f16\u8bd1\uff0c\u4f46\u7531\u4e8e\u8be5\u8282\u70b9\u7684\u7248\u672c\u8f83\u4e3a\u975e\u4e3b\u6d41\uff0c\u5efa\u8bae\u81ea\u884c\u7f16\u8bd1tf\u7684python interface\u4ee5\u907f\u514d\u517c\u5bb9\u6027\u95ee\u9898\u3002

bazel build --config=opt --copt=-msse4.2 --copt=-mavx --copt=-mavx2 --copt=-mfma --local_resources 2048,.5,1.0 --config=cuda //tensorflow/tools/pip_package:build_pip_package --action_env=\"LD_LIBRARY_PATH=${LD_LIBRARY_PATH}\"\n

\u7531\u4e8e\u76ee\u524d\u8282\u70b9\u652f\u6301\u4e3b\u8981\u7684\u51e0\u79cd\u4f18\u5316\u53c2\u6570\uff0c\u6545\u53ef\u4ee5\u5168\u90e8\u6253\u5f00\u4ee5\u52a0\u5feb\u8fd0\u884c\u901f\u5ea6\u3002

\u4e3a\u4e86\u4ed6\u4eba\u7684\u6b63\u5e38\u4f7f\u7528\uff0c\u5efa\u8bae\u4e3b\u52a8\u9650\u5236\u5728\u767b\u9646\u8282\u70b9\u4e0a\u7f16\u8bd1\u65f6\u7684\u5185\u5b58\u548cCPU\u8d44\u6e90\u4f7f\u7528\u91cf\u3002--local_resources 2048,.5,1.0\u8fd9\u4e2a\u8bbe\u5b9a\u53ef\u80fd\u6709\u4e9b\u4fdd\u5b88\uff0c\u4f46\u53ef\u4ee5\u4fdd\u8bc1\u4e0d\u4f1a\u5360\u7528\u8fc7\u591a\u8d44\u6e90\uff08\u5b9e\u6d4b\u9700\u898111\u4e2a\u5c0f\u65f6\u5de6\u53f3\uff0c\u4f46\u5168\u7a0b\u5185\u5b58\u5360\u7528\u4e0d\u8d85\u8fc72G\u4e14\u53ea\u4f7f\u7528\u4e86\u4e00\u4e2a\u7ebf\u7a0b\uff0c\u82e5\u89c9\u5f97\u592a\u6162\u53ef\u4ee5\u628a\u4e2d\u95f4\u7684\u53c2\u6570\u9002\u5f53\u8c03\u9ad8\uff09\u3002

  1. nccl\u548cgcc\u7684\u8def\u5f84\u5bf9\u5e94\u524d\u9762\u52a0\u8f7d\u548c\u7f16\u8bd1\u7684\u73af\u5883\u3002

\u7f16\u8bd1\u5982\u679c\u901a\u8fc7\uff0c\u5219\u518d\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u7f16\u8bd1c++ interface\uff08\u5b9e\u9645\u4e0a\u4e00\u6b65\u5df2\u7ecf\u7f16\u8bd1\u597d\u6240\u9700\u7684\u5927\u90e8\u5206\u4f9d\u8d56\uff0c\u8fd9\u4e00\u6b65\u53ea\u662f\u518d\u5c01\u88c5\u6210c++\u5e93\uff09\uff1a

bazel build -c opt --copt=-msse4.2 --copt=-mavx --copt=-mavx2 --copt=-mfma --config=cuda --verbose_failures //tensorflow:libtensorflow_cc.so --action_env=\"LD_LIBRARY_PATH=${LD_LIBRARY_PATH}\"\n

\u8fd9\u91cc\u53ef\u4ee5\u5148\u5c06tensorflow-python\u5b89\u88c5\u597d\u3002

./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg\npip install /tmp/tensorflow_pkg/tensorflow-version-tags.whl # depends on your version info\n

\u7136\u540e\uff0c\u5c06\u8fdb\u884c\u4e00\u7cfb\u5217\u4f9d\u8d56\u7684\u7f16\u8bd1\u548c\u5b89\u88c5\u3002\u4ee5\u9632\u4e07\u4e00\uff0c\u5efa\u8bae\u9996\u5148\u5b89\u88c5\u4f9d\u8d56\uff0c\u65b9\u4fbf\u8d77\u89c1\uff0c\u8fd9\u91cc\u4f7f\u7528conda\u5b89\u88c5\u3002

conda install automake autoconf libtool\n

\u5c06cmake\u5207\u6362\u5230\u65b0\u7248\u672c\uff1a

module add cmake/3.7.3\n

\u6307\u5b9atf-cc\u7684\u76ee\u6807\u8def\u5f84\u4e3a\u53d8\u91cf$tensorflow_root\uff0c\u5e76\u4f9d\u6b21\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a
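
其中 $tensorflow_root 可以预先这样定义（示意，请替换为实际的安装路径）：

export tensorflow_root=/some/tensorflow_install_path\n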

mkdir -p $tensorflow_root\nmkdir /tmp/proto\nsed -i 's;PROTOBUF_URL=.*;PROTOBUF_URL=\\\"https://mirror.bazel.build/github.com/google/protobuf/archive/v3.6.0.tar.gz\\\";g' tensorflow/contrib/makefile/download_dependencies.sh\ntensorflow/contrib/makefile/download_dependencies.sh\ncd tensorflow/contrib/makefile/downloads/protobuf/\n./autogen.sh\n./configure --prefix=/tmp/proto/\nmake\nmake install\nmkdir /tmp/eigen\ncd ../eigen\nmkdir build_dir\ncd build_dir\ncmake -DCMAKE_INSTALL_PREFIX=/tmp/eigen/ ../\nmake install\nmkdir /tmp/nsync\ncd ../../nsync\nmkdir build_dir\ncd build_dir\ncmake -DCMAKE_INSTALL_PREFIX=/tmp/nsync/ ../\nmake\nmake install\ncd ../../absl\nbazel build\nmkdir -p $tensorflow_root/include/\nrsync -avzh --include '*/' --include '*.h' --exclude '*' absl $tensorflow_root/include/\ncd ../../../../..\nmkdir $tensorflow_root/lib\ncp bazel-bin/tensorflow/libtensorflow_cc.so $tensorflow_root/lib/\ncp bazel-bin/tensorflow/libtensorflow_framework.so $tensorflow_root/lib/\ncp /tmp/proto/lib/libprotobuf.a $tensorflow_root/lib/\ncp /tmp/nsync/lib64/libnsync.a $tensorflow_root/lib/\nmkdir -p $tensorflow_root/include/tensorflow\ncp -r bazel-genfiles/* $tensorflow_root/include/\ncp -r tensorflow/cc $tensorflow_root/include/tensorflow\ncp -r tensorflow/core $tensorflow_root/include/tensorflow\ncp -r third_party $tensorflow_root/include\ncp -r /tmp/proto/include/* $tensorflow_root/include\ncp -r /tmp/eigen/include/eigen3/* $tensorflow_root/include\ncp -r /tmp/nsync/include/*h $tensorflow_root/include\ncd $tensorflow_root/include\nfind . -name \"*.cc\" -type f -delete\nrm -fr /tmp/proto /tmp/eigen /tmp/nsync\n

\u4ee5\u5b8c\u6210c++\u90e8\u5206\u7684\u7f16\u8bd1\u3002

"},{"location":"wiki/deprecated/deepmd-kit_installation_104/#deepmd-kit10","title":"DeePMD-kit\u5b89\u88c5(1.0+)","text":"

\u9996\u5148\u4e0b\u8f7dDeePMD-kit\uff0c\u5e76\u8fdb\u5165\uff1a

cd /some/workspace\ngit clone https://github.com/deepmodeling/deepmd-kit.git\ncd deepmd-kit\ndeepmd_source_dir=`pwd`\n

\u5982\u679c\u524d\u9762\u4f7f\u7528\u4e86module load gcc/4.9.4\u63d0\u4f9b\u7684\u9ad8\u7248\u672cgcc\uff08\u4ee54.9.4\u4e3a\u4f8b\uff09\u8fdb\u884c\u7f16\u8bd1\uff0c\u9700\u8981\u624b\u52a8\u8f7d\u5165\u5bf9\u5e94\u7684\u73af\u5883\u53d8\u91cf\u4f9bcmake\u8bc6\u522b\u6b63\u786e\u7684gcc\u7248\u672c\u3002

export CC=/share/apps/gcc/4.9.4/bin/gcc\nexport CXX=/share/apps/gcc/4.9.4/bin/g++\n

\u7136\u540e\u5b89\u88c5dpmd-py

pip install .\n

\u5982\u679c\u9047\u5230no module named 'google'\u6216\u8005no module named 'absl'\u7684\u62a5\u9519\uff0c\u5219\u53ef\u80fd\u5b58\u5728\u7248\u672cbug\uff0c\u9700\u8981\u91cd\u65b0\u5b89\u88c5\u4f9d\u8d56\u3002

pip install --upgrade protobuf\npip install --upgrade absl-py\n

\u6307\u5b9aDeePMD-kit\u7684\u76ee\u6807\u8def\u5f84\u4e3a\u53d8\u91cf$deepmd_root\uff0c\u968f\u540e\u7f16\u8bd1DeePMD-kit C++ Interface\uff1a

cd $deepmd_source_dir/source\nmkdir build \ncd build\ncmake -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..\nmake\nmake install\n

\u5982\u679c\u8fd0\u884c\uff1a

$ ls $deepmd_root/bin\ndp_ipi\n$ ls $deepmd_root/lib\nlibdeepmd_ipi.so  libdeepmd_op.so  libdeepmd.so\n

\u5f97\u5230\u4e0a\u8ff0\u7684\u7ed3\u679c\uff0c\u8bf4\u660e\u7f16\u8bd1\u6210\u529f\uff08\u82e5cmake\u65f6\u68c0\u6d4b\u5230\u7684\u662f4.8\u6216\u66f4\u4f4e\u7248\u672c\u7684gcc\uff0c\u5219\u7f16\u8bd1\u7ed3\u679c\u4f1a\u7f3a\u5c11dp_ipi\u548clibdeepmd_ipi.so\uff09\u3002

"},{"location":"wiki/deprecated/deepmd-kit_installation_104/#lammps-deepmd-kit","title":"LAMMPS DeePMD-kit \u63a5\u53e3\u7f16\u8bd1","text":"

\u9996\u5148\u7f16\u8bd1\u63a5\u53e3\uff1a

cd $deepmd_source_dir/source/build\nmake lammps\n

\u7136\u540e\u4e0b\u8f7d\u597d\u7a33\u5b9a\u7248\u7684lammps\uff0c\u5e76\u89e3\u538b\uff1a

cd /some/workspace\nwget -c https://lammps.sandia.gov/tars/lammps-stable.tar.gz\ntar xf lammps-stable.tar.gz\n

\u82e5\u89e3\u538b\u540e\u5f97\u5230\u76ee\u5f55\u540d\u4e3alammps-31Mar17\uff0c\u5219

cd lammps-31Mar17/src/\ncp -r $deepmd_source_dir/source/build/USER-DEEPMD .\n

\u6253\u5f00deepmd module\uff0c\u5e76\u6839\u636e\u9700\u8981\u6dfb\u52a0\u6240\u9700\u7684\u6a21\u5757\uff0c\u4ee5fep\u4e3a\u4f8b\uff1a

make yes-user-deepmd\nmake yes-user-fep \n

\u8f7d\u5165\u9700\u8981\u7684mpi\u5e93\uff0c\u5e76\u7f16\u8bd1\uff1a

module load intel/15.0.6\nmodule load mpi/intel/5.0.3.049\nmake mpi -j4\n

\u5f97\u5230\u53ef\u6267\u884c\u6587\u4ef6\uff1almp_mpi\u3002

\u53ef\u5c06\u8be5\u6587\u4ef6\u590d\u5236\u5230\u5728$PATH\u4e2d\u7684\u8def\u5f84\uff0c\u5219\u53ef\u4ee5\u76f4\u63a5\u8f93\u5165\u6587\u4ef6\u540d\u8fd0\u884c\u3002
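
例如（示意，假设 ~/bin 已包含在 $PATH 中）：

cp lmp_mpi ~/bin/\n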

"},{"location":"wiki/deprecated/deepmd-kit_installation_104/#_2","title":"\u6ce8\u610f","text":"

\u5b8c\u6210\u4e0a\u8ff0\u5b89\u88c5\u6b65\u9aa4\u540e\uff0c\u82e5\u9700\u8981\u7acb\u5373\u6d4b\u8bd5\u8fd0\u884c\uff0c**\u5fc5\u987b**\u5c06stubs\u63d0\u4f9b\u7684libcuda.so\u548clibcuda.so.1\u4ece\u73af\u5883\u53d8\u91cf\u4e2d\u79fb\u9664\uff0c\u5426\u5219\u8fd0\u884c\u65f6\u4f1a\u62a5\u9519\u3002

\u53ef\u4ee5\u76f4\u63a5\u9000\u51fa\u767b\u9646\u5e76\u91cd\u65b0\u767b\u9646\uff0c\u4ee5\u514d\u51fa\u73b0\u8be5\u95ee\u9898\u3002

"},{"location":"wiki/deprecated/deepmd-kit_installation_104/#_3","title":"\u4e00\u4e9b\u53ef\u80fd\u7684\u5751","text":"

\u5c3d\u7ba1\u4e0a\u8ff0\u8fc7\u7a0b\u5e94\u8be5\u5df2\u7ecf\u7ed5\u8fc7\u4e86\u5927\u90e8\u5206\u7684\u5751\uff0c\u4f46\u4ecd\u4e0d\u80fd\u4fdd\u8bc1100%\u5b89\u88c5\u8fd0\u884c\u6210\u529f\u3002\u8fd9\u91cc\u8bb0\u5f55\u51e0\u79cd\u53ef\u80fd\u7684\u62a5\u9519\u7684\u5904\u7406\u65b9\u6848\u3002

"},{"location":"wiki/deprecated/deepmd-kit_installation_104/#conda-init","title":"\u9700\u8981conda init","text":"

\u8fd9\u79cd\u60c5\u51b5\u5df2\u77e5\u53ef\u80fd\u53d1\u751f\u5728lsf\u811a\u672c\u63d0\u4ea4\u7684\u6b65\u9aa4\uff0c\u6765\u6e90\u4e8econda activate deepmd\u7684\u6b65\u9aa4\u3002\u5177\u4f53\u539f\u56e0\u5c1a\u4e0d\u6e05\u695a\uff0c\u89e3\u51b3\u65b9\u6848\u662f\u624b\u52a8\u8f7d\u5165\u6240\u9700\u7684\u73af\u5883\u53d8\u91cf\u3002\u63a8\u8350\u7684\u505a\u6cd5\u662f\u5229\u7528\u7528\u6237\u81ea\u5b9a\u4e49module\u3002

\u9996\u5148\uff0c\u542f\u7528\u81ea\u5b9a\u4e49module\uff1a

module load use.own\n

\u7136\u540e\u8fd0\u884cmodule avail\u67e5\u770b\u81ea\u5b9a\u4e49\u811a\u672c\u7684\u6587\u4ef6\u4f4d\u7f6e\uff0c\u8f93\u51fa\u7ed3\u679c\u53ef\u80fd\u5982\u4e0b\uff1a

----------- /share/base/modulefiles/compilers -----------\n............\n\n------------- /usr/share/Modules/modulefiles ------------\ndot         module-git  module-info modules     null        use.own\n\n------------ /data/home/someuser/privatemodules ------------\nnull\n

\u663e\u793a/data/home/someuser/privatemodules\u662f\u5f53\u524d\u7528\u6237\u81ea\u5b9a\u4e49\u6a21\u5757\u7684\u5b58\u653e\u4f4d\u7f6e\u3002

\u5219\u521b\u5efa\u8def\u5f84\uff0c\u5e76\u8fdb\u5165\uff1a

mkdir -p /data/home/someuser/privatemodules\ncd /data/home/someuser/privatemodules\n

\u7136\u540e\u6839\u636e\u60f3\u8981\u7684\u540d\u5b57\u521b\u5efa\u6587\u4ef6\u6216\u76ee\u5f55\u3002

\u6bd4\u5982\u60f3\u4ee5deepmd\u4e3a\u6a21\u5757\u540d\uff0c\u4e14\u5e0c\u671b\u63d0\u4f9b\u4e0d\u540c\u7248\u672c\u7684\u652f\u6301\uff0c\u5219\u53ef\u4ee5\uff1a

mkdir deepmd\nvim 1.0\n

\u7f16\u8f911.0\u6587\u4ef6\uff1a

# Help message\nproc ModulesHelp { } {\n    set nameversion [module-info name]\n    regsub \"/.*\" $nameversion \"\" name\n    regsub \".*/\" $nameversion \"\" version\n    puts stderr \"\\tLoads the $version $name environment\"\n}\n\n# Set variables\nset nameversion [module-info name]\nregsub \"/.*\" $nameversion \"\" name\nregsub \".*/\" $nameversion \"\" version\n\nmodule-whatis \"Miniconda, an alternative distribution for python 3.6\"\n\n# set environment variables\n\n    setenv        PYTHONROOT    /data/home/someuser/anaconda3/envs/deepmd\n\n    prepend-path    PATH        $env(PYTHONROOT)/bin\n    prepend-path    MANPATH        $env(PYTHONROOT)/share/man\n    prepend-path    PYTHONPATH    $env(PYTHONROOT)/lib/python3.6/site-packages\n

\u6ce8\u610f\u4fee\u6539PYTHONROOT\u4e3a\u6b63\u786e\u7684\u865a\u62df\u73af\u5883\u8def\u5f84\uff08\u53ef\u7528conda env list\u67e5\u770b\uff09,\u5e76\u4e14python3.6\u4e5f\u8981\u4e0e\u5b9e\u9645\u4f7f\u7528\u7684python\u7248\u672c\u4e00\u81f4\u3002

\u8fd9\u6837\uff0c\u4fbf\u53ef\u4ee5\u901a\u8fc7module\u8c03\u7528\u6240\u9700\u7684\u865a\u62df\u73af\u5883\u3002

\u4f7f\u7528\u65f6\u63d0\u4ea4\u811a\u672c\u53ef\u4ee5\u8fd9\u6837\u5199\uff1a

module load use.own\nmodule load deepmd/1.0\n
"},{"location":"wiki/deprecated/lsf_usage/","title":"Lsf usage","text":""},{"location":"wiki/deprecated/lsf_usage/#lsf","title":"LSF \u4f5c\u4e1a\u7ba1\u7406\u7cfb\u7edf\uff08\u65b0\u7248\uff0c\u4f5c\u4e3a\u5f52\u6863\uff09","text":"

\u76ee\u524d LSF Suite 10.2 \u5df2\u5728 Zeus \u4e0a\u90e8\u7f72\u6d4b\u8bd5\uff0c\u8be5\u7248\u672c\u5305\u542b\u4e86\u65b0\u7248\u7684 LSF \u4f5c\u4e1a\u7ba1\u7406\u7cfb\u7edf\uff0c\u56e0\u800c\u53ef\u5bf9 GPU \u63d0\u4f9b\u652f\u6301\u3002

\u8f93\u5165 lsload -gpu \u5373\u53ef\u67e5\u770b\u96c6\u7fa4\u5f53\u524d\u53ef\u4ee5\u4f7f\u7528\u7684 GPU \u6570\u76ee\uff1a

HOST_NAME       status  ngpus  gpu_shared_avg_mut  gpu_shared_avg_ut  ngpus_physical\nc51-g001            ok      4                  1%                 6%               4\nc51-g002            ok      4                  0%                 6%               4\nc51-m002            ok      8                  9%                68%               8\nc51-m004            ok      8                 12%                89%               8\nc51-m003            ok      8                  9%                72%               8\nc51-m001            ok      8                 15%                72%               8\n

\u8f93\u5165 lsload -gpuload \u5219\u53ef\u4ee5\u5bf9 GPU \u8d1f\u8f7d\u60c5\u51b5\u8fdb\u884c\u7edf\u8ba1\uff1a

HOST_NAME       gpuid   gpu_model   gpu_mode  gpu_temp   gpu_ecc  gpu_ut  gpu_mut gpu_mtotal gpu_mused   gpu_pstate   gpu_status   gpu_error\nc51-g001            0 TeslaV100_S        0.0       48C       0.0     26%       7%      31.7G      1.1G            0           ok           -\n                    1 TeslaV100_S        0.0       38C       0.0      0%       0%      31.7G        0M            0           ok           -\n                    2 TeslaV100_S        0.0       36C       0.0      0%       0%      31.7G        0M            0           ok           -\n                    3 TeslaV100_S        0.0       37C       0.0      0%       0%      31.7G        0M            0           ok           -\nc51-g002            0 A10080GBPCI        0.0       44C       0.0      8%       0%      79.3G     1020M            0           ok           -\n                    1 A10080GBPCI        0.0       49C       0.0      8%       0%      79.3G     1020M            0           ok           -\n                    2 A10080GBPCI        0.0       47C       0.0      8%       0%      79.3G     1020M            0           ok           -\n                    3 A10080GBPCI        0.0       44C       0.0      0%       0%      79.3G      434M            0           ok           -\nc51-m004            0 NVIDIAGeFor        0.0       64C       0.0     91%      13%      10.7G      1.5G            2           ok           -\n                    1 NVIDIAGeFor        0.0       65C       0.0     89%      13%      10.7G      1.5G            2           ok           -\n                    2 NVIDIAGeFor        0.0       60C       0.0     88%      12%      10.7G      1.5G            2           ok           -\n                    3 NVIDIAGeFor        0.0       66C       0.0     89%      13%      10.7G      1.5G            2           ok           -\n                    4 NVIDIAGeFor        0.0       69C       0.0     87%      13%      10.7G      1.5G            2           ok           -\n                    5 NVIDIAGeFor        0.0       70C       0.0     91%      13%      10.7G      1.5G            2           ok           -\n                    6 NVIDIAGeFor        0.0       65C       0.0     85%      12%      10.7G      1.5G            2           ok           -\n                    7 NVIDIAGeFor        0.0       64C       0.0     87%      12%      10.7G      1.5G            2           ok           -\nc51-m002            0 NVIDIAGeFor        0.0       58C       0.0     92%      14%      10.7G      1.5G            2           ok           -\n                    1 NVIDIAGeFor        0.0       65C       0.0     86%      13%      10.7G      2.5G            2           ok           -\n                    2 NVIDIAGeFor        0.0       56C       0.0     86%      13%      10.7G      2.5G            2           ok           -\n                    3 NVIDIAGeFor        0.0       55C       0.0     63%       8%      10.7G      768M            2           ok           -\n                    4 NVIDIAGeFor        0.0       51C       0.0     63%       8%      10.7G      768M            2           ok           -\n                    5 NVIDIAGeFor        0.0       52C       0.0     68%       9%      10.7G      768M            2           ok           -\n                    6 NVIDIAGeFor        0.0       54C       0.0     66%       8%      10.7G      768M            2           ok           -\n                    7 NVIDIAGeFor        0.0       52C       0.0     39%       2%      10.7G      1.5G            2           ok           
-\nc51-m003            0 NVIDIAGeFor        0.0       55C       0.0     62%       8%      10.7G      768M            2           ok           -\n                    1 NVIDIAGeFor        0.0       53C       0.0     64%       8%      10.7G      768M            2           ok           -\n                    2 NVIDIAGeFor        0.0       51C       0.0     64%       8%      10.7G      768M            2           ok           -\n                    3 NVIDIAGeFor        0.0       55C       0.0     62%       8%      10.7G      768M            2           ok           -\n                    4 NVIDIAGeFor        0.0       55C       0.0     79%      10%      10.7G      768M            2           ok           -\n                    5 NVIDIAGeFor        0.0       57C       0.0     79%      10%      10.7G      768M            2           ok           -\n                    6 NVIDIAGeFor        0.0       54C       0.0     80%      10%      10.7G      768M            2           ok           -\n                    7 NVIDIAGeFor        0.0       55C       0.0     80%      10%      10.7G      768M            2           ok           -\nc51-m001            0 NVIDIAGeFor        0.0       62C       0.0     98%      21%      10.7G      1.7G            2           ok           -\n                    1 NVIDIAGeFor        0.0       64C       0.0     98%      22%      10.7G      1.7G            2           ok           -\n                    2 NVIDIAGeFor        0.0       58C       0.0     97%      21%      10.7G      1.7G            2           ok           -\n                    3 NVIDIAGeFor        0.0       66C       0.0     93%      19%      10.7G      894M            2           ok           -\n                    4 NVIDIAGeFor        0.0       69C       0.0     98%      21%      10.7G      1.7G            2           ok           -\n                    5 NVIDIAGeFor        0.0       62C       0.0     98%      21%      10.7G      1.7G            2           ok           -\n                    6 NVIDIAGeFor        0.0       25C       0.0      0%       0%      10.7G        0M            8           ok           -\n                    7 NVIDIAGeFor        0.0       35C       0.0      0%       0%      10.7G        0M            8           ok           -\n

\u4f7f\u7528 GPU \u8d44\u6e90\u65f6\uff0c\u9700\u8981\u5bf9\u63d0\u4ea4\u811a\u672c\u8fdb\u884c\u76f8\u5e94\u4fee\u6539\uff0c\u7528 -gpu \u547d\u4ee4\u7533\u8bf7 GPU \u8d44\u6e90\u3002

#!/bin/bash\n\n#BSUB -q gpu\n#BSUB -W 24:00\n#BSUB -J train\n#BSUB -o %J.stdout\n#BSUB -e %J.stderr\n#BSUB -gpu \"num=1:mode=shared:mps=no:j_exclusive=no\"\n#BSUB -n 4\n#BSUB -R \"span[ptile=32]\"\n\nmodule add deepmd/2.0b1\nlmp_mpi -i input.lammps 1>> model_devi.log 2>> model_devi.log\n

\u5176\u4e2d num=1 \u8868\u793a\u7533\u8bf71\u5f20GPU\u5361\uff0cj_exclusive=no \u8868\u793a\u5141\u8bb8\u548c\u5176\u4ed6\u4efb\u52a1\u5171\u5b58\uff0c-n \u8868\u793a\u7533\u8bf7\u7684CPU\u6838\u6570\u3002 \u4f7f\u7528V100\u65f6\uff0c\u8bf7\u8bbe\u7f6e\u4e3a\u4e0d\u8d85\u8fc78\u7684\u6574\u6570\uff1b \u4f7f\u7528A100\u65f6\uff0c\u8bf7\u8bbe\u7f6e\u4e3a\u4e0d\u8d85\u8fc78\u7684\u6574\u6570\uff0c\u82e5\u4e3a\u5f00\u542fMIG\u7684\u60c5\u51b5\uff0c\u8bf7\u53c2\u8003A100\u62c6\u5206\u5b9e\u4f8b\u4f7f\u7528\u8bf4\u660e\uff1b \u4f7f\u75282080Ti\u65f6\uff0c\u8bf7\u8bbe\u7f6e\u4e3a\u4e0d\u8d85\u8fc74\u7684\u6574\u6570\uff0c\u5426\u5219\u5747\u53ef\u80fd\u4f1a\u51fa\u73b0\u8d44\u6e90\u7a7a\u95f2\u4f46\u65e0\u6cd5\u4f7f\u7528\u7684\u60c5\u51b5\u3002\u5982\u5e0c\u671b\u72ec\u5360\u4e00\u5f20\u5361\u8bf7\u4f7f\u7528j_exclusive=yes\u3002

\u94fe\u63a5

\u4f7f\u7528\u65b0\u7248 LSF \u63d0\u4ea4\u4efb\u52a1\uff0c\u4e0d\u9700\u8981\u5f15\u5165\u68c0\u6d4b\u811a\u672c\u6216CUDA_VISIBLE_DEVICES\u63a7\u5236\u4f7f\u7528\u7684GPU\u3002

"},{"location":"wiki/deprecated/lsf_usage/#cpu","title":"\u7ed1\u5b9aCPU","text":"

\u5bf9\u67d0\u4e9b\u4f5c\u4e1a\u7c7b\u578b\uff08\u5982VASP\uff09\uff0c\u5f53\u4f7f\u7528GPU\u65f6\uff0c\u4f1a\u5e0c\u671bCPU\u8fdb\u7a0b\u5c3d\u53ef\u80fd\u72ec\u7acb\u8fd0\u884c\u5728\u6240\u5206\u914d\u7684\u6838\u4e0a\uff0c\u6b64\u65f6\u53ef\u901a\u8fc7\u8bbe\u7f6e CPU \u4eb2\u548c\u6027\u6765\u63a7\u5236\u6240\u7528\u7684\u6838\u6570\u3002\u793a\u4f8b\u5982\u4e0b\uff1a

#!/bin/bash\n#\n#BSUB -q gpu\n#BSUB -W 12:00\n#BSUB -J vasp\n#BSUB -o vasp.%J.stdout\n#BSUB -e vasp.%J.stderr\n#BSUB -n 8\n#BSUB -R \"span[ptile=32]\"\n#BSUB -gpu \"num=1:mode=shared:mps=no:j_exclusive=no\"\n#BSUB -R \"affinity[core(1,exclusive=(core,alljobs))]\"\n\n# add modulefiles\nmodule load vasp/6.1.0-openacc\nmpirun -np 1 vasp_gam\n

\u5176\u4e2d\uff0ccore(1,exclusive=(core,alljobs)) \u8868\u793a\u4f7f\u75281\u4e2a\u6838\u4e14\u4e0e\u5176\u4ed6\u4f5c\u4e1a\u4e0d\u540c\u3002\u6ce8\u610f\u8fd9\u91cc\u9700\u8981\u6839\u636e\u5b9e\u9645\u4f7f\u7528\u7684\u6838\u6570\u6307\u5b9a\uff0c\u56e0\u4e3a\u4f5c\u4e1a\u4e2dmpirun -np\u7684\u53c2\u6570\u662f1\u3002

"},{"location":"wiki/deprecated/lsf_usage/#dp-gen-slurm","title":"DP-GEN Slurm \u7cfb\u7edf\u63d0\u4ea4\u65b9\u6cd5","text":"

\u4ee5\u8bad\u7ec3\u6b65\u9aa4\u4e3a\u4f8b\uff1a

{\n  \"train\": [\n    {\n      \"machine\": {\n        \"machine_type\": \"slurm\",\n        \"hostname\": \"xx.xxx.xxx.xxx\",\n        \"port\": 22,\n        \"username\": \"chenglab\",\n        \"work_path\": \"/home/chenglab/ypliu/dprun/train\"\n      },\n      \"resources\": {\n        \"numb_gpu\": 1,\n        \"numb_node\": 1,\n        \"task_per_node\": 2,\n        \"partition\": \"gpu\",\n        \"exclude_list\": [],\n        \"source_list\": [],\n        \"module_list\": [\n            \"deepmd/1.2\"\n        ],\n        \"time_limit\": \"96:0:0\",\n        \"sleep\": 20\n      },\n      \"python_path\": \"/share/apps/deepmd/1.2/bin/python3.6\"\n    }\n  ],\n  ...\n}\n

\u82e5\u63d0\u4ea4\u4efb\u52a1\u4f7f\u7528QoS\u8bbe\u7f6e\uff0c\u5219\u53ef\u4ee5\u5728resources\u4e2d\u589e\u52a0qos\u9879\u76ee\uff0c\u793a\u4f8b\u5982\u4e0b\uff1a

{\n  \"train\": [\n    {\n      \"machine\": {\n        \"machine_type\": \"slurm\",\n        \"hostname\": \"xx.xxx.xxx.xxx\",\n        \"port\": 22,\n        \"username\": \"chenglab\",\n        \"work_path\": \"/home/chenglab/ypliu/dprun/train\"\n      },\n      \"resources\": {\n        \"numb_gpu\": 1,\n        \"numb_node\": 1,\n        \"task_per_node\": 2,\n        \"partition\": \"gpu\",\n        \"exclude_list\": [],\n        \"source_list\": [],\n        \"module_list\": [\n            \"deepmd/1.2\"\n        ],\n        \"time_limit\": \"96:0:0\",\n        \"qos\": \"normal\",\n        \"sleep\": 20\n      },\n      \"python_path\": \"/share/apps/deepmd/1.2/bin/python3.6\"\n    }\n  ],\n  ...\n}\n
"},{"location":"wiki/deprecated/lsf_usage/#lsf_1","title":"LSF \u4f5c\u4e1a\u7ba1\u7406\u7cfb\u7edf\uff08\u65e7\u7248\uff09","text":"

\u76ee\u524d\u65e7\u7248 LSF \u7cfb\u7edf\uff0810.1.0.0\uff09\u5df2\u4e0d\u518d\u9002\u7528\uff0c\u6b64\u90e8\u5206\u4ec5\u4f5c\u5f52\u6863\uff0c\u4e0d\u518d\u66f4\u65b0\uff0c\u8fd8\u8bf7\u7559\u610f\u3002 \u65b0\u7248\u8bf4\u660e\u8bf7\u79fb\u6b65\u3002

\u5728GPU\u8282\u70b9\u4e0a\uff0c\u9700\u8981\u901a\u8fc7\u6307\u5b9a CUDA_VISIBLE_DEVICES \u6765\u5bf9\u4efb\u52a1\u8fdb\u884c\u7ba1\u7406\u3002

#!/bin/bash\n\n#BSUB -q gpu\n#BSUB -W 24:00\n#BSUB -J test\n#BSUB -o %J.stdout\n#BSUB -e %J.stderr\n#BSUB -n 4\n

lsf \u63d0\u4ea4\u811a\u672c\u4e2d\u9700\u8981\u5305\u542b export CUDA_VISIBLE_DEVICES=X \uff0c\u5176\u4e2d X \u6570\u503c\u9700\u8981\u6839\u636e\u5177\u4f53\u8282\u70b9\u7684\u5361\u7684\u4f7f\u7528\u60c5\u51b5\u786e\u5b9a\u3002

\u4f7f\u7528\u8005\u53ef\u4ee5\u7528 ssh <host> nvidia-smi \u767b\u9646\u5230\u5bf9\u5e94\u8282\u70b9\uff08\u8282\u70b9\u540d\u4e3a <host>\uff09\u68c0\u67e5 GPU \u4f7f\u7528\u60c5\u51b5\u3002 \u793a\u4f8b\u5982\u4e0b\uff1a

$ ssh c51-g001 nvidia-smi\nWed Mar 10 12:59:01 2021\n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 460.32.03    Driver Version: 460.32.03    CUDA Version: 11.2     |\n|-------------------------------+----------------------+----------------------+\n| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n|                               |                      |               MIG M. |\n|===============================+======================+======================|\n|   0  Tesla V100-SXM2...  Off  | 00000000:61:00.0 Off |                    0 |\n| N/A   42C    P0    42W / 300W |      3MiB / 32510MiB |      0%      Default |\n|                               |                      |                  N/A |\n+-------------------------------+----------------------+----------------------+\n|   1  Tesla V100-SXM2...  Off  | 00000000:62:00.0 Off |                    0 |\n| N/A   43C    P0    44W / 300W |  31530MiB / 32510MiB |     62%      Default |\n|                               |                      |                  N/A |\n+-------------------------------+----------------------+----------------------+\n|   2  Tesla V100-SXM2...  Off  | 00000000:89:00.0 Off |                    0 |\n| N/A   43C    P0    45W / 300W |      3MiB / 32510MiB |      0%      Default |\n|                               |                      |                  N/A |\n+-------------------------------+----------------------+----------------------+\n|   3  Tesla V100-SXM2...  Off  | 00000000:8A:00.0 Off |                    0 |\n| N/A   43C    P0    47W / 300W |      3MiB / 32510MiB |      0%      Default |\n|                               |                      |                  N/A |\n+-------------------------------+----------------------+----------------------+\n\n+-----------------------------------------------------------------------------+\n| Processes:                                                                  |\n|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |\n|        ID   ID                                                   Usage      |\n|=============================================================================|\n|    1   N/A  N/A    127004      C   ...pps/deepmd/1.2/bin/python    31527MiB |\n+-----------------------------------------------------------------------------+\n
\u8868\u793a\u76ee\u524d\u8be5\u8282\u70b9\uff08c51-g001 \uff09\u4e0a 1 \u53f7\u5361\u6b63\u5728\u88ab\u8fdb\u7a0b\u53f7\u4e3a 127004 \u7684\u8fdb\u7a0b ...pps/deepmd/1.2/bin/python \u4f7f\u7528\uff0c\u5360\u7528\u663e\u5b58\u4e3a 31527 MB\uff0cGPU \u5229\u7528\u7387\u4e3a 62%\u3002

\u5728 Zeus \u96c6\u7fa4\u4f7f\u7528 deepmd \u7684\u63d0\u4ea4\u811a\u672c\u793a\u4f8b\u5982\u4e0b\uff08\u76ee\u524d large \u961f\u5217\u672a\u5bf9\u7528\u6237\u6700\u5927\u63d0\u4ea4\u4efb\u52a1\u6570\u8bbe\u9650\u5236\uff0cWalltime \u4e5f\u65e0\u65f6\u95f4\u9650\u5236\uff09\uff1a

#!/bin/bash\n\n#BSUB -q large\n#BSUB -J train\n#BSUB -o %J.stdout\n#BSUB -e %J.stderr\n#BSUB -n 4\n\nmodule add cuda/9.2\nmodule add deepmd/1.0\nexport CUDA_VISIBLE_DEVICES=0\n# decided by the specific usage of gpus\ndp train input.json > train.log\n
"},{"location":"wiki/deprecated/lsf_usage/#_1","title":"\u68c0\u6d4b\u811a\u672c","text":"

Zeus \u96c6\u7fa4\u4e0a\u9884\u7f6e\u4e86\u4e24\u4e2a\u68c0\u6d4b\u811a\u672c\uff0c\u9488\u5bf9\u4e0d\u540c\u9700\u8981\u5bf9\u5361\u7684\u4f7f\u7528\u8fdb\u884c\u5212\u5206\u3002

\u53ef\u4ee5\u4f7f\u7528\u68c0\u6d4b\u811a\u672c/share/base/tools/export_visible_devices\u6765\u786e\u5b9a $CUDA_VISIBLE_DEVICES \u7684\u503c\uff0c\u793a\u4f8b\u5982\u4e0b\uff1a

#!/bin/bash\n\n#BSUB -q gpu\n#BSUB -J train\n#BSUB -o %J.stdout\n#BSUB -e %J.stderr\n#BSUB -n 4\n\nmodule add cuda/9.2\nmodule add deepmd/1.0\nsource /share/base/scripts/export_visible_devices\n\ndp train input.json > train.log\n

/share/base/tools/export_visible_devices \u53ef\u4ee5\u4f7f\u7528flag -t mem \u63a7\u5236\u663e\u5b58\u8bc6\u522b\u4e0b\u9650\uff0c\u5373\u4f7f\u7528\u663e\u5b58\u82e5\u4e0d\u8d85\u8fc7 mem \u7684\u6570\u503c\uff0c\u5219\u8ba4\u4e3a\u8be5\u5361\u672a\u88ab\u4f7f\u7528\u3002\u6839\u636e\u5b9e\u9645\u4f7f\u7528\u60c5\u51b5\u548c\u7ecf\u9a8c\uff0c\u9ed8\u8ba4100 MB\u4ee5\u4e0b\u89c6\u4e3a\u7a7a\u5361\uff0c\u5373\u53ef\u4ee5\u5411\u8be5\u5361\u63d0\u4ea4\u4efb\u52a1\u3002
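
例如，若希望把该阈值放宽到 200 MB（示意值，脚本路径以集群实际部署为准），可在提交脚本中这样引入：

source /share/base/tools/export_visible_devices -t 200\n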

\u4e5f\u53ef\u4ee5\u4f7f\u7528\u68c0\u6d4b\u811a\u672c/share/base/tools/avail_gpu.sh\u6765\u786e\u5b9a $CUDA_VISIBLE_DEVICES \u7684\u503c\u3002/share/base/tools/avail_gpu.sh \u53ef\u4ee5\u4f7f\u7528flag -t util \u63a7\u5236\u663e\u5361\u5229\u7528\u7387\u53ef\u7528\u4e0a\u9650\uff0c\u5373\u4f7f\u7528\u663e\u5361\u5229\u7528\u7387\u82e5\u8d85\u8fc7 util \u7684\u6570\u503c\uff0c\u5219\u8ba4\u4e3a\u8be5\u5361\u88ab\u4f7f\u7528\u3002\u76ee\u524d\u811a\u672c\u9ed8\u8ba4\u663e\u5361\u5229\u7528\u7387\u4f4e\u4e8e5%\u89c6\u4e3a\u7a7a\u5361\uff0c\u5373\u53ef\u4ee5\u5411\u8be5\u5361\u63d0\u4ea4\u4efb\u52a1\u3002

"},{"location":"wiki/deprecated/lsf_usage/#qos","title":"\u4efb\u52a1\u4f18\u5148\u7ea7\u8bbe\u7f6e\uff08QoS\uff09\uff08\u4e0d\u53ef\u7528\uff09","text":"

\u9ed8\u8ba4\u60c5\u51b5\u4e0b\u63d0\u4ea4\u7684\u4efb\u52a1Qos\u8bbe\u7f6e\u4e3anormal\uff0c\u5373\u586b\u5145\u5728\u6574\u4e2a\u961f\u5217\u7684\u672b\u5c3e\u3002\u5982\u679c\u4efb\u52a1\u6bd4\u8f83\u7d27\u6025\uff0c\u53ef\u4ee5\u5411\u7ba1\u7406\u5458\u62a5\u5907\u7533\u8bf7\u4f7f\u7528emergency\u4f18\u5148\u7ea7\uff0c\u91c7\u7528\u6b64\u4f18\u5148\u7ea7\u7684\u4efb\u52a1\u9ed8\u8ba4\u6392\u5728\u961f\u5217\u9876\u3002

\u4f7f\u7528\u65b9\u6cd5\u5982\u4e0b\uff0c\u5373\u5728\u63d0\u4ea4\u811a\u672c\u4e2d\u52a0\u5165\u4e0b\u884c\uff1a

#SBATCH --qos emergency\n
"},{"location":"wiki/deprecated/lsf_usage/#dp-gen","title":"DP-GEN","text":"

\u4ee5\u8bad\u7ec3\u6b65\u9aa4\u4e3a\u4f8b\uff1a

{\n  \"train\": [\n    {\n      \"machine\": {\n        \"machine_type\": \"lsf\",\n        \"hostname\": \"xx.xxx.xxx.xxx\",\n        \"port\": 22,\n        \"username\": \"username\",\n        \"password\": \"password\",\n        \"work_path\": \"/some/remote/path\"\n      },\n      \"resources\": {\n        \"node_cpu\": 4,\n        \"numb_node\": 1,\n        \"task_per_node\": 4,\n        \"partition\": \"large\",\n        \"exclude_list\": [],\n        \"source_list\": [\n            \"/share/base/scripts/export_visible_devices -t 100\"\n        ],\n        \"module_list\": [\n            \"cuda/9.2\",\n            \"deepmd/1.0\"\n                ],\n        \"time_limit\": \"96:0:0\",\n        \"submit_wait_time\": 20\n      },\n      \"python_path\": \"/share/deepmd-1.0/bin/python3.6\"\n    }\n  ],\n  ......\n}\n
"},{"location":"wiki/deprecated/lsf_usage/#dp-gen-v10-api","title":"DP-GEN v1.0 API","text":"

\u6ce8\u610f

train \u90e8\u5206\u4f7f\u7528\u4e86\u5bf9\u65b0\u7248 LSF \u63d0\u4f9b\u652f\u6301\u7684\u5199\u6cd5\uff0c\u5373\u540c\u65f6\u6307\u5b9a gpu_usage \u548c gpu_new_syntax \u4e3a True\uff0c\u4ece\u800c\u53ef\u5728\u63d0\u4ea4\u811a\u672c\u4e2d\u4f7f\u7528\u65b0\u7248 LSF \u7684\u8bed\u6cd5\u3002

model_devi\u90e8\u5206\u4f7f\u7528\u7684\u662f\u65e7\u7248\u8bed\u6cd5\uff0c\u4e14\u672a\u6307\u5b9aGPU\uff0c\u4f46\u5bfc\u5165\u4e86\u68c0\u6d4b\u811a\u672c\u3002

fp \u90e8\u5206\u4f7f\u7528\u7684\u662f\u9488\u5bf9CPU\u8ba1\u7b97\u4f7f\u7528\u7684\u8bed\u6cd5\u3002\u6ce8\u610f mpiexec.hydra \u9700\u8981\u5199\u51fa\u3002

{\n  \"api_version\": \"1.0\",\n  \"train\": [\n    {\n      \"command\": \"dp\",\n      \"machine\": {\n        \"batch_type\": \"LSF\",\n        \"context_type\": \"SSHContext\",\n        \"local_root\": \"./\",\n        \"remote_root\": \"/data/tom/dprun/train\",\n        \"remote_profile\": {\n            \"hostname\": \"123.45.67.89\",\n            \"username\": \"tom\"\n        }\n      },\n      \"resources\": {\n        \"number_node\": 1,\n        \"cpu_per_node\": 4,\n        \"gpu_per_node\": 1,\n        \"queue_name\": \"gpu\",\n        \"group_size\": 1,\n        \"kwargs\": {\n          \"gpu_usage\": true,\n          \"gpu_new_syntax\": true, \n          \"gpu_exclusive\": true\n        },\n        \"custom_flags\": [\n          \"#BSUB -J train\",\n          \"#BSUB -W 24:00\"\n        ],\n        \"module_list\": [\n          \"deepmd/2.0\"\n        ]\n      }\n    }\n  ],\n  \"model_devi\":[\n    {\n      \"command\": \"lmp_mpi\",\n      \"machine\":{\n        \"batch_type\": \"LSF\",\n        \"context_type\": \"SSHContext\",\n        \"local_root\": \"./\",\n        \"remote_root\": \"/data/jerry/dprun/md\",\n        \"remote_profile\": {\n          \"hostname\": \"198.76.54.32\",\n          \"username\": \"jerry\",\n          \"port\": 6666\n        }\n      },\n      \"resources\": {\n        \"number_node\": 1,\n        \"cpu_per_node\": 8,\n        \"gpu_per_node\": 0,\n        \"queue_name\": \"gpu\",\n        \"group_size\": 5,\n        \"kwargs\": {\n          \"gpu_usage\": false\n        },\n        \"custom_flags\": [\n          \"#BSUB -J md\",\n          \"#BSUB -W 24:00\"\n        ],\n        \"strategy\": {\"if_cuda_multi_devices\": false},\n        \"para_deg\": 2,\n        \"module_list\": [\n          \"deepmd/2.0\"\n        ],\n        \"source_list\": [\n          \"/share/base/tools/avail_gpu.sh\"\n        ]\n      }\n    }\n  ],\n  \"fp\":[\n    {\n      \"command\": \"mpiexec.hydra -genvall vasp_gam\",\n      \"machine\":{\n        \"batch_type\": \"LSF\",\n        \"context_type\": \"SSHContext\",\n        \"local_root\": \"./\",\n        \"remote_root\": \"/data/jerry/dprun/fp\",\n        \"remote_profile\": {\n          \"hostname\": \"198.76.54.32\",\n          \"username\": \"jerry\",\n          \"port\": 6666\n        }\n      },\n      \"resources\": {\n        \"number_node\": 2,\n        \"cpu_per_node\": 32,\n        \"gpu_per_node\": 0,\n        \"kwargs\": {\n          \"gpu_usage\": false\n        },\n        \"custom_flags\": [\n          \"#BSUB -J label\",\n          \"#BSUB -W 12:00\"\n        ],\n        \"queue_name\": \"medium\",\n        \"group_size\": 10,\n        \"module_list\": [\n          \"intel/17.5.239\",\n          \"mpi/intel/2017.5.239\",\n          \"vasp/5.4.4\"\n        ]\n      }\n    }\n  ]\n}\n
"},{"location":"wiki/deprecated/mig_usage/","title":"\u4f7f\u7528\u96c6\u7fa4\u4e0a\u7684 GPU \u2014\u2014 \u4f7f\u7528A100\u5207\u5206\u7684GPU\u5b9e\u4f8b","text":"

\u76ee\u524dZeus\u4e0a\u5df2\u7ecf\u90e8\u7f72\u4e86c51-g002\u8282\u70b9\uff0c\u5b89\u88c5\u67094\u5f20Nvidia Tesla A100\u52a0\u901f\u5361\u3002Nvidia\u5b98\u65b9\u5728A100\u53d1\u5e03\u540e\u5f15\u5165\u4e86Multi-Instance GPU(MIG)\u6280\u672f\uff0c\u53ef\u4ee5\u5c06\u4e00\u5f20A100\u62c6\u5206\u4e3a\u6700\u591a7\u4e2aGPU\u5b9e\u4f8b(GPU Instance)\uff0c\u5728\u6b64\u57fa\u7840\u4e0a\u53ef\u4ee5\u521b\u5efa\u8ba1\u7b97\u5b9e\u4f8b(Computing Instance)\u3002

\u62c6\u5206\u5de5\u4f5c\u9700\u8981\u7ba1\u7406\u5458\u6743\u9650\uff0c\u56e0\u800c\u7ba1\u7406\u5458\u5df2\u7ecf\u4e8b\u5148\u5c06\u5176\u4e2d\u76843\u5f20\u5361\u62c6\u5206\u4e3a7\u4e2aGI\u5e76\u521b\u5efaCI\uff0c\u56e0\u6b64\u76ee\u524dc51-g002\u8282\u70b9\u53ef\u4ee5\u540c\u65f6\u4f7f\u7528\u81f3\u591a22\u4e2aGPU\u5b9e\u4f8b\u3002

\u53d7\u9650\u4e8e\u73b0\u6709\u7684\u8c03\u5ea6\u7cfb\u7edf\uff0c\u5982\u679c\u4f60\u5e0c\u671b\u4f7f\u7528Zeus\u4e0a\u7684A100\u6765\u8fdb\u884c\u8ba1\u7b97\uff0c\u8bf7\u4ed4\u7ec6\u9605\u8bfb\u4ee5\u4e0b\u64cd\u4f5c\u6307\u5f15\u3002

"},{"location":"wiki/deprecated/mig_usage/#_1","title":"\u5e38\u89c4\u4f7f\u7528","text":"

\u76ee\u524d\uff0cc51-g002\u8282\u70b9\u4e0a\u76840\u53f7\u5361\u5c1a\u672a\u5f00\u542fMIG\u529f\u80fd\uff0c\u56e0\u6b64\u4f7f\u7528\u4e0a\u57fa\u672c\u4e0eV100\u4e00\u6837\u3002\u4e3a\u4e86\u8c03\u5ea6\u65b9\u4fbf\uff0c\u8bf7**\u52a1\u5fc5**\u4f7f\u7528j_exclusive=yes\u9009\u9879\u4ee5\u786e\u4fdd\u4efb\u52a1\u53ef\u4ee5\u6b63\u786e\u8c03\u5ea6\u52300\u53f7\u5361\u3002\u5982\u679c\u4f7f\u7528DP-GEN\uff0c\u8bf7\u8bbe\u7f6egpu_exclusive\u4e3atrue\u3002

\u6ce8\u610f

\u4e0d\u8981\u5fc3\u5b58\u4fa5\u5e78\u8bbe\u7f6ej_exclusive=no\uff0c\u4f60\u4f1a\u60ca\u5947\u5730\u53d1\u73b0\u4efb\u52a1\u53ef\u80fd\u88ab\u63d0\u4ea4\u5230\u5176\u4ed6\u5361\u4e0a\uff0c\u56e0\u800c\u65e0\u6cd5\u5c3d\u60c5\u5730\u4eab\u752880GB\u5927\u663e\u5b58\u3002\u540c\u65f6\u8fd9\u4e5f\u4f1a\u4f7f\u5f97\u5176\u4ed6\u4eba\u7684\u4efb\u52a1\u88ab\u63d0\u4ea4\u52300\u53f7\u5361\u4e0a\uff0c\u4ece\u800c\u4ea7\u751f\u5e72\u6270\u3002

\u7531\u4e8eA100\u4ec5\u652f\u6301CUDA 11.1\u4ee5\u4e0a\u7248\u672c\uff0c\u6545\u8bf7\u6ce8\u610f\u4f7f\u7528\u7684\u8f6f\u4ef6\u7248\u672c\u3002\u4ee5DeePMD-kit\u4e3a\u4f8b\uff0c\u76ee\u524d\u96c6\u7fa4\u4e0a\u53ea\u6709deepmd/2.0-cuda11.3\u517c\u5bb9\uff0c\u56e0\u6b64\u8bf7\u52a1\u5fc5\u6ce8\u610f\u52bf\u51fd\u6570\u548c\u4f7f\u7528\u7684DeePMD\u7684\u7248\u672c\uff0c\u4ee5\u514d\u51fa\u73b0\u62a5\u9519\u3002

\u4ee5\u4e0b\u7ed9\u51fa\u793a\u4f8b\u63d0\u4ea4\u811a\u672c\uff1a

#!/bin/bash\n#BSUB -q gpu2\n#BSUB -W 24:00\n#BSUB -J deepmd\n#BSUB -o %J.stdout\n#BSUB -e %J.stderr\n#BSUB -n 11\n#BSUB -gpu \"num=1:mode=shared:mps=no:j_exclusive=yes\"\n#BSUB -R \"span[ptile=11]\"\n\n# add modulefiles\nmodule add deepmd/2.0-cuda11.3\n\ndp train input.json 1>> train.log 2>> train.err\n

\u8bf7\u53c2\u8003/data/share/base/scripts\u4e0b\u7684\u5b9e\u4f8b\uff0c\u53ef\u590d\u5236\u7c98\u8d34\u4f7f\u7528\u3002\uff08\u5e26\u6709A100\u6807\u6ce8\uff0c\u4e0d\u5e26MIG\u540e\u7f00\uff09

\u8fd9\u91cc\u8bbe\u7f6e-n 11\u662f\u8003\u8651\u5230GI\u8c03\u5ea6\u7684\u8981\u6c42\uff0c\u6211\u4eec\u9700\u8981\u9632\u6b62\u51fa\u73b0\u591a\u4e8e22\u4e2a\u4efb\u52a1\u540c\u65f6\u8fd0\u884c\u5728A100\u4e0a\u3002

"},{"location":"wiki/deprecated/mig_usage/#miggi","title":"\u4f7f\u7528MIG\u5207\u5206\u7684GI","text":"

\u53d7\u9650\u4e8e\u73b0\u6709LSF\u8c03\u5ea6\u7cfb\u7edf\uff0c\u5c1a\u4e14\u65e0\u6cd5\u76f4\u63a5\u5b8c\u6210\u5bf9GI\u7684\u8c03\u5ea6\u3002\u56e0\u6b64\u6211\u4eec\u9700\u8981\u53e6\u8f9f\u8e4a\u5f84\uff0c\u6240\u5e78j_exclusive=no\u7684\u60c5\u51b5\u4e0b\u53ef\u4ee5\u8ba9\u4efb\u52a1\u6b63\u786e\u8bc6\u522b\u5230\u5f00\u542f\u4e86MIG\u7684\u5361\uff0c\u4f46\u4e5f\u4ec5\u9650\u4e8e\u6b64\u4e86\u3002\u6211\u4eec\u9700\u8981\u8fdb\u4e00\u6b65\u8ba9\u4efb\u52a1\u6b63\u786e\u5206\u914d\u5230\u7a7a\u95f2\u7684CI\u4e0a\uff0c\u800c\u975e\u9ed8\u8ba4\u7684\u7b2c\u4e00\u4e2a\uff08\u901a\u5e38\u7f16\u53f7\u4e3a7\uff09\u3002

\u6ce8\u610f

\u4e0d\u8981\u5fc3\u5b58\u4fa5\u5e78\u8bbe\u7f6ej_exclusive=yes\uff0c\u4f60\u4f1a\u60ca\u5947\u5730\u53d1\u73b0\u5982\u679c\u6709\u4eba\u7528\u4e860\u53f7\u5361\uff0c\u4f60\u7684\u4efb\u52a1\u4f1a\u5904\u4e8ePEND\u72b6\u6001\uff0c\u8fd9\u662f\u56e0\u4e3aLSF\u8ba4\u4e3a\u5176\u4ed6\u5361\u5747\u975e\u7a7a\u3002

Note

Also, do not follow the official LSF documentation on this point; our LSF version does not support the MIG options.

According to NVIDIA's official guidance, to use a CI manually you need to set CUDA_VISIBLE_DEVICES to the corresponding UUID. Log in to the c51-g002 node via ssh and run:

nvidia-smi -L\n

which gives output like the following:

GPU 0: A100 80GB PCIe (UUID: GPU-558ce120-5b8b-16a1-87d4-ce157bba3e9d)\nGPU 1: A100 80GB PCIe (UUID: GPU-162e30f5-cc45-efb9-1e81-19337f4919ce)\n  MIG 1g.10gb Device 0: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/7/0)\n  MIG 1g.10gb Device 1: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/8/0)\n  MIG 1g.10gb Device 2: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/9/0)\n  MIG 1g.10gb Device 3: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/11/0)\n  MIG 1g.10gb Device 4: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/12/0)\n  MIG 1g.10gb Device 5: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/13/0)\n  MIG 1g.10gb Device 6: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/14/0)\nGPU 2: A100 80GB PCIe (UUID: GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747)\n  MIG 1g.10gb Device 0: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/7/0)\n  MIG 1g.10gb Device 1: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/8/0)\n  MIG 1g.10gb Device 2: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/9/0)\n  MIG 1g.10gb Device 3: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/10/0)\n  MIG 1g.10gb Device 4: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/11/0)\n  MIG 1g.10gb Device 5: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/12/0)\n  MIG 1g.10gb Device 6: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/13/0)\nGPU 3: A100 80GB PCIe (UUID: GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792)\n  MIG 1g.10gb Device 0: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/7/0)\n  MIG 1g.10gb Device 1: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/8/0)\n  MIG 1g.10gb Device 2: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/9/0)\n  MIG 1g.10gb Device 3: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/11/0)\n  MIG 1g.10gb Device 4: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/12/0)\n  MIG 1g.10gb Device 5: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/13/0)\n  MIG 1g.10gb Device 6: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/14/0)\n

As you can see, GPUs 1-3 each have 7 independent MIG devices, whose UUIDs are listed in parentheses.
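To make the NVIDIA guidance above concrete, here is a minimal sketch of the manual approach; the UUID is copied from the sample output above and `dp train` stands in for any CUDA program. Keep in mind the caveat in the next paragraph: under our LSF setup this alone only works if the chosen card happens to be the one visible to your job.

```bash
# Minimal sketch of NVIDIA's "manual" MIG selection (UUID taken from the sample
# output above; under LSF this alone is NOT sufficient -- see the caveat below).
export CUDA_VISIBLE_DEVICES=MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/7/0
dp train input.json   # the process now sees only that single 1g.10gb instance
```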

However, if you submit a job and manually specify one of these UUIDs, you will most likely find that it does not run on the card you intended, or even falls back to the CPU. This is because under LSF scheduling only one card is visible to the job, so only the UUID of that visible card is valid.

Therefore, in any case, we need a script that detects which card the job has landed on and which GIs on that card are idle.
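Before turning to the helper script described next, you can get a rough picture by hand; this is only a manual sketch, not the logic of the actual helper:

```bash
# Manual inspection only (the real selection logic lives in mig_check.py, see below).
nvidia-smi -L   # which card -- and which MIG instances -- this job can actually see
nvidia-smi      # whether compute processes are already running on that card
```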

The administrator provides a script at /data/share/base/tools/mig_check.py that prints a currently available UUID. Its execution environment is already set up, so run it directly; do not run it with your local Python environment. An example submission script is given below:

#BSUB -e %J.err\n#BSUB -o %J.out\n#BSUB -n 1\n#BSUB -R 'span[ptile=1]'\n#BSUB -q gpu2\n#BSUB -gpu 'num=1:mode=shared:j_exclusive=no'\n#BSUB -J train\n#BSUB -W 24:00\n\nmodule load deepmd/2.0-cuda11.3\n\nexport CUDA_VISIBLE_DEVICES=`/data/share/base/tools/mig_check.py`\n\ndp train input.json 1>> train.log 2>> train.err\n

Please request only 1 CPU core, so that the node does not run out of CPU cores for submitting jobs.

If you schedule jobs with a recent version of DP-GEN or with DPDispatcher, add the new environment-variable option. An example of the resources section is given below:

\"resources\": {\n    \"number_node\": 1,\n    \"cpu_per_node\": 1,\n    \"gpu_per_node\": 1,\n    \"queue_name\": \"gpu2\",\n    \"group_size\": 1,\n    \"kwargs\": {\n      \"gpu_usage\": true,\n      \"gpu_new_syntax\": true,\n      \"gpu_exclusive\": false\n    },\n    \"custom_flags\": [\n      \"#BSUB -J train\",\n      \"#BSUB -W 24:00\"\n    ],\n    \"strategy\": {\"if_cuda_multi_devices\": false},\n    \"module_list\": [\"deepmd/2.0-cuda11.3\"],\n    \"envs\": {\"CUDA_VISIBLE_DEVICES\": \"`/data/share/base/tools/mig_check.py`\"},\n    \"wait_time\": 60\n}\n

Be sure to set gpu_exclusive to false so that jobs are correctly submitted to cards 1-3, and set if_cuda_multi_devices to false so that CUDA_VISIBLE_DEVICES is not written automatically. In practice, a wait time of 30 s can be too short for training jobs; 60 s may be needed (as in the example above).

"},{"location":"wiki/how_to_edit/howtodo/","title":"\u5982\u4f55\u4f7f\u7528 Wiki","text":"

Wiki \u4e66\u5199\u4f7f\u7528 markdown \u683c\u5f0f\u3002\u672c wiki \u4f7f\u7528 python-markdown \u4f5c\u4e3a markdown \u7684\u89e3\u91ca\u5668\uff0c\u652f\u6301\u4e00\u4e9b markdown \u7684\u6269\u5c55\u8bed\u6cd5\u3002\u5728\u672c\u5730\u7f16\u8f91 markdown \u6587\u4ef6\u65f6\uff0c\u63a8\u8350\u4f7f\u7528 VSCode\u3002

Warning

Typora\u6b63\u5f0f\u7248\u5df2\u7ecf\u6536\u8d39\uff0c\u4e14\u6d4b\u8bd5\u7248\u5728\u67d0\u4e9b\u7cfb\u7edf\u73af\u5883\u5df2\u4e0d\u53ef\u7528\u3002

\u6709\u4efb\u4f55\u95ee\u9898\u53ef\u4ee5\u5728 https://github.com/chenggroup/chenggroup.github.io/issues \u8fdb\u884c\u53cd\u9988\u3002

\u6587\u6863\u4e2d\u5e26\u6709 * \u7684\u90e8\u5206\u53ef\u4ee5\u7565\u8fc7\u3002

"},{"location":"wiki/how_to_edit/howtodo/#wiki_1","title":"\u5bf9\u67d0\u7bc7 wiki \u5185\u5bb9\u6709\u7591\u95ee","text":"

\u8bf7\u4f7f\u7528\u9875\u9762\u4e0b\u65b9\u7684\u8bc4\u8bba\u533a\u3001\u767b\u9646Github\u8d26\u53f7\u540e\u8fdb\u884c\u8bc4\u8bba\u3002\u8be5\u90e8\u5206\u57fa\u4e8egiscus\u6784\u5efa\uff0c\u53ef\u4ee5\u81ea\u52a8\u521b\u5efa\u4e00\u4e2adiscussion\uff0c\u4ece\u800c\u63d0\u4f9b\u65b9\u4fbf\u7684\u4e92\u52a8\u3002\u6b64\u529f\u80fd\u9700\u8981\u521b\u5efa\u9875\u9762\u7684\u8d21\u732e\u8005\u624b\u52a8\u5f00\u542f\u3002

"},{"location":"wiki/how_to_edit/howtodo/#wiki_2","title":"\u5982\u4f55\u4e0a\u4f20 wiki","text":"

If you are not familiar with markdown yet, read the Markdown syntax section first. A markdown file must start with a YAML Front Matter block in order to be recognized as a wiki page. There are two ways to upload your markdown document to the wiki, and both ultimately use GitHub: 1. upload the file to the GitHub repository (recommended); 2. go to the edit page from the wiki website.

"},{"location":"wiki/how_to_edit/howtodo/#github","title":"\u4e0a\u4f20\u6587\u4ef6\u81f3 github \u4ed3\u5e93 (\u63a8\u8350)","text":"

\u63a8\u8350\u901a\u8fc7 pull requests \u7684\u65b9\u6cd5\u6765\u589e\u52a0\u6216\u4fee\u6539 wiki \u7f51\u7ad9 \u4e0a\u7684 wiki\u3002

"},{"location":"wiki/how_to_edit/howtodo/#1-fork-wiki","title":"1. Fork wiki \u6587\u6863\u6240\u5728\u4ed3\u5e93","text":"

\u5148 fork https://github.com/chenggroup/chenggroup.github.io \uff0c\u7136\u540e\u8fdb\u5165 fork \u6210\u529f\u540e\u7684\u4ed3\u5e93\u3002

"},{"location":"wiki/how_to_edit/howtodo/#2","title":"2. \u521b\u5efa\u65b0\u6587\u4ef6\u6216\u4e0a\u4f20\u672c\u5730\u6587\u4ef6","text":"

We recommend writing the markdown locally with an editor such as Typora and then uploading the file directly; please upload it to the _wiki directory (master branch). You can also edit the files under docs/wiki in your forked repository and then submit a PR; see the sketch below.
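For the fork-and-PR route, a minimal command-line sketch is given below; the branch name, local file path, and target file name are only examples (the target path matches the VASP example used in step 3).

```bash
# Hypothetical example of the fork-and-PR workflow (names and paths are illustrative).
git clone https://github.com/<your-username>/chenggroup.github.io.git
cd chenggroup.github.io
git checkout -b add-vasp-wiki
cp ~/Documents/vasp.md docs/wiki/software_usage/vasp.md   # your finished markdown
git add docs/wiki/software_usage/vasp.md
git commit -m "docs: add VASP usage wiki"
git push origin add-vasp-wiki
# finally, open a pull request against chenggroup/chenggroup.github.io on GitHub
```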

"},{"location":"wiki/how_to_edit/howtodo/#3","title":"3. \u8bbe\u7f6e\u5bfc\u822a","text":"

Note

\u65b0\u589e\u6b65\u9aa4

\u5728\u4e0a\u4f20\u65b0\u7684\u6587\u6863\u540e\uff0c\u9700\u8981\u624b\u52a8\u5728\u4ed3\u5e93\u9996\u7ea7\u7684 mkdocs.yml \u4e2d\u8bbe\u7f6e\u5bfc\u822a\u3002

\u4f8b\u5982\u5728\u8f6f\u4ef6\u4f7f\u7528\u4e2d\u589e\u52a0 VASP \u4f7f\u7528\u6559\u7a0b\u7684\u8bdd\uff08\u5047\u8bbe\u653e\u5728 docs/wiki/software_usage/vasp.md\uff09\uff0c\u4e14\u5e0c\u671b\u653e\u5728 CP2K \u548c DP-GEN \u4e4b\u95f4\uff0c\u8bf7\u5728 nav \u4e2d\u589e\u52a0\u5982\u4e0b\u5185\u5bb9\uff1a

nav:\n  ...\n  - Wikis:\n      ...\n      - \u8f6f\u4ef6\u4f7f\u7528:\n          ...\n          - wiki/software_usage/Tips_for_LaTeX.md\n          - CP2K:\n              ...\n          - wiki/software_usage/vasp.md # \u65b0\u589e\u5bfc\u822a\n          - wiki/software_usage/DP-GEN.md\n          ...\n      ...\n
"},{"location":"wiki/how_to_edit/howtodo/#4-pr","title":"4. \u63d0\u4ea4 PR","text":""},{"location":"wiki/how_to_edit/howtodo/#wiki_3","title":"\u5982\u4f55\u9884\u89c8 wiki","text":"

\u9884\u89c8 wiki \u4e5f\u6709\u4e24\u79cd\u65b9\u6848\uff1a1. \u4f7f\u7528 typora \u7b49\u5b9e\u65f6\u6e32\u67d3\uff1b2. \u5728\u672c\u5730\u542f\u52a8 Mkdocs \u670d\u52a1\u3002

"},{"location":"wiki/how_to_edit/howtodo/#typora","title":"\u901a\u8fc7 typora (\u6ce8\u610f\u5df2\u7ecf\u6536\u8d39)","text":"

\u4f7f\u7528 typora \u7f16\u8f91\u5668\u53ef\u4ee5\u5f88\u65b9\u4fbf\u5730\u5b9e\u65f6\u6e32\u67d3 markdown \u6587\u4ef6\u3002\u5982\u679c\u4e0d\u4f7f\u7528\u672c wiki \u4e2d\u6807\u6ce8\u6709 * \u7684 wiki \u6269\u5c55\u8bed\u6cd5 \uff0c\u5219\u53ef\u4ee5\u5927\u4f53\u4e0a\u8ba4\u4e3a typora \u6240\u6e32\u67d3\u51fa\u7684\u6587\u6863\u4e0e\u76f4\u63a5\u67e5\u770b wiki \u7f51\u7ad9 \u7684\u6587\u6863\u76f8\u5dee\u65e0\u51e0\uff0c\u57fa\u672c\u4ec5\u5b58\u5728\u663e\u793a\u98ce\u683c\u4e0a\u7684\u5dee\u5f02\u3002\u4f46\u8981\u6ce8\u610f\u9700\u66f4\u6539 typora \u7684\u4e00\u4e9b\u8bbe\u7f6e\uff08\u89c1\u540e\u6587\uff09\uff0c\u907f\u514d\u548c wiki \u6240\u4f7f\u7528\u7684 markdown \u6269\u5c55\u529f\u80fd\u53d1\u751f\u51b2\u7a81\u3002

"},{"location":"wiki/how_to_edit/howtodo/#markdown","title":"\u4fee\u6539 markdown \u62d3\u5c55\u8bed\u6cd5\u8bbe\u7f6e","text":"

\u9700\u8981\u5173\u95ed\u4e0a\u4e0b\u6807\u3001\u9ad8\u4eae\u4ee5\u53ca\u56fe\u8868\u7684\u529f\u80fd\u3002

"},{"location":"wiki/how_to_edit/howtodo/#_1","title":"\u4fee\u6539\u6570\u5b66\u516c\u5f0f\u8bbe\u7f6e","text":"

\u9700\u8981\u5173\u95ed\u6570\u5b66\u516c\u5f0f\u81ea\u52a8\u6dfb\u52a0\u5e8f\u53f7\u7684\u529f\u80fd\u3002

"},{"location":"wiki/how_to_edit/howtodo/#_2","title":"\u4fee\u6539\u56fe\u50cf\u8bbe\u7f6e","text":"

\u9700\u8981\u628a\u9ed8\u8ba4\u7684\u65e0\u7279\u6b8a\u64cd\u4f5c\u6539\u4e3a\u901a\u8fc7 iPic \u4e0a\u4f20\u56fe\u7247\uff0c\u4e0d\u8fc7\u5728\u8fd9\u4e4b\u524d\u9700\u8981 \u4e0b\u8f7d iPic \u3002\u63a8\u8350\u5728 iPic \u504f\u597d\u8bbe\u7f6e\u4e2d\u5f00\u542f\u538b\u7f29\u4e0a\u4f20\u56fe\u7247\u7684\u9009\u9879\uff0c\u8fd9\u6837\u53ef\u4ee5\u4f7f wiki \u7f51\u9875\u52a0\u8f7d\u7684\u901f\u5ea6\u66f4\u5feb\u3002

"},{"location":"wiki/how_to_edit/howtodo/#mkdocs","title":"\u901a\u8fc7 Mkdocs \u670d\u52a1*","text":""},{"location":"wiki/how_to_edit/howtodo/#1","title":"1. \u4e0b\u8f7d\u7f51\u7ad9\u6e90\u7801\u81f3\u672c\u5730","text":"
git clone https://github.com/chenggroup/chenggroup.github.io.git\ncd chenggroup.github.io\n
"},{"location":"wiki/how_to_edit/howtodo/#2-mkdocs-material-mkdocs","title":"2. \u5b89\u88c5 mkdocs-material \u548c \u5fc5\u8981\u7684 mkdocs \u63d2\u4ef6","text":"

\u53ef\u53c2\u8003 mkdocs-material \u5b98\u65b9\u5b89\u88c5\u6307\u5357

pip install mkdocs-material \\\n    mkdocs-macros-plugin \\\n    mkdocs-static-i18n[material]\n
"},{"location":"wiki/how_to_edit/howtodo/#4-mkdocs","title":"4. \u542f\u52a8 Mkdocs \u670d\u52a1","text":"
mkdocs serve\n
"},{"location":"wiki/how_to_edit/howtodo/#5-wiki","title":"5. \u7f16\u8f91 wiki","text":"

\u628a\u8981\u9884\u89c8\u7684 wiki \u79fb\u5230 docs/wiki/ \u76ee\u5f55\u4e0b\uff0c\u6216\u662f\u76f4\u63a5\u7f16\u8f91 docs/wiki/ \u76ee\u5f55\u4e0b\u7684 markdown \u6587\u4ef6\u3002

"},{"location":"wiki/how_to_edit/howtodo/#6-wiki","title":"6. \u9884\u89c8 wiki","text":"

\u7b49\u5f85\u7247\u523b\uff0c\u6253\u5f00\u6d4f\u89c8\u5668\u8bbf\u95ee http://127.0.0.1:8000 \u3002

"},{"location":"wiki/how_to_edit/howtodo/#markdown_1","title":"Markdown \u8bed\u6cd5","text":"

Markdown \u662f\u4e00\u79cd\u6807\u8bb0\u8bed\u8a00\uff0c\u548c\u4ee3\u7801\u4e00\u6837\uff0c\u53ef\u4ee5\u7528\u7eaf\u6587\u672c\u7684\u5f62\u5f0f\u6765\u4e66\u5199\u3002\u5176\u4f7f\u7528\u7684\u5e38\u7528\u6807\u8bb0\u7b26\u53f7\u4e0d\u8d85\u8fc7\u5341\u4e2a\uff0c\u53ef\u4ee5\u8ba9\u4eba\u4e13\u6ce8\u4e8e\u6587\u5b57\u800c\u4e0d\u662f\u6392\u7248\uff0c\u5e76\u4e14\u4e5f\u53ef\u4ee5\u65b9\u4fbf\u5730\u5bfc\u51fa\u4e3a HTML\u3001PDF \u7b49\u683c\u5f0f\u3002

"},{"location":"wiki/how_to_edit/howtodo/#_3","title":"\u57fa\u672c\u8bed\u6cd5","text":"

When inserting images, never use a local path, otherwise they cannot be displayed on the wiki; see the Typora image-insertion settings for details.

\u53ef\u53c2\u8003 markdown \u6559\u7a0b \u4e0e \u7ec3\u4e60 \u6765\u5b66\u4e60\u57fa\u672c\u8bed\u6cd5\u3002

\u8981\u5f15\u7528\u540c\u4e00\u7bc7 wiki \u4e2d\u7684\u5c0f\u6807\u9898\uff08\u4e8c\u81f3\u516d\u7ea7\u6807\u9898\uff09\u53ef\u4ee5\u901a\u8fc7 [sub title](#sub-title) \u6765\u5f15\u7528\u3002\u4e0d\u8fc7\u9700\u8981\u6ce8\u610f\uff0c\u8981\u628a\u5c0f\u6807\u9898\u4e2d\u7684\u7a7a\u683c\u7528 - \u4ee3\u66ff\uff0c\u6240\u6709\u5927\u5199\u5b57\u6bcd\u6539\u6210\u5c0f\u5199\uff0c\u4e14\u5ffd\u7565 . , & \u7b49\u7279\u6b8a\u7b26\u53f7\u3002\u6bd4\u5982\uff0c\u7528 [1. Fork wiki \u6587\u6863\u6240\u5728\u4ed3\u5e93](#1-fork-wiki-\u6587\u6863\u6240\u5728\u4ed3\u5e93) \u6765\u8868\u793a 1. Fork wiki \u6587\u6863\u6240\u5728\u4ed3\u5e93 \u3002\u82e5\u6709\u591a\u4e2a\u540c\u540d\u6807\u9898\uff0c\u4ee5 title, tile-1, title-2 \u6765\u533a\u5206\u3002

"},{"location":"wiki/how_to_edit/howtodo/#gfm","title":"GFM \u6269\u5c55\u8bed\u6cd5","text":"

GFM(GitHub Flavored Markdown) \u662f github \u6240\u4f7f\u7528\u7684 markdown \u6269\u5c55\u8bed\u6cd5\u3002

"},{"location":"wiki/how_to_edit/howtodo/#_4","title":"\u6e05\u5355","text":"
- [ ] \u672a\u5b8c\u6210\u5217\u8868\n- [x] \u5df2\u5b8c\u6210\u5217\u8868\n
  • \u672a\u5b8c\u6210\u5217\u8868
  • \u5df2\u5b8c\u6210\u5217\u8868
"},{"location":"wiki/how_to_edit/howtodo/#_5","title":"\u8868\u60c5","text":"
:eyeglasses: :+1:\n
"},{"location":"wiki/how_to_edit/howtodo/#wiki_4","title":"Wiki \u6269\u5c55\u8bed\u6cd5","text":"

\u6807\u6ce8 * \u7684\u90e8\u5206\u53ef\u4ee5\u4e0d\u53bb\u6ce8\u610f

"},{"location":"wiki/how_to_edit/howtodo/#yaml-front-matter","title":"YAML Front Matter","text":""},{"location":"wiki/how_to_edit/howtodo/#_6","title":"\u52a0\u5165\u6807\u9898","text":"

\u53ea\u6709\u5728 markdown \u6587\u4ef6\u7684\u5934\u90e8\u52a0\u5165 YAML Front Matter \u90e8\u5206\uff0c\u624d\u80fd\u4f7f\u4f60\u5199\u7684 wiki \u5c55\u793a\u5728\u7f51\u9875\u4e0a\u3002\u56e0\u6b64\u6700\u7b80\u5355\u7684\uff0c\u8bf7\u5728 YAML Front Matter \u4e2d\u52a0\u5165 title\uff0c\u5982\u4e0b\u6240\u793a\uff1a

---\ntitle: getting-started\n---\n
"},{"location":"wiki/how_to_edit/howtodo/#_7","title":"\u6dfb\u52a0\u4f5c\u8005","text":"

\u5728 YAML Front Matter \u4e2d\u52a0\u5165 authors \u5373\u53ef\u6dfb\u52a0\u4f5c\u8005\uff0c\u591a\u4e2a\u4f5c\u8005\u7528 yaml \u8bed\u6cd5\u7684\u5217\u8868\u8868\u793a\uff1a

---\ntitle: getting-started\nauthors: one author\n---\n
---\ntitle: getting-started\nauthors:\n  - author1\n  - author2\n---\n
"},{"location":"wiki/how_to_edit/howtodo/#_8","title":"\u5f00\u542f\u8bc4\u8bba\u529f\u80fd","text":"

\u5bf9\u521b\u5efa\u9875\u9762\u7684\u7f16\u8f91\u8005\u6765\u8bf4\uff0c\u901a\u5e38\u60c5\u51b5\u4e0b\u8bf7\u5f00\u542f\u8bc4\u8bba\u529f\u80fd\u3001\u4ee5\u4fbf\u8bfb\u8005\u53ef\u4ee5\u5feb\u901f\u63d0\u4ea4\u53cd\u9988\u6216\u8bc4\u8bba\uff0c\u5373\u5728 YAML Front Matter \u90e8\u5206\u589e\u52a0\u4e00\u884c\uff1a

---\n...\ncomments: true\n---\n
"},{"location":"wiki/how_to_edit/howtodo/#_9","title":"\u6570\u5b66\u516c\u5f0f","text":"

\u6570\u5b66\u516c\u5f0f\u53ef\u4ee5\u7528 LaTeX \u8bed\u6cd5\u6765\u4e66\u5199\uff0c\u4e24\u7aef\u7528 $(\u4e00\u822c\u7528\u4e8e\u884c\u5185\u516c\u5f0f) \u6216 $$(\u4f1a\u4f7f\u516c\u5f0f\u5c45\u4e2d\u663e\u793a) \u6765\u6807\u8bb0\uff0c\u5982 $E=mc^2$ \u53ef\u8868\u793a \\(E=mc^2\\) \u3002

$$\nE[\\rho] = T_s[\\rho] + \\int \\mathrm{d}r\\ v_{\\rm ext}(r)\\rho(r) + V_{H}[\\rho] + E_{\\rm xc}[\\rho]\n$$\n
\\[ E[\\rho] = T_s[\\rho] + \\int \\mathrm{d}r\\ v_{\\rm ext}(r)\\rho(r) + V_{H}[\\rho] + E_{\\rm xc}[\\rho] \\]

\u8981\u8868\u793a\u591a\u884c\u516c\u5f0f\uff0c\u9700\u8981\u4f7f\u7528 aligned\uff0c\u5e76\u8981\u5728\u884c\u5c3e\u90e8\u52a0 \\\\\u3002

$$\n\\begin{aligned} \\dot{x} &= \\sigma(y-x) \\\\\n\\dot{y} &= \\rho x - y - xz \\\\\n\\dot{z} &= -\\beta z + xy \\end{aligned} \n$$\n
\\[ \\begin{aligned} \\dot{x} &= \\sigma(y-x) \\\\ \\dot{y} &= \\rho x - y - xz \\\\ \\dot{z} &= -\\beta z + xy \\end{aligned} \\]

To number equations and enable similar features, follow the usual LaTeX approach.

"},{"location":"wiki/how_to_edit/howtodo/#_10","title":"\u5316\u5b66\u5f0f\u4e0e\u5316\u5b66\u53cd\u5e94\u5f0f","text":"

\u6b64\u529f\u80fd\u901a\u8fc7 LaTeX \u7684 mhchem \u63d2\u4ef6\u6765\u5b9e\u73b0\uff0c\u4f7f\u7528\u4e0a\u4e0e\u6570\u5b66\u516c\u5f0f\u8f93\u5165\u76f8\u8fd1\uff0c\u90fd\u9700\u8981\u901a\u8fc7 $ \u6216 $$ \u6765\u6807\u8bb0\u3002

| Source | Rendered chemical formula / reaction |
| --- | --- |
| $\ce{Mg(OH)2}$ | \(\ce{Mg(OH)2}\) |
| $\ce{CrO4^2-}$ | \(\ce{CrO4^2-}\) |
| $\ce{[Cu(NH3)4]^2+}$ | \(\ce{[Cu(NH3)4]^2+}\) |
| $\ce{CoCl2.6H2O}$ | \(\ce{CoCl2.6H2O}\) |
| $\ce{^{227}_{90}Th+}$ | \(\ce{^{227}_{90}Th+}\) |
| $\ce{C2H5-OH}$ | \(\ce{C2H5-OH}\) |
| $\ce{CH3CH=CH2}$ | \(\ce{CH3CH=CH2}\) |
| $\ce{HC#CH}$ | \(\ce{HC#CH}\) |
| $\ce{CaCO3 ->[900\,{}^{\circ}\mathrm{C}] CaO + CO2}$ | \(\ce{CaCO3 ->[900\,{}^{\circ}\mathrm{C}] CaO + CO2}\) |
| $\ce{H2PO4- <=>C[OH-][H+] H+ + HPO4^2-}$ | \(\ce{H2PO4- <=>C[OH-][H+] H+ + HPO4^2-}\) |
"},{"location":"wiki/how_to_edit/howtodo/#_11","title":"\u4e0a\u4e0b\u6807","text":"

\u4e00\u822c\u60c5\u51b5\u4e0b\u53ef\u4ee5\u7528 <sup></sup> \u8868\u793a\u4e0a\u6807\uff0c\u7528 <sub></sub> \u8868\u793a\u4e0b\u6807\uff0c\u5982 \u652f\u4ed8\u5b9dTM \u53ef\u7528 \u652f\u4ed8\u5b9d<sup>TM</sup> \u8868\u793a\u3002

"},{"location":"wiki/how_to_edit/howtodo/#_12","title":"\u6309\u94ae*","text":"
[Subscribe to our newsletter](#){ .md-button }\n

default primary

"},{"location":"wiki/how_to_edit/howtodo/#_13","title":"\u63d0\u793a*","text":"
!!! tldr \"title\"\n    TLDR means too long, didn't read\n

\u6539\u53d8 tldr \u5373\u53ef\u4f7f\u7528\u4e0d\u540c\u7684\u63d0\u793a\u7c7b\u578b\uff0c\u6bd4\u5982

Use tldr for this.

TLDR means too long, didn't read

Use tip for this.

This is a tip.

Use info for this.

This is a piece of information, or you can use todo.

Use question for this.

This is a question.

Use warning for this.

This is a warning

Use danger for this.

This alerts danger!

Use success for this.

This alerts success

"},{"location":"wiki/how_to_edit/howtodo/#_14","title":"\u6d41\u7a0b\u56fe","text":"

\u6d41\u7a0b\u56fe\u53ef\u4ee5\u7528\u6765\u8868\u793a\u5de5\u4f5c\u6d41\u6216\u8005\u6b65\u9aa4\u7b49\uff1a

``` mermaid\ngraph LR\n  A[Start] --> B{Error?};\n  B -->|Yes| C[Hmm...];\n  C --> D[Debug];\n  D --> B;\n  B ---->|No| E[Yay!];\n```\n
graph LR\n  A[Start] --> B{Error?};\n  B -->|Yes| C[Hmm...];\n  C --> D[Debug];\n  D --> B;\n  B ---->|No| E[Yay!];
"},{"location":"wiki/how_to_edit/howtodo/#wiki_5","title":"\u5f15\u7528\u672c\u7f51\u7ad9\u7684\u5176\u4ed6 wiki","text":"

Use

[title](relevant/path/to/file.md)\n

to reference another wiki page on this site. Just change relevant/path/to/file.md to the relative path of the target wiki page with respect to the current document.

For example, to reference the page 如何使用 Wiki, just change relevant/path/to/file.md to ../how_to_edit/howtodo.md.

Warning

Note that relative paths are recommended here: they keep working as long as the directory structure at the same level is unchanged. If the parent directory structure is changed, the links must be updated accordingly.

"},{"location":"wiki/how_to_edit/howtodo/#_15","title":"\u6587\u6863\u82f1\u6587\u7ffb\u8bd1","text":"

\u76ee\u524d\u672c Wiki \u91c7\u7528 mkdocs-static-i18n \u5b9e\u73b0\u591a\u8bed\u8a00\u652f\u6301\uff0c\u56e0\u800c\u82e5\u9700\u8981\u7f16\u5199\u7ffb\u8bd1\u7248\u672c\uff0c\u4ec5\u9700\u8981\u5728\u540c\u4e00\u76ee\u5f55\u4e0b\u589e\u52a0\u4e00\u4e2a\u540e\u7f00\u4e3a .en \u7684markdown\u6587\u4ef6\u3002\u4f8b\u5982\u4e2d\u6587\u6587\u6863\u4e3a custom.md\uff0c\u5219\u82f1\u6587\u6587\u6863\u4e3a custom.en.md\u3002
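As a concrete (hypothetical) example, assuming a page at docs/wiki/some_topic/custom.md, the English version is simply a sibling file with the extra .en suffix:

```bash
# Hypothetical paths -- adapt them to the page you are translating.
cp docs/wiki/some_topic/custom.md docs/wiki/some_topic/custom.en.md
# then rewrite the contents of custom.en.md in English; mkdocs-static-i18n
# matches the two files automatically because they share a name and directory.
```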

\u6ce8\u610f\u8bf7\u5c06\u5bfc\u8a00\u533a\u7684 title \u5185\u5bb9\u7ffb\u8bd1\u4e3a\u82f1\u6587\u3002

\u82e5\u6d89\u53ca\u5bfc\u822a\u680f\u4e2d\u81ea\u5b9a\u4e49\u680f\u76ee\u7684\u7ffb\u8bd1\uff0c\u8bf7\u5728 mkdocs.yml \u4e2d\u589e\u52a0\u3002\u4ee5\u4e0b\u7ed9\u51fa\u4e00\u4e2a\u5b9e\u4f8b\uff1a

nav:\n  - \u4e3b\u9875: index.md\n  - \u5206\u7c7b1: \n      - topic1/index.md\n      - topic1/item1.md\n  - \u5206\u7c7b2: topic2/index.md\n\nplugins:\n  - i18n:\n    languages:\n      - locale: en\n        default: true\n        name: English\n      - locale: fr\n        name: Fran\u00e7ais\n        nav_translations:\n          \u4e3b\u9875: Home\n          \u5206\u7c7b1: Topic 1\n          \u5206\u7c7b2: Topic 2\n
"},{"location":"wiki/how_to_edit/howtodo/#_16","title":"\u53c2\u8003\u8d44\u6599*","text":"

\u8981\u4f7f\u7528\u66f4\u591a\u529f\u80fd\uff0c\u8bf7\u53c2\u8003mkdocs-material\u5b98\u65b9\u6587\u6863\u3002

\u5f53\u7136\uff0c\u60f3\u8981\u5feb\u901f\u83b7\u5f97\u652f\u6301\uff0c\u4e5f\u53ef\u4ee5\u8054\u7cfb\u4f5c\u8005\u6216\u8005 Open an issue \u3002

"},{"location":"wiki/how_to_edit/howtousenews/","title":"\u5982\u4f55\u53d1\u5e03 News (\u81f4\u7ba1\u7406\u5458)","text":"

Because the site is being migrated to a new implementation, this feature is temporarily disabled.

\u670d\u52a1\u5668\u7684\u4e00\u4e9b\u4fe1\u606f\u6216\u662f\u5176\u4ed6\u96f6\u788e\u7684\u4fe1\u606f\u53ef\u53d1\u5e03\u5728 News \u91cc\u3002

"},{"location":"wiki/how_to_edit/howtousenews/#_1","title":"\u4e0a\u4f20\u6587\u4ef6","text":""},{"location":"wiki/how_to_edit/howtousenews/#_2","title":"\u6587\u4ef6\u7684\u683c\u5f0f","text":"

\u8bf7\u4e0a\u4f20 markdown \u683c\u5f0f\u7684\u6587\u4ef6\uff0c\u5f53\u7136\u4e5f\u652f\u6301\u4e00\u4e9b markdown \u7684\u62d3\u5c55\u529f\u80fd \u3002

"},{"location":"wiki/how_to_edit/howtousenews/#_3","title":"\u6587\u4ef6\u7684\u547d\u540d","text":"

\u6587\u4ef6\u4ee5 YYYY-MM-dd-name.md \u6765\u547d\u540d\uff0c\u5982 2019-11-01-welcome.md \u3002

\u5982\u679c\u6587\u4ef6\u524d\u7f00\u7684\u65e5\u671f\u662f\u4e2a\u672a\u6765\u65e5\u671f\uff0c\u5219\u5176\u4e0d\u4f1a\u5728 News \u9875\u9762\u4e0a\u663e\u793a\uff0c\u4e0d\u8fc7\u5f53\u5230\u4e86\u5176\u65e5\u671f\u4e4b\u540e\u5219\u4f1a\u81ea\u52a8\u51fa\u73b0\u5728 News \u9875\u9762\u4e0a\u3002

"},{"location":"wiki/how_to_edit/howtousenews/#news_1","title":"\u8bbe\u7f6e News \u7684\u6458\u8981","text":"

\u5728\u4e00\u7ea7\u6807\u9898\u4e4b\u4e0b\uff0c <!--more--> \u4e4b\u4e0a\u7684\u5185\u5bb9\u4f1a\u88ab\u5f53\u4f5c\u6458\u8981\u3002\u8fdb\u5165 read more \u4e4b\u524d\u4f1a\u663e\u793a\u6458\u8981\u3002
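A minimal (hypothetical) example of a news file that uses this rule, reusing the file name from the section above:

```bash
# Hypothetical example: the paragraph between the level-1 title and <!--more-->
# is shown as the excerpt on the News page; the rest appears only after "read more".
cat > 2019-11-01-welcome.md <<'EOF'
# Welcome

This short paragraph is the excerpt shown on the News page.
<!--more-->

Everything below the marker only appears on the full post page.
EOF
```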

"},{"location":"wiki/how_to_edit/howtousenews/#news_2","title":"\u8bbe\u7f6e News \u7684\u5206\u7c7b","text":"

\u5728 YAML Front Matter \u5904\u6dfb\u52a0 tags \u53ef\u66f4\u65b9\u4fbf\u5730\u6309\u7167\u67d0\u4e9b\u6807\u7b7e\u6765\u68c0\u7d22 News\uff0ctags \u793a\u4f8b\u5982\u4e0b\u6240\u793a\uff1a

---\ntags:\n  - HPCreview\n  - HPCreport\n---\n
"},{"location":"wiki/how_to_edit/howtousenews/#news_3","title":"\u67e5\u770b News","text":"

Visit https://wiki.cheng-group.net/news to see all News, and https://wiki.cheng-group.net/archive to see News grouped by date.

"},{"location":"wiki/new_comers/ase/","title":"ASE: \u539f\u5b50\u5efa\u6a21\u57fa\u7840","text":""},{"location":"wiki/new_comers/basis_pps/","title":"\u5bc6\u5ea6\u6cdb\u51fd\u8fd1\u4f3c\uff0c\u57fa\u7ec4\u4e0e\u8d5d\u52bf","text":""},{"location":"wiki/new_comers/dpgen/","title":"\u6df1\u5ea6\u52bf\u80fd\u751f\u6210\u5668: DP-GEN","text":""},{"location":"wiki/new_comers/dpmd/","title":"\u673a\u5668\u5b66\u4e60: \u7406\u8bba\u4e0eDeePMD-kit","text":""},{"location":"wiki/new_comers/linux/","title":"Linux\u5feb\u901f\u57fa\u7840\u5165\u95e8","text":""},{"location":"wiki/new_comers/presentation/","title":"\u5982\u4f55\u8fdb\u884c\u5c55\u793a/Presentation","text":""},{"location":"wiki/new_comers/python_numpy/","title":"Python \u548c Numpy","text":""},{"location":"wiki/new_comers/qc_dft/","title":"\u91cf\u5b50\u5316\u5b66\u4e0e\u5bc6\u5ea6\u6cdb\u51fd\u7406\u8bba","text":""},{"location":"wiki/new_comers/read_papers/","title":"\u5982\u4f55\u9605\u8bfb\u6587\u732e","text":""},{"location":"wiki/new_comers/toc/","title":"\u65b0\u751f\u5165\u95e8\u6559\u7a0b","text":"

\u6b22\u8fce\u52a0\u5165\u7a0b\u4fca\u8bfe\u9898\u7ec4\uff0c\u6bcf\u4e2a\u4eba\u6765\u5230\u65b0\u73af\u5883\u90fd\u9700\u8981\u719f\u6089\u548c\u5b66\u4e60\u89c4\u5219\uff0c\u8bf7\u5404\u4f4d\u65b0\u751f\u6309\u7167\u4ee5\u4e0b\u6e05\u5355\u987a\u5e8f\u8fdb\u884c\u5165\u7ec4\u7684\u51c6\u5907\u3002

"},{"location":"wiki/new_comers/toc/#_2","title":"\u4e2a\u4eba\u5ea7\u4f4d","text":"

\u6bcf\u4f4d\u5165\u5b66\u65b0\u751f\u5c06\u5206\u5230\u4e00\u4e2a\u5ea7\u4f4d\u548c\u4e00\u53f0iMac\u7535\u8111\u7528\u4e8e\u65e5\u5e38\u7684\u79d1\u7814\u3002\u8bf7\u5927\u5bb6\u5148\u6ce8\u518c\u4e00\u4e2aApple ID, \u7136\u540e\u5bfb\u627e**\u8bfe\u9898\u7ec4\u7684\u96c6\u7fa4\u7ba1\u7406\u5458**\uff0c\u4e3a\u4f60\u5f00\u901aiMac\u7535\u8111\u7684\u8d26\u53f7\u3002

"},{"location":"wiki/new_comers/toc/#_3","title":"\u96c6\u7fa4\u4e0e\u96c6\u7fa4\u8d26\u53f7","text":"

\u8bfe\u9898\u7ec4\u914d\u5907\u6709\u96c6\u7fa4(\u8d85\u7b97)\u8d44\u6e90\u4f9b\u79d1\u7814\u4f7f\u7528\uff0c\u800c\u96c6\u7fa4\u662f\u4ee5**Linux**\u7cfb\u7edf\u8fd0\u884c\u7684\u3002\u4e0eWindows\u7c7b\u4f3c\uff0c\u662f\u53e6\u4e00\u79cd\u7535\u8111\u64cd\u4f5c\u7cfb\u7edf\u3002\u4e3b\u8981\u4ee5\u952e\u76d8\u64cd\u4f5c\u4e3a\u4e3b\uff0c\u56e0\u6b64\u5982\u679c\u4e0d\u719f\u6089**Linux**\u7cfb\u7edf\u7684\u540c\u5b66\uff0c\u8bf7\u5148\u81ea\u5df1\u7c97\u7565\u5b66\u4e60\u4e00\u4e0b(\u89c6\u9891)Linux\u5165\u95e8

\u8981\u767b\u9646\u96c6\u7fa4\uff0c\u540c\u6837\u9700\u8981\u96c6\u7fa4\u8d26\u53f7\uff0c\u8bf7\u5bfb\u627e**\u8bfe\u9898\u7ec4\u7684\u96c6\u7fa4\u7ba1\u7406\u5458**\u4e3a\u4f60\u5f00\u901a\u96c6\u7fa4\u8d26\u53f7\u3002

To log in to the cluster, **we recommend using the terminal on the iMac**. Here, iMac simply refers to the Apple desktop computer. Since Apple's operating system macOS and Linux are both derived from Unix, Apple computers are the most convenient for logging in to the cluster; a Windows computer requires additional software to be installed.

\u4f7f\u7528iMac\u767b\u5f55\u96c6\u7fa4\u53ea\u9700\u8981\u540c\u65f6\u6309\u4f4fcommand+\u7a7a\u683c\uff0c\u5c31\u4f1a\u8df3\u51fa\u641c\u7d22\u6846\u3002\u5728\u641c\u7d22\u6846\u4e2d\u8f93\u5165terminal/\u7ec8\u7aef\uff0c\u5219\u4f1a\u8df3\u51fa\u7ec8\u7aef\u5e94\u7528\u3002\u4f7f\u7528\u7ec8\u7aef\u7684SSH\u547d\u4ee4\u5373\u53ef\u3002SSH\u4f7f\u7528\u5177\u4f53\u89c1\u4e0b\u6587\u3002

\u4e3a\u5efa\u7acb\u8d26\u53f7\uff0c\u9700\u8981\u751f\u6210SSH\u5bc6\u94a5\u3002\u767b\u5f55\u96c6\u7fa4\u9700\u8981\u4f7f\u7528SSH\u64cd\u4f5c\u3002
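A minimal sketch of these two steps is shown below; the key type, user name, and login address are placeholders, so use the ones the cluster administrator gives you.

```bash
# Hypothetical example -- replace the user name and address with the real ones.
ssh-keygen -t ed25519              # generate an SSH key pair (defaults are fine)
cat ~/.ssh/id_ed25519.pub          # send this public key to the administrator
ssh your_name@cluster.example.org  # log in once the account has been created
```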

\u4f7f\u7528\u96c6\u7fa4\u524d\uff0c\u8bf7\u5927\u5bb6\u719f\u6089\u96c6\u7fa4\u7684\u57fa\u672c\u77e5\u8bc6\u548c\u64cd\u4f5c\u3002\u5982\u679c\u8981\u4f7f\u7528GPU\u7b49\u8d44\u6e90\uff0c\u8fd8\u9700\u5b66\u4e60\u5982\u4f55\u4f7f\u7528\u96c6\u7fa4\u4e0a\u7684GPU\u3002

\u5982\u679c\u4ee5\u4e0a\u6709\u4efb\u4f55\u96be\u4ee5\u7406\u89e3\u7684\u5185\u5bb9\u8bf7\u7acb\u5373\u6c47\u62a5\u7ed9**\u8bfe\u9898\u7ec4\u7684\u96c6\u7fa4\u7ba1\u7406\u5458**

"},{"location":"wiki/new_comers/toc/#imacpython","title":"\u5728iMac\u4e0a\u548c\u5728\u96c6\u7fa4\u4e0a\u4f7f\u7528Python","text":"

Python\u662f\u4e00\u79cd\u975e\u5e38\u65b9\u4fbf\u7684\u7f16\u7a0b\u8bed\u8a00\uff0c\u53ef\u4ee5\u5e2e\u52a9\u6211\u4eec\u5904\u7406\u8ba1\u7b97\u6570\u636e\u3002\u4f46\u662f\u7eafPython\u7684\u5b89\u88c5\u548c\u76f8\u5e94\u7684Python\u5e93\u4f7f\u7528\u662f\u5341\u5206\u70e6\u4eba\u7684\u3002\u56e0\u6b64\u540d\u4e3aAnaconda\u7684\u8f6f\u4ef6\u53ef\u4ee5\u5e2e\u52a9\u6211\u4eec\u89e3\u51b3\u8fd9\u4e2a\u95ee\u9898\u3002

\u5728iMac\u4e0a\uff0c\u5b89\u88c5Anaconda\uff0c\u76f4\u63a5\u53bb\u641c\u7d22\u5f15\u64ce\u641c\u7d22Anaconda\u7136\u540e\u53bb\u5b98\u7f51\u4e0b\u8f7d\u5bf9\u5e94\u7684\u5b89\u88c5\u5305\u5373\u53ef\u3002

\u5728\u96c6\u7fa4\u4e0a\uff0c\u6211\u4eec\u5df2\u7ecf\u63d0\u524d\u4e3a\u5927\u5bb6\u5b89\u88c5\u597d\u4e86Anaconda\uff0c\u4f7f\u7528\u548c\u8bbe\u7f6e\u65b9\u6cd5\u53c2\u89c1\u96c6\u7fa4\u4e0a\u7684Anaconda
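Once Anaconda is available (either on the iMac or on the cluster), day-to-day use looks roughly like the sketch below; the environment name and packages are only illustrative.

```bash
# Hypothetical example of basic conda usage.
conda create -n analysis python=3.10 numpy   # create an isolated environment
conda activate analysis                      # switch into it
python -c "import numpy; print(numpy.__version__)"
```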

"},{"location":"wiki/new_comers/toc/#_4","title":"\u5fc5\u5b66\u9879\u76ee","text":"

\u91cf\u5b50\u5316\u5b66(Levine)(\u524d14\u7ae0)

(\u89c6\u9891)\u91cf\u5b50\u5316\u5b66\u4e0e\u5bc6\u5ea6\u6cdb\u51fd\u7406\u8bba

(\u89c6\u9891)\u5bc6\u5ea6\u6cdb\u51fd\u8fd1\u4f3c\uff0c\u57fa\u7ec4\u4e0e\u8d5d\u52bf

(\u89c6\u9891)Linux\u5165\u95e8

(\u89c6\u9891)\u5982\u4f55\u9605\u8bfb\u6587\u732e

(\u89c6\u9891)\u5982\u4f55\u8fdb\u884c\u5c55\u793a

(\u89c6\u9891)Python\u548cNumpy

"},{"location":"wiki/new_comers/toc/#_5","title":"\u9009\u5b66[\u5177\u4f53\u9879\u76ee\u76f8\u5173]","text":""},{"location":"wiki/new_comers/toc/#_6","title":"\u673a\u5668\u5b66\u4e60","text":"

(\u89c6\u9891)Deep Learning Lecture by Frank Noe *\u9700\u8981\u79d1\u5b66\u4e0a\u7f51

(\u4e66\u7c4d)Pattern Recognition and Machine Learning

(\u4e66\u7c4d)Deep Learning\uff08\u82b1\u4e66\uff09

(\u89c6\u9891)Machine Learning for Physics and the Physics of Learning 2019 *\u9700\u8981\u79d1\u5b66\u4e0a\u7f51

(\u89c6\u9891)\u673a\u5668\u5b66\u4e60: \u7406\u8bba\u4e0eDeePMD-kit

(\u89c6\u9891)\u6df1\u5ea6\u52bf\u80fd\u751f\u6210\u5668: DP-GEN

DeePMD-kit \u4f7f\u7528\u5165\u95e8

DP-GEN\u4f7f\u7528\u5165\u95e8

"},{"location":"wiki/new_comers/toc/#_7","title":"\u5de5\u4f5c\u6d41","text":"

(\u89c6\u9891)\u81ea\u52a8\u5316\u8ba1\u7b97\u4e0e\u5de5\u4f5c\u6d41: AiiDA

"},{"location":"wiki/new_comers/toc/#_8","title":"\u751f\u6210\u6a21\u578b","text":"

(\u89c6\u9891)Diffusion and Score-Based Generative Models

(\u89c6\u9891)Dr. Yang Song \u2014 Advancements in Diffusion Models for Generative AI

(\u535a\u5ba2)Generative Modeling by Estimating Gradients of the Data Distribution

(\u535a\u5ba2)A Pedagogical Introduction to Score Models

(\u89c6\u9891)\u901a\u7528\u5206\u5b50\u7ed3\u6784\u6a21\u578bGraphormer\u7b80\u4ecb - \u90d1\u4e66\u65b0\u535a\u58eb

(\u89c6\u9891)Beyond AlphaFold2: \u4ece\u7ed3\u6784\u9884\u6d4b\u5230\u5206\u5e03\u9884\u6d4b | \u90d1\u4e66\u65b0\u535a\u58eb | \u5fae\u8f6f\u7814\u7a76\u9662 | Distributional Graphormer (DiG)

(\u89c6\u9891)Materials Project Seminars \u2013 Tian Xie \"MatterGen: a generative model for inorganic materials design\"

"},{"location":"wiki/new_comers/toc/#_9","title":"\u7edf\u8ba1\u529b\u5b66","text":"

(\u535a\u5ba2)Introduction to Statistical Mechanics

(\u535a\u5ba2)David Tong at DAMTP, Cambridge: Lectures on Theoretical Physics

(\u535a\u5ba2)Lectures on Statistical Physics

(\u535a\u5ba2)Lectures on Quantum Mechanics

(\u535a\u5ba2)Lectures on Solid State Physics

"},{"location":"wiki/new_comers/workflow/","title":"\u81ea\u52a8\u5316\u8ba1\u7b97\u4e0e\u5de5\u4f5c\u6d41: AiiDA","text":""},{"location":"wiki/question_under_carpet/chemical_computing/","title":"\u8ba1\u7b97\u5316\u5b66\u8e29\u5751\u5408\u96c6","text":"

\u6709\u65f6\u5019\uff0c\u6211\u4eec\u4f1a\u6cbf\u7528\u522b\u4eba\u6d4b\u8bd5\u8fc7\u7684\u8bbe\u7f6e\u8fdb\u884c\u8ba1\u7b97\uff0c\u800c\u4e0d\u4e00\u5b9a\u4f1a\u4ece\u5934\u8fdb\u884c\u7cfb\u7edf\u6d4b\u8bd5\u3002\u4f46\u662f\uff0c\u4f5c\u4e3a\u8ba1\u7b97\u8f6f\u4ef6\u7684\u4f7f\u7528\u8005\uff0c\u6211\u4eec\u9700\u8981\u610f\u8bc6\u5230\u67d0\u4e9b\u53ef\u80fd\u4f1a\u51fa\u9519\u7684\u5730\u65b9\uff08\u6216\u8bb8\u662f\u5f88\u68d8\u624b\u7684\u95ee\u9898\uff09\uff0c\u800c\u4e0d\u662f\u5c06\u8fd9\u4e9b\u95ee\u9898\u89c6\u800c\u4e0d\u89c1(sweep the problems under the carpet)\u3002\u5728\u6b64\u6587\u7ae0\u8bb0\u5f55\u5927\u5bb6\u5728\u9879\u76ee\u4e2d\u78b0\u5230\u7684\u5947\u5947\u602a\u602a\u7684\u5751\uff0c\u4ee5\u4f9b\u53c2\u8003\u3002

\u6709\u65b0\u7684\u5185\u5bb9\u53ef\u4ee5\u901a\u8fc7 PR \u6216\u8005\u8bc4\u8bba\u533a\u63d0\u51fa\u3002\u53ef\u5f15\u7528\u7f6e\u9876issue #131

"},{"location":"wiki/question_under_carpet/chemical_computing/#cu-pseudopotential","title":"Cu pseudopotential","text":"

\u6d89\u53ca Cu \u4e8c\u4ef7\u79bb\u5b50\u7684\u8ba1\u7b97\u53ef\u80fd\u8981\u91c7\u7528 19 \u7535\u5b50\u7684\u8d5d\u52bf (semi-core potential)\u3002

We found that only the computation of the orbital energy of the empty d-level of aqueous Cu2+ requires the use of a semi-core potential with explicit 3s and 3p electrons. Ref: J. Am. Chem. Soc. 2004, 126, 12, 3928\u20133938 [link]

"},{"location":"wiki/skills/QS4writing/","title":"Quick Start for Writing","text":"

\u5c0f\u63d0\u793a

\u4e2d\u6587\u7248\u53ef\u4ee5\u5728\u82f1\u6587\u7248\u4e4b\u540e\u627e\u5230\uff0c\u4f46\u662f\u8fd8\u662f\u9f13\u52b1\u5927\u5bb6\u5148\u8bfb\u8bfb\u82f1\u6587\u7248~

"},{"location":"wiki/skills/QS4writing/#english-version","title":"English version","text":"

I am writing this blog to share some simple tips on academic writing, and I hope it can help, more or less, the \"poor guys\" struggling with their writing.

Notice: I am not a master of writing but only a TOTAL FRESHMAN. Everything below is based on what I have learnt and on my own understanding, so it may be incomplete (I hope at least there are no mistakes). Nevertheless, I believe that is exactly why I can make the text friendly and approachable for tyros. If you have any questions, please feel free to come and talk with me ;-)

"},{"location":"wiki/skills/QS4writing/#practice-practice-practice","title":"Practice! Practice... Practice?","text":"

I guess some (or even most) of you would say 'duh' when you hear \"Practice! Practice! Practice!\" in some books. It sounds 100 percent correct but useless, right? On the whole I agree, at least if you don't have a concrete and reasonable plan. Aimless practice sometimes not only has no effect but, even worse, depresses you. Hence, I strongly suggest you start by writing YOUR paper, a specific example. For those with no project yet, an experiment report is a good alternative. Then, craft your work step by step!

"},{"location":"wiki/skills/QS4writing/#step-one-polish-up-your-outline","title":"Step ONE: polish up your outline","text":"

This is the first and most important step. It seems irrelevant to writing skills, uhm? Yes, but checking the outline with your colleagues and supervisor can save you a lot of time. Just imagine the time it takes to rewrite a whole section! Generally speaking, the big framework of the project (and then of the paper) has already been settled. However, we need to go one step further and check the structure between paragraphs and even between sentences. This, I believe, is actually the nightmare for many students.

For example, here we try to introduce the modelling methods in interface electrochemistry (mainly about EDL modelling), following solution electrochemistry introduced in the last section. Hence, we write down the outline below and discuss it with our partners.

the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)\n==>\nEDL is hard to be probed (reason)\n==>\nwe can get some info with in situ techniques and ab initio simulations\n==>\nOne of the key characteristics of EDL is its capacitance\n==>\nEDL capacitance can be measured by experiment (CV/impedance) and be a benchmark for modelling\n==>\nreplace the solute by the electrode (from solution electrochemistry to interface electrochemistry)\n==>\nuse similar simulation methods and focus on their performance on EDL modelling\n

In this step, you don't need to consider the elegance of your language. Simple but accurate texts can make your life easier.

"},{"location":"wiki/skills/QS4writing/#step-two-abt-structure","title":"Step TWO: ABT structure","text":"

\u201cHow long would you need to tell a story?\u201d Randy Olson asked this question in his TEDMED talk. (YouTube link here. Sorry I cannot find another source for the guys in China...) In this talk and his book Houston, We Have a Narrative: Why Science Needs Story, Olson introduced a quite simple method to construct a narrative, the ABT structure:

(...) AND (...), BUT (...), THEREFORE (...)

Let's try to fill this structure with the outline in the last step!

%% start the ABT structure\n% EDL is important (... AND ...)\nthe electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)\n% BUT it is hard to be probed\nHowever, EDL is hard to be probed, not only because xxx but xxx\n% THEREFORE, we need some tools\nTo address this difficulty, both in situ experimental techniques and modelling are required.\n%% END the ABT structure\n

If you don't know how to construct your idea, write down all the points you can think about and try to adapt them to one or more ABT structure(s).

If you think the linking somewhere is not smooth enough, rewrite it with an ABT structure.

Ahhh! Not bad!

"},{"location":"wiki/skills/QS4writing/#step-three-repeat-your-words","title":"Step THREE: repeat your words","text":"

With the two steps above, I believe you have worked out a comprehensible outline. Next, we need to strengthen the links between sentences and make the logic more explicit by repeating words from the previous sentence. This makes your text much easier to follow! Here is an example:

Electric double layers (EDL) at the electrode/electrolyte interfaces are where electrochemical reactions occur, and thus are of paramount importance in electrochemistry.\n% Electric double layers (EDL) <==> EDL\nHowever, microscopic understanding of the EDL is still lacking due to its complexity and difficulty to probe.\n% microscopic understanding <==> valuable insight\nThanks to the development of computational methods, modelling has shown great potential in studying the interface of the electrode and the electrolyte in the past few years, and provided valuable insight into EDL structures and dielectric properties.\n

Maybe the repetition between the second and the third sentences is slightly implicit, but the idea is there. Nevertheless, I would not recommend that a beginner rely on rephrasing, since it can easily confuse the readers. If you are not sure, just repeat the words and keep your text clear!

Albert Einstein: When you are out to describe the truth, leave elegance to the tailor.

Here we come to another example (cited from DOI: 10.1126/SCIADV.ABB1219). I like this compact structure very much.

An electric double layer (EDL) formed at an electrified interface can afford a potential change of a few volts within a very thin layer of 3 to 5 \u00c5, amounting to an extremely large electric field of similar strength to that in a particle accelerator.\n% an extremely large electric field <==> a strong electric field\nNaturally, one would wonder how solvent molecules such as water or any other reactive species inside the EDL would behave in response to such a strong electric field.\n% how ... behave <==> this question\nAnswering this question is not only of fundamental interest but also of technological importance in a broad range of research areas in science and technology, to name a few, energy storage in supercapacitors, electrocatalysis of relevance to energy and environmental applications, self-assembly of colloidal particles, ion transport across biological membranes, and mineralization processes in earth science.\n% fundamental interest, technological importance <==> its significance\nDespite its significance, molecular-level understanding of EDL is largely missing, owing to its complexity and difficulty to probe.\n% molecular-level understanding <==> microscopic structures\nBecause of the advent of advanced experimental (e.g., synchrotron-based techniques and Raman spectroscopy) and computational methods [e.g., ab initio molecular dynamics (AIMD)], it is not until recently that the microscopic structures of EDL have started to be unveiled.\n

Yeah! Finally! I don't want to talk too much to distract you (but I still strongly recommend you to read the book mentioned above for fun!). I think the three tips above are sufficient to work out a readable draft for your big bosses. Don't be afraid of writing! Just have a try!

I am not a natural at writing. On the contrary, I really struggled with English writing and thought I was simply a dunderhead, even though I had a pretty nice and patient supervisor who helped me a lotttttttttt with my first paper. Things turned around one day (shortly after I finished the quasi-final version of my first paper) when I was asked to give a colleague a hand with a review. When I started to read that review, I magically knew how to put everything I had been taught into practice. Just like a spark in my mind. Maybe you only realize what should be improved when you have to deal with an \"unreadable draft\"? (Just kidding! Don't kill me, Xiaohui!)

Sincere thanks to Dr. Katharina Doblhoff-Dier at Leiden University.

"},{"location":"wiki/skills/QS4writing/#_1","title":"\u4e2d\u6587\u7248","text":"

\u7b14\u8005\u5199\u8fd9\u7bc7\u77ed\u6587\u7684\u76ee\u7684\u662f\u4e3a\u4e86\u5206\u4eab\u51e0\u4e2a\u7b14\u8005\u89c9\u5f97\u5f88\u5b9e\u7528\u7684\u5199\u4f5c\u5c0f\u6280\u5de7\uff0c\u5e0c\u671b\u53ef\u4ee5\u5e2e\u5230\u6b63\u5728\uff08\u6216\u5c06\u8981\uff09\u6323\u624e\u5728\u79d1\u7814\u5199\u4f5c\u4e2d\u7684\u540c\u5b66\u4eec\u3002

\u6ce8\uff1a\u7b14\u8005\u5e76\u4e0d\u662f\u79d1\u7814\u5199\u4f5c\u7684\u4e13\u5bb6\uff0c\u53ea\u662f\u4e00\u4e2a\u521a\u521a\u6572\u5b8c\u81ea\u5df1\u7b2c\u4e00\u7bc7\u8bba\u6587\u7684\u83dc\u9e1f\u3002\u4ee5\u4e0b\u7684\u5185\u5bb9\u90fd\u662f\u57fa\u4e8e\u7b14\u8005\u6240\u5b66\u7684\u548c\u7b14\u8005\u7684\u7406\u89e3\uff0c\u53ef\u80fd\u6bd4\u8f83\u7247\u9762\uff08\u5e0c\u671b\u6ca1\u6709\u9519\u8bef\uff09\u3002\u5c3d\u7ba1\u5982\u6b64\uff0c\u7b14\u8005\u5e0c\u671b\u7528\u4e00\u4e2a\u521d\u5b66\u8005\u7684\u89c6\u89d2\u53bb\u8bb2\u8ff0\uff0c\u8ba9\u8fd9\u7bc7\u77ed\u6587\u7684\u5185\u5bb9\u5bf9\u521d\u5b66\u8005\u6765\u8bf4\u662f\u53cb\u597d\u7684\u548c\u53ef\u5b9e\u73b0\u7684 ;-)

"},{"location":"wiki/skills/QS4writing/#_2","title":"\u7ec3\u4e60\uff01\u7ec3\u4e60......\u7ec3\u4e60\uff1f","text":"

\u7b14\u8005\u76f8\u4fe1\u76f8\u5f53\u4e00\u90e8\u5206\u4eba\u5728\u4e00\u4e9b\u4e66\u91cc\uff08\u6216\u8005\u5176\u4ed6\u5730\u65b9\uff09\u542c\u5230\u201c\u7ec3\u4e60\uff01\u7ec3\u4e60\uff01\u7ec3\u4e60\uff01\u201d\u8fd9\u53e5\u8bdd\u7684\u65f6\u5019\u4f1a\u8bf4\u4e00\u58f0\uff1a\u201c\u5c31\u8fd9\uff1f\u5c31\u8fd9\uff1f\u201d\u542c\u8d77\u6765\u662f\u4e2a\u5b8c\u5168\u6b63\u786e\u7684\u5e9f\u8bdd\u3002\u603b\u4f53\u6765\u8bf4\u7b14\u8005\u540c\u610f\u4f60\u4eec\u7684\u89c2\u70b9\uff0c\u5982\u679c\u4f60\u4eec\u5728\u7ec3\u4e60\u7684\u65f6\u5019\u6ca1\u6709\u4e00\u4e2a\u5177\u4f53\u548c\u5408\u7406\u7684\u8ba1\u5212\u3002\u6beb\u65e0\u76ee\u7684\u7684\u7ec3\u4e60\u6709\u65f6\u5019\u4f1a\u4e8b\u500d\u529f\u534a\uff0c\u751a\u81f3\u56e0\u6253\u51fb\u4f60\u7684\u81ea\u4fe1\u800c\u8d77\u5230\u53cd\u6548\u679c\u3002\u56e0\u6b64\uff0c\u7b14\u8005\u4f1a\u63a8\u8350\u5927\u5bb6\u4ece \u81ea\u5df1\u7684\u6587\u7ae0\uff08\u4e00\u4e2a\u5177\u4f53\u7684\u4f8b\u5b50\uff09\u5f00\u59cb\u3002\u5bf9\u4e8e\u90a3\u4e9b\u8fd8\u6ca1\u6709\u6587\u7ae0\u7684\u540c\u5b66\uff0c\u4e0d\u59a8\u8bd5\u8bd5\u5b9e\u9a8c\u62a5\u544a\u4e4b\u7c7b\u7684\uff1f\u7136\u540e\uff0c\u5f00\u59cb\u4e00\u6b65\u6b65\u6253\u78e8\u4f60\u7684\u6587\u7ae0\u5427\uff01

"},{"location":"wiki/skills/QS4writing/#_3","title":"\u7b2c\u4e00\u6b65\uff1a\u786e\u5b9a\u6846\u67b6","text":"

\u8fd9\u662f\u6700\u91cd\u8981\u7684\u4e00\u6b65\uff0c\u867d\u7136\u770b\u8d77\u6765\u548c\u5199\u4f5c\u6ca1\u592a\u5927\u5173\u7cfb\u3002\u4e00\u4e2a\u597d\u7684\u6846\u67b6\u53ef\u4ee5\u5927\u5927\u8282\u7701\u540e\u7eed\u5199\u4f5c\u7684\u65f6\u95f4\u2014\u2014\u60f3\u60f3\u91cd\u5199\u4e00\u6574\u4e2a\u6bb5\u843d\uff01\u901a\u5e38\u6765\u8bf4\uff0c\u6574\u4e2a\u9879\u76ee\uff08\u6587\u7ae0\uff09\u7684\u5927\u6846\u67b6\u5e94\u8be5\u662f\u5728\u9879\u76ee\u8fdb\u884c\u4e4b\u524d\u5c31\u548c\u5bfc\u5e08\u6572\u5b9a\u597d\u7684\uff0c\u8fd9\u4e2a\u4e0d\u4f1a\u6709\u5927\u95ee\u9898\u3002\u95ee\u9898\u5728\u54ea\u5462\uff1f\u4e0b\u4e00\u4e2a\u5c3a\u5ea6\uff1a\u6bb5\u843d\u95f4\u548c\u53e5\u5b50\u95f4\u7684\u8fde\u63a5\u3002\u5c31\u7b14\u8005\u4e2a\u4eba\u7ecf\u9a8c\u800c\u8a00\uff0c\u8fd9\u6b65\u662f\u5f88\u591a\u5b66\u751f\uff08\u548c\u5bfc\u5e08\uff09\u7684\u5669\u68a6...

\u90a3\u6211\u4eec\u5728\u5199\u6846\u67b6\u7684\u65f6\u5019\u5e94\u8be5\u5199\u5230\u4ec0\u4e48\u7a0b\u5ea6\u5462\uff1f\u6765\u770b\u770b\u4e00\u4e2a\u4f8b\u5b50\u3002\u8fd9\u91cc\uff0c\u6211\u4eec\u5e0c\u671b\u4ecb\u7ecd\u7535\u5316\u5b66\u754c\u9762\u7684\u4e00\u4e9b\u6a21\u62df\u65b9\u6cd5\uff08\u7740\u91cd\u5728\u53cc\u7535\u5c42\u6a21\u62df\uff09\u3002\u5e76\u4e14\u5728\u4e0a\u4e00\u8282\u91cc\uff0c\u6211\u4eec\u5df2\u7ecf\u4ecb\u7ecd\u8fc7\u4e86\u6eb6\u6db2\u76f8\u7684\u4e00\u4e9b\u6a21\u62df\u65b9\u6cd5\u3002\u6839\u636e\u8fd9\u4e9b\u5185\u5bb9\uff0c\u6211\u4eec\u53ef\u4ee5\u5199\u4e2a\u5927\u81f4\u5982\u4e0b\u7684\u6846\u67b6\uff0c\u7136\u540e\u548c\u6211\u4eec\u7684\u5408\u4f5c\u8005\u6216\u8005\u5bfc\u5e08\u8fdb\u884c\u4e0b\u4e00\u6b65\u8ba8\u8bba\u3002

the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)\n==>\nEDL is hard to be probed (reason)\n==>\nwe can get some info with in situ techniques and ab initio simulations\n==>\nOne of the key characteristics of EDL is its capacitance\n==>\nEDL capacitance can be measured by experiment (CV/impedance) and be a benchmark for modelling\n==>\nreplace the solute by the electrode (from solution electrochemistry to interface electrochemistry)\n==>\nuse similar simulation methods and focus on their performance on EDL modelling\n

\u5728\u8fd9\u4e00\u6b65\u4e2d\uff0c\u4f60\u4e0d\u9700\u8981\u8003\u8651\u8bed\u8a00\u7684\u4f18\u7f8e\u3002\u7b80\u5355\u800c\u7cbe\u51c6\u7684\u6587\u5b57\u5728\u63a5\u4e0b\u6765\u7684\u4fee\u6539\u4e2d\u66f4\u65b9\u4fbf\u3002

"},{"location":"wiki/skills/QS4writing/#abt","title":"\u7b2c\u4e8c\u6b65\uff1aABT \u7ed3\u6784","text":"

\u201c\u4f60\u9700\u8981\u82b1\u591a\u957f\u65f6\u95f4\u53bb\u8bb2\u8ff0\u4e00\u4e2a\u6545\u4e8b\uff1f\u201d Randy Olson \u5728\u4ed6\u7684TEDMED \u6f14\u8bb2\u4e2d\u95ee\u4e86\u8fd9\u4e2a\u95ee\u9898\u3002\uff08\u8fd9\u662f\u4e2a\u6cb9\u7ba1\u94fe\u63a5\uff0cB \u7ad9\u6ca1\u627e\u7740...\uff09\u5728\u8fd9\u4e2a\u6f14\u8bb2\u4ee5\u53ca\u4ed6\u7684\u4e66Houston, We Have a Narrative: Why Science Needs Story\u4e2d\uff0cOlson \u4ecb\u7ecd\u4e86\u4e00\u79cd\u975e\u5e38\u7b80\u5355\u7684\u53d9\u4e8b\u65b9\u6cd5\uff0cABT \u7ed3\u6784\uff1a

(...) AND (...), BUT (...), THEREFORE (...)

\u8ba9\u6211\u4eec\u8bd5\u7740\u628a\u4e0a\u4e00\u8282\u91cc\u7684\u6846\u67b6\u7528 ABT \u7ed3\u6784\u6539\u9020\u4e00\u4e0b\uff01

%% start the ABT structure\n% EDL is important (... AND ...)\nthe electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)\n% BUT it is hard to be probed\nHowever, EDL is hard to be probed, not only because xxx but xxx\n% THEREFORE, we need some tools\nTo address this difficulty, both in situ experimental techniques and modelling are required.\n%% END the ABT structure\n

\u5982\u679c\u4f60\u4e0d\u77e5\u9053\u600e\u4e48\u4e0b\u7b14\uff0c\u90a3\u5c31\u5148\u628a\u6240\u6709\u60f3\u5230\u7684\u70b9\u5199\u4e0b\u6765\u5e76\u628a\u5b83\u4eec\u5f80 ABT \u7ed3\u6784\u91cc\u5957\u3002

\u5982\u679c\u4f60\u8ba4\u4e3a\u67d0\u5904\u7684\u8fc7\u6e21\u4e0d\u591f\u81ea\u7136\uff0c\u4e5f\u53ef\u4ee5\u8003\u8651\u7528 ABT \u7ed3\u6784\u91cd\u5199\u4e00\u4e0b \u3002

"},{"location":"wiki/skills/QS4writing/#_4","title":"\u7b2c\u4e09\u6b65\uff1a\u91cd\u590d\u4f60\u7684\u8bcd\u6c47","text":"

\u7ecf\u8fc7\u4e0a\u9762\u7684\u4e24\u4e2a\u6b65\u9aa4\uff0c\u6211\u76f8\u4fe1\u4f60\u5df2\u7ecf\u83b7\u5f97\u4e86\u4e00\u4e2a\u53ef\u7406\u89e3\u7684\u5927\u7eb2\u3002 \u73b0\u5728\uff0c\u6211\u4eec\u6765\u8fdb\u884c\u6700\u540e\u4e00\u6b65\uff1a\u5c3d\u91cf\u4f7f\u6bcf\u4e2a\u53e5\u5b50\u4e2d\u90fd\u51fa\u73b0\u4e0a\u4e00\u4e2a\u53e5\u5b50\u4e2d\u7684\u5355\u8bcd\u3002\u8fd9\u4e2a\u65b9\u6cd5\u53ef\u4ee5\u52a0\u5f3a\u53e5\u5b50\u4e4b\u95f4\u7684\u8fde\u63a5\uff0c\u4f7f\u903b\u8f91\u66f4\u52a0\u6e05\u6670\uff0c\u4ece\u800c\u8ba9\u4f60\u7684\u6587\u5b57\u53ef\u4ee5\u66f4\u5bb9\u6613\u88ab\u9605\u8bfb\uff01 \u770b\u770b\u8fd9\u4e2a\u4f8b\u5b50\uff1a

Electric double layers (EDL) at the electrode/electrolyte interfaces are where electrochemical reactions occur, and thus are of paramount importance in electrochemistry.\n% Electric double layers (EDL) <==> EDL\nHowever, microscopic understanding of the EDL is still lacking due to its complexity and difficulty to probe.\n% microscopic understanding <==> valuable insight\nThanks to the development of computational methods, modelling has shown great potential in studying the interface of the electrode and the electrolyte in the past few years, and provided valuable insight into EDL structures and dielectric properties.\n

\u597d\u5427\uff0c\u770b\u4e0a\u53bb\u7b2c\u4e8c\u4e2a\u53e5\u5b50\u548c\u7b2c\u4e09\u4e2a\u53e5\u5b50\u4e4b\u95f4\u7684\u91cd\u590d\u6709\u70b9\u9690\u6666\uff1f\u4f46\u662f\u90a3\u4e2a\u610f\u601d\u4e86\u3002\u5c3d\u7ba1\u5982\u6b64\uff0c\u540c\u4e49\u66ff\u6362\u5bf9\u4e8e\u65b0\u624b\u6765\u8bf4\u9700\u8981\u7279\u522b\u8c28\u614e\uff0c\u4ee5\u9632\u51fa\u73b0\u8868\u8ff0\u504f\u5dee\u3002\u5982\u679c\u4f60\u4e0d\u662f\u7279\u522b\u786e\u5b9a\uff0c\u90a3\u5c31\u7b80\u5355\u5730\u91cd\u590d\uff01\u8ba9\u4f60\u7684\u6587\u7ae0\u5148\u53d8\u5f97\u6e05\u6670\uff01

Albert Einstein: When you are out to describe the truth, leave elegance to the tailor.

\u8fd9\u91cc\u662f\u53e6\u4e00\u4e2a\u4f8b\u5b50 (\u5f15\u81ea DOI: 10.1126/SCIADV.ABB1219)\u3002\u7b14\u8005\u4e2a\u4eba\u5f88\u559c\u6b22\u8fd9\u4e2a\u7b80\u6d01\u7d27\u51d1\u7684\u4f8b\u5b50\uff01

An electric double layer (EDL) formed at an electrified interface can afford a potential change of a few volts within a very thin layer of 3 to 5 \u00c5, amounting to an extremely large electric field of similar strength to that in a particle accelerator.\n% an extremely large electric field <==> a strong electric field\nNaturally, one would wonder how solvent molecules such as water or any other reactive species inside the EDL would behave in response to such a strong electric field.\n% how ... behave <==> this question\nAnswering this question is not only of fundamental interest but also of technological importance in a broad range of research areas in science and technology, to name a few, energy storage in supercapacitors, electrocatalysis of relevance to energy and environmental applications, self-assembly of colloidal particles, ion transport across biological membranes, and mineralization processes in earth science.\n% fundamental interest, technological importance <==> its significance\nDespite its significance, molecular-level understanding of EDL is largely missing, owing to its complexity and difficulty to probe.\n% molecular-level understanding <==> microscopic structures\nBecause of the advent of advanced experimental (e.g., synchrotron-based techniques and Raman spectroscopy) and computational methods [e.g., ab initio molecular dynamics (AIMD)], it is not until recently that the microscopic structures of EDL have started to be unveiled.\n

\u6587\u7ae0\u5230\u8fd9\u91cc\u5c31\u7ed3\u675f\u4e86\uff01\u7b14\u8005\u4e0d\u60f3\u5199\u592a\u591a\u70b9\u4ee5\u81f3\u4e8e\u8ba9\u4f60\u4eec\u6709\u70b9\u6293\u72c2\uff08\u4f46\u662f\u7b14\u8005\u8fd8\u662f\u975e\u5e38\u63a8\u8350\u4f60\u4eec\u53bb\u8bfb\u8bfb\u4e0a\u9762\u63d0\u5230\u7684\u4e66\uff01\u5f88\u6709\u8da3\uff01\uff09\u603b\u7684\u6765\u8bf4\uff0c\u7b14\u8005\u8ba4\u4e3a\u4e0a\u9762\u63d0\u53ca\u7684\u4e09\u70b9\u5df2\u7ecf\u8db3\u4ee5\u5199\u51fa\u4e00\u4e2a\u6e05\u6670\u7684\u521d\u7a3f\u7ed9\u4f60\u4eec\u7684\u8001\u677f\u4e86\u3002

\u6700\u540e\uff0c\u5411\u6211\u7b2c\u4e00\u4e2a\u9879\u76ee\u7684\u65e5\u5e38\u5bfc\u5e08\uff0c\u8377\u5170\u83b1\u987f\u5927\u5b66\u7684 Dr. Katharina Doblhoff-Dier \u8868\u793a\u8bda\u631a\u7684\u611f\u8c22\u3002

"},{"location":"wiki/skills/QS4writing/#useful-websites-for-writing","title":"Useful websites for writing","text":"

vocabulary

https://www.vocabulary.com

https://www.oxfordlearnersdictionaries.com

synonym

https://www.wordhippo.com

https://www.thesaurus.com

collocation

https://www.linggle.com

https://netspeak.org

sentence

https://www.phrasebank.manchester.ac.uk

rephrase

https://quillbot.com

translation

http://www.onedict.com/index.php

https://www.deepl.com/translator

"},{"location":"wiki/skills/QS4writing/#_5","title":"\u5e74\u5ea6\u6c47\u62a5\u6807\u51c6","text":"

Graduate students (PhD and Master's) should write one report per year during their first two years. Reports are written in **LaTeX**, using the AIP template from revtex, and in **English**.

For Master's students, the first-year report should be about 4,500 words and the second-year report about 6,000 words.

For PhD students, the first-year report should be about 6,000 words and the second-year report about 7,500 words.

"},{"location":"wiki/skills/QS4writing/#overleaf","title":"\u4f7f\u7528 Overleaf \u5199\u4f5c","text":"

Overleaf\u662f\u4e00\u4e2a\u5728\u7ebf\u7684 LaTeX \u7f16\u8f91\u5668\uff0c\u53ef\u4ee5\u76f4\u63a5\u5728\u6d4f\u89c8\u5668\u4e2d\u7f16\u8f91 LaTeX \u6587\u6863\u3002\u4f7f\u7528 Overleaf \u53ef\u4ee5\u65b9\u4fbf\u5730\u8fdb\u884c\u5408\u4f5c\u5199\u4f5c\uff0c\u540c\u65f6\u4e5f\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fdb\u884c\u7248\u672c\u63a7\u5236\u3002\u73b0\u9636\u6bb5\uff0c\u8bfe\u9898\u7ec4\u7684\u79d1\u7814\u8bba\u6587\u57fa\u672c\u90fd\u662f\u4f7f\u7528 Overleaf \u8fdb\u884c\u5199\u4f5c\u3002\u57fa\u672c\u64cd\u4f5c\u6d41\u7a0b\u4e3a\uff1a\u5728\u9700\u8981\u5199\u6587\u7ae0\u7684\u65f6\u5019\u8054\u7cfb\u7ba1\u7406\u5458\uff0c\u8bf7\u7ba1\u7406\u5458\u5c06\u6587\u7ae0\u76f8\u5173\u4eba\u5458\u7684\u90ae\u7bb1\u6dfb\u52a0\u5230\u4e00\u4e2a\u7a7a\u767d\u9879\u76ee\u4e2d\uff0c\u7136\u540e\u7528\u4e2a\u4eba Overleaf \u8d26\u53f7\u8fdb\u884c\u540e\u7eed\u7f16\u8f91\u3002\u9879\u76ee\u76f8\u5173\u6587\u4ef6\u8bfe\u9898\u7ec4\u4f1a\u7edf\u4e00\u8fdb\u884c\u5f52\u6863\u7ba1\u7406\u3002

\u5728\u9700\u8981\u5199\u6587\u7ae0\u7684\u65f6\u5019\u8bf7\u5c06\u4ee5\u4e0b\u4fe1\u606f\u53d1\u7ed9\u7ba1\u7406\u5458\uff1a

  • \u6240\u9700\u6a21\u7248\uff08\u5e38\u7528\u7684\u5982 ACS \u548c AIP\uff0c\u5982\u679c\u6709\u53e6\u5916\u9700\u6c42\u4e5f\u53ef\u4ee5\u544a\u77e5\u7ba1\u7406\u5458\uff09
  • \u9879\u76ee\u540d\u79f0\uff08\u6309\u7167\uff1a\u4f5c\u8005\u540d-\u5e8f\u53f7-\u6587\u7ae0\u540d \u8fdb\u884c\u547d\u540d\uff0c\u6bd4\u5982\uff1ajxzhu-1-pt_oh_100\uff09
  • \u9700\u8981\u6dfb\u52a0\u7684\u6210\u5458\u90ae\u7bb1\uff08\u9664\u7ba1\u7406\u5458\u5916\u4e0a\u9650 5 \u4eba\u6bcf\u9879\u76ee\uff09 \u7ba1\u7406\u5458\u6dfb\u52a0\u76f8\u5173\u4eba\u5458\u90ae\u7bb1\u540e\uff0c\u8bf7\u6240\u6709\u6210\u5458\u67e5\u770b\u90ae\u7bb1/\u767b\u5f55 Overleaf \u8d26\u53f7\u786e\u8ba4\u9080\u8bf7\u3002
"},{"location":"wiki/skills/QS4writing/#_6","title":"\u7248\u672c\u7ba1\u7406","text":"

Overleaf \u53ef\u4ee5\u5728\u4fee\u6539\u7684\u65f6\u5019\u5b9e\u73b0\u7248\u672c\u8bb0\u5f55\uff0c\u4e5f\u53ef\u4ee5\u6dfb\u52a0\u8bc4\u8bba\uff0c\u5177\u4f53\u7684\u4f7f\u7528\u65b9\u6cd5\u53ef\u4ee5\u53c2\u8003\u6b64\u6559\u7a0b\u3002

  1. \u53f3\u4e0a\u89d2History\uff0c\u53ef\u4ee5\u67e5\u770b\u5386\u53f2\u7248\u672c\uff0c\u5e76\u81ea\u884c\u6807\u8bb0\u7248\u672c\u3002
  2. \u53f3\u4e0a\u89d2Menu-Sync\uff0c\u53ef\u4ee5\u8fdb\u884c\u624b\u52a8\u5907\u4efd\u3002\u4f46\u662f\u73b0\u9636\u6bb5 GitHub \u8d26\u53f7\u7ed1\u5b9a\u4ec5\u9650\u4e8e\u4f1a\u5458\uff08\u65e9\u671f\u5df2\u7ed1\u5b9a\u7528\u6237\u540c\u6b65\u529f\u80fd\u4e0d\u53d7\u5f71\u54cd\uff09\uff0c\u6545\u63a8\u8350\u4f7f\u7528 git+\u672c\u5730\u8fdb\u884c\u5907\u4efd\uff08\u4e5f\u53ef\u5728\u672c\u5730\u81ea\u884c\u9009\u62e9\u5176\u4ed6\u7684\u6258\u7ba1\u5e73\u53f0\uff09\u3002git \u76f8\u5173\u6559\u7a0b\u53c2\u89c1\u6b64\u6559\u7a0b\u3002
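A minimal local-backup sketch (assuming the project source has been downloaded from Overleaf as a zip file; the file and folder names below are placeholders):

unzip my_paper.zip -d my_paper\ncd my_paper\ngit init\ngit add .\ngit commit -m "backup of Overleaf project"\n# optionally push to a private repository of your choice\n# git remote add origin <your-private-repo-url>\n# git push -u origin master\n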
"},{"location":"wiki/skills/research_skill/","title":"\u7814\u7a76\u6280\u80fd\u5165\u95e8","text":""},{"location":"wiki/skills/research_skill/#_2","title":"\u5982\u4f55\u9605\u8bfb\u6587\u732e","text":"

\u9605\u8bfb\u6587\u732e\u5165\u95e8

"},{"location":"wiki/skills/research_skill/#_3","title":"\u4e3a\u4ec0\u4e48\u8981\u5199\u4f5c","text":""},{"location":"wiki/skills/research_skill/#_4","title":"\u5982\u4f55\u5199\u4f5c","text":"

Whitesides\u6559\u6388\u7684\u5927\u4f5c

Whitesides, G. M. Whitesides\u2019 Group: Writing a Paper. Adv. Mater. 2004, 16 (15 SPEC. ISS.), 1375\u20131377.

"},{"location":"wiki/skills/research_skill/#_5","title":"\u5982\u4f55\u7528\u82f1\u8bed\u6f14\u8bb2","text":"

English for Presentations at International Conferences

"},{"location":"wiki/software_development/lammps/installation/","title":"\u5728\u96c6\u7fa4\u5b89\u88c5LAMMPS","text":""},{"location":"wiki/software_development/lammps/installation/#zeus","title":"Zeus \u96c6\u7fa4","text":"
# Load the necessary modules\nmodule load cmake/3.20\nmodule load intel/17.5.239 mpi/intel/2017.5.239 gcc/7.4.0\n\n# find the ver in https://download.lammps.org/tars/index.html\nwget -c https://download.lammps.org/tars/lammps-23Jun2022.tar.gz\ntar -zxvf lammps-23Jun2022.tar.gz\ncd lammps-23Jun2022\nmkdir -p build\ncd build\ncmake ../cmake -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \\\n-DCMAKE_Fortran_COMPILER=gfortran \\\n-D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=mpi \\\n-D CMAKE_INSTALL_PREFIX=/data/jxzhu/apps/lammps/install/23Jun2022 \\\n-D CMAKE_INSTALL_LIBDIR=lib \\\n-D CMAKE_INSTALL_FULL_LIBDIR=/data/jxzhu/apps/lammps/install/23Jun2022/lib \\\n-C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake \\\n-D BUILD_SHARED_LIBS=yes\nmake -j 32\nmake install\n

Check whether the installation succeeded:

./lmp_mpi -h\n

For individual users, the directory containing the executable (e.g. /data/jxzhu/apps/lammps/lammps-23Jun2022/build) can be written into the environment variables of a specific virtual environment, so that different LAMMPS versions can be kept separate.
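A minimal sketch of this approach, assuming a conda environment named lammps_23Jun2022 (the environment name and the build path are placeholders):

conda activate lammps_23Jun2022\nmkdir -p $CONDA_PREFIX/etc/conda/activate.d\n# this activate.d script is sourced every time the environment is activated\ncat > $CONDA_PREFIX/etc/conda/activate.d/lammps.sh << 'EOF'\nexport PATH=/data/jxzhu/apps/lammps/lammps-23Jun2022/build:$PATH\nEOF\n# re-activate to pick up the new PATH\nconda deactivate && conda activate lammps_23Jun2022\nwhich lmp_mpi\n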

"},{"location":"wiki/software_development/lammps/installation/#ikkem","title":"IKKEM \u96c6\u7fa4","text":"
module load intel/2021.1\nmodule load dev/cmake/3.26.3\nmodule load gcc/9.3\n\n# find the ver in https://download.lammps.org/tars/index.html\n# find the ver in https://download.lammps.org/tars/index.html\nwget -c https://download.lammps.org/tars/lammps-23Jun2022.tar.gz\ntar -zxvf lammps-23Jun2022.tar.gz\ncd lammps-23Jun2022\nmkdir -p build\ncd build\ncmake ../cmake -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \\\n      -DCMAKE_Fortran_COMPILER=gfortran \\\n      -D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=intel_cpu_intelmpi \\\n      -D CMAKE_INSTALL_PREFIX=/public/home/jxzhu/apps/lammps/install/lammps-23Jun2022 \\\n      -D CMAKE_INSTALL_LIBDIR=lib \\\n      -D CMAKE_INSTALL_FULL_LIBDIR=/public/home/jxzhu/apps/lammps/install/lammps-23Jun2022/lib \\\n      -C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake \\\n      -D BUILD_SHARED_LIBS=yes \nmake -j 32\nmake install\n

Check whether the installation succeeded:

./lmp_intel_cpu_intelmpi -h\n
"},{"location":"wiki/software_development/lammps/plugin/","title":"\u57fa\u4e8e\u63d2\u4ef6\u6a21\u5f0f\u5f00\u53d1LAMMPS","text":"
  • \u63d2\u4ef6\u529f\u80fd\u4ecb\u7ecd\uff1aLAMMPS Plugin
  • \u63d2\u4ef6\u5f00\u53d1\u6307\u5357\uff1aLAMMPS Plugin Developer Guide

\u4e00\u822c\u6765\u8bf4\uff0c\u5bf9\u4ee3\u7801\u8fdb\u884c\u529f\u80fd\u6dfb\u52a0/\u4fee\u6539\u9700\u8981\u76f4\u63a5\u5728\u6e90\u4ee3\u7801\u4e2d\u8fdb\u884c\uff0c\u8fd9\u6837\u53ef\u80fd\u5bf9\u539f\u6709\u4ee3\u7801\u4ea7\u751f\u5f71\u54cd\u3002\u4e3a\u4e86\u89e3\u51b3\u8fd9\u4e2a\u95ee\u9898\uff0cLAMMPS\u5f15\u5165\u4e86\u63d2\u4ef6\u6a21\u5f0f\uff0c\u4f7f\u5f97\u7528\u6237\u53ef\u4ee5\u5728\u4e0d\u6539\u52a8\u6e90\u4ee3\u7801\u7684\u60c5\u51b5\u4e0b\u5bf9LAMMPS\u8fdb\u884c\u529f\u80fd\u6269\u5c55\u3002\u63a5\u4e0b\u6765\uff0c\u6211\u4eec\u901a\u8fc7\u5b98\u65b9\u7684\u4f8b\u5b50\u5bf9\u63d2\u4ef6\u7684\u8fd0\u884c\u65b9\u5f0f\u8fdb\u884c\u5927\u81f4\u7684\u4e86\u89e3\uff1a

```bash\ncd lammps-23Jun2022/examples/plugins\n```\n

Compile with make:

```bash\nmake \n```\n

or with cmake:

```bash\nmkdir -p build\ncd build\ncmake ../\nmake\n```\n

After compilation you obtain several shared-library (.so) files. Plugins can be used in two ways:

  1. In the LAMMPS input, load the plugin with the plugin load command; the features provided by the plugin are then available.
    plugin load morse2plugin.so\n
  2. Add the directory containing the shared libraries to LAMMPS_PLUGIN_PATH; the program will then automatically load all plugins it finds (see the sketch below).
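For the second approach, a minimal sketch (the plugin directory and the input file are placeholders):

export LAMMPS_PLUGIN_PATH=/path/to/lammps-23Jun2022/examples/plugins\nlmp_mpi -in in.lammps\n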

Note: if you move the example out of examples/plugins, the build settings must be adjusted. When building with make, modify CXXFLAGS in the Makefile:

```bash\nCXXFLAGS=-I$(LAMMPS_SOURCE_DIR) -Wall -Wextra -O3 -fPIC -I$(LAMMPS_SOURCE_DIR)/OPENMP -fopenmp\n```\n

and set LAMMPS_SOURCE_DIR to the path of the LAMMPS source code:

```bash\nexport LAMMPS_SOURCE_DIR=/data/jxzhu/software/lammps/lammps-23Jun2022/src\nmake\n```\n

When building with cmake, comment out line 22 of plugins/CMakeLists.txt (get_filename_component(LAMMPS_SOURCE_DIR ${PROJECT_SOURCE_DIR}/../../src ABSOLUTE)) and specify the LAMMPS source directory when running cmake:

```bash\nmkdir -p build\ncd build\nrm *\ncmake -DLAMMPS_SOURCE_DIR=/data/jxzhu/apps/lammps/lammps-23Jun2022/src ..\nmake\n```\n
"},{"location":"wiki/software_development/lammps/quick_start/","title":"LAMMPS\u5f00\u53d1\u51c6\u5907","text":""},{"location":"wiki/software_development/lammps/quick_start/#lammps_1","title":"\u4e3a\u4ec0\u4e48\u8981\u5b66\u4e60LAMMPS\u5f00\u53d1\uff1f","text":"

\u4f5c\u4e3a\u4e00\u4e2a\u5f00\u6e90\u7684\u5206\u5b50\u52a8\u529b\u5b66\u6a21\u62df\u8f6f\u4ef6\uff0cLAMMPS\u5728\u8ba1\u7b97\u5316\u5b66\u4e2d\u6709\u975e\u5e38\u5e7f\u6cdb\u7684\u5e94\u7528\u3002\u73b0\u6709\u7684LAMMPS\u53d1\u884c\u7248\u672c\u63d0\u4f9b\u4e86\u5927\u91cf\u7684\u529f\u80fd\uff0c\u5927\u591a\u6570\u65f6\u5019\u53ef\u4ee5\u6ee1\u8db3\u7528\u6237\u7684\u9700\u6c42\u3002\u4f46\u662f\uff0c\u6709\u65f6\u5019\u6211\u4eec\u4ecd\u9700\u8981\u5b9e\u73b0\u4e00\u4e9b\u65b0\u7684\u529f\u80fd\uff0c\u6216\u8005\u5bf9\u73b0\u6709\u529f\u80fd\u8fdb\u884c\u4fee\u6539\u3002\u6b64\u65f6\uff0c\u5c31\u9700\u8981\u6211\u4eec\u5bf9LAMMPS\u5f00\u53d1\u6709\u5927\u81f4\u4e86\u89e3\u3002\u672c\u6559\u7a0b\u9762\u5411\u5df2\u638c\u63e1LAMMPS\u7684\u57fa\u672c\u529f\u80fd\u7684\u7528\u6237\uff0c\u5e0c\u671b\u901a\u8fc7\u672c\u6559\u7a0b\u7684\u5b66\u4e60\uff0c\u8bfb\u8005\u53ef\u4ee5\u638c\u63e1LAMMPS\u7684\u57fa\u672c\u5f00\u53d1\u65b9\u6cd5\uff0c\u4e3a\u81ea\u5df1\u7684\u7814\u7a76\u5de5\u4f5c\u63d0\u4f9b\u66f4\u591a\u7684\u53ef\u80fd\u6027\u3002\u8003\u8651\u5230\u73b0\u5728\u5df2\u7ecf\u6709\u4e00\u4e9b\u5173\u4e8eLAMMPS\u5f00\u53d1\u7684\u6559\u7a0b\uff08\u8d34\u4e8e\u4e0b\u65b9\uff09\uff0c\u672c\u6559\u7a0b\u5c06\u57fa\u4e8echenglab\u7ec4\u5185\u60c5\u51b5\u8fdb\u884c\u4ecb\u7ecd\u3002

"},{"location":"wiki/software_development/lammps/quick_start/#_1","title":"\u9605\u8bfb\u8d44\u6599","text":"
  1. \u5b98\u65b9\u5f00\u53d1\u6307\u5357 \u975e\u5e38\u5168\u9762\u7684\u5f00\u53d1\u6307\u5357\uff0c\u5305\u62ec\u4e86LAMMPS\u7684\u4ee3\u7801\u7ed3\u6784\u3001\u5e76\u884c\u7b97\u6cd5\u7b49\uff0c\u4f46\u662f\u7bc7\u5e45\u8f83\u957f\u3002\u5efa\u8bae\u4f18\u5148\u9605\u8bfb\u4ee3\u7801\u67b6\u6784\u548c\u5355\u6b65\u4e2d\u8c03\u7528\u7684\u529f\u80fd\u3002
  2. Extending and Modifying LAMMPS Writing Your Own Source Code: A pragmatic guide to extending LAMMPS as per custom simulation requirements \u8be6\u7ec6\u4ecb\u7ecd\u4e86\u5982\u4f55\u5728LAMMPS\u4e2d\u6dfb\u52a0\u65b0\u7684\u529f\u80fd\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u627e\u5230\u5bf9\u5e94\u7684\u6848\u4f8b\u8fdb\u884c\u5b66\u4e60\u3002

\u5982\u679c\u4f60\u6ca1\u6709\u4efb\u4f55\u4ee3\u7801\u7ecf\u9a8c\uff0c\u5efa\u8bae\u5148\u6839\u636e\u57fa\u7840\u5b8c\u6210\u4ee5\u4e0b\u7684\u5185\u5bb9\u5b66\u4e60\uff1a

  1. LAMMPS\u57fa\u7840
  2. Git\u57fa\u7840
  3. C++\u57fa\u7840\uff08\u8bf7\u6839\u636e\u81ea\u5df1\u7684\u4ee3\u7801\u57fa\u7840\u9009\u62e9\u5408\u9002\u7684\u6559\u7a0b\uff0c\u6bd4\u5982C++ Primer Plus\uff09
"},{"location":"wiki/software_installation/cp2k-7.1/","title":"CP2K 7.1 \u5b89\u88c5\u6559\u7a0b","text":"

\u8fd9\u91cc\u4ee5 7.1 \u7248\u672c\u4e3a\u4f8b\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5\u7f16\u8bd1 CP2K\uff0c\u5176\u4ed6\u7248\u672c\u53ef\u53c2\u7167\u4fee\u6539\u3002

"},{"location":"wiki/software_installation/cp2k-7.1/#_1","title":"\u73af\u5883\u51c6\u5907","text":"

\u53ef\u53c2\u8003\u5b98\u65b9\u652f\u6301\u7f16\u8bd1\u73af\u5883\uff1a

  • \u4f7f\u7528 GCC 5.5.0 \u4ee5\u4e0a
  • Intel MPI \u73af\u5883

\u4e00\u5207\u5c31\u7eea\u540e\uff0c\u52a0\u8f7d\u4e0a\u8ff0\u73af\u5883\uff1a

module load intel/17.5.239 mpi/intel/2017.5.239\nmodule load gcc/5.5.0\n
"},{"location":"wiki/software_installation/cp2k-7.1/#_2","title":"\u5b89\u88c5\u6d41\u7a0b","text":"

\u9996\u5148\uff0c\u5728 Release \u9875\u9762 \u4e0b\u8f7d CP2K \u5b89\u88c5\u5305\uff0c\u4ee5 7.1 \u4e3a\u4f8b\uff1a

wget -c https://github.com/cp2k/cp2k/releases/download/v7.1.0/cp2k-7.1.tar.bz2\n

Copy cp2k-7.1.tar.bz2 to the installation path and unpack it. Since the required libraries have to be pre-compiled, and to avoid extra path dependencies later on, it is recommended to compile directly in the installation path. Taking /share/apps/cp2k as an example:

cp cp2k-7.1.tar.bz2 /share/apps/cp2k\ncd /share/apps/cp2k/\ntar -jxf cp2k-7.1.tar.bz2\n

Rename the directory to 7.1 in preparation for adding the module file later (this step is optional; you may keep the default name, but the later environment setup then has to be adjusted accordingly):

mv cp2k-7.1 7.1\n

Go into the toolchain directory and modify install_mpich.sh, changing check_command mpic++ "mpich" to check_command mpicxx "mpich":

cd 7.1/tools/toolchain\nsed -i 's/check_command mpic++/check_command mpicxx/g' scripts/install_mpich.sh\n

(Optional) If the ELPA package is to be installed, the static MKL libraries have to be replaced by the shared ones, otherwise the build fails with undefined reference to ...:

sed -i 's/a libmkl_core.a libmkl_sequential.a/so libmkl_sequential.so libmkl_core.so/g' scripts/install_mkl.sh\nsed -i 's/libmkl_gf_lp64.a/libmkl_gf_lp64.so/g' scripts/install_mkl.sh\nsed -i 's/libmkl_core.a/libmkl_sequential.so/g' scripts/install_mkl.sh\nsed -i 's/libmkl_scalapack_lp64.a/libmkl_scalapack_lp64.so/g' scripts/install_mkl.sh\nsed -i 's/libmkl_blacs_intelmpi_lp64.a/libmkl_blacs_intelmpi_lp64.so/g' scripts/install_mkl.sh\nsed -i 's/libmkl_blacs_openmpi_lp64.a/libmkl_blacs_openmpi_lp64.so/g' scripts/install_mkl.sh\nsed -i 's/libmkl_core.a/libmkl_sequential.so/g' scripts/install_mkl.sh\n

ref 1 ref 2

(Optional) To speed up the installation and avoid timeout errors, users in mainland China can replace GitHub with a mirror throughout. Packages downloaded later from the cp2k official website may still time out, however; in that case you may need to download the corresponding packages through some other channel and place them in the build directory.

sed -i 's/github.com/hub.fastgit.org/g' scripts/install_*.sh\n

Then run the toolchain script to install the dependencies:

./install_cp2k_toolchain.sh --gpu-ver=no   --enable-cuda=no  --with-mpich=system --with-sirius=no --with-openmpi=no  --with-spfft=no --with-hdf5=no\n

Watch the output and any error messages during the process and resolve them accordingly. If everything goes well, you will be prompted to copy the arch files and to source the required environment; just follow the prompts. Note that the exact commands may differ depending on your steps, so the following is for reference only:

cp install/arch/local* /share/apps/cp2k/7.1/arch/\nsource /share/apps/cp2k/7.1/tools/toolchain/install/setup\n

Then compile and install:

cd /share/apps/cp2k/7.1/\nmake -j 8 ARCH=local VERSION=\"popt psmp\"\n

If all goes well, you obtain the compiled binaries. Create a bin directory and copy the files from the exe directory into bin:

mkdir bin\ncp ./exe/local/* ./bin\n

Finally, delete everything except bin and tools, and remove the build and install directories under tools/toolchain, for example as sketched below.
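A sketch of this cleanup, assuming the /share/apps/cp2k/7.1 layout used above (double-check the paths before running rm -rf):

cd /share/apps/cp2k/7.1\n# keep only bin/ and tools/, remove everything else\nls | grep -v -E '^(bin|tools)$' | xargs rm -rf\n# remove the toolchain build and install directories\nrm -rf tools/toolchain/build tools/toolchain/install\n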

"},{"location":"wiki/software_installation/cp2k-7.1/#module","title":"Module \u6587\u4ef6\u751f\u6210","text":"

\u82e5\u96c6\u7fa4\u4f7f\u7528 module \u7ba1\u7406\u73af\u5883\u53d8\u91cf\uff0c\u8bf7\u5728 modulefile \u76ee\u5f55\u4e0b\uff08\u53d6\u51b3\u4e8e\u96c6\u7fa4\u7684\u8bbe\u7f6e\uff09\u65b0\u5efa\u76ee\u5f55cp2k\u5e76\u521b\u5efa\u6587\u4ef6.module\uff1a

#%Module\n\n# Help message\nproc ModulesHelp { } {\n    set nameversion [module-info name]\n    regsub \"/.*\" $nameversion \"\" name\n    regsub \".*/\" $nameversion \"\" version\n    puts stderr \"\\tLoads the $version $name environment\"\n}\n\n# Set variables\nset nameversion [module-info name]\nregsub \"/.*\" $nameversion \"\" name\nregsub \".*/\" $nameversion \"\" version\nmodule-whatis    \"$name $version\"\n\n# set environment variables\nset basedir /share/apps/$name/$version\n\nmodule load intel/17.5.239 mpi/intel/2017.5.239\nmodule load gcc/5.5.0\n\nprepend-path    PATH            ${basedir}/bin\n

Then create a symbolic link to expose the environment under the corresponding version number:

ln -s .module 7.1\n
"},{"location":"wiki/software_installation/cp2k-7.1/#qa","title":"Q&A","text":"
  1. If none of the tarballs labeled as coming from https://www.cp2k.org can be downloaded, and wget-ing such a tarball directly reports Issued certificate has expired, try updating the certificate service; on CentOS 7 the command is:
yum install ca-certificates\n
  1. The workaround above only applies to Intel MPI <= 2018; for newer MPI versions it is recommended to install a newer CP2K directly, whose toolchain provides full support.

  2. If make keeps failing, it may also be that the system locale is not configured correctly; load the following environment variables:

export LANG=en_US.UTF-8\nexport LC_ALL=en_US.UTF-8\nexport LC_CTYPE=\"en_US.UTF-8\"\n
"},{"location":"wiki/software_installation/gcc/","title":"GCC \u5b89\u88c5\u6559\u7a0b","text":"

\u8fd9\u91cc\u4ee5 5.5.0 \u7248\u672c\u4e3a\u4f8b\uff0c\u5176\u4ed6\u7248\u672c\u53ef\u4ee5\u53c2\u8003\uff0c\u53ea\u9700\u5c06\u7248\u672c\u53f7\u66ff\u6362\u5373\u53ef\u3002

\u9996\u5148\u4e0b\u8f7d gcc \u5b89\u88c5\u5305\uff0c\u56fd\u5185\u76f4\u63a5\u8bbf\u95ee gnu \u5b98\u7f51\u8f83\u6162\uff0c\u53ef\u4ee5\u901a\u8fc7 tuna \u7b49\u955c\u50cf\u5b89\u88c5

wget https://mirrors.tuna.tsinghua.edu.cn/gnu/gcc/gcc-5.5.0/gcc-5.5.0.tar.gz\n

Unpack it and download the prerequisites needed for compilation:

tar -zxvf gcc-5.5.0.tar.gz\ncd gcc-5.5.0\n./contrib/download_prerequisites\ncd ..\n

Create a build directory and compile inside it:

mkdir objdir\ncd objdir\n../gcc-5.5.0/configure --prefix=/share/apps/gcc/5.5.0 --enable-languages=c,c++,fortran,go --disable-multilib\nmake\nmake install\n

Write a modulefile to set up the environment variables:

#%Module1.0#####################################################################\n##\n## GCC modulefile\n##\nproc ModulesHelp { } {\n        global version\n\n        puts stderr \"\\tSets up environment for GCC v$version\"\n}\n\nmodule-whatis   \"sets up environment for GCC v5.5.0\"\n\n# for Tcl script use only\nset     version 5.5.0\nset     root    /share/apps/gcc/$version\n\nprepend-path    INFOPATH        $root/share/info\nprepend-path    LD_LIBRARY_PATH $root/lib64:$root/lib:$root/libexec\nprepend-path    INCLUDE         $root/include\nprepend-path    MANPATH         $root/share/man\nprepend-path    PATH            $root/bin\n
"},{"location":"wiki/software_installation/install_from_src_in_conda/","title":"\u865a\u62df\u73af\u5883\u4e0b\u6e90\u7801\u5b89\u88c5 C/C++\u7a0b\u5e8f\uff1a\u4ee5 valgrind \u4e3a\u4f8b","text":"

\u6e90\u7801\u5b89\u88c5\u4e00\u822c\u7531 3 \u4e2a\u6b65\u9aa4\u7ec4\u6210\uff1a\u914d\u7f6e(configure)\u3001\u7f16\u8bd1(make)\u3001\u5b89\u88c5(make install)\u3002\u9ed8\u8ba4\u60c5\u51b5\u4e0b\u8fdb\u5165\u6e90\u7801\u6240\u5728\u6587\u4ef6\u5939\u4e0b\u987a\u5e8f\u6267\u884c./configure && make && make install \u4f1a\u5c06\u6587\u4ef6\u5b89\u88c5\u5728/usr/local\u4e0b\u3002\u4f46\u662f\uff0c\u8fd9\u79cd\u505a\u6cd5\u6709\u4e24\u4e2a\u4e0d\u8db3\uff1a

  • \u67d0\u4e9b\u8f6f\u4ef6\uff08\u7248\u672c\uff09\u4ec5\u5e94\u7528\u4e8e\u7279\u5b9a\u5de5\u4f5c\u4efb\u52a1\u4e2d\uff0c\u4e0d\u540c\u4efb\u52a1\u4e2d\u7684\u8f6f\u4ef6\uff08\u7248\u672c\uff09\u53ef\u80fd\u4f1a\u6709\u51b2\u7a81
  • \u96c6\u7fa4\u4e0a\u666e\u901a\u7528\u6237\u6ca1\u6709\u6743\u9650\u4fee\u6539/usr/local\u8fdb\u884c\u5b89\u88c5

\u662f\u5426\u53ef\u4ee5\u91c7\u7528\u7c7b\u4f3c\u5c06 python \u5305\u5b89\u88c5\u5230\u7279\u5b9a\u865a\u62df\u73af\u5883\u4e0b\u7684\u505a\u6cd5\uff0c\u628a C/C++\u7a0b\u5e8f\u901a\u8fc7\u6e90\u7801\u5b89\u88c5\u5230\u7279\u5b9a\u865a\u62df\u73af\u5883\u4e2d\u5462\uff1f\u7b54\u6848\u662f\uff1a\u53ef\u4ee5\uff01\u63a5\u4e0b\u6765\uff0c\u4ee5 Valgrind \u4e3a\u4f8b\u8bf4\u660e\u5982\u4f55\u5c06 C/C++\u8f6f\u4ef6\u5305\u5b89\u88c5\u5230\u7279\u5b9a\u865a\u62df\u73af\u5883\u4e0b\u3002

\u865a\u62df\u73af\u5883\u5730\u5740\uff08\u6839\u636e\u81ea\u5df1\u60c5\u51b5\u4fee\u6539\uff09\uff1a/new_data/jxzhu/envs/test_env

  1. Download the source code and unpack it
# download source code from official website\nwget -c https://sourceware.org/pub/valgrind/valgrind-3.19.0.tar.bz2\n# decompress\ntar -jxvf valgrind-3.19.0.tar.bz2\n
  2. Enter the directory and run the pre-installation steps (follow the installation guide of the package you are installing)
# enter the source code folder\ncd valgrind-3.19.0\n# NOTE: This is not a general procedure\n# Please check the installation guide for your package\n./autogen.sh\n
  3. Use --prefix to set the installation path to the virtual environment's location
# configure with given installation path\n./configure --prefix=/new_data/jxzhu/envs/test_env/\n
  4. Compile and install
# make in parallel\nmake -j20\n# install software\nmake install\n

Quick test:

(base) [jxzhu@login01:] /data/jxzhu/software/valgrind-3.19.0 $ which valgrind\n/usr/bin/which: no valgrind in (...)\n(base) [jxzhu@login01:] /data/jxzhu/software/valgrind-3.19.0 $ conda activate /new_data/jxzhu/envs/test_env/\n(test_env) [jxzhu@login01:] /data/jxzhu/software/valgrind-3.19.0 $ which valgrind\n/new_data/jxzhu/envs/test_env/bin/valgrind\n
"},{"location":"wiki/software_installation/softwares/","title":"Installation Guide for Codes and Libraries","text":""},{"location":"wiki/software_installation/softwares/#first-of-all-load-the-environments","title":"First of all! Load the environments!","text":"

Before you install anything, especially when you need to compile code, make sure you know which compiler you have and which version. On your personal computer you can usually invoke the compiler directly, for instance gcc, gfortran, ifort, mpic++. On a remote cluster (High Performance Cluster), compilers are managed by module and cannot be used until they are loaded. Therefore, check which compilers are available as modules, and load the required one with a command such as module load gcc/4.9.4.
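For example, on a module-managed cluster the check-and-load workflow looks roughly like this (the module name and version are placeholders; use what your cluster actually provides):

module avail gcc\nmodule load gcc/4.9.4\ngcc --version\n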

"},{"location":"wiki/software_installation/softwares/#general-protocal-for-installation","title":"General Protocal for Installation:","text":"
  1. Compile the code
  2. Quick-test the code on the server node
  3. Write a module file for the code (we recommend managing codes with module)
  4. Test the code on a client node
  5. Write an example LSF script in /share/base/scripts
"},{"location":"wiki/software_installation/softwares/#where-to-install","title":"Where to Install?","text":"

Install in the /share/ directory; /share/ is the directory synchronized to all the nodes via NFS.

  1. Libraries: /share/apps/lib/<library name>/<version>
  2. Codes, Packages, Software: /share/apps/<package name>/<version>
"},{"location":"wiki/software_installation/softwares/#standard-in-writing-module-file","title":"Standard in Writing Module file","text":"
  1. module name: <package name>/<version>, like cp2k/6.1
"},{"location":"wiki/software_installation/softwares/#standard-in-writing-lsf-file","title":"Standard in Writing lsf file","text":"
  1. export the necessary environment variables
  2. load the prerequisite modules (a minimal example script is sketched below)
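A minimal sketch of such an LSF script (the queue name, core count, module names, and the code being run are placeholders; adapt them to the target cluster):

#!/bin/bash\n#BSUB -q normal\n#BSUB -n 24\n#BSUB -o %J.out\n#BSUB -e %J.err\n\n# load prerequisite modules\nmodule load intel/17.5.239 mpi/intel/2017.5.239\nmodule load cp2k/7.1\n\n# export necessary environment variables\nexport OMP_NUM_THREADS=1\n\nmpirun cp2k.popt input.inp > output.out\n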
"},{"location":"wiki/software_installation/softwares/#anaconda-installation-guide","title":"Anaconda Installation Guide","text":""},{"location":"wiki/software_installation/softwares/#short-introduction","title":"Short Introduction","text":"

The open-source Anaconda Distribution is the easiest way to perform Python/R data science and machine learning on Linux, Windows, and Mac OS X. Choose the installer that suits your usage. If you'd like to use Anaconda on the cluster, ask the cluster administrator whether Anaconda has already been installed, to avoid wasting the cluster's storage.

Tip

A minimal Conda has been installed on cluster51 by Yunpei Liu. Load it with the module command.

"},{"location":"wiki/software_installation/softwares/#installation-guide","title":"Installation Guide","text":"
  • Go to this website and choose the right version for you. Personally, I recommend the command-line installer for Linux and macOS, and the graphical installer for Windows
  • Follow the instructions on that page
"},{"location":"wiki/software_installation/softwares/#quip-installation-guide","title":"QUIP Installation Guide","text":""},{"location":"wiki/software_installation/softwares/#short-introduction_1","title":"Short Introduction","text":"

The QUIP package is a collection of software tools to carry out molecular dynamics simulations. It implements a variety of interatomic potentials and tight binding quantum mechanics, and is also able to call external packages, and serve as plugins to other software such as LAMMPS, CP2K and also the python framework ASE. Various hybrid combinations are also supported in the style of QM/MM, with a particular focus on materials systems such as metals and semiconductors.

Tip

The tested compiler version: and for your information."},{"location":"wiki/software_installation/softwares/#use-quip-and-quippy-in-cluster-51","title":"Use QUIP and quippy in cluster 51","text":"

If you need to use QUIP/GAP on cluster 51, please use the commands:

module load gcc/6.3.0 mpi/openmpi/3.0.0\nmodule load QUIP/GAP\n

If you want to use quippy:

module load miniconda/3\nsource activate /share/apps/QUIP/quippy-py3/\n
"},{"location":"wiki/software_installation/softwares/#install-guide","title":"Install Guide","text":"
  • Git clone from repository
git clone --recursive https://github.com/libAtoms/QUIP.git\n
  • Go to the package root and export variable
export QUIP_ARCH=linux_x86_64_gfortran\n
  • Make configuration
make config\n#if everything fine\nmake\n
"},{"location":"wiki/software_installation/softwares/#packages-and-extra-interfaces-of-quip","title":"Packages and Extra Interfaces of QUIP","text":""},{"location":"wiki/software_installation/softwares/#add-gap-packages","title":"Add GAP Packages","text":"
  • Download GAP file from here, then you obtain a tar file named GAP.tar, unzip it
tar -xvf GAP.tar\n
  • You will obtain a directory named GAP/; copy this directory into <QUIP root>/src.
cp -r GAP <QUIP root>/src/\n
  • Re-run the configuration and answer y when asked whether to install GAP
#recompile this code again\nmake\n
"},{"location":"wiki/software_installation/softwares/#build-quippy-a-quip-python-interface","title":"Build QUIPPY, A QUIP Python Interface","text":"
  • Export another environmental variable
#install for your self\nexport QUIPPY_INSTALL_OPTS=--user\n#choose the location for installation of quippy\nexport QUIPPY_INSTALL_OPTS=--prefix=<directory>\n
  • Go to <QUIP root>/src/f90wrap, and install f90wrap by:
pip install .\n
  • Back to <QUIP root>
make install-quippy\n
  • Test whether installed successfully.
make test\n
"},{"location":"wiki/software_installation/softwares/#trouble-shooting","title":"Trouble Shooting","text":""},{"location":"wiki/software_installation/softwares/#importerror-dynamic-module-does-not-define-module-export-function","title":"ImportError: dynamic module does not define module export function","text":"
Example:\nTraceback (most recent call last):\n  File \"<stdin>\", line 1, in <module>\n  File \"/share/apps/QUIP/quippy-py3/lib/python3.8/site-packages/quippy-https_github.com_libAtoms_QUIP.git_ec1ed34_dirty-py3.8-linux-x86_64.egg/quippy/__init__.py\", line 2, in <module>\n    import _quippy\nImportError: dynamic module does not define module export function (PyInit__quippy)\n

Solution: add <QUIP root>/build/${QUIP_ARCH} to your PYTHONPATH"},{"location":"wiki/software_installation/softwares/#vasp","title":"VASP","text":""},{"location":"wiki/software_installation/softwares/#short-introduction_2","title":"Short Introduction","text":"

(TODO)

"},{"location":"wiki/software_installation/softwares/#install-guide_1","title":"Install Guide","text":"
  1. Get the VASP source code and pseudopotentials.

  2. Load environment

    module load intel\n

  3. Choose makefile.include according to the platform and make

    cd vasp.5.4.4\nmake std\nmake gam\n

  4. If everything is right, you will find vasp_std in vasp.5.4.4/build/std and you can run it with mpirun -np 24 vasp_std.

"},{"location":"wiki/software_installation/softwares/#plugins","title":"Plugins","text":""},{"location":"wiki/software_installation/softwares/#wannier90","title":"Wannier90","text":"
  1. Download Wannier90 from http://www.wannier.org/download/ . Notice: currently VASP only supports Wannier90-1.2

  2. Modify the Wannier90 compile file make.sys.intel. Here we use MKL.

    #LIBDIR = /opt/intel/mkl721/lib/32\n#LIBS = -L$(LIBDIR) -lmkl_lapack -lmkl_ia32 -lguide -lpthread\nLIBDIR = $(MKLROOT)/lib/intel64\nLIBS = -L$(LIBDIR) -mkl -lpthread\n

  3. Compile and test

    cp ./config/make.inc.ifort make.inc\nmake \nmake lib # compile to get the libary: libwannier.a \nmake tests # test whether the compilation is success\n

  4. Copy the libwannier.a library file to the VASP library path and modify VASP's makefile.include.

#Precompiler options\nCPP_OPTIONS= -DHOST=\\\"LinuxIFC\\\"\\\n             -DMPI -DMPI_BLOCK=8000 \\\n             -Duse_collective \\\n             -DscaLAPACK \\\n             -DCACHE_SIZE=4000 \\\n             -Davoidalloc \\\n             -Duse_bse_te \\\n             -Dtbdyn \\\n             -Duse_shmem \\\n             -DVASP2WANNIER90   ## modify this line for Wannier90\n\nLLIBS += ../../libwannier.a  ## change here to the location of libwannier.a\n
"},{"location":"wiki/software_installation/softwares/#compilation-optimization","title":"Compilation optimization","text":"

If you use an Intel Xeon Silver/Gold/Platinum CPU, the following compilation parameter gives a 2× speedup! (Already tested on the 205 server)

OFLAG      = -O3 -xCORE-AVX512\n

"},{"location":"wiki/software_installation/softwares/#todo-in-the-future","title":"TODO in the future","text":"
  1. Install vasp_gpu version
  2. Benchmark different libraries (FFTW/MKL)
  3. other plugins: VASP-neb, vasp-beef
  4. vasp6
"},{"location":"wiki/software_installation/softwares/#lammps-installation-guide","title":"LAMMPS Installation Guide","text":""},{"location":"wiki/software_installation/softwares/#short-introduction_3","title":"Short Introduction","text":"

LAMMPS is a classical molecular dynamics code with a focus on materials modeling. It's an acronym for Large-scale Atomic/Molecular Massively Parallel Simulator.

Tip

I have installed one in cluster51, in directory /share/apps/lammps-7Aug19/. The compiler version: and for your information."},{"location":"wiki/software_installation/softwares/#install-guide_2","title":"Install Guide","text":"

  • Git clone or download package from website
# command for git\ngit clone -b stable https://github.com/lammps/lammps.git mylammps\n
  • We assume your package path is <lammps-root>
    cd <lammps-root>/src\n#choose one of the following or both\n# build a serial LAMMPS executable\nmake serial \n# build a parallel LAMMPS executable with MPI\nmake mpi        \n
    • You will see the executable binary in src/lmp_serial or src/lmp_mpi
    "},{"location":"wiki/software_installation/softwares/#packages-and-extra-interfaces-of-lammps","title":"Packages and Extra Interfaces of LAMMPS","text":"

    Tip

    Contact Cluster Administrator if you need any uninstalled packages

    "},{"location":"wiki/software_installation/softwares/#general-for-installing-package","title":"General for Installing Package","text":"
    • To install a LAMMPS package, just type make yes-<package name>, for example make yes-user-intel
    "},{"location":"wiki/software_installation/softwares/#building-user-atc-package","title":"Building USER-ATC Package","text":"
    • Before you install this package with make yes-user-atc, you should build lib-atc, the library required by the ATC package
    • Go to the directory <LAMMPS root>/lib/atc and follow the instructions in the README. Remember to load the gcc and Open MPI modules
    cd <LAMMPS root>/lib/atc\n
    • lib-atc needs the lapack and blas libraries. Check whether they are installed with:
    #check for lapack library\nldconfig -p | grep lapack\n#check for blas library\nldconfig -p | grep blas\n
    • If lapack and blas are installed, change the value of the EXTRAMAKE variable to Makefile.lammps.installed in the file Makefile.mpi.
    EXTRAMAKE= Makefile.lammps.installed\n
    • Build the library with the following command:
    make -f Makefile.mpi\n
    • Make sure you have libatc.a and Makefile.lammps in your current directory
    • Go back to <LAMMPS root>/src/ and type make mpi to compile the MPI version of LAMMPS
    "},{"location":"wiki/software_installation/softwares/#building-inteface-with-n2p2","title":"Building Inteface with n2p2","text":"
    • make sure the shared library libnnpif-shared is present in <path to n2p2>/lib/
    • export the following environment variable (optional; only needed when using the shared library)
    #export this if you use shared library, skip if you are using static library\nexport LD_LIBRARY_PATH=<path to n2p2>/lib:${LD_LIBRARY_PATH}\n
    • Go to LAMMPS root
    cd <LAMMPS root>/\nln -s <path to n2p2> lib/nnp\ncp -r <path to n2p2>/src/interface/LAMMPS/src/USER-NNP <LAMMPS root>/src\ncd <LAMMPS root>/src\nmake yes-user-nnp\nmake mpi\n
    "},{"location":"wiki/software_installation/softwares/#building-with-plumed","title":"Building with Plumed","text":"
    • Before you install, make sure Plumed has been installed
    • Go to the directory <LAMMPS root>/src/
    make lib-plumed args=\"-p <path to plumed directory>\"\nmake yes-user-plumed\nmake mpi\n
    "},{"location":"wiki/software_installation/softwares/#deepmd-installation-guide","title":"DeePMD Installation Guide","text":""},{"location":"wiki/software_installation/softwares/#short-introduction_4","title":"Short Introduction","text":"

DeePMD-kit is a package written in Python/C++, designed to minimize the effort required to build deep-learning-based models of interatomic potential energy and force fields and to perform molecular dynamics (MD). This brings new hope to addressing the accuracy-versus-efficiency dilemma in molecular simulations. Applications of DeePMD-kit span from finite molecules to extended systems and from metallic systems to chemically bonded systems. Ref. Paper

    "},{"location":"wiki/software_installation/softwares/#install-guide_3","title":"Install Guide","text":"
    • Here we describe the easiest way to install the DeePMD code.
    • Make sure a GPU is installed in your computer; you can usually check this via the GPU driver.
    • Install anaconda3 from the website. After installing anaconda3, you can use the conda command.
    • Install the CPU or GPU version of DeePMD. Installing this way also installs LAMMPS.
    #install of cpu version\nconda install deepmd-kit=*=*cpu lammps-dp=*=*cpu -c deepmodeling\n#install of gpu version\nconda install deepmd-kit=*=*gpu lammps-dp=*=*gpu -c deepmodeling\n
    • That's all for the installation. Check the installed packages with the command:
    conda list | grep deep\n
    • You will find four packages related to the DeePMD code. You can now directly use the commands dp and lmp.
    • To test the DeePMD code, download it from GitHub:
    git clone https://github.com/deepmodeling/deepmd-kit.git\n
    • Go to the directory examples/water/train/
    • Test training by
    dp train water_se_a.json\n
    "},{"location":"wiki/software_installation/softwares/#install-guide-of-deepmd","title":"Install Guide of DeePMD","text":"

Quick installation

    "},{"location":"wiki/software_installation/softwares/#n2p2-installation-guide","title":"n2p2 Installation Guide","text":""},{"location":"wiki/software_installation/softwares/#short-introduction_5","title":"Short Introduction","text":"

n2p2 is a machine learning code for training machine-learning potentials. Its original paper is J. Behler and M. Parrinello, Phys. Rev. Lett. 98, 146401 (2007)

    "},{"location":"wiki/software_installation/softwares/#install-guide_4","title":"Install Guide","text":"
    • Before Installation, make sure you have installed the Eigen Library and the GSL Library.
    • Make sure you have the gcc compiler suite (including gfortran); I have not managed to compile it with the Intel compiler. Also make sure you have Open MPI (i.e. the mpic++ command).
    • Download the n2p2 code from GitHub: https://github.com/CompPhysVienna/n2p2, for example with the following command:
    git clone https://github.com/CompPhysVienna/n2p2.git\n
    • You can see a directory named n2p2, now go into that by:
    cd n2p2/src\n
    • Modify the configure file makefile.gnu
    #modify this file, I just pick out the part you need to modify\n# Enter here paths to GSL or EIGEN if they are not in your standard include\n# path. DO NOT completely remove the entry, leave at least \"./\".\nPROJECT_GSL=<path to gsllib>/gsl/include/ # substitute <path> with real path\nPROJECT_EIGEN=<path to eigen>/eigen-eigen-323c052e1731 # substitute <path> with real path\n\n ###############################################################################\n # COMPILERS AND FLAGS\n ###############################################################################\nPROJECT_CFLAGS=-O3 -march=native -std=c++11 -fopenmp -L<pato to gsllib>gsl/lib\nPROJECT_LDFLAGS_BLAS=-lblas -lgslcblas\n
    • Save and quit this file, use the following command to compile code:
    #choose one of the following command\nmake MODE=shared # compile a binary with shared library\nmake MODE=static # compile a binary with static library, I use this one\n
    • After a successful compilation, all the executable binaries are in the n2p2/bin/ directory
    • Add n2p2/bin/ to your PATH environment variable so the binaries can be used directly. The most important one is nnp-train, which is used for training.
    • Add the n2p2 library to your LD_LIBRARY_PATH in .bashrc
    export LD_LIBRARY_PATH=<Path to n2p2>/lib/:$LD_LIBRARY_PATH\n
    "},{"location":"wiki/software_installation/softwares/#plumed-installation-guide","title":"Plumed Installation Guide","text":""},{"location":"wiki/software_installation/softwares/#short-introduction_6","title":"Short Introduction","text":"

    PLUMED is an open-source, community-developed library that provides a wide range of different methods, which include:

    • enhanced-sampling algorithms
    • free-energy methods
    • tools to analyze the vast amounts of data produced by molecular dynamics (MD) simulations.

    These techniques can be used in combination with a large toolbox of collective variables that describe complex processes in physics, chemistry, material science, and biology.

    Tip

    I have installed one in cluster51. Use module load plumed/2.6.0 to use this library. The compiler version: for your information"},{"location":"wiki/software_installation/softwares/#install-guide_5","title":"Install Guide","text":"

    • Download package from here.
    • Basic Configure
    ./configure --prefix=<path you want to install> LDFLAGS=-L'/share/apps/lib/fftw/3.3.8/lib' CPPFLAGS=-I'/share/apps/lib/fftw/3.3.8/lib '\n
    • Compile
    make -j 32\nmake install\n
    "},{"location":"wiki/software_installation/softwares/#eigen-library-installation-guide","title":"Eigen Library Installation Guide","text":""},{"location":"wiki/software_installation/softwares/#short-introduction_7","title":"Short Introduction","text":"

    Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms.

    "},{"location":"wiki/software_installation/softwares/#install-guide_6","title":"Install Guide","text":"
    • Download the package from the wiki: http://eigen.tuxfamily.org/index.php?title=Main_Page#Overview. Here the Eigen 3.3.7 release is used.
    wget http://bitbucket.org/eigen/eigen/get/3.3.7.tar.bz2\n
    • Unpack this tar file by
    tar -jxvf 3.3.7.tar.bz2\n
    • You will then have an eigen-eigen-* directory on your computer
    • These are all the steps needed to install the Eigen library (it is header-only, so nothing has to be compiled)
    "},{"location":"wiki/software_installation/softwares/#gsl-library-installation-guide","title":"GSL Library Installation Guide","text":""},{"location":"wiki/software_installation/softwares/#short-introduction_8","title":"Short Introduction","text":"

    The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. It is a free open source library under the GNU General Public License.

    This guide is from: website tutorial

    Tip

    I have installed one in cluster51, in directory /share/apps/lib/gsl-2.6. The compiler version: for your information"},{"location":"wiki/software_installation/softwares/#install-guide_7","title":"Install Guide","text":"

    • Download the latest version of gsl library. For reference: ftp://ftp.gnu.org/gnu/gsl/
    wget ftp://ftp.gnu.org/gnu/gsl/gsl-latest.tar.gz\n
    • Place the file in whatever directory you want to install and unpack the file with the following command:
    tar -zxvf gsl-latest.tar.gz\n
    • This will create a directory called gsl-*.* in that directory. Change into it.
    cd gsl-*.*\n
    • The next step is to configure the installation and tell the system where to install the files. Create a directory to install your gsl package, say <Path to libgsl>/gsl with the following command
    mkdir <Path to libgsl>/gsl\n
    • Now configure the installation and tell it to use your new directory. This step may take a few minutes.
    ./configure --prefix=<Path to libgsl>/gsl\n
    • If there are no errors, compile the library. This step will take several minutes.
    make\n
    • Now it is necessary to check and test the library before actually installing it. This step will take some time.
    make check\n
    • If there are no errors, go ahead and install the library with:
    make install\n
    • Now we can write a test program to see if the library works. Create the following program and name it example.c
    #include <stdio.h>\n#include <gsl/gsl_sf_bessel.h>\n\nint\nmain (void)\n{\n    double x = 15.0;\n    double y = gsl_sf_bessel_J0 (x);\n    printf (\"J0(%g) = %.18e\\n\", x, y);\n    return 0;\n}\n
    • Compile and link the program with the following commands (but use the correct path for your username):
    gcc -Wall -I<Path to libgsl>/gsl/include -c example.c\ngcc -L<Path to libgsl>/gsl/lib example.o -lgsl -lgslcblas -lm\n
    • Now run your program!
    ./a.out\n
    • If it is successfully installed, it will print a number on your screen.
    • Add the library path to LD_LIBRARY_PATH in .bashrc:
    export LD_LIBRARY_PATH=<path to libgsl>/lib:$LD_LIBRARY_PATH\n
    "},{"location":"wiki/software_installation/softwares/#libxc-library-installation-guide","title":"Libxc Library Installation Guide","text":"
    • Download the latest stable version of libxc from official website:
    wget http://www.tddft.org/programs/libxc/down.php?file=4.3.4/libxc-4.3.4.tar.gz\n
    "},{"location":"wiki/software_installation/softwares/#fftw-library-installation-guide","title":"FFTW Library Installation Guide","text":""},{"location":"wiki/software_installation/softwares/#short-introduction_9","title":"Short Introduction","text":"

    FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data (as well as of even/odd data, i.e. the discrete cosine/sine transforms or DCT/DST).

    Tip

    I have installed one in cluster51, in directory /share/apps/lib/fftw/3.3.8. Use module load fftw/3.3.8 to use this library. The compiler version: for your information"},{"location":"wiki/software_installation/softwares/#install-guide_8","title":"Install Guide","text":"

    • Download the release version from official website using wget
    wget http://www.fftw.org/fftw-3.3.8.tar.gz\n
    • Unpack the package
    tar -xvf fftw-3.3.8.tar.gz\n
    • Go to the directory fftw-3.3.8
    ./configure --prefix=<path to you want to install>    \\\n            --enable-shared  \\\n            --enable-threads \\\n            --enable-sse2    \\\n            --enable-avx     \n
    • If configure is finished
    make\n#check if you install finished\nmake check\n#install to the final directory which you have set in --prefix\nmake install\n
    "},{"location":"wiki/software_installation/softwares/#cp2k-installation-guide","title":"CP2K Installation Guide","text":"
    • Download the release version from official website using wget like
    wget https://github.com/cp2k/cp2k/releases/download/v6.1.0/cp2k-6.1.tar.bz2\n
    • Unzip the cp2k package
    tar -xvf cp2k-6.1.tar.bz2\n
    • Go into directory cp2k-6.1/tools/toolchains/
    • Stop here! You should check your compiler version; if you are on the High Performance Cluster, please load the modules for the compiler and MPI/Open MPI
    • Note: for gcc version, gcc <= 7.4.0
    • Execute the following script to see the help message
    ./install_cp2k_toolchain.sh -h\n
    • Choose which package you want to install before cp2k.

    Some packages are essential for cp2k, please check this in the official web site

    • At minimum, --with-openblas=install is required if you want the compilation to succeed; an example invocation is sketched below.
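For example, a minimal invocation could look like this (a sketch only; add or drop --with-* flags according to the features you need):

./install_cp2k_toolchain.sh --with-openblas=install --with-mpich=system\n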
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/","title":"DeepMD-kit\u5feb\u901f\u5b89\u88c5","text":"

    \u4e3a\u51cf\u5c11\u540e\u7eed\u5b89\u88c5\u7684\u56f0\u96be\uff0c\u8bf7\u4f18\u5148\u53c2\u8003\u6700\u4f73\u5b9e\u8df5\u3002\u672c\u6587\u4ecb\u7ecd\u7684\u65b9\u6cd5\u6210\u578b\u65f6\uff0cDP\u5c1a\u672a\u5b9e\u73b0\u5bf9Lammps\u7684\u89e3\u8026\uff0c\u4f46\u4ecd\u7136\u53ef\u7528\u3002

    \u672c\u90e8\u5206\u4e3b\u4f53\u5199\u4e8e2021\u5e74\uff0c\u622a\u81f3\u76ee\u524d\uff082022.08\uff09\u4ecd\u9002\u7528\uff0c\u5e76\u4e14\u968f\u7248\u672c\u5347\u7ea7\u4ecd\u5728\u66f4\u65b0\u3002

    \u6559\u7a0b\u4e2d\u4f7f\u7528\u7684\u5c1a\u4e14\u662fCUDA 10.1\uff0c\u4f46\u5bf9CUDA 11.x\u4e5f\u9002\u7528\u3002

    \u80cc\u666f\uff1a\u4ee5 Zeus \u96c6\u7fa4\u4e3a\u4f8b\uff0c\u5728\u670d\u52a1\u5668\u901a\u8fc7\u6e90\u4ee3\u7801\u7f16\u8bd1\u5b89\u88c5DeepMD-kit\u548c\u5305\u542b\u5b8c\u6574\u63a5\u53e3\u7684LAMMPS\u3002\u867d\u7136\u5b98\u65b9\u5df2\u7ecf\u63d0\u4f9b\u4e86\u901a\u8fc7 Conda \u4e00\u952e\u5b89\u88c5\u7684\u65b9\u6cd5\uff0c\u4f46\u7531\u4e8e\u6b64\u6cd5\u6240\u5b89\u88c5\u7684\u5404\u4e2a\u7ec4\u4ef6\u5747\u4e3a\u9884\u7f16\u8bd1\u7248\u672c\uff0c\u56e0\u800c\u65e0\u6cd5\u505a\u66f4\u591a\u62d3\u5c55\u548c\u6539\u52a8\uff0c\u4e14\u901a\u8fc7 Conda \u5b89\u88c5\u7684 Protobuf \u5b58\u5728\u7248\u672c\u51b2\u7a81\uff0c\u65e0\u6cd5\u8fdb\u4e00\u6b65\u7f16\u8bd1\u5176\u4ed6\u63a5\u53e3\u3002\u8fd9\u91cc\u4ecb\u7ecd\u4e00\u79cd\u65b9\u6cd5\uff0c\u901a\u8fc7 Conda \u5b89\u88c5\u901a\u5e38\u4e0d\u9700\u8981\u8f83\u5927\u6539\u52a8\u7684TensorFlow C++ Interface\uff0c\u5176\u4f59\u90e8\u5206\u4ecd\u624b\u52a8\u7f16\u8bd1\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/#_1","title":"\u521d\u59cb\u73af\u5883\u8bf4\u660e","text":"

    \u4ee5\u4e0b\u8fc7\u7a0b\u4ee5 Zeus \u96c6\u7fa4\u4e3a\u4f8b\uff0c\u64cd\u4f5c\u7cfb\u7edf\u53ca\u7248\u672c\u4e3aCentOS 7\uff0c\u7ba1\u7406\u8282\u70b9\u8054\u7f51\uff0c\u91c7\u7528module\u4f5c\u4e3a\u73af\u5883\u7ba1\u7406\u3002

    \u4ee5\u4e0b\u662f\u9884\u5148\u914d\u7f6e\u597d\u7684\u73af\u5883\uff0c\u5bf9\u4e8e\u5176\u4ed6\u96c6\u7fa4\uff0c\u53ef\u4ee5\u6b64\u8981\u6c42\u51c6\u5907\u73af\u5883\uff0c\u5176\u4e2d Intel MPI \u53ef\u4ee5\u7528 MPICH \u4ee3\u66ff\uff0c\u5176\u4f59\u7ec4\u4ef6\u8bf7\u81ea\u884c\u5b89\u88c5\u3002\u6ce8\u610fCUDA 10.1\u5bf9Nvidia\u9a71\u52a8\u7248\u672c\u6709\u8981\u6c42\uff0c\u9700\u8981\u9884\u5148\u68c0\u67e5\u597d\uff08\u53ef\u7528nvidia-smi\u5feb\u901f\u67e5\u770b\uff09\u3002

    • \u901a\u8fc7yum\u5b89\u88c5
    • Git >= 1.8.2
    • \u901a\u8fc7module\u52a0\u8f7d
    • CUDA 10.1
    • Miniconda 3
    • GCC >= 7.4.0
    • Intel MPI 2017 \uff08\u6682\u672a\u5bf9\u5176\u4ed6\u7248\u672c\u8fdb\u884c\u6d4b\u8bd5\uff09

    \u7248\u672c\u53f7\u4ec5\u4f9b\u53c2\u8003\uff0c\u5b9e\u9645\u5b89\u88c5\u53ef\u80fd\u4f1a\u4e0d\u4e00\u6837\uff0c\u53c2\u8003\u6267\u884c\u5373\u53ef\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/#_2","title":"\u521b\u5efa\u65b0\u7684\u73af\u5883","text":"

    \u9996\u5148\u51c6\u5907\u5fc5\u8981\u7684\u4f9d\u8d56\u3002

    \u68c0\u67e5\u53ef\u7528\u7684\u6a21\u5757\uff0c\u5e76\u52a0\u8f7d\u5fc5\u8981\u7684\u6a21\u5757\uff1a

    module avail\nmodule add cuda/10.1\nmodule add gcc/7.4.0\n

Note that GCC 7.4.0 is loaded here; if a version lower than 4.9.4 is used (i.e. GCC is not loaded), dp_ipi will not be compiled.

Then create a virtual environment, following the steps in the Anaconda user guide.

Assuming the virtual environment is named deepmd, replace <your env name> at the end of those steps with deepmd. With the settings of that guide, the virtual environment will be created under /data/user/conda/env/deepmd (assuming the username is user).

Make sure to install the required Python environment into the newly created virtual environment. If no Python version is specified (for example the step conda create -n <your env name> python in the guide), the latest version recommended by conda is installed; specify the version explicitly if you need another one, e.g. conda create -n deepmd python=3.8.

Since the GPU nodes of Zeus have no internet access, the required driver library libcuda.so has to be manually linked under the name libcuda.so.1 into some path you have write permission for, /some/local/path, and both paths have to be added to the environment variables.

    ln -s /share/cuda/10.0/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1\nexport LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/share/cuda/10.0/lib64/stubs:/some/local/path\n

Tip

If you are installing on the Zeus cluster, the administrator has already linked libcuda.so.1 under /share/cuda/10.0/lib64/stubs/, so there is no need to create the symbolic link yourself, and likewise /some/local/path does not need to be added to the environment variables; the symbolic link libcuda.so to the driver library is still required, though. Note that after this step, the stubs path must be removed from the environment variables for actual production runs.

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/#tensorflowc","title":"\u5b89\u88c5Tensorflow\u7684C++ \u63a5\u53e3","text":"

    \u4ee5\u4e0b\u5b89\u88c5\uff0c\u5047\u8bbe\u8f6f\u4ef6\u5305\u4e0b\u8f7d\u8def\u5f84\u5747\u4e3a/some/workspace\uff0c \u4ee5TensorFlow 2.3.0\u7248\u672c\u3001DeePMD-kit 1.3.3 \u7248\u672c\u4e3a\u4f8b\u8fdb\u884c\u8bf4\u660e\uff0c\u5176\u4ed6\u7248\u672c\u7684\u6b65\u9aa4\u8bf7\u53c2\u7167\u4fee\u6539\u3002

    \u9996\u5148\u521b\u5efa\u5e76\u8fdb\u5165\u865a\u62df\u73af\u5883\uff0c\u8fd9\u91cc\u5047\u8bbe\u547d\u540d\u4e3adeepmd\uff1a

    conda create -n deepmd python=3.8\nconda activate deepmd\n

Search the repository for the available versions of the TensorFlow C++ interface:

    conda search libtensorflow_cc -c https://conda.deepmodeling.com\n

The result looks like this:

    Loading channels: done\n# Name                       Version           Build  Channel\nlibtensorflow_cc              1.14.0  cpu_h9a2eada_0\nlibtensorflow_cc              1.14.0  gpu_he292aa2_0\nlibtensorflow_cc               2.0.0  cpu_h9a2eada_0\nlibtensorflow_cc               2.0.0  gpu_he292aa2_0\nlibtensorflow_cc               2.1.0  cpu_cudaNone_0\nlibtensorflow_cc               2.1.0  gpu_cuda10.0_0\nlibtensorflow_cc               2.1.0  gpu_cuda10.1_0\nlibtensorflow_cc               2.1.0   gpu_cuda9.2_0\nlibtensorflow_cc               2.3.0  cpu_cudaNone_0\nlibtensorflow_cc               2.3.0  gpu_cuda10.1_0\nlibtensorflow_cc               2.4.1  gpu_cuda11.0_0\nlibtensorflow_cc               2.4.1  gpu_cuda11.1_0\nlibtensorflow_cc               2.5.0  cpu_cudaNone_0\nlibtensorflow_cc               2.5.0  gpu_cuda10.1_0\nlibtensorflow_cc               2.5.0  gpu_cuda11.3_0\nlibtensorflow_cc               2.7.0  cpu_h6ddf1b9_0\nlibtensorflow_cc               2.7.0 cuda101h50fd26c_0\nlibtensorflow_cc               2.7.0 cuda113h3372e5c_0\nlibtensorflow_cc               2.7.0 cuda113hbf71e95_1\nlibtensorflow_cc               2.9.0  cpu_h681ccd4_0\nlibtensorflow_cc               2.9.0 cuda102h929c028_0\nlibtensorflow_cc               2.9.0 cuda116h4bf587c_0\n

The version we want here is the 2.3.0 GPU build with CUDA 10.1, so install it with:

    conda install libtensorflow_cc=2.3.0=gpu_cuda10.1_0 -c https://conda.deepmodeling.org\n

If the environment you are installing into has no actual GPU driver (for example a cluster login node), or CudaToolkit needs to be installed via Conda, you may need to force a GPU environment as described here. For example:

    CONDA_OVERRIDE_CUDA=\"11.3\" conda install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com\n

Note that the value of CONDA_OVERRIDE_CUDA must match the CUDA version supported by the GPU and the one you intend to use.

Tip

Note that the A100 only supports TF 2.4.0 or newer and CUDA 11.2 or newer; choose accordingly when installing.

Tip

Some versions may complain about a missing libiomp5.so during the later compilation; depending on your situation, either load the Intel environment in advance (see the LAMMPS compilation part below) or run conda install intel-openmp.

Tip

The conda command can be rather slow; you may consider switching to mamba, which greatly speeds up Conda and is fully compatible. Simply install it following the link above and then replace conda with mamba in the commands.

If the installation succeeds, define the environment variable:

    export tensorflow_root=/data/user/conda/env/deepmd\n

i.e. the path where the virtual environment was created.
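To quickly confirm that the C++ library really ended up inside the environment, one can check (a sketch; the path follows from the environment created above):

ls $tensorflow_root/lib | grep libtensorflow_cc\n# libtensorflow_cc.so and related files are expected to show up\n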

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/#deepmd-kitpython","title":"\u5b89\u88c5DeePMD-kit\u7684Python\u63a5\u53e3","text":"

    \u4ee5\u9632\u4e07\u4e00\u53ef\u4ee5\u5347\u7ea7\u4e0bpip\u7684\u7248\u672c\uff1a

    pip install --upgrade pip\n

Next install the TensorFlow Python interface:

    pip install tensorflow==2.3.0\n

If it reports that the package is already installed, use the --upgrade option to overwrite it. If it reports insufficient permissions, use the --user option to install under the current account.

Then download the DeePMD-kit source code (replace v1.3.3 with the version you want to install, e.g. v2.0.3):

    cd /some/workspace\ngit clone --recursive https://github.com/deepmodeling/deepmd-kit.git deepmd-kit -b v1.3.3\n

Remember the --recursive flag when running git clone, so that all files are downloaded correctly; otherwise the compilation will fail.

Tip

If you forgot --recursive, the following remedy can be used:

    git submodule update --init --recursive\n

    \u82e5\u96c6\u7fa4\u4e0a Cmake 3\u6ca1\u6709\u5b89\u88c5\uff0c\u53ef\u4ee5\u7528pip\u8fdb\u884c\u5b89\u88c5\uff1a

    pip install cmake\n

    \u4fee\u6539\u73af\u5883\u53d8\u91cf\u4ee5\u4f7f\u5f97cmake\u6b63\u786e\u6307\u5b9a\u7f16\u8bd1\u5668\uff1a

    export CC=`which gcc`\nexport CXX=`which g++`\nexport FC=`which gfortran`\n

    \u82e5\u8981\u542f\u7528CUDA\u7f16\u8bd1\uff0c\u8bf7\u5bfc\u5165\u73af\u5883\u53d8\u91cf\uff1a

    export DP_VARIANT=cuda\n

    \u968f\u540e\u901a\u8fc7pip\u5b89\u88c5DeePMD-kit\uff1a

    cd deepmd-kit\npip install .\n
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/#deepmd-kitc","title":"\u5b89\u88c5DeePMD-kit\u7684C++ \u63a5\u53e3","text":"

    \u5ef6\u7eed\u4e0a\u9762\u7684\u6b65\u9aa4\uff0c\u4e0b\u9762\u5f00\u59cb\u7f16\u8bd1DeePMD-kit C++\u63a5\u53e3\uff1a

    deepmd_source_dir=`pwd`\ncd $deepmd_source_dir/source\nmkdir build \ncd build\n

    \u5047\u8bbeDeePMD-kit C++ \u63a5\u53e3\u5b89\u88c5\u5728/some/workspace/deepmd_root\u4e0b\uff0c\u5b9a\u4e49\u5b89\u88c5\u8def\u5f84deepmd_root\uff1a

    export deepmd_root=/some/workspace/deepmd_root\n

    \u5728build\u76ee\u5f55\u4e0b\u8fd0\u884c\uff1a

    cmake -DLAMMPS_VERSION_NUMBER=<value> -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..\n

    \u8bf7\u6839\u636e\u81ea\u5df1\u5373\u5c06\u5b89\u88c5\u7684Lammps\u7248\u672c\u6307\u5b9a-DLAMMPS_VERSION_NUMBER\u7684\u503c\uff0c\u76ee\u524d\u6700\u65b0\u7248\u672c\u7684DeePMD-kit\u9ed8\u8ba4\u4e3a20210929\uff0c\u5982\u9700\u5b89\u88c5Lammps 29Oct2020\uff0c\u8bf7\u8bbe\u5b9a\u4e3a20201029\u3002
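
For example, for LAMMPS 29Oct2020 the configuration step above becomes:

cmake -DLAMMPS_VERSION_NUMBER=20201029 -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..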

If both CMake 2 and CMake 3 were installed via yum, replace cmake above with cmake3.

Finally, build and install:

make
make install

If there are no errors, check that the expected files were produced:

$ ls $deepmd_root/bin
dp_ipi
$ ls $deepmd_root/lib
libdeepmd_ipi.so  libdeepmd_op.so  libdeepmd.so
"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/#lammpsdeepmd-kit","title":"Installing the DeePMD-kit Module for LAMMPS","text":"

Next, install:

cd $deepmd_source_dir/source/build
make lammps

The USER-DEEPMD LAMMPS extension package will now appear under $deepmd_source_dir/source/build.

Download the LAMMPS package and copy the interface code into the src directory:

cd /some/workspace
# Download Lammps latest release
wget -c https://lammps.sandia.gov/tars/lammps-stable.tar.gz
tar xf lammps-stable.tar.gz
cd lammps-*/src/
cp -r $deepmd_source_dir/source/build/USER-DEEPMD .
"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/#make","title":"Installing with make","text":"

Select the packages to compile (to install other packages, consult the official LAMMPS documentation):

make yes-user-deepmd
make yes-kspace

Without make yes-kspace, the build fails because pppm.h is missing.

Other packages can also be installed in bulk with:

make yes-all                        # install all packages
make no-lib                         # uninstall packages that require extra libraries
make no-ext                         # uninstall packages that require external libraries

Note that plugins requiring prior configuration or pre-compilation, such as Plumed, SMD, and COLVARS, should be installed following the official LAMMPS documentation, while acceleration packages such as INTEL and GPU may need to be deselected manually if you do not want to build them.

The official documentation has changed substantially and no historical versions are provided, so it only applies to the latest official release (currently LAMMPS 29Sep2021 and later, and the applicable range may narrow with future updates); take care when working with older versions.

Load the MPI environment and build the LAMMPS executable with MPI:

module load intel/17.5.239 mpi/intel/2017.5.239
make mpi -j4

Note

The GCC version used here should match the one used earlier to build the TensorFlow C++ interface and the DeePMD-kit C++ interface; otherwise you may get errors such as @GLIBCXX_3.4.XX. If GCC 7.4.0 was loaded in the earlier installation steps, keep that environment loaded here as well.
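
For instance, a sketch assuming the cluster provides the gcc/7.4.0 module referred to above:

module load gcc/7.4.0
module load intel/17.5.239 mpi/intel/2017.5.239
make mpi -j4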

After the steps above, the LAMMPS executable lmp_mpi has been built, and you can run it to perform MD simulations with a trained potential.

"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/#cmake","title":"Installing with CMake","text":"

You can also build directly with CMake, which is cleaner and faster.

If you need Plumed, first install GSL via Conda:

conda install gsl

Then edit lammps-stable/cmake/CMakeLists.txt, find the line starting with set(STANDARD_PACKAGES, and add USER-DEEPMD inside the closing parenthesis:

set(STANDARD_PACKAGES
  ...  
  USER-DEEPMD)

Then, under the lammps-stable directory, create a build directory:

cd lammps-stable
mkdir build
cd build

Configure the build:

cmake -C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake \
-D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=mpi \
-D WITH_JPEG=no -D WITH_PNG=no -D WITH_FFMPEG=no \
-D PKG_PLUMED=yes -D PKG_COLVARS=yes -D PKG_USER-DEEPMD=ON \
-D CMAKE_INSTALL_PREFIX=/data/user/conda/env/deepmd \
-D CMAKE_CXX_FLAGS="-std=c++14 -DHIGH_PREC -DLAMMPS_VERSION_NUMBER=20220623 -I${deepmd_root}/include -I${tensorflow_root}/include -L${deepmd_root}/lib -L${tensorflow_root}/lib -Wl,--no-as-needed -ldeepmd_cc -ltensorflow_cc -ltensorflow_framework -Wl,-rpath=${deepmd_root}/lib -Wl,-rpath=${tensorflow_root}/lib" \
../cmake

Note that CMAKE_INSTALL_PREFIX specifies the install path; adjust it to your actual setup.

Note

The graphics output modules (JPEG, PNG, FFMPEG) are additionally disabled here, because the graphics libraries shipped with Conda conflict with the system ones (not yet resolved) and the default make build does not install them anyway.

Note

For unknown reasons, CMake sometimes fails to find the GSL installed by Conda. If Plumed is built in advance and loaded in runtime mode, GSL is not needed: -D PLUMED_MODE=runtime
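
In the cmake command above, that corresponds to switching the Plumed options to something like the following (a sketch; runtime mode loads the Plumed kernel at run time, typically located via the PLUMED_KERNEL environment variable):

-D PKG_PLUMED=yes -D PLUMED_MODE=runtime \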

Then compile:

make -j 16
make install

After the steps above, the LAMMPS executable lmp_mpi has been built, and you can run it to perform MD simulations with a trained potential.

"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/#dp-cp2k","title":"DP-CP2K Installation Guide","text":"

First clone the corresponding repository:

git clone https://github.com/Cloudac7/cp2k.git -b deepmd_latest --recursive --depth=1

Then run the corresponding toolchain script:

cd tools/toolchain/
./install_cp2k_toolchain.sh --enable-cuda=no --with-deepmd=$deepmd_root --with-tfcc=$tensorflow_root --deepmd-mode=cuda --mpi-mode=no --with-libint=no --with-libxc=no --with-libxsmm=no

Following the instructions printed at the end of the script, copy the arch files and source the required environment variables. Finally, return to the main directory and compile:

make -j 4 ARCH=local VERSION="ssmp sdbg"

After a successful build, the executable is generated under exe/, namely cp2k.sopt.

Note that DP-CP2K does not support MPI yet, so please build this serial version separately. Also, because of I/O issues, CP2K is more than 50% slower than LAMMPS; unless you really need CP2K, we recommend using LAMMPS for MD simulations, as it offers more features and better acceleration support.

The developer has also run into some difficulties, so the submitted PR has not been updated and, after staying silent for too long, has been closed upstream. If you have experience implementing a shared state in CP2K, please contact the author. Thank you.

There is currently some difficulty in implementing a shared state in CP2K runs to reduce I/O at each MD step. The developer has not yet found a proper solution, which is why the PR went silent. If you can share any relevant experience, please contact me. Thanks!

"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/","title":"DeepMD-kit Installation: Legacy Version","text":"

This part was written in 2020 and applies to DeePMD-kit 1.x and TensorFlow 1.14. It may not apply to more recent versions; please refer instead to the installation best practices and the quick installation tutorial.

Background: using the Zeus cluster as an example, install DeepMD-kit and a LAMMPS build with the full interface on the server.

References:

    DeepMD-kit

    TensorFlow

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#_1","title":"\u521d\u59cb\u73af\u5883\u8bf4\u660e","text":"

    \u4ee5\u4e0b\u8fc7\u7a0b\u4ee5 Zeus \u96c6\u7fa4\u4e3a\u4f8b\uff0c\u64cd\u4f5c\u7cfb\u7edf\u53ca\u7248\u672c\u4e3aCentOS 7\uff0c\u91c7\u7528module\u4f5c\u4e3a\u73af\u5883\u7ba1\u7406\u3002

    • \u901a\u8fc7yum\u5b89\u88c5\uff1a
    • Cmake 3.7
    • GCC 4.8.5
    • Git 1.8.2
    • \u901a\u8fc7module\u52a0\u8f7d
    • CUDA 10.0
    • Miniconda3 (Python 3.7)
    • GCC 4.9.4
    • Intel MPI 2017
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#_2","title":"\u521b\u5efa\u65b0\u7684\u73af\u5883","text":"

    \u9996\u5148\u51c6\u5907\u5fc5\u8981\u7684\u4f9d\u8d56\u3002

    \u68c0\u67e5\u53ef\u7528\u7684\u6a21\u5757\uff0c\u5e76\u52a0\u8f7d\u5fc5\u8981\u7684\u6a21\u5757\uff1a

    module avail\nmodule add cuda/10.0\nmodule add gcc/4.9.4\n

    \u6ce8\u610f\u8fd9\u91cc\u5bfc\u5165\u7684\u662fgcc 4.9.4\u7248\u672c\uff0c\u5982\u679c\u91c7\u7528\u66f4\u4f4e\u7684\u7248\u672c\uff08\u4e0d\u5bfc\u5165gcc\uff09\u5219dp_ipi\u4e0d\u4f1a\u88ab\u7f16\u8bd1\u3002

    \u7136\u540e\u521b\u5efa\u865a\u62df\u73af\u5883\uff0c\u6b65\u9aa4\u8bf7\u53c2\u8003Anaconda \u4f7f\u7528\u6307\u5357\u3002

    \u5047\u8bbe\u521b\u5efa\u7684\u865a\u62df\u73af\u5883\u540d\u79f0\u662f deepmd\uff0c\u5219\u8bf7\u5c06\u6b65\u9aa4\u6700\u540e\u7684 <your env name> \u66ff\u6362\u4e3a deepmd\u3002\u82e5\u91c7\u7528\u8be5\u6b65\u9aa4\u7684\u8bbe\u7f6e\uff0c\u5219\u865a\u62df\u73af\u5883\u5c06\u88ab\u521b\u5efa\u5728/data/user/conda/env/deepmd\u4e0b\uff08\u5047\u8bbe\u7528\u6237\u540d\u4e3auser\uff09\u3002
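
As a minimal sketch (assuming the Python 3.7 shipped with the Miniconda3 module listed above), the environment can be created and activated with:

conda create -n deepmd python=3.7
conda activate deepmd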

Since the GPU nodes have no internet access, we need to manually link the required driver libraries libcuda.so and libcuda.so.1 into some path /some/local/path and add it to the environment variables.

ln -s /share/cuda/10.0/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/share/cuda/10.0/lib64/stubs:/some/local/path

Tip

If installing on the Zeus cluster, the administrator has already linked libcuda.so.1 under /share/cuda/10.0/lib64/stubs/, so there is no need to create an extra symlink, and likewise /some/local/path does not need to be added to the environment variables.

"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#tensorflowc","title":"Installing the TensorFlow C++ Interface","text":"

The following assumes all packages are downloaded to /some/workspace and uses TensorFlow 1.14.0 and DeePMD-kit 1.2.0 as examples; adapt the steps for other versions.

"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#bazel","title":"Downloading the corresponding bazel installer","text":"
cd /some/workspace
wget https://github.com/bazelbuild/bazel/releases/download/0.24.0/bazel-0.24.0-installer-linux-x86_64.sh
chmod +x bazel-0.24.0-installer-linux-x86_64.sh
./bazel-0.24.0-installer-linux-x86_64.sh --user
export PATH="$HOME/bin:$PATH"

Note

Mind bazel compatibility issues; for a suitable bazel version, consult the notes in the official TensorFlow documentation.

"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#tensorflow","title":"Downloading the TensorFlow source code","text":"
cd /some/workspace 
git clone https://github.com/tensorflow/tensorflow tensorflow -b v1.14.0 --depth=1
cd tensorflow
"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#tensorflow-c-interface","title":"Compiling the TensorFlow C++ Interface","text":"

Run configure in the tensorflow folder to set the build options.

./configure
Please specify the location of python. [Default is xxx]:

Found possible Python library paths:
  /xxx/xxx/xxx
Please input the desired Python library path to use.  Default is [xxx]

Do you wish to build TensorFlow with XLA JIT support? [Y/n]:
XLA JIT support will be enabled for TensorFlow.

Do you wish to build TensorFlow with OpenCL SYCL support? [y/N]:
No OpenCL SYCL support will be enabled for TensorFlow.

Do you wish to build TensorFlow with ROCm support? [y/N]:
No ROCm support will be enabled for TensorFlow.

Do you wish to build TensorFlow with CUDA support? [y/N]: y
CUDA support will be enabled for TensorFlow.

Do you wish to build TensorFlow with TensorRT support? [y/N]:
No TensorRT support will be enabled for TensorFlow.

Found CUDA 10.0 in:
    /share/cuda/10.0/lib64
    /share/cuda/10.0/include
Found cuDNN 7 in:
    /share/cuda/10.0/lib64
    /share/cuda/10.0/include

Please specify a list of comma-separated CUDA compute capabilities you want to build with.
You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
Please note that each additional compute capability significantly increases your build time and binary size, and that TensorFlow only supports compute capabilities >= 3.5 [Default is: 3.5,7.0]:

Do you want to use clang as CUDA compiler? [y/N]:
nvcc will be used as CUDA compiler.

Please specify which gcc should be used by nvcc as the host compiler. [Default is /share/apps/gcc/4.9.4/bin/gcc]:

Do you wish to build TensorFlow with MPI support? [y/N]:
No MPI support will be enabled for TensorFlow.

Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native -Wno-sign-compare]:

Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]:
Not configuring the WORKSPACE for Android builds.

Preconfigured Bazel build configs. You can use any of the below by adding "--config=<>" to your build command. See .bazelrc for more details.
    --config=mkl             # Build with MKL support.
    --config=monolithic      # Config for mostly static monolithic build.
    --config=gdr             # Build with GDR support.
    --config=verbs           # Build with libverbs support.
    --config=ngraph          # Build with Intel nGraph support.
    --config=numa            # Build with NUMA support.
    --config=dynamic_kernels    # (Experimental) Build kernels into separate shared objects.
    --config=v2              # Build TensorFlow 2.x instead of 1.x.
Preconfigured Bazel build configs to DISABLE default on features:
    --config=noaws           # Disable AWS S3 filesystem support.
    --config=nogcp           # Disable GCP support.
    --config=nohdfs          # Disable HDFS support.
    --config=noignite        # Disable Apache Ignite support.
    --config=nokafka         # Disable Apache Kafka support.
    --config=nonccl          # Disable NVIDIA NCCL support.
Configuration finished

Note

If you are using the GCC 4.9.4 loaded earlier, determine the GCC install path from the output of which gcc. In most cases, however, the configure script detects the correct path automatically.

Then start the build; since it takes quite a while, consider putting the process in the background with screen or tmux.

bazel build -c opt --verbose_failures //tensorflow:libtensorflow_cc.so

Note

When installing a newer TensorFlow (e.g. 2.1.0), if you get an error that the git -c command is not available, upgrade git to the latest version. You may need to build it locally and add it to your environment variables.

Tip

By default, bazel builds under ~/.cache/bazel. Since the build needs a lot of disk space, you can point bazel at a different temporary directory via an environment variable before running it, e.g. /data/user/.bazel:

    export TEST_TMPDIR=/data/user/.bazel

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#_3","title":"\u6574\u5408\u8fd0\u884c\u5e93\u4e0e\u5934\u6587\u4ef6","text":"

    \u5047\u8bbeTensorflow C++ \u63a5\u53e3\u5b89\u88c5\u5728/some/workspace/tensorflow_root\u4e0b\uff0c\u5219\u5b9a\u4e49\u73af\u5883\u53d8\u91cf\uff1a

    export tensorflow_root=/some/workspace/tensorflow_root\n

    \u521b\u5efa\u4e0a\u8ff0\u6587\u4ef6\u5939\u5e76\u4ece\u7f16\u8bd1\u7ed3\u679c\u4e2d\u62bd\u53d6\u8fd0\u884c\u5e93\u548c\u5934\u6587\u4ef6\u3002

    mkdir -p $tensorflow_root\n\nmkdir $tensorflow_root/lib\ncp -d bazel-bin/tensorflow/libtensorflow_cc.so* $tensorflow_root/lib/\ncp -d bazel-bin/tensorflow/libtensorflow_framework.so* $tensorflow_root/lib/\ncp -d $tensorflow_root/lib/libtensorflow_framework.so.1 $tensorflow_root/lib/libtensorflow_framework.so\n\nmkdir -p $tensorflow_root/include/tensorflow\ncp -r bazel-genfiles/* $tensorflow_root/include/\ncp -r tensorflow/cc $tensorflow_root/include/tensorflow\ncp -r tensorflow/core $tensorflow_root/include/tensorflow\ncp -r third_party $tensorflow_root/include\ncp -r bazel-tensorflow/external/eigen_archive/Eigen/ $tensorflow_root/include\ncp -r bazel-tensorflow/external/eigen_archive/unsupported/ $tensorflow_root/include\nrsync -avzh --include '*/' --include '*.h' --include '*.inc' --exclude '*' bazel-tensorflow/external/protobuf_archive/src/ $tensorflow_root/include/\nrsync -avzh --include '*/' --include '*.h' --include '*.inc' --exclude '*' bazel-tensorflow/external/com_google_absl/absl/ $tensorflow_root/include/absl\n

    \u6e05\u7406\u76ee\u6807\u76ee\u5f55\u4e0b\u8d58\u4f59\u7684\u6e90\u4ee3\u7801\u6587\u4ef6\uff0c\u4fdd\u7559\u7f16\u8bd1\u597d\u7684\u63a5\u53e3\u3002

    cd $tensorflow_root/include\nfind . -name \"*.cc\" -type f -delete\n
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#deepmd-kitpython","title":"\u5b89\u88c5DeePMD-kit\u7684Python\u63a5\u53e3","text":"

    \u9996\u5148\u5b89\u88c5Tensorflow\u7684Python\u63a5\u53e3

    pip install tensorflow-gpu==1.14.0\n

    \u82e5\u63d0\u793a\u5df2\u5b89\u88c5\uff0c\u8bf7\u4f7f\u7528--upgrade\u9009\u9879\u8fdb\u884c\u8986\u76d6\u5b89\u88c5\u3002\u82e5\u63d0\u793a\u6743\u9650\u4e0d\u8db3\uff0c\u8bf7\u4f7f\u7528--user\u9009\u9879\u5728\u5f53\u524d\u8d26\u53f7\u4e0b\u5b89\u88c5\u3002

    \u7136\u540e\u4e0b\u8f7dDeePMD-kit\u7684\u6e90\u4ee3\u7801\u3002

    cd /some/workspace\ngit clone --recursive https://github.com/deepmodeling/deepmd-kit.git deepmd-kit\n

    \u5728\u8fd0\u884cgit clone\u65f6\u8bb0\u5f97\u8981--recursive\uff0c\u8fd9\u6837\u624d\u53ef\u4ee5\u5c06\u5168\u90e8\u6587\u4ef6\u6b63\u786e\u4e0b\u8f7d\u4e0b\u6765\uff0c\u5426\u5219\u5728\u7f16\u8bd1\u8fc7\u7a0b\u4e2d\u4f1a\u62a5\u9519\u3002

    \u63d0\u793a

    \u5982\u679c\u4e0d\u614e\u6f0f\u4e86--recursive\uff0c \u53ef\u4ee5\u91c7\u53d6\u4ee5\u4e0b\u7684\u8865\u6551\u65b9\u6cd5\uff1a

    git submodule update --init --recursive\n

    \" %}

Then install DeePMD-kit via pip:

cd deepmd-kit
pip install .
"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#deepmd-kitc","title":"Installing the DeePMD-kit C++ Interface","text":"

Continuing from the steps above, now build the DeePMD-kit C++ interface:

deepmd_source_dir=`pwd`
cd $deepmd_source_dir/source
mkdir build 
cd build

Assuming the DeePMD-kit C++ interface is to be installed under /some/workspace/deepmd_root, define the install path deepmd_root:

export deepmd_root=/some/workspace/deepmd_root

Set environment variables so that cmake picks up the correct compilers:

export CC=`which gcc`
export CXX=`which g++`

Run in the build directory:

cmake -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..

If both CMake 2 and CMake 3 were installed via yum, replace cmake above with cmake3.

Finally, build and install:

make
make install

If there are no errors, check that the expected files were produced:

$ ls $deepmd_root/bin
dp_ipi
$ ls $deepmd_root/lib
libdeepmd_ipi.so  libdeepmd_op.so  libdeepmd.so

Depending on the GCC version, $deepmd_root/bin/dp_ipi may be absent.

"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/#lammpsdeepmd-kit","title":"Installing the DeePMD-kit Module for LAMMPS","text":"

Next, install:

cd $deepmd_source_dir/source/build
make lammps

The USER-DEEPMD LAMMPS extension package will now appear under $deepmd_source_dir/source/build.

Download the LAMMPS package and build LAMMPS in the usual way:

cd /some/workspace
# Download Lammps latest release
wget -c https://lammps.sandia.gov/tars/lammps-stable.tar.gz
tar xf lammps-stable.tar.gz
cd lammps-*/src/
cp -r $deepmd_source_dir/source/build/USER-DEEPMD .

Select the packages to compile (to install other packages, consult the official LAMMPS documentation):

make yes-user-deepmd
make yes-kspace

Without make yes-kspace, the build fails because pppm.h is missing.

Load the MPI environment and build the LAMMPS executable with MPI:

module load intel/17u5 mpi/intel/17u5
make mpi -j4

Note

The GCC version used here should match the one used earlier to build the TensorFlow C++ interface and the DeePMD-kit C++ interface; otherwise you may get errors such as @GLIBCXX_3.4.XX. If GCC 4.9.4 was loaded in the earlier installation steps, keep that environment loaded here as well.

After the steps above, the LAMMPS executable lmp_mpi has been built, and you can run it to perform MD simulations with a trained potential.

"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_ikkem/","title":"DeePMD-kit Installation in Practice: the Jiageng Supercomputer","text":"

The Jiageng supercomputing center does not provide a centrally installed DeepMD-kit, so users need to compile it themselves before use. This guide follows the best practices and builds on the modules pre-installed on the Jiageng supercomputer. DeepMD-kit v2.2.0 is used here as the example.

"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_ikkem/#_1","title":"First-time Installation","text":"
1. Create a virtual environment (named deepmd in this example)

  module load anaconda/2020.3
  conda create -n deepmd python=3.9
2. (Optional) Configure what happens on activation/deactivation of the virtual environment; alternatively, run the code in activate.sh manually each time

  # replace your own username here!
  mkdir -p $CONDA_PREFIX/etc/conda/activate.d
  touch $CONDA_PREFIX/etc/conda/activate.d/activate.sh
  mkdir -p $CONDA_PREFIX/etc/conda/deactivate.d
  touch $CONDA_PREFIX/etc/conda/deactivate.d/deactivate.sh
  conda env config vars set LD_LIBRARY_PATH=$tensorflow_root/lib:$deepmd_root/lib:$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
  • $CONDA_PREFIX/etc/conda/activate.d/activate.sh
  module load intel/2018.3
  module load gcc/9.2
  module load cmake/3.21
  module load cuda/11.3
  module load lammps/2022.6.23

  export CC=`which gcc`
  export CXX=`which g++`
  export FC=`which gfortran`

  # replace CONDA_PREFIX and deepmd_source_dir!!!
  export deepmd_source_dir=/public/home/username/apps/deepmd-2.2.0
  export tensorflow_root=$deepmd_source_dir/_skbuild/tensorflow_root
  export deepmd_root=$deepmd_source_dir/_skbuild/deepmd_root
  export LAMMPS_PLUGIN_PATH=$deepmd_root/lib/deepmd_lmp
  • $CONDA_PREFIX/etc/conda/deactivate.d/deactivate.sh
  module unload intel/2018.3
  module unload gcc/9.2
  module unload cmake/3.21
  module unload cuda/11.3
  module unload lammps/2022.6.23

  unset deepmd_source_dir
  unset tensorflow_root
  unset deepmd_root
  unset LAMMPS_PLUGIN_PATH

  After setting this up, re-activate the virtual environment; the corresponding modules will then be loaded automatically each time the environment is activated.
3. Install the training code

  pip install tensorflow==2.7 --upgrade
  pip install scikit-build ninja
  pip install protobuf==3.20
  cd $deepmd_source_dir
  export DP_VARIANT=cuda
  pip install .
4. (Optional) Install the third-party interfaces

  mkdir -p $tensorflow_root/lib 
  cd $tensorflow_root
  ln -s $CONDA_PREFIX/lib/python3.9/site-packages/tensorflow/include .
  cd lib
  ln -s $CONDA_PREFIX/lib/python3.9/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so libtensorflow_cc.so
  ln -s $CONDA_PREFIX/lib/python3.9/site-packages/tensorflow/libtensorflow_framework.so.2 .
  ln -s libtensorflow_framework.so.2 libtensorflow_framework.so

  mkdir -p $deepmd_source_dir/source/build
  mkdir -p $deepmd_root
  cd $deepmd_source_dir/source/build
  cmake -DLAMMPS_SOURCE_ROOT=/public/software/lammps/lammps-2022.6.23-intel -DUSE_TF_PYTHON_LIBS=TRUE -DUSE_CUDA_TOOLKIT=TRUE -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
  make -j20
  make install
"},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_ikkem/#_2","title":"Updating the Code","text":"
1. Python code

  cd $deepmd_source_dir
  export DP_VARIANT=cuda
  pip install .
2. C++ code

  cd $deepmd_source_dir/source/build
  make -j20
  make install
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/","title":"DeepMD-kit\u5b89\u88c5\u6700\u4f73\u5b9e\u8df5","text":"

    \u80cc\u666f\uff1a\u4ee5 Zeus \u96c6\u7fa4\u4e3a\u4f8b\uff0c\u5728\u670d\u52a1\u5668\u901a\u8fc7\u6e90\u4ee3\u7801\u7f16\u8bd1\u5b89\u88c5DeepMD-kit\u548c\u5305\u542b\u5b8c\u6574\u63a5\u53e3\u7684LAMMPS\u3002\u867d\u7136\u5b98\u65b9\u5df2\u7ecf\u63d0\u4f9b\u4e86\u901a\u8fc7 Conda \u4e00\u952e\u5b89\u88c5\u7684\u65b9\u6cd5\uff0c\u4f46\u7531\u4e8e\u6b64\u6cd5\u6240\u5b89\u88c5\u7684\u5404\u4e2a\u7ec4\u4ef6\u5747\u4e3a\u9884\u7f16\u8bd1\u7248\u672c\uff0c\u56e0\u800c\u9488\u5bf9\u8bfe\u9898\u5b9e\u9645\u60c5\u51b5\u65e0\u6cd5\u505a\u66f4\u591a\u62d3\u5c55\u548c\u6539\u52a8\uff0c\u4e14\u901a\u8fc7 Conda \u5b89\u88c5\u7684 Protobuf \u5b58\u5728\u7248\u672c\u51b2\u7a81\uff0c\u65e0\u6cd5\u8fdb\u4e00\u6b65\u7f16\u8bd1\u5176\u4ed6\u63a5\u53e3\u3002\u8fd9\u91cc\u4ecb\u7ecd\u4e00\u79cd\u65b9\u6cd5\uff0c\u901a\u8fc7 Conda \u5b89\u88c5\u901a\u5e38\u4e0d\u9700\u8981\u6539\u52a8\u7684TensorFlow C++ Interface\uff0c\u5176\u4f59\u90e8\u5206\u4ecd\u624b\u52a8\u7f16\u8bd1\u3002\u7531\u4e8e\u76ee\u524d\u65b0\u7248Lammps\u5df2\u7ecf\u63d0\u4f9bPlugin\u652f\u6301\uff0cDeePMD\u4ea6\u652f\u6301\u901a\u8fc7Plugin\u8c03\u7528\uff0c\u6545\u53ef\u4ee4\u7ec4\u4ef6\u4e4b\u95f4\u76f8\u4e92\u89e3\u8026\u3001\u51cf\u5c11\u540e\u7eed\u5b89\u88c5\u7684\u5de5\u5e8f\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#_1","title":"\u521d\u59cb\u73af\u5883\u8bf4\u660e","text":"

    \u4ee5\u4e0b\u8fc7\u7a0b\u4ee5 Zeus \u96c6\u7fa4\u4e3a\u4f8b\uff0c\u64cd\u4f5c\u7cfb\u7edf\u53ca\u7248\u672c\u4e3aCentOS 7\uff0c\u7ba1\u7406\u8282\u70b9\u8054\u7f51\uff0c\u91c7\u7528module\u4f5c\u4e3a\u73af\u5883\u7ba1\u7406\u3002

    \u4ee5\u4e0b\u662f\u9884\u5148\u914d\u7f6e\u597d\u7684\u73af\u5883\uff0c\u5bf9\u4e8e\u5176\u4ed6\u96c6\u7fa4\uff0c\u53ef\u4ee5\u6b64\u8981\u6c42\u51c6\u5907\u73af\u5883\uff0c\u5176\u4e2d Intel MPI \u53ef\u4ee5\u7528 MPICH \u4ee3\u66ff\uff0c\u5176\u4f59\u7ec4\u4ef6\u8bf7\u81ea\u884c\u5b89\u88c5\u3002\u6ce8\u610fCUDA 11.3\u5bf9Nvidia\u9a71\u52a8\u7248\u672c\u6709\u8981\u6c42\uff0c\u9700\u8981\u9884\u5148\u68c0\u67e5\u597d\uff08\u53ef\u7528nvidia-smi\u5feb\u901f\u67e5\u770b\uff09\u3002

    • \u901a\u8fc7yum\u5b89\u88c5
    • Git >= 1.8.2
    • \u901a\u8fc7module\u52a0\u8f7d
    • CUDA 11.3
    • Miniconda 3
    • GCC >= 7.4.0
    • Intel MPI 2017 \uff08\u6682\u672a\u5bf9\u5176\u4ed6\u7248\u672c\u8fdb\u884c\u6d4b\u8bd5\uff09

    \u7248\u672c\u53f7\u4ec5\u4f9b\u53c2\u8003\uff0c\u5b9e\u9645\u5b89\u88c5\u56e0\u4eba\u800c\u5f02\uff0c\u53c2\u8003\u6267\u884c\u5373\u53ef\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#deepmd-kit_1","title":"DeePMD-kit \u5e38\u7528\u7ec4\u4ef6\u5173\u7cfb","text":"
    flowchart TB\n  tfcpp(TensorFlow C++ Interface) -.-> tfpy(TensorFlow Python Interface)\n  tfpy --> dppy(DeePMD Python Interface)\n  dpcpp(DeePMD C++ Interface) -.-> dppy\n  tfcpp --> dpcpp\n  dpcpp --> lmp(DeePMD Lammps API)\n  tfcpp --> lmp

    \u5982\u56fe\u6240\u793a\u5c55\u793a\u4e86DeePMD-kit\u5404\u4e2a\u5e38\u7528\u7ec4\u4ef6\u4e4b\u95f4\u7684\u8054\u7cfb\uff0c\u9700\u8981\u58f0\u660e\u7684\u662f\uff0c\u56fe\u793a\u5e76\u975e\u5bf9\u4ee3\u7801\u7684\u4e25\u8c28\u89e3\u6790\uff0c\u4ec5\u4ec5\u662f\u5bf9\u7ec4\u7ec7\u7ed3\u6784\u7684\u76f4\u89c2\u8868\u73b0\u3002

    \u52bf\u51fd\u6570\u8bad\u7ec3\u8fc7\u7a0b\u901a\u5e38\u4f9d\u8d56\u4e8eDeePMD Python Interface\uff0c\u8fd9\u4e00\u90e8\u5206\u5728\u7528 Pip \u5b89\u88c5\u65f6\u5373\u4f9d\u8d56\u4e8eTensorFlow\u7684Python Interface\uff0c\u56e0\u6b64\u5728\u56fe\u4e2d\u7528\u5b9e\u7ebf\u7bad\u5934\u8868\u793a\u3002\u800c\u7528Pip\u5b89\u88c5\u7684TensorFlow Wheel\u5df2\u7ecf\u9884\u5148\u7f16\u8bd1\u4e86\u5e95\u5c42\u6240\u9700\u7684Tensorflow C++ Interface\uff0c\u8fd9\u4e00\u9690\u542b\u7684\u4f9d\u8d56\u7528\u865a\u7ebf\u7bad\u5934\u8868\u793a\u3002\u7c7b\u4f3c\u5730\uff0cDeePMD-kit\u5728Pip\u5b89\u88c5\u65f6\u4e5f\u4f1a\u8c03\u7528CMake\u6765\u7f16\u8bd1\u4e00\u90e8\u5206\u6240\u9700\u7684C++\u5e93\uff0c\u56e0\u800c\u4e5f\u5b58\u5728\u7c7b\u4f3c\u7684\u5173\u7cfb\u3002

    \u5f53\u7528\u8bad\u7ec3\u597d\u7684\u52bf\u51fd\u6570\u6765\u8fdb\u884cMD\u6a21\u62df\u65f6\uff0c\u5219\u9700\u8981\u8fd0\u884cLammps\u7b49\u5206\u5b50\u52a8\u529b\u5b66\u8f6f\u4ef6\u8c03\u7528DeePMD-kit\u63a5\u53e3\u3002\u4ee5Lammps\u4e3a\u4f8b\uff0c\u73b0\u6709\u7684\u4e24\u79cd\u65b9\u5f0f\u5206\u522b\u662f\uff1a - \u5728Lammps\u5b89\u88c5\u65f6\u5373\u989d\u5916\u7f16\u8bd1DeePMD API\uff08\u5373USER-DEEPMD\uff09 - \u7f16\u8bd1DeePMD Plugin\uff0c\u7531\u652f\u6301Plugin\u7684Lammps\u7248\u672c\u8c03\u7528

    \u8fd9\u4e24\u79cd\u65b9\u5f0f\u5728\u7f16\u8bd1\u65f6\u5747\u9700\u8981\u8c03\u7528DeePMD-kit\u548cTensorFlow\u7684C++ Interface\uff0c\u6545\u5728\u56fe\u4e2d\u4e5f\u7528\u5b9e\u7ebf\u8868\u793a\u3002\u800cTensorFlow C++ Interface\u5b9e\u9645\u4e0a\u53ef\u7531\u6e90\u4ee3\u7801\u7ed3\u5408\u5fc5\u8981\u7684\u5e95\u5c42\u4f9d\u8d56\uff08\u5982GCC\u3001CUDA\u7b49\uff09\u72ec\u7acb\u7f16\u8bd1\uff0cDeePMD-kit C++ Interface\u53ea\u9700\u5728TensorFlow C++ Interface\u57fa\u7840\u4e0a\u8fdb\u884c\u7f16\u8bd1\uff08\u56fe\u4e2d\u5b9e\u7ebf\u7bad\u5934\uff09\u3002

    \u56e0\u800c\u5728\u5b9e\u9645\u5904\u7406\u5b89\u88c5\u5173\u7cfb\u65f6\uff0c\u6211\u4eec\u4e5f\u53ef\u4ee5\u91c7\u7528\u76f8\u5bf9\u72ec\u7acb\u7684\u7f16\u8bd1\u65b9\u5f0f\u6765\u6700\u5927\u5316\u89e3\u8026\u7ec4\u4ef6\u3002\u4e0b\u6587\u7684\u601d\u8def\u5c06\u6309\u4ee5\u4e0b\u6b65\u9aa4\u5c55\u5f00\uff1a

    1. \u5efa\u7acb\u72ec\u7acb\u7684Conda\u73af\u5883\uff0c\u7528 Pip \u5b89\u88c5 TensorFlow \u548c DeePMD-kit\uff0c\u63d0\u4f9b\u52bf\u51fd\u6570\u8bad\u7ec3\u529f\u80fd\uff1b
    2. \u7ed3\u5408\u5fc5\u8981\u7684\u7ec4\u4ef6\u3001\u73af\u5883\u7b49\u7f16\u8bd1Lammps\uff0c\u63d0\u4f9b\u7ecf\u5178\u5206\u5b50\u52a8\u529b\u5b66\u6a21\u62df\u529f\u80fd\uff1b
    3. \u7f16\u8bd1 DeePMD C++ Interface\uff0c\u5728\u6b64\u57fa\u7840\u4e0a\u7f16\u8bd1 DeePMD-kit Lammps Plugin\u4f9bLammps\u8c03\u7528\uff0c\u63d0\u4f9b DeePMD \u6a21\u62df\u529f\u80fd\uff1b
    4. \u7f16\u8bd1 DeePMD CP2K API \u548c\u5bf9\u5e94\u7684CP2K\u7248\u672c\uff08No free lunch.\uff09
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#deepmd-kit-python-interface","title":"\u5b89\u88c5DeePMD-kit Python Interface","text":""},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#_2","title":"\u521b\u5efa\u65b0\u7684\u73af\u5883","text":"

First, prepare the necessary dependencies.

Check the available modules and load the required ones:

module avail
module add cuda/11.3
module add gcc/7.4.0

Note that GCC 7.4.0 is loaded here; if a version lower than 4.9.4 is used (i.e. GCC is not loaded), dp_ipi will not be compiled.

Then create a virtual environment; see the Anaconda usage guide for the steps.

Assuming the virtual environment is named deepmd, replace <your env name> at the end of those steps with deepmd. With the settings from that guide, the virtual environment will be created under /data/user/conda/env/deepmd (assuming the username is user).

conda create -n deepmd python=3.9
conda activate deepmd

Note: be sure to install the required Python version in the virtual environment you create. If no version number is given (as in the step conda create -n <your env name> python in that guide), Conda installs its recommended latest version; specify it explicitly if you need something else, e.g. conda create -n deepmd python=3.10.

For nodes without internet access, at compile time the driver stub library libcuda.so must be manually linked under the name libcuda.so.1 into some path /some/local/path where you have permissions, and both paths added to the environment variables, so that the build can go through:

ln -s /data/share/apps/cuda/11.3/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/share/apps/cuda/11.3/lib64/stubs:/some/local/path

Tip

If installing on the Zeus cluster, the administrator has already linked libcuda.so.1 under /data/share/apps/cuda/11.3/lib64/stubs/, so there is no need to create an extra symlink, and likewise /some/local/path does not need to be added to the environment variables; the driver stub symlink libcuda.so is still required, though. Note that after this step is performed, the stub path must be removed from the environment variables at actual run time.
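
As a rough sketch of that cleanup (an assumption about your shell setup; adjust to taste), the stub entries can be filtered out of LD_LIBRARY_PATH before running on a real GPU node:

export LD_LIBRARY_PATH=$(echo "$LD_LIBRARY_PATH" | tr ':' '\n' | grep -v 'stubs' | paste -sd: -)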

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#deepmd-kitpython","title":"\u5b89\u88c5DeePMD-kit\u7684Python\u63a5\u53e3","text":"

    \u4ee5\u9632\u4e07\u4e00\u53ef\u4ee5\u5347\u7ea7\u4e0bpip\u7684\u7248\u672c\uff1a

    pip install --upgrade pip\n

    \u63a5\u4e0b\u6765\u5b89\u88c5Tensorflow\u7684Python\u63a5\u53e3

    pip install tensorflow\n

    \u82e5\u63d0\u793a\u5df2\u5b89\u88c5\uff0c\u8bf7\u4f7f\u7528--upgrade\u9009\u9879\u8fdb\u884c\u8986\u76d6\u5b89\u88c5\u3002\u82e5\u63d0\u793a\u6743\u9650\u4e0d\u8db3\uff0c\u8bf7\u4f7f\u7528--user\u9009\u9879\u5728\u5f53\u524d\u8d26\u53f7\u4e0b\u5b89\u88c5\u3002

    \u7136\u540e\u4e0b\u8f7dDeePMD-kit\u7684\u6e90\u4ee3\u7801\uff08\u6ce8\u610f\u628av2.1.5\u66ff\u6362\u4e3a\u9700\u8981\u5b89\u88c5\u7684\u7248\u672c\uff0c\u5982v2.0.3\u7b49\uff09

    cd /some/workspace\ngit clone --recursive https://github.com/deepmodeling/deepmd-kit.git deepmd-kit -b v2.1.5\n

    \u5728\u8fd0\u884cgit clone\u65f6\u8bb0\u5f97\u8981--recursive\uff0c\u8fd9\u6837\u624d\u53ef\u4ee5\u5c06\u5168\u90e8\u6587\u4ef6\u6b63\u786e\u4e0b\u8f7d\u4e0b\u6765\uff0c\u5426\u5219\u5728\u7f16\u8bd1\u8fc7\u7a0b\u4e2d\u4f1a\u62a5\u9519\u3002

    \u63d0\u793a

    \u5982\u679c\u4e0d\u614e\u6f0f\u4e86--recursive\uff0c \u53ef\u4ee5\u91c7\u53d6\u4ee5\u4e0b\u7684\u8865\u6551\u65b9\u6cd5\uff1a

    git submodule update --init --recursive\n

    \u82e5\u96c6\u7fa4\u4e0a CMake 3\u6ca1\u6709\u5b89\u88c5\uff0c\u53ef\u4ee5\u7528pip\u8fdb\u884c\u5b89\u88c5\uff1a

    pip install cmake\n

    \u4fee\u6539\u73af\u5883\u53d8\u91cf\u4ee5\u4f7f\u5f97cmake\u6b63\u786e\u6307\u5b9a\u7f16\u8bd1\u5668\uff1a

    export CC=`which gcc`\nexport CXX=`which g++`\nexport FC=`which gfortran`\n

    \u82e5\u8981\u542f\u7528CUDA\u7f16\u8bd1\uff0c\u8bf7\u5bfc\u5165\u73af\u5883\u53d8\u91cf\uff1a

    export DP_VARIANT=cuda\n

    \u968f\u540e\u901a\u8fc7pip\u5b89\u88c5DeePMD-kit\uff1a

    cd deepmd-kit\npip install .\n
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#lammps","title":"\u5b89\u88c5Lammps","text":"

    \u6ce8\u610f\u8fd9\u4e00\u90e8\u5206\u53ef\u4ee5\u4eceDeePMD\u5b89\u88c5\u4e2d\u89e3\u8026\u51fa\u6765\uff0c\u56e0\u800c\u517c\u987e\u5bf9Lammps\u7684\u4e0d\u540c\u9700\u6c42\uff0c\u800c\u4e0d\u5fc5\u4e3aDeePMD\u4e13\u95e8\u7f16\u8bd1\u4e00\u4e2aLammps\u53ef\u6267\u884c\u6587\u4ef6\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#_3","title":"\u73af\u5883\u51c6\u5907","text":"

    \u9996\u5148\u52a0\u8f7d\u6240\u9700\u7684\u73af\u5883\uff0c\u5305\u62ecCMake\u3001Intel MPI\u7b49\u3002\u82e5\u4e0d\u9700\u8981\u7f16\u8bd1Lammps\u539f\u751f\u7684GPU\u52a0\u901f\uff0c\u53ef\u4e0d\u9700\u8981\u52a0\u8f7dCUDA\u73af\u5883\u3002\u6ce8\u610f\u9700\u8981\u628aIntel MPI\u63d0\u4f9b\u7684\u5934\u6587\u4ef6\uff08mpi.h\u7b49\uff09\u6240\u5728\u8def\u5f84\u52a0\u5165C_INCLUDE_PATH\u4e2d\u3002

    \u4ecd\u4ee5Zeus\u4e3a\u4f8b\uff0c\u5982\u4e0b\u6240\u793a\u3002\u6ce8\u610f\u8fd9\u91cc\u4f7f\u7528\u7684\u662f\u5168\u5c40\u7684CMake\uff0c\u5982\u679c\u4e0e\u4e0a\u4e00\u90e8\u5206\u91c7\u7528\u540c\u4e00\u4e2a\u73af\u5883\uff0c\u53ef\u4e0d\u9700\u91cd\u590d\u52a0\u8f7d\u3002

    module load cmake/3.20\nmodule load intel/17.5.239 mpi/intel/2017.5.239 gcc/7.4.0\n# if not included\nexport C_INCLUDE_PATH=<intel_installation_dir>/impi/2017.4.239/include64:$C_INCLUDE_PATH\n

    \u82e5\u9700\u8981\u7f16\u8bd1\u5bf9\u5e94\u7684Lammps\u7ec4\u4ef6\uff08\u5982Plumed\u3001NetCDF\u7b49\uff09\uff0c\u8bf7\u5bf9\u5e94\u52a0\u8f7d\u6240\u9700\u7684\u73af\u5883\uff1a

    module load netcdf/4.9.0_intel17\nmodule load plumed\n

    \u5982\u9700\u7f16\u8bd1Lammps\u539f\u751f\u7684GPU\u52a0\u901f\uff0c\u53ef\u52a0\u8f7dCUDA\u73af\u5883\uff0c\u6ce8\u610f\u8fd9\u4f1a\u4f7f\u5f97\u7f16\u8bd1\u5f97\u5230\u7684Lammps\u65e0\u6cd5\u5728\u4e0d\u5305\u62ecGPU\u7684\u8282\u70b9\u4e0a\u8fd0\u884c\u3002

    # gpu acceleration support\nmodule load cuda/11.3\n

    Warning

    \u82e5\u7f16\u8bd1Lammps\u539f\u751f\u7684GPU\u52a0\u901f\uff0c\u8bf7\u6ce8\u610f\u539f\u751f\u9ed8\u8ba4\u91c7\u7528\u534a\u7cbe\u5ea6\u3002Lammps\u5728\u5f00\u542fGPU\u52a0\u901f\u65f6\u901f\u5ea6\u53ef\u6709\u8f83\u5927\u63d0\u5347\uff0c\u4f46\u7cbe\u5ea6\u95ee\u9898\u5df2\u77e5\u53ef\u80fd\u5bfc\u81f4DeePMD\u52bf\u51fd\u6570\u6a21\u62df\u8bef\u5dee\u4e0a\u5347\uff08\u4f53\u73b0\u4e3aModel Deviation\u76f8\u6bd4\u4e0d\u5f00\u542fGPU\u52a0\u901f\u663e\u8457\u4e0a\u5347\uff09\uff0c\u8bf7\u9488\u5bf9\u4f53\u7cfb\u505a\u6d4b\u8bd5\u786e\u8ba4\u8bef\u5dee\u662f\u5426\u7b26\u5408\u9884\u671f\u3002DeePMD\u63a5\u53e3\u5b98\u65b9\u672a\u63d0\u4f9bLammps\u7684GPU\u52a0\u901f\u652f\u6301\uff0c\u4e14\u9ed8\u8ba4\u7f16\u8bd1\u7684\u662f\u53cc\u7cbe\u5ea6\u7248\u672c\uff0c\u8bf7\u52a1\u5fc5\u6ce8\u610f\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#_4","title":"\u914d\u7f6e\u7f16\u8bd1","text":"

    \u521b\u5efa\u6587\u4ef6\u5939

    cd <lammps_source_code>\nmkdir build\ncd build\n

    \u8fdb\u884c\u7f16\u8bd1

    cmake  -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \\\n-DCMAKE_Fortran_COMPILER=gfortran \\\n-D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=mpi \\\n-D BUILD_SHARED_LIBS=yes \\\n-D CMAKE_INSTALL_PREFIX=<lammps_installation_dir> \\\n-D CMAKE_INSTALL_LIBDIR=lib \\\n-D CMAKE_INSTALL_FULL_LIBDIR=<lammps_installation_dir>/lib \\\n-C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake ../cmake\n

    CMAKE_INSTALL_PREFIX \u53ef\u4ee5\u6839\u636e\u5b89\u88c5\u5b9e\u9645\u8def\u5f84\u4fee\u6539\uff0c\u4f46\u8fd9\u4e00\u65b9\u6cd5\u5f97\u5230\u7684\u662f\u5171\u4eab\u5e93\uff08 *.so \uff09\uff0c\u6240\u4ee5\u5305\u62ecLammps\u6e90\u4ee3\u7801\u5728\u5185\u90fd\u4e0d\u8981\u79fb\u52a8\u3002

    \u82e5\u5f00\u542f\u5bf9\u5e94\u63d2\u4ef6\uff0c\u8bf7\u6ce8\u610f\u5728 ../cmake \u524d\u63d2\u5165\u5bf9\u5e94\u9009\u9879\uff0c\u5982\uff1a

    -D PKG_PLUMED=yes -D PLUMED_MODE=shared \\\n-D PKG_H5MD=yes -D PKG_NETCDF=yes \\\n-D NETCDF_INCLUDE_DIR=<netcdf_installation_dir>/include \n

    \u82e5\u5e0c\u671b\u5f00\u542fGPU\u52a0\u901f\uff0c\u8bf7\u589e\u52a0\u9009\u9879\uff1a

    -D PKG_GPU=on -D GPU_API=cuda\n
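
As a usage note not covered above: with the GPU package built in, LAMMPS's own accelerated styles are typically enabled at run time through the suffix and package command-line switches, for example (a sketch assuming one GPU per node and a hypothetical input file name):

lmp_mpi -sf gpu -pk gpu 1 -i input.lammps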
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#_5","title":"\u5f00\u59cb\u7f16\u8bd1","text":"

    \u8fd0\u884c

    make\nmake install\n
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#deepmd-kit-lammps-plugin","title":"\u7f16\u8bd1DeePMD-kit Lammps Plugin","text":""},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#_6","title":"\u65b9\u6cd5\u4e00\uff1a\u9759\u6001\u7f16\u8bd1","text":""},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#tensorflowc","title":"\u5b89\u88c5Tensorflow\u7684C++ \u63a5\u53e3","text":"

    \u4ee5\u4e0b\u5b89\u88c5\uff0c\u5047\u8bbe\u8f6f\u4ef6\u5305\u4e0b\u8f7d\u8def\u5f84\u5747\u4e3a /some/workspace\uff0c \u4ee5 TensorFlow 2.7.0\u7248\u672c\u3001DeePMD-kit 2.1.5 \u7248\u672c\u4e3a\u4f8b\u8fdb\u884c\u8bf4\u660e\uff0c\u5176\u4ed6\u7248\u672c\u7684\u6b65\u9aa4\u8bf7\u53c2\u7167\u4fee\u6539\u3002\u6ce8\u610f\u4e3a\u4fdd\u8bc1\u6a21\u578b\u517c\u5bb9\u6027\uff0c\u7248\u672c\u53f7\u6700\u597d\u4e0e Python Interface\u5bf9\u5e94\u3002

    \u672c\u6b65\u9aa4\u9700\u8981\u4f7f\u7528 Conda\uff0c\u56e0\u6b64\u5728\u524d\u6587\u57fa\u7840\u4e0a\u8fdb\u884c\u3002

    \u641c\u7d22\u4ed3\u5e93\uff0c\u67e5\u627e\u53ef\u7528\u7684 TensorFlow \u7684 C++ \u63a5\u53e3\u7248\u672c\u3002

    conda search libtensorflow_cc -c https://conda.deepmodeling.com\n

    \u7ed3\u679c\u5982\u4e0b\uff1a

    Loading channels: done\n# Name                       Version           Build  Channel\nlibtensorflow_cc              1.14.0  cpu_h9a2eada_0\nlibtensorflow_cc              1.14.0  gpu_he292aa2_0\nlibtensorflow_cc               2.0.0  cpu_h9a2eada_0\nlibtensorflow_cc               2.0.0  gpu_he292aa2_0\nlibtensorflow_cc               2.1.0  cpu_cudaNone_0\nlibtensorflow_cc               2.1.0  gpu_cuda10.0_0\nlibtensorflow_cc               2.1.0  gpu_cuda10.1_0\nlibtensorflow_cc               2.1.0   gpu_cuda9.2_0\nlibtensorflow_cc               2.3.0  cpu_cudaNone_0\nlibtensorflow_cc               2.3.0  gpu_cuda10.1_0\nlibtensorflow_cc               2.4.1  gpu_cuda11.0_0\nlibtensorflow_cc               2.4.1  gpu_cuda11.1_0\nlibtensorflow_cc               2.5.0  cpu_cudaNone_0\nlibtensorflow_cc               2.5.0  gpu_cuda10.1_0\nlibtensorflow_cc               2.5.0  gpu_cuda11.3_0\nlibtensorflow_cc               2.7.0  cpu_h6ddf1b9_0\nlibtensorflow_cc               2.7.0 cuda101h50fd26c_0\nlibtensorflow_cc               2.7.0 cuda113h3372e5c_0\nlibtensorflow_cc               2.7.0 cuda113hbf71e95_1\nlibtensorflow_cc               2.9.0  cpu_h681ccd4_0\nlibtensorflow_cc               2.9.0 cuda102h929c028_0\nlibtensorflow_cc               2.9.0 cuda116h4bf587c_0\n

    \u8fd9\u91cc\u6240\u5e0c\u671b\u5b89\u88c5\u7684\u7248\u672c\u662f2.7.0\u7684GPU\u7248\u672c\uff0cCUDA\u7248\u672c\u4e3a11.3\uff0c\u56e0\u6b64\u8f93\u5165\u4ee5\u4e0b\u547d\u4ee4\u5b89\u88c5\uff1a

    conda install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com\n

    \u82e5\u6240\u5b89\u88c5\u7684\u73af\u5883\u6ca1\u6709\u5b9e\u9645\u7684GPU\u9a71\u52a8\uff08\u6bd4\u5982\u96c6\u7fa4\u7684\u767b\u5f55\u8282\u70b9\uff09\u6216\u9700\u8981\u7528\u5230Conda\u5b89\u88c5CudaToolkit\uff0c\u53ef\u80fd\u9700\u8981\u53c2\u7167\u6b64\u5904\u8bf4\u660e\u5f3a\u5236\u6307\u5b9aGPU\u73af\u5883\u3002\u6bd4\u5982\uff1a

    CONDA_OVERRIDE_CUDA=\"11.3\" conda install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com\n

    \u8bf7\u6ce8\u610f CONDA_OVERRIDE_CUDA \u7684\u503c\u9700\u8981\u4e0eGPU\u652f\u6301\u4ee5\u53ca\u5e0c\u671b\u7528\u5230\u7684CUDA\u7248\u672c\u76f8\u5339\u914d\u3002

    \u63d0\u793a

    \u6ce8\u610fA100\u4ec5\u652f\u6301TF 2.4.0\u4ee5\u4e0a\u3001CUDA11.2\u4ee5\u4e0a\uff0c\u5b89\u88c5\u65f6\u8bf7\u5bf9\u5e94\u9009\u62e9\u3002

    \u63d0\u793a

    \u4e2a\u522b\u7248\u672c\u5728\u540e\u7eed\u7f16\u8bd1\u65f6\u53ef\u80fd\u4f1a\u63d0\u793a\u9700\u8981libiomp5.so\uff0c\u8bf7\u6839\u636e\u5b9e\u9645\u60c5\u51b5\u786e\u5b9a\u662f\u5426\u9700\u8981\u8f7d\u5165Intel\u73af\u5883\u6216\u8005conda install intel-openmp\u3002

    \u63d0\u793a

    conda\u547d\u4ee4\u53ef\u80fd\u901f\u5ea6\u8f83\u6162\uff0c\u4e5f\u53ef\u4ee5\u8003\u8651\u5207\u6362\u4e3amamba\uff0c\u540e\u8005\u53ef\u5927\u5e45\u52a0\u901fConda\u7684\u6027\u80fd\uff0c\u4e14\u5b8c\u5168\u517c\u5bb9\u3002\u53ea\u9700\u53c2\u7167\u524d\u8ff0\u94fe\u63a5\u5b89\u88c5\u540e\u5c06conda\u66ff\u6362\u4e3amamba\u5373\u53ef

    \u82e5\u6210\u529f\u5b89\u88c5\uff0c\u5219\u5b9a\u4e49\u73af\u5883\u53d8\u91cf\uff1a

    export tensorflow_root=/data/user/conda/env/deepmd\n

    \u5373\u865a\u62df\u73af\u5883\u521b\u5efa\u7684\u8def\u5f84\u3002\u540e\u6587\u5c06\u4f7f\u7528 $tensorflow_root \u6765\u6307\u5b9a\u8be5\u8def\u5f84\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#deepmd-kitc","title":"\u5b89\u88c5DeePMD-kit\u7684C++ \u63a5\u53e3","text":"

    \u4e0b\u9762\u5f00\u59cb\u7f16\u8bd1DeePMD-kit C++\u63a5\u53e3\uff1a

    deepmd_source_dir=`pwd`\ncd $deepmd_source_dir/source\nmkdir build \ncd build\n

    \u5047\u8bbeDeePMD-kit C++ \u63a5\u53e3\u5b89\u88c5\u5728 /some/workspace/deepmd_root \u4e0b\uff0c\u5b9a\u4e49\u5b89\u88c5\u8def\u5f84 deepmd_root\uff1a

    export deepmd_root=/some/workspace/deepmd_root\n

    \u5728build\u76ee\u5f55\u4e0b\u8fd0\u884c\uff1a

    cmake -DLAMMPS_SOURCE_ROOT=<lammps_source_code> \\\n-DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root \\\n-DUSE_CUDA_TOOLKIT=TRUE ..\n

    \u6ce8\u610f\u8fd9\u91cc\u7684 <lammps_source_code> \u5bf9\u5e94\u524d\u6587\u4e2dLammps\u7684\u6e90\u7801\u8def\u5f84\u3002

    \u6700\u540e\u7f16\u8bd1\u5e76\u5b89\u88c5\uff1a

    make\nmake install\n

    \u82e5\u65e0\u62a5\u9519\uff0c\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u6267\u884c\u68c0\u67e5\u662f\u5426\u6709\u6b63\u786e\u8f93\u51fa\uff1a

    $ ls $deepmd_root/lib\ndeepmd_lmp/           libdeepmd_cc_low.so   libdeepmd_gromacs.so  libdeepmd_ipi.so      libdeepmd_lmp.so      libdeepmd_op.so\ndeepmd_lmp_low/       libdeepmd_cc.so       libdeepmd_ipi_low.so  libdeepmd_lmp_low.so  libdeepmd_op_cuda.so  libdeepmd.so\n

    \u6ce8\u610f\u5e94\u5f53\u5305\u542bdeepmd_lmp/\u548clibdeepmd_lmp.so\uff0c\u540e\u4e24\u8005\u5373\u4e3aLammps\u63d2\u4ef6\u7684\u4f4d\u7f6e\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#tensorflow-python","title":"\u65b9\u6cd5\u4e8c\uff1a\u91c7\u7528TensorFlow Python \u7248\u672c\u7684\u5e93","text":"

    \u4ece DeePMD-kit v2.2 \u8d77\uff0ccmake \u652f\u6301\u8bbe\u7f6e -DUSE_TF_PYTHON_LIBS=TRUE\u7684\u65b9\u5f0f\uff0c\u4ece\u800c\u514d\u53bb\u4e86\u5b89\u88c5 libtensorflow_cc \u7684\u9ebb\u70e6\u3002

    cmake -DLAMMPS_SOURCE_ROOT=<lammps_source_code> \\\n-DUSE_TF_PYTHON_LIBS=TRUE -DUSE_CUDA_TOOLKIT=TRUE \\\n-DCMAKE_INSTALL_PREFIX=$deepmd_root ..\n

    Tip

    \u8bf7\u6ce8\u610f\uff0c\u8fd9\u79cd\u65b9\u6cd5\u91c7\u7528Python Wheel\u63d0\u4f9b\u7684 libtensorflow_framework.so.2 \u548c _pywrap_tensorflow_internal.so \uff08\u4f5c\u4e3a libtensorflow_cc.so\u7684\u66ff\u4ee3\uff09\u8fdb\u884c\u7f16\u8bd1\u3002 \u540e\u8005\u4f9d\u8d56 Python \u5e93 libpython3.*.so.*\uff08\u56e0\u7248\u672c\u4e0d\u540c\u800c\u5f02\uff09\uff0c\u8bf7\u6ce8\u610f\u57fa\u4e8e\u4e0a\u8ff0\u5e93\u7684\u7f16\u8bd1\u5e94\u4fdd\u8bc1\u540e\u8005\u8def\u5f84\u4e5f\u5728 LD_LIBRARY_PATH \u4e2d\u3002

    \u4e3a\u4f7f\u5f97\u7f16\u8bd1\u597d\u7684\u5e93\u6587\u4ef6\u53ef\u4ee5\u66f4\u5bb9\u6613\u627e\u5230\u4e0a\u8ff0\u4f9d\u8d56\uff0c\u8bf7\u6267\u884c\u4ee5\u4e0b\u64cd\u4f5c\uff0c\u5efa\u7acb\u4e00\u4e2a\u4f2a tensorflow_root \u76ee\u5f55\uff0c\u5047\u8bbe\u8be5\u8def\u5f84\u4f4d\u4e8e /some/workspace/tensorflow_root \u4e0b\uff0c\u540c\u65f6\u5047\u8bbe Conda \u73af\u5883\u4ecd\u4f4d\u4e8e /data/user/conda/env/deepmd \u4e0b\uff1a

    export tensorflow_root=/some/workspace/tensorflow_root\nmkdir -p $tensorflow_root/lib \ncd $tensorflow\nln -s /data/user/conda/env/deepmd/lib/python3.10/site-packages/tensorflow/include .\ncd lib\nln -s /data/user/conda/env/deepmd/lib/python3.10/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so libtensorflow_cc.so\nln -s /data/user/conda/env/deepmd/lib/python3.10/site-packages/tensorflow/libtensorflow_framework.so.2 .\nln -s libtensorflow_framework.so.2 libtensorflow_framework.so\n

    \u4e8e\u662f\uff0c\u6211\u4eec\u4fbf\u6784\u5efa\u4e86\u4e00\u4e2a\u4f2a tensorflow_root \u76ee\u5f55\u3002\u6ce8\u610f\u540e\u6587\u7684 $tensorflow_root \u6b64\u65f6\u5e94\u6307\u5411\u8be5\u8def\u5f84\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#_7","title":"\u8c03\u7528\u65b9\u6cd5","text":"

    \u4f7f\u7528\u524d\u8bf7\u52a0\u8f7d\u597d\u73af\u5883\u53d8\u91cf\u3002\u6ce8\u610f\u82e5\u672a\u5b9a\u4e49 $deepmd_root\u3001$tensorflow_root\uff0c\u8bf7\u8865\u5168\u4e3a\u5b8c\u6574\u8def\u5f84\u3002\u8fd9\u91cc\u7684 /data/user/conda/env/deepmd \u4ecd\u662f Conda \u73af\u5883\u7684\u8def\u5f84\uff0c\u8bf7\u76f8\u5e94\u66ff\u6362\u3002

    export LD_LIBRARY_PATH=$tensorflow_root/lib:$deepmd_root/lib:/data/user/conda/env/deepmd/lib:$LD_LIBRARY_PATH\nexport LAMMPS_PLUGIN_PATH=$deepmd_root/lib/deepmd_lmp\n

    Lammps\u4fbf\u4f1a\u81ea\u52a8\u5bfb\u627e\u63d2\u4ef6\u5e76\u52a0\u8f7d\uff0c\u4ece\u800c\u53ef\u4ee5\u5b9e\u73b0DeePMD\u7684\u652f\u6301\u3002

    pair_style      deepmd ../graph.pb\npair_coeff      * *\n

    \u82e5\u65e0\u6cd5\u81ea\u52a8\u627e\u5230\uff0c\u4e5f\u53ef\u4ee5\u624b\u52a8\u5728 \u8f93\u5165\u6587\u4ef6 \u4e2d\u52a0\u8f7d\uff0c\u5199\u5728 pair_style \u4e0a\u4e00\u884c\u5373\u53ef\uff0c\u6ce8\u610f $deepmd_root\u3001$tensorflow_root \u987b\u66ff\u6362\u4e3a\u5b8c\u6574\u8def\u5f84\u3002

    plugin load     $deepmd_root/lib/libdeepmd_lmp.so\npair_style      deepmd ../graph.pb\npair_coeff      * *\n

    \u8fd0\u884c\u547d\u4ee4\u4ecd\u7136\u662f lmp_mpi -i <input_file>\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/#dp-cp2k","title":"DP-CP2K \u5b89\u88c5\u6307\u5f15","text":"

    \u9996\u5148clone\u5bf9\u5e94\u7684\u5b89\u88c5\u5305\uff1a

    git clone https://github.com/cp2k/cp2k.git --recursive --depth=1\n

    \u7136\u540e\u8fd0\u884c\u76f8\u5e94\u7684Toolchain\u811a\u672c\uff1a

    module unload mpi/intel/2017.5.239 # (1)!\nmodule load mpi/openmpi/4.1.6-gcc # (2)!\ncd tools/toolchain/\n./install_cp2k_toolchain.sh --with-gcc=system --mpi-mode=openmpi --with-deepmd=$deepmd_root\n
    1. \u65b0\u7248CP2K\u4f1a\u81ea\u52a8\u68c0\u6d4b Intel MPI \u4e14\u65e0\u89c6\u5f3a\u5236\u4f7f\u7528\u5176\u4ed6\u73af\u5883\u5982 OpenMPI \u7684\u8bbe\u5b9a\uff0c\u65e7\u7248 Intel MPI\u4e0d\u88ab\u517c\u5bb9
    2. \u7531\u4e8e --with-openmpi=install \u5728 Zeus \u4e0a\u65e0\u6cd5\u6b63\u786e\u5b89\u88c5\uff0c\u8fd9\u91cc\u9884\u5148\u5b89\u88c5\u597d\u4e86 OpenMPI\u3002

    \u5982\u4e0d\u9700\u8981 MPI \u548c DFT \u76f8\u5173\u529f\u80fd\uff0c\u53ef\u4ee5\u5982\u4e0b\u8bbe\u7f6e\u4ee5\u51cf\u5c11\u6b65\u9aa4\uff08\u6ce8\u610f\u540e\u7eed\u7f16\u8bd1\u79fb\u9664\u6389 psmp pdbg \u9009\u9879\uff09\uff1a

    cd tools/toolchain/\nmodule unload mpi/intel/2017.5.239 # (1)!\n./install_cp2k_toolchain.sh --with-deepmd=$deepmd_root --mpi-mode=no --with-libint=no --with-libxc=no --with-libxsmm=no\n
    1. \u65b0\u7248CP2K\u4f1a\u81ea\u52a8\u68c0\u6d4b Intel MPI \u4e14\u65e0\u89c6\u5f3a\u5236\u4f7f\u7528\u5176\u4ed6\u73af\u5883\u5982 OpenMPI \u7684\u8bbe\u5b9a\uff0c\u65e7\u7248 Intel MPI\u4e0d\u88ab\u517c\u5bb9

    \u6839\u636e\u811a\u672c\u8fd0\u884c\u7ed3\u5c3e\u7684\u63d0\u793a\u590d\u5236arch\u6587\u4ef6\u5e76source\u6240\u9700\u7684\u73af\u5883\u53d8\u91cf\u3002

    \u8fd9\u91cc\u7684\u76ee\u7684\u662f\u8ba9\u7f16\u8bd1\u65f6\u53ef\u4ee5\u6b63\u786e\u94fe\u63a5 libpython3.*.so.*\uff0c\u56e0\u800c /data/user/conda/env/deepmd/ \u4ecd\u65e7\u662f Conda \u73af\u5883\u8def\u5f84\u3002

    \u6700\u540e\u56de\u5230\u4e3b\u76ee\u5f55\u8fdb\u884c\u7f16\u8bd1\uff1a

    make -j 4 ARCH=local VERSION=\"psmp pdbg ssmp sdbg\" # (1)!\n
    1. \u5982\u4e0d\u9700\u8981 MPI \uff0c\u8bf7\u79fb\u9664\u6389 psmp pdbg\u3002

    \u7f16\u8bd1\u6b63\u786e\u5b8c\u6210\u540e\uff0c\u53ef\u6267\u884c\u6587\u4ef6\u751f\u6210\u5728 exe/ \u4e0b\uff0c\u5373 cp2k.ssmp \u6216 cp2k.psmp\u3002

    \u5173\u4e8e DP-CP2K \u7684\u4f7f\u7528\uff0c\u8bf7\u53c2\u8003 CP2K: DeePMD\u63d2\u4ef6\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/","title":"DeePMD-kit\u5b89\u88c5\u5b9e\u6218\uff1aPC\u7bc7","text":""},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/#_1","title":"\u80cc\u666f","text":"

    \u9700\u8981\u5bf9DeePMD-kit\u7684\u6e90\u7801\u8fdb\u884c\u4e00\u4e9b\u4fee\u6539\uff0c\u9488\u5bf9\u65b0\u7684\u7269\u7406\u91cf\u6784\u5efa\u6a21\u578b\u3002\u5bf9\u4ee3\u7801\u7684\u8c03\u8bd5\u9700\u8981GPU\uff0c\u4f46\u662f\u4e0d\u9700\u8981\u5f88\u597d\u7684\u6027\u80fd\uff0c\u6240\u4ee5\u5728PC\u7aef\u8fdb\u884c\u53ef\u4ee5\u8282\u7701\u5728\u96c6\u7fa4\u4e0a\u7684\u6392\u961f\u65f6\u95f4\u3002

    \u5b89\u88c5\u7cfb\u7edf\uff1aUbuntu 20.04

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/#deepmd-kit","title":"DeePMD-kit\u4ee3\u7801\u7ed3\u6784","text":"

    \u5728\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u4e4b\u524d\u5148\u7b80\u5355\u63cf\u8ff0\u4e00\u4e0bDeePMD-kit\u7684\u4ee3\u7801\u7ed3\u6784\u3002

    DeePMD-kit\u5728\u8bad\u7ec3\u90e8\u5206\u7684\u4ee3\u7801\u662f\u5728.py\u6587\u4ef6\u4e2d\u8c03\u7528 TensorFlow \u5b9e\u73b0\u7684\uff08TF\u81ea\u5e26OP/\u81ea\u5b9a\u4e49OP\uff09\u3002\u4f46\u662fTF\u7684\u5e95\u5c42\u662f\u7528 C++ \u6784\u5efa\u7684\uff0c\u6240\u4ee5\u5728\u4f7f\u7528 DeePMD-kit \u65f6\u9700\u8981\u5b89\u88c5 TF/python \u63a5\u53e3\u3002

    \u8fdb\u5165\u5230\u4fee\u6539\u8fc7\u4ee3\u7801\u7684\u6587\u4ef6\u5939\uff0c\u6267\u884c\uff1a

    pip install .\n

    \u6b64\u65f6\u4f1a\u57fa\u4e8e\u5df2\u4fee\u6539\u7684\u4ee3\u7801\u751f\u6210\u65b0\u7684\u53ef\u6267\u884c\u6587\u4ef6\u3002

    \u5982\u679c\u60f3\u57fa\u4e8eDeePMD-kit\u751f\u6210\u7684\u6a21\u578b\u548clammps/CP2K\u7b49\u8f6f\u4ef6\u7684\u5bf9\u63a5\uff0c\u9700\u8981\u53e6\u5916\u5b89\u88c5C++\u63a5\u53e3\u3002\u8fd9\u90e8\u5206\u53ef\u4ee5\u53c2\u8003\u4e4b\u524d\u7684\u6559\u7a0b\uff08\u7f16\u8bd1/\u4fee\u6539\u4ee3\u7801\u540e\u91cd\u65b0\u7f16\u8bd1\uff09\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/#conda","title":"conda\u5b89\u88c5","text":"

    \u5982\u679c\u4e0d\u9700\u8981\u5bf9\u6e90\u7801\u8fdb\u884c\u4fee\u6539\uff0c\u53ef\u4ee5\u5229\u7528\u5b98\u65b9\u6559\u7a0b easy installation \u4e2d\u7684 conda \u5b89\u88c5

    #(base)\nconda create -n deepmd deepmd-kit=*=*gpu libdeepmd=*=*gpu lammps-dp cudatoolkit=11.3 horovod -c https://conda.deepmodeling.org\n

    \u6b64\u547d\u4ee4\u65b0\u5efa\u4e86\u4e00\u4e2a\u540d\u4e3adeepmd\u7684\u865a\u62df\u73af\u5883\uff0c\u5e76\u5c06deepmd-kit\u5b89\u88c5\u5728\u8fd9\u4e2a\u73af\u5883\u4e2d\u3002 Conda \u5b89\u88c5\u4f1a\u4e00\u5e76\u5b89\u88c5 CUDA Toolkit\uff0c\u56e0\u6b64\u53ea\u8981\u4fdd\u8bc1\u7535\u8111\u7684\u9a71\u52a8\u652f\u6301\u5373\u53ef\u3002\u53ef\u901a\u8fc7\u4ee5\u4e0b\u6307\u4ee4\u67e5\u770b\u9a71\u52a8\u7248\u672c\u53ca\u5176\u652f\u6301\u7684cuda\u7248\u672c\uff1a

    nvidia-smi\n

    \u76ee\u524d\u901a\u8fc7conda\u9ed8\u8ba4\u5b89\u88c5\u7684\u662f10.1\u7248\u672c\u7684CUDA Toolkit\uff0c\u7531\u4e8eCUDA\u5411\u4e0b\u517c\u5bb9\uff0c\u6545\u7248\u672c\u9ad8\u4e8e10.1\u5373\u53ef\u3002\u5982\u679c\u9a71\u52a8\u652f\u6301\u7684CUDA\u7248\u672c\u8fc7\u4f4e\uff0c\u53ef\u4ee5\u5728Ubuntu\u7684Software&Updates/Additional Drivers\u91cc\u9009\u62e9\u65b0\u7248\u7684\u9a71\u52a8\u8fdb\u884c\u5347\u7ea7\u3002

    \u5229\u7528 Conda \u4fbf\u6377\u5b89\u88c5\u65f6\uff0cDeePMD-kit\u7684C++\u5e95\u5c42\u6587\u4ef6\u5168\u90e8\u90fd\u5df2\u7ecf\u7f16\u8bd1\u6210\u53ef\u6267\u884c\u6587\u4ef6.so\uff0c\u5728\u672c\u5730\u53ea\u80fd\u67e5\u770b\u5230\u53ef\u6267\u884c\u6587\u4ef6.so\u548c.py\u6587\u4ef6\uff0c\u65e0\u6cd5\u5bf9\u5e95\u5c42\u8fdb\u884c\u4fee\u6539\u3002\u6240\u4ee5\u5982\u679c\u9700\u8981\u5bf9\u6e90\u7801\u8fdb\u884c\u4fee\u6539\uff0c\u9700\u8981\u624b\u52a8\u5b89\u88c5\u7f16\u8bd1\u3002

    Conda\u5b89\u88c5\u5305\u62ec\u4e86\u9884\u7f16\u8bd1\u7684 TF/C++ \u63a5\u53e3\uff0c\u53ef\u901a\u8fc7\u5b9a\u4e49\u73af\u5883\u53d8\u91cf\u7701\u53bb\u4ee5\u524d\u6559\u7a0b\u4e2d\u63d0\u5230\u7684\u7f16\u8bd1\u7684\u6b65\u9aa4\u3002\uff08\u89c1\u4e0b\u6587\uff09

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/#_2","title":"\u624b\u52a8\u7f16\u8bd1","text":"

    \u4e0a\u4e00\u8282\u7684 Conda \u5b89\u88c5\u662f\u5728deepmd\u865a\u62df\u73af\u5883\u4e0b\u5b89\u88c5\u7684\uff0c\u624b\u52a8\u5b89\u88c5\u6211\u4eec\u65b0\u5efa\u4e00\u4e2a\u73af\u5883dp-tf\uff1a

    conda info -e\n# if you have been in `deepmd`, deactivate first\nconda deactivate\n# create a new environment\nconda create -n dp-tf\n# if you want to specify the version of python in dp-tf\n#conda create -n dp-tf python=3.9\n

    tip

    \u5efa\u8bae\u5728\u65b0\u5efa\u73af\u5883dp-tf \u65f6\u8bbe\u7f6epython\u7248\u672c\u548cdeepmd\u4fdd\u6301\u4e00\u81f4\uff0c\u5426\u5219\u540e\u7eed\u5b89\u88c5tensorflow\u65f6\u53ef\u80fd\u56e0\u4e3apython\u7248\u672c\u4e0d\u517c\u5bb9\u62a5\u9519No matching distribution found for tensorflow\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/#_3","title":"\u4e0b\u8f7d\u6e90\u7801&\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf","text":"

    \u4e0b\u8f7d\u6e90\u7801\uff08\u6ce8\u610f\u4e00\u5b9a\u8981\u6709--recursive\uff0c\u5177\u4f53\u89c1[wiki](./deepmd-kit_installation_51.md\uff09

    #(tf-dp)\ngit clone --recursive https://github.com/deepmodeling/DeePMD-kit.git DeePMD-kit\n

    \u8bbe\u7f6e\u73af\u5883\u53d8\u91cf

    #(tf-dp)\ncd DeePMD-kit\n# set $deepmd_source_dir as the directory of the deepmd source code\ndeepmd_source_dir=$(pwd)\n# set $tensorflow_root as the directory of the TF/C++ interface\n# the dir of the environment with conda DP\ntensorflow_root=/dir/for/env/with/condaDP\n

    \u53ef\u4ee5\u7528conda env list\u6307\u4ee4\u67e5\u770b\u73af\u5883deepmd\u7684\u5730\u5740(/dir/for/env/with/condaDP)

    \u5982\u679c\u62c5\u5fc3\u5b89\u88c5\u8fc7\u7a0b\u4e2d\u9700\u8981\u9000\u51fa\uff0c\u53ef\u4ee5\u4e34\u65f6\u52a0\u5230~/.bashrc\u6587\u4ef6\u4e2d\u5e76source ~/.bashrc\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/#tfpython","title":"TF/Python \u63a5\u53e3","text":"

    \u9996\u5148\u53ef\u4ee5\u66f4\u65b0\u4e00\u4e0bpip\uff0c\u5e76\u5b89\u88c5\u65b0\u7248TensorFlow\uff1a

    #(tf-dp)\npip install --upgrade pip\npip install --upgrade tensorflow==2.5.0\n

    tip

    \u5229\u7528conda\u4fbf\u6377\u5b89\u88c5\u53ef\u4ee5\u7701\u53bb\u540e\u9762TF/C++\u63a5\u53e3\u7684\u5b89\u88c5\uff0c\u6240\u4ee5\u8fd9\u91cc\u7684TF\u5b89\u88c5\u548cconda\u5b89\u88c5\u4e2d\u7684TF\u4fdd\u6301\u4e00\u81f4\u3002\uff08\u5177\u4f53\u7248\u672c\u5728conda\u5b89\u88c5\u8fc7DeePMD-kit\u7684\u73af\u5883(deepmd)\u4e0b\u67e5\u770b\u5df2\u5b89\u88c5\u7684tensorflow-base\u7248\u672c\u3002

    \u4f8b\u5982\uff1a

    # assume you have been in dp-tf env\n#(tf-dp)\nconda deactivate\n#(base)\nconda activate deepmd\n#(deepmd)\nconda list\n>>> tensorflow-base           2.5.0           gpu_py39h7c1560b_0    https://conda.deepmodeling.org\n#(deepmd)\nconda deactivate\n#(base)\nconda activate dp-tf\n#(tf-dp)\npip install --upgrade tensorflow==2.5.0\n

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/#deepmd-kitpython","title":"DeePMD-kit/Python \u63a5\u53e3","text":"
    #(tf-dp)\ncd $deepmd_source_dir\nDP_VARIANT=cuda\npip install .\n

    \u8fd9\u4e00\u6b65\u7684pip install\u5bf9deepmd_source_dir\u4e0b\u7684\u6587\u4ef6\u8fdb\u884c\u7f16\u8bd1\u3002

    warning

    \u73af\u5883\u53d8\u91cfDP_VARIANT\u7684\u9ed8\u8ba4\u503c\u662fcpu\uff0c\u8981\u8bb0\u5f97\u6839\u636e\u9700\u8981\u8fdb\u884c\u4fee\u6539\uff01

    info

    \u5982\u679c\u5bf9\u6e90\u7801\u8fdb\u884c\u4e86\u4fee\u6539\uff0c\u9700\u8981\u91cd\u65b0\u7f16\u8bd1\u3002

    \u8fd9\u4e00\u6b65\u4e2d\u62a5\u9519\u53ef\u80fd\u7684\u5e94\u5bf9\u63aa\u65bd\uff1a

    • \u7f51\u7edc\u95ee\u98981

    \u4fee\u6539\u955c\u50cf\u6e90\uff08\u5177\u4f53\u53ef\u53c2\u8003\u4f7f\u7528\u5e2e\u52a9\uff09

    pip install pip -U\npip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple\n
    • \u7f51\u7edc\u95ee\u98982\uff08...timed out...\uff09

    \u591a\u8bd5\u51e0\u6b21...

    • \u5347\u7ea7setuptools
    pip install --upgrade setuptools --no-build-isolation\n
    • \u7f3a\u5404\u79cd\u5305

    \u5982\u679c\u76f4\u63a5pip install\u4f1a\u53d1\u73b0\u6240\u6709\u90fd\u662f\u5df2\u5b89\u88c5\u7684\uff0c\u9700\u8981pip uninstall\u518dpip install\u3002

    \u7528conda list\u68c0\u67e5\u53d1\u73b0\u5e94\u8be5\u662f\u6ca1\u6709\u5b89\u88c5\u5230\u8fd9\u4e2a\u73af\u5883\u91cc\u3002

    \u5982\u679c\u6709\u62a5\u9519\u800c\u65e0\u6cd5\u76f4\u63a5\u5378\u8f7d\uff1a

    It is a distuils installed project and thus we cannot accurately determine which files belongs to it which would lead to only a partial uninstall.\n

    \u53ef\u4ee5\u8003\u8651\u5f3a\u5236\u8986\u76d6\u5b89\u88c5\uff1a

    pip install some_package --ignore-installed\n
    • GCC\u7248\u672c\u95ee\u9898
        138 | #error -- unsupported GNU version! gcc versions later than 8 are not supported!\n

    Ubuntu 20.04\u9ed8\u8ba4\u7684GCC\u7248\u672c\u662f9.3.0\uff08gcc --version\u67e5\u770b\uff09\uff0c\u9700\u8981\u5378\u8f7d\u518d\u91cd\u88c5\u4f4e\u7248\u672c\uff08\u6bd4\u59827.5\uff09

    sudo apt remove gcc\nsudo apt-get install gcc-7 g++-7 -y\nsudo ln -s /usr/bin/gcc-7 /usr/bin/gcc\nsudo ln -s /usr/bin/g++-7 /usr/bin/g++\nsudo ln -s /usr/bin/gcc-7 /usr/bin/cc\nsudo ln -s /usr/bin/g++-7 /usr/bin/c++\ngcc --version\n
    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/#deepmd-kitc","title":"DeePMD-kit/C++ \u63a5\u53e3","text":"

    \u89c1\u5b98\u65b9\u6559\u7a0b\uff08\u53ef\u80fd\u9700\u8981apt-get\u5b89\u88c5cmake\uff0c\u5982\u679c\u6ca1\u6709\u8db3\u591f\u6743\u9650\u4e5f\u53ef\u4ee5\u901a\u8fc7pip\u5b89\u88c5\uff09\u3002

    "},{"location":"wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/#lammps","title":"\u548c\u5176\u4ed6\u8ba1\u7b97\u8f6f\u4ef6\uff08\u5982lammps\uff09\u7684\u63a5\u53e3","text":"

    \u89c1\u5b98\u65b9\u6559\u7a0b\u548c\u8fd9\u91cc\u3002

    "},{"location":"wiki/software_usage/DP-GEN/","title":"DP-GEN\u4f7f\u7528\u5165\u95e8","text":""},{"location":"wiki/software_usage/DP-GEN/#_1","title":"\u7b80\u4ecb","text":"

    Deep Potential Generator (DP-GEN) \u662f\u4e00\u4e2a\u5c06\u795e\u7ecf\u7f51\u7edc\u52bf\u80fd\uff08machine learning potential\uff09\u548c\u4e3b\u52a8\u5b66\u4e60\uff08active learing\uff09\u7ed3\u5408\u8d77\u6765\u7684\u5de5\u4f5c\u6d41\u3002\u8be5\u5305\u4e3b\u8981\u7531\u5f20\u6797\u5cf0\uff08\u666e\u6797\u65af\u987f\u5927\u5b66\uff09\uff0c\u738b\u6db5\uff08\u5317\u4eac\u5e94\u7528\u7269\u7406\u4e0e\u8ba1\u7b97\u6570\u5b66\u7814\u7a76\u6240\uff09\u5f00\u53d1\u3002\u5982\u6709\u95ee\u9898\uff0c\u53ef\u4ee5\u5411\u4ed6\u4eec\u8be2\u95ee\u3002

    \u63d0\u793a

    \u8003\u8651\u5230 DP-GEN \u5728\u96c6\u7fa4\u8fd0\u884c\u53ef\u80fd\u5b58\u5728\u4e00\u5b9a\u7684\u6027\u80fd\u95ee\u9898\uff0c\u63a8\u8350\u5c1d\u8bd5 ai2-kit \u8fd0\u884c\u52bf\u51fd\u6570\u8bad\u7ec3\u7684 Close Loop Learning (CLL) \u4efb\u52a1\u3002

    \u4ee5\u4e0b\u4e3a\u53c2\u8003\u4fe1\u606f\uff1a

    • GitHub
    • \u53c2\u8003\u6587\u732e\uff1aActive learning of uniformly accurate interatomic potentials for materials simulation

    Warning

    \u6b64\u9875\u9762\u4ec5\u9650\u63d0\u4f9b\u8d21\u732e\u8005\u5bf9\u4e8e\u8be5\u8f6f\u4ef6\u7684\u7406\u89e3\uff0c\u5982\u6709\u4efb\u4f55\u95ee\u9898\u8bf7\u8054\u7cfb\u8d21\u732e\u8005\u3002\u5efa\u8bae\u5728\u9605\u8bfb\u6b64\u7bc7\u524d\u5148\u5bf9DeePMD-kit\u6709\u4e00\u5b9a\u4e86\u89e3\u3002 \u6307\u8def\uff1aDeePMD-kit

    DP-GEN\u7684\u5de5\u4f5c\u6d41\u662f\u7531\u4ee5\u4e0b\u4e09\u6b65\u7ec4\u6210\u7684\u5faa\u73af\uff1a

    • \u8bad\u7ec3\uff1aDeePMD-kit\u540c\u65f6\u8bad\u7ec3 \u591a\u6761\uff08\u4e00\u822c\u662f4\u6761\uff09\u53c2\u6570\u521d\u59cb\u5316\u4e0d\u540c\u7684\u52bf\u51fd\u6570\uff08GPU\uff09\u3002
    • \u91c7\u6837\u548c\u7b5b\u9009\uff1a\u57fa\u4e8e\u8bad\u7ec3\u5f97\u5230\u7684\u52bf\u51fd\u6570\u548c\u6307\u5b9a\u7684\u521d\u59cb\u7ed3\u6784\u5229\u7528LAMMPS\u8fdb\u884cclassical MD\uff0c\u6269\u5c55\u6784\u578b\u7a7a\u95f4\u3002\u7136\u540e\u5bf9MD\u4e2d\u5f97\u5230\u7684\u6784\u578b\u4f9d\u7167\u7279\u5b9a\u6307\u6807\uff08\u5bf9\u67d0\u4e2a\u6784\u578b\u7528\u4e0d\u540c\u7684\u52bf\u51fd\u6570\u9884\u6d4b\u6240\u5f97\u7684\u539f\u5b50\u529b\u7684\u6807\u51c6\u5dee\uff09\u8fdb\u884c\u7b5b\u9009\uff08GPU\uff09\u3002
    • \u6807\u8bb0\uff1a\u5c06\u7b5b\u9009\u6240\u5f97\u7684\u6784\u578b\u8fdb\u884cDFTMD\u5355\u70b9\u80fd\u8ba1\u7b97\uff0c\u5f97\u5230\u529b\u548c\u80fd\u91cf\uff0c\u52a0\u5165\u8bad\u7ec3\u96c6\u8fdb\u884c\u65b0\u4e00\u8f6e\u7684\u8bad\u7ec3\uff0851\u621652\uff09\u3002
    "},{"location":"wiki/software_usage/DP-GEN/#_2","title":"\u8f93\u5165\u6587\u4ef6","text":"

    \u4e3a\u4e86\u4f7fdpgen\u8fd0\u884c\u8d77\u6765\uff0c\u6211\u4eec\u9700\u8981\u51c6\u5907\u5982\u4e0b\u7684\u6587\u4ef6\uff1a

    • param.json

    \u4e09\u6b65\u8ba1\u7b97\u4e2d\u6240\u7528\u7684\u53c2\u6570\uff0c\u5177\u4f53\u6307\u795e\u7ecf\u7f51\u7edc\u8bad\u7ec3\u7684\u53c2\u6570\uff0clammps\u4e2dMD\u7684\u53c2\u6570\u548cDFTMD\u8ba1\u7b97\u5355\u70b9\u80fd\u7684\u53c2\u6570\u3002

    • machine.json

    \u5236\u5b9a\u4e0a\u8ff0\u4e09\u4e2a\u6b65\u9aa4\u5206\u522b\u5728\u54ea\u4e2a\u670d\u52a1\u5668\u8ba1\u7b97\u3002

    Tip

    \u5728 Zeus \u96c6\u7fa4\u4e0a\u914d\u7f6e machine.json`\uff0c\u8bf7\u53c2\u9605GPU\u4f7f\u7528\u8bf4\u660e

    • \u521d\u59cb\u8bad\u7ec3\u96c6\u6570\u636e

    \u653e\u5728\u63d0\u4ea4dpgen\u6240\u5728\u7684\u670d\u52a1\u5668\u4e0a\uff0c\u7528\u4e8e\u8bad\u7ec3\u52bf\u51fd\u6570\uff0c\u53c2\u7167DeePMD-kit\u4e2d\u65b9\u6cd5\u751f\u6210\u3002

    • MD\u91c7\u6837\u7684\u521d\u59cb\u7ed3\u6784

    \u653e\u5728\u63d0\u4ea4dpgen\u6240\u5728\u7684\u670d\u52a1\u5668\u4e0a\uff0c\u5fc5\u987b\u4f7f\u7528vasp5.x\u7684POSCAR\uff0c\u628a.xyz\u6587\u4ef6\u8f6c\u5316\u4e3aPOSCAR\u7684\u811a\u672c\u53ef\u89c1\u6587\u672b\u3002

    "},{"location":"wiki/software_usage/DP-GEN/#_3","title":"\u8f93\u51fa\u6587\u4ef6","text":"

    \u5728\u63d0\u4ea4dpgen\u7684\u6587\u4ef6\u5939\u4e0b\u4f1a\u51fa\u73b0\u4ee5\u4e0b\u8f93\u51fa\u6587\u4ef6\uff0c\u7528\u4e8e\u6307\u793a\u4efb\u52a1\u8fd0\u884c\u7684\u72b6\u51b5\uff1a

    • dpgen.log

    \u5305\u62ec\u4e86\u8fd0\u884c\u8f6e\u6570\uff0c\u5355\u4e2a\u4efb\u52a1\u63d0\u4ea4\u7684\u60c5\u51b5\uff0c\u91c7\u6837\u51c6\u786e\u5ea6\u7b49\u8be6\u7ec6\u7684\u4fe1\u606f\u3002

    • record.dpgen

    \u7531\u591a\u884c x y \u7ec4\u6210\uff0c\u8bb0\u5f55\u4efb\u52a1\u8fdb\u7a0b\u3002\u5176\u4e2dx\u4e3a\u8fd0\u884c\u7684\u8f6e\u6570\uff08iteration\uff09\uff0c\u4ece0\u5f00\u59cb\uff1by\u53d60-8\uff0c\u5176\u4e2d0-2\u6307\u4ee3\u8bad\u7ec3\uff0c3-5\u6307\u4ee3\u91c7\u6837\u548c\u7b5b\u9009\uff0c6-8\u6307\u4ee3\u6807\u8bb0\u3002

    dpgen\u901a\u8fc7\u8bfb\u53d6\u8fd9\u4e2a\u6587\u4ef6\u6765\u51b3\u5b9a\u4ece\u54ea\u91cc\u91cd\u542f\u8ba1\u7b97\uff0c\u6240\u4ee5\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u624b\u52a8\u4fee\u6539\u8fd9\u4e2a\u6587\u4ef6\u6765\u51b3\u5b9a\u91cd\u542f\u7684\u70b9\u3002\u4f8b\u5982\uff0c\u5728\u7b2cx\u8f6e\u4e2d\u6211\u4eec\u53d1\u73b0\u91c7\u6837\u7684\u51c6\u786e\u5ea6\u8fc7\u4f4e\uff0c\u9700\u8981\u589e\u52a0\u521d\u59cb\u7ed3\u6784\u7684\u6570\u91cf\u91cd\u65b0\u8dd1MD\uff0c\u6211\u4eec\u5c31\u53ef\u4ee5\u628arecord.dpgen\u6587\u4ef6\u5728x 2\u4e4b\u540e\u7684\u5185\u5bb9\u5220\u9664\uff0c\u91cd\u65b0\u63d0\u4ea4dpgen\u4efb\u52a1\u3002

    • nohup.out

    \u8fd9\u4e2a\u5e76\u4e0d\u662f\u5fc5\u8981\u8f93\u51fa\uff0c\u4f46\u662f\u5efa\u8bae\u4f7f\u7528nohup\u547d\u4ee4\u628adpgen\u6302\u5728\u540e\u53f0\u8fd0\u884c\u3002\u8fd9\u4e2a\u6587\u4ef6\u4e2d\u8f93\u51fa\u7684\u4fe1\u606f\u548cdpgen.log\u7684\u57fa\u672c\u4e00\u81f4\u3002
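    The following is a minimal Python sketch of editing record.dpgen to restart from a chosen point; the iteration number and step used here (iteration 5, step 2) are hypothetical and should be adapted to your own run.

    # truncate record.dpgen so that it only keeps entries up to iteration 5, step 2;
    # dpgen will then restart from the MD sampling of iteration 5
    restart_iter, restart_step = 5, 2

    with open('record.dpgen') as f:
        lines = [line.split() for line in f if line.strip()]

    kept = [
        (int(x), int(y)) for x, y in lines
        if (int(x), int(y)) <= (restart_iter, restart_step)
    ]

    with open('record.dpgen', 'w') as f:
        for x, y in kept:
            f.write(f'{x} {y}\n')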

    "},{"location":"wiki/software_usage/DP-GEN/#_4","title":"\u4f8b\u5b50","text":"

    \u63a5\u4e0b\u6765\uff0c\u628a\u94c2\u6c34\u754c\u9762\u52bf\u51fd\u6570\u8bad\u7ec3\u6240\u7528\u7684param.json\u5206\u89e3\u6210\u51e0\u4e2a\u90e8\u5206\u8fdb\u884c\u89e3\u91ca\uff0c\u5728\u5b9e\u9645\u4f7f\u7528\u4e2d\u9700\u8981\u628a\u51e0\u6bb5\u653e\u5728\u4e00\u8d77\u3002

    comment

    \u6587\u4ef6\u4e2d\u7684\u6ce8\u91ca\u7528_comment\u6807\u6ce8\u3002

    "},{"location":"wiki/software_usage/DP-GEN/#paramsjson","title":"\u57fa\u672c\u53c2\u6570\u8bbe\u7f6e: params.json","text":"param.json
    { \n    \"type_map\": [        \n        \"O\", \n        \"H\",\n        \"Pt\"\n    ], \n    \"mass_map\": [ \n        15.999,\n        1.0079,\n        195.08\n    ], \n    \"_comment\": \" atoms in your systems \",\n    \"init_data_prefix\": \"/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh\", \n    \"init_data_sys\": [\n        \"init/system-000\",\"init/system-001\"\n    ], \n    \"_comment\": \" path of training set \",\n    \"init_batch_size\": [\n        1,1\n    ], \n    \"sys_configs\": [\n        [\"/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh/init/configs/POSCAR_0[0-9]\"],\n        [\"/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh/init/configs/POSCAR_1[0-9]\"]\n    ], \n    \"_comment\": \" path of initial structure for sampling \",\n    \"sys_batch_size\": [\n        1,1\n    ], \n\n    ......\n}\n
    • Potential training (DPMD)
    param.json
      {\n      ......\n      \"numb_models\": 4, \n      \"_comment\": \" number of NNP for model deviation \",\n      \"train_param\": \"input.json\", \n      \"_comment\": \" name of automatically generated input file for DPMD \",\n      \"default_training_param\": {\n          \"model\": {\n          \"descriptor\": {\n          \"type\": \"se_a\",\n    \"_comment\": \"could be bigger than the number of atoms of the very element\",\n          \"sel\": [68, 136, 64], \n          \"rcut_smth\": 0.50, \n          \"rcut\": 5.00, \n          \"neuron\": [25, 50, 100], \n          \"resnet_dt\": false, \n          \"axis_neuron\": 16,\n          \"seed\": 1\n          },\n          \"fitting_net\": {\n          \"n_neuron\": [240, 240, 240], \n          \"resnet_dt\": true, \n          \"seed\": 1\n          }},\n          \"learning_rate\": {\n          \"type\": \"exp\",\n          \"start_lr\": 0.005, \n          \"decay_steps\": 2000,\n          \"_comment\": \"last 20000 or 400000\", \n          \"decay_rate\": 0.95\n          },\n          \"loss\": {\n          \"start_pref_e\": 0.02, \n          \"limit_pref_e\": 1, \n          \"start_pref_f\": 1000, \n          \"limit_pref_f\": 1, \n          \"start_pref_v\": 0, \n          \"limit_pref_v\": 0\n          },\n          \"training\": {\n          \"systems\": [ ], \n          \"set_prefix\": \"set\", \n          \"stop_batch\": 400000, \n          \"batch_size\": 1, \n          \"seed\": 1,\n          \"disp_file\": \"lcurve.out\", \n          \"disp_freq\": 100, \n          \"numb_test\": 4, \n          \"save_freq\": 1000, \n          \"save_ckpt\": \"model.ckpt\", \n          \"load_ckpt\": \"model.ckpt\", \n          \"disp_training\": true, \n          \"time_training\": true, \n          \"profiling\": false, \n          \"profiling_file\": \"timeline.json\"\n          }},\n      \"_comment\": \"modify according your systems!\", \n      ......\n  }\n
    • Sampling and screening (LAMMPS)
    param.json
    {  \n    \"model_devi_dt\":            0.0005,\n    \"_comment\": \"model_devi_dt: Timesteps for MD. Consistent with DFTMD!\",\n    \"model_devi_skip\":          0,\n    \"_comment\": \"model_devi_skip: the first x frames of the recorded frames\",\n    \"model_devi_f_trust_lo\":    0.075,\n    \"model_devi_f_trust_hi\":    0.10,\n    \"_comment\": \"modify according to the error distribution of system\",\n    \"model_devi_e_trust_lo\":    1e10,\n    \"model_devi_e_trust_hi\":    1e10,\n    \"model_devi_clean_traj\":    false,\n    \"model_devi_jobs\": [\n    {\"temps\": [300,400],\"sys_idx\": [0,1],\"trj_freq\": 10,\"nsteps\":  2000,\"ensemble\": \"nvt\",\"_idx\": 0},\n    {\"temps\": [300,400],\"sys_idx\": [0,1],\"trj_freq\": 10,\"nsteps\":  2000,\"ensemble\": \"nvt\",\"_idx\": 1}\n    ],\n    \"_comment\": \"sys_idx should correspond to sys_configs in the beginning\",\n    \"_comment\": \"add the _idx step by step\",\n    \"_comment\": \"modify nsteps and sys_idx based on model deviation accuracy\",\n    ......\n}\n
    • Labeling (single-point energy calculations; CP2K is taken as the example here, the VASP settings can be found in the official documentation)
    param.json
    {\n    ......\n    \"fp_style\":     \"cp2k\",\n    \"shuffle_poscar\":   false,\n    \"fp_task_max\":  200,\n    \"_comment\":         \"the maximum number of stcs to calc.\",\n    \"fp_task_min\":  5,\n    \"fp_pp_path\":   \".\",\n    \"fp_pp_files\":  [],\n    \"_comment\":\"the maximum number of stcs to calc.\",\n     \"_comment\": \"fp_params: modify according your systems!\",\n    \"fp_params\": {\n        \"FORCE_EVAL\":{\n            \"DFT\":{\n                \"BASIS_SET_FILE_NAME\": \"/data/kmr/BASIC_SET/BASIS_MOLOPT\",\n                \"POTENTIAL_FILE_NAME\": \"/data/kmr/BASIC_SET/GTH_POTENTIALS\",\n                \"MGRID\":{\n                    \"CUTOFF\": 400\n                },\n                \"QS\":{\n                    \"EPS_DEFAULT\": 1.0E-13\n                },\n                \"SCF\":{\n                    \"SCF_GUESS\": \"ATOMIC\",\n                    \"EPS_SCF\": 1.0E-6,\n                    \"MAX_SCF\": 500,\n                    \"ADDED_MOS\": 500,\n                    \"CHOLESKY\": \"INVERSE\",\n                    \"SMEAR\":{\"ON\"\n                        \"METHOD\": \"FERMI_DIRAC\",\n                        \"ELECTRONIC_TEMPERATURE\": 300\n                    },\n                    \"DIAGONALIZATION\":{\n                        \"ALGORITHM\": \"STANDARD\"\n                    },\n                    \"MIXING\":{\n                               \"METHOD\": \"BROYDEN_MIXING\",\n                               \"ALPHA\":   0.3,\n                               \"BETA\":    1.5,\n                               \"NBROYDEN\":  14\n                    }\n                },\n                \"XC\":{\n                        \"XC_FUNCTIONAL\":{\"_\": \"PBE\"},\n                        \"XC_GRID\":{\n                                \"XC_SMOOTH_RHO\": \"NN50\",\n                                \"XC_DERIV\": \"NN50_SMOOTH\"\n                        },\n                        \"vdW_POTENTIAL\":{\n                                \"DISPERSION_FUNCTIONAL\": \"PAIR_POTENTIAL\",\n                                \"PAIR_POTENTIAL\":{\n                                        \"TYPE\": \"DFTD3\",\n                                        \"PARAMETER_FILE_NAME\": \"/data/kmr/BASIC_SET/dftd3.dat\",\n                                        \"REFERENCE_FUNCTIONAL\": \"PBE\"\n                                }\n                        }\n                }\n           },\n            \"SUBSYS\":{\n                        \"KIND\":{\n                                \"_\": [\"O\", \"H\",\"Pt\"],\n                                \"POTENTIAL\": [\"GTH-PBE-q6\", \"GTH-PBE-q1\",\"GTH-PBE-q10\"],\n                                \"BASIS_SET\": [\"DZVP-MOLOPT-SR-GTH\", \"DZVP-MOLOPT-SR-GTH\",\"DZVP-A5-Q10-323-MOL-T1-DERIVED_SET-1\"]\n                        }\n            }\n        }\n    }\n}\n

    Calculation settings

    Some parameters of the CP2K input are filled in with default values; see cp2k.py for details.

    Link: cp2k.py

    Calculation settings

    For metallic systems the OT section has to be switched off manually; see the settings above.

    "},{"location":"wiki/software_usage/DP-GEN/#machinejson","title":"\u4efb\u52a1\u63d0\u4ea4\u8bbe\u7f6e: machine.json","text":"

    \u4ece DP-GEN 0.10.0 \u7248\u672c\u5f00\u59cb\uff0c\u5b98\u65b9\u5f15\u5165\u4e86\u5bf9 DPDispatcher \u7684\u652f\u6301\uff0c\u5e76\u8ba1\u5212\u5c06 machine.json \u8fc1\u79fb\u5230 DPDispatcher \u4e0a\u3002 DPDispatcher \u76f8\u6bd4\u539f\u672c DP-GEN \u81ea\u5e26\u7684 Dispatcher\uff0c\u5728\u63a5\u53e3\u548c\u8bed\u6cd5\u4e0a\u6709\u8f83\u5927\u53d8\u5316\uff0c\u9700\u8981\u989d\u5916\u6307\u5b9a api_version \u5927\u4e8e\u6216\u7b49\u4e8e 1.0\u3002

    \u5173\u4e8e DPDispatcher \u9879\u76ee\u7684\u8bf4\u660e\uff0c\u8bf7\u53c2\u9605\u8fd9\u91cc\u3002

    DPDispatcher \u76f8\u6bd4\u65e7\u7248\uff0c\u57fa\u4e8e\u914d\u7f6e\u5b57\u5178\u800c\u975e\u6587\u4ef6Flag\u6765\u7ba1\u7406\u6240\u63d0\u4ea4\u7684\u4efb\u52a1\uff0c\u7a33\u5b9a\u6027\u66f4\u4f18\uff0c\u4e14\u5bf9\u4f5c\u4e1a\u7ba1\u7406\u7cfb\u7edf\u7684\u652f\u6301\u66f4\u52a0\u7075\u6d3b\u591a\u6837\uff0c\u5185\u7f6e\u63a5\u53e3\u53ef\u652f\u6301\u591a\u4efb\u52a1\u5e76\u884c\u63d0\u4ea4\u3002 \u4f46\u65b0\u7248\u5728\u64cd\u4f5c\u4e60\u60ef\u4e0a\u6709\u8f83\u5927\u6539\u53d8\uff0c\u9700\u8981\u9002\u5e94\u548c\u8c03\u6574\u3002

    \u4ee5 LSF \u4e3a\u4f8b\uff0c\u5bf9 machine.json \u7684\u5199\u6cd5\u4e3e\u4f8b\u5982\u4e0b\uff0c\u8bf7\u7559\u610f\u4ee5\u4e0b\u7684\u6ce8\u610f\u4e8b\u9879\u3002

    \u6ce8\u610f

    train \u90e8\u5206\u548cmodel_devi\u90e8\u5206\u4f7f\u7528\u4e86\u5bf9\u65b0\u7248 LSF \u63d0\u4f9b\u652f\u6301\u7684\u5199\u6cd5\uff0c\u5373\u540c\u65f6\u6307\u5b9a gpu_usage \u548c gpu_new_syntax \u4e3a True\uff0c\u4ece\u800c\u53ef\u5728\u63d0\u4ea4\u811a\u672c\u4e2d\u4f7f\u7528\u65b0\u7248 LSF \u7684\u8bed\u6cd5\u3002

    para_deg\u8868\u793a\u5728\u540c\u4e00\u5f20\u5361\u4e0a\u540c\u65f6\u8fd0\u884c\u7684\u4efb\u52a1\u6570\uff0c\u901a\u5e38\u53ef\u4e0d\u5199\u51fa\uff0c\u6b64\u65f6\u9ed8\u8ba4\u503c\u4e3a1\u3002\u8fd9\u91cc\u7ed9\u51fa\u7684\u4f8b\u5b50\u8868\u793a\u5728\u540c\u4e00\u5f20\u5361\u4e0a\u540c\u65f6\u8fd0\u884c\u4e24\u4e2aLammps\u4efb\u52a1\u3002

    fp \u90e8\u5206\u4f7f\u7528\u7684\u662f\u9488\u5bf9CPU\u8ba1\u7b97\u4f7f\u7528\u7684\u8bed\u6cd5\u3002

    \u6ce8\u610f

    \u6ce8\u610f\u5728fp\u90e8\u5206\uff0cmpiexec.hydra\u9700\u8981\u660e\u786e\u5199\u51fa\u4ee5\u786e\u4fdd\u4efb\u52a1\u662f\u5e76\u884c\u6267\u884c\u7684\uff0c\u53ef\u53c2\u8003\u4ee5\u4e0b\u4f8b\u5b50\u4e2d\u7684\u5199\u6cd5\uff1ampiexec.hydra -genvall vasp_gam\u3002\u82e5\u4f60\u4e0d\u77e5\u9053\u8fd9\u90e8\u5206\u8be5\u5982\u4f55\u4e66\u5199\uff0c\u8bf7\u53c2\u8003\u96c6\u7fa4\u4e0a\u7684\u63d0\u4ea4\u811a\u672c\u8bf4\u660e(/data/share/base/scripts)\u3002

    \u82e5\u5728191\u4e0a\u5411191\u4e0a\u63d0\u4ea4\u4efb\u52a1\uff0c\u53ef\u4ee5\u8003\u8651\u4f7f\u7528LocalContext\uff0c\u53ef\u4ee5\u51cf\u5c11\u6587\u4ef6\u538b\u7f29\u4f20\u8f93\u7684\u989d\u5916IO\u5f00\u9500\u3002

    machine.json
    {\n  \"api_version\": \"1.0\",\n  \"train\": [\n    {\n      \"command\": \"dp\",\n      \"machine\": {\n        \"batch_type\": \"Slurm\",\n        \"context_type\": \"LocalContext\",\n        \"local_root\": \"./\",\n        \"remote_root\": \"/data/tom/dprun/train\",\n      },\n      \"resources\": {\n        \"number_node\": 1,\n        \"cpu_per_node\": 1,\n        \"gpu_per_node\": 1,\n        \"queue_name\": \"gpu3\",\n        \"group_size\": 1,\n        \"module_list\": [\n          \"deepmd/2.0\"\n        ]\n      }\n    }\n  ],\n  \"model_devi\":[\n    {\n      \"command\": \"lmp_mpi\",\n      \"machine\":{\n        \"batch_type\": \"Slurm\",\n        \"context_type\": \"SSHContext\",\n        \"local_root\": \"./\",\n        \"remote_root\": \"/data/jerry/dprun/md\",\n        \"remote_profile\": {\n          \"hostname\": \"198.76.54.32\",\n          \"username\": \"jerry\",\n          \"port\": 6666\n        }\n      },\n      \"resources\": {\n        \"number_node\": 1,\n        \"cpu_per_node\": 1,\n        \"gpu_per_node\": 1,\n        \"queue_name\": \"gpu2\",\n        \"group_size\": 5,\n        \"kwargs\": {\n          \"custom_gpu_line\": [\n            \"#SBATCH --gres=gpu:1g.10gb:1\"\n          ]\n        },\n        \"strategy\": {\"if_cuda_multi_devices\": false},\n        \"para_deg\": 2,\n        \"module_list\": [\n          \"deepmd/2.1\"\n        ],\n        \"source_list\": []\n      }\n    }\n  ],\n  \"fp\":[\n    {\n      \"command\": \"mpiexec.hydra -genvall cp2k.popt input.inp\",\n      \"machine\":{\n        \"batch_type\": \"Slurm\",\n        \"context_type\": \"SSHContext\",\n        \"local_root\": \"./\",\n        \"remote_root\": \"/data/jerry/dprun/fp\",\n        \"remote_profile\": {\n          \"hostname\": \"198.76.54.32\",\n          \"username\": \"jerry\",\n          \"port\": 6666\n        }\n      },\n      \"resources\": {\n        \"number_node\": 2,\n        \"cpu_per_node\": 32,\n        \"gpu_per_node\": 0,\n        \"queue_name\": \"c53-medium\",\n        \"group_size\": 10,\n        \"module_list\": [\n          \"intel/17.5.239\",\n          \"mpi/intel/2017.5.239\",\n          \"gcc/5.5.0\"\n          \"cp2k/7.1\"\n        ]\n      }\n    }\n  ]\n}\n

    For the meaning of the parameters, please refer to the machine and resources sections of the official documentation.

    The meanings of some of the parameters are as follows:

    • machine: configuration of the remote server.
    • batch_type: type of the job scheduling system; LSF, Slurm, Shell, etc. can be specified.
    • context_type: how to connect to the remote server; common options are SSHContext, LocalContext, LazyLocalContext, etc. See the official documentation for details.
    • SSHContext: connect to the remote host via SSH; usually used when submitting from one server to another.
    • LocalContext: choose this option if the tasks are to be submitted on the current server; no SSH connection is needed and the remote_profile part can be omitted.
    • remote_root: absolute path on the target host under which the tasks are submitted.
    • remote_profile: remote host settings; can be omitted if context_type is LocalContext or LazyLocalContext.
    • hostname: IP of the remote host.
    • username: user name on the remote host.
    • password: password on the remote host; can be omitted when logging in with a key.
    • port: port of the SSH connection, 22 by default.
    • key_filename: path of the SSH key; defaults to ~/.ssh, in which case it can be omitted.
    • passphrase: passphrase of the key, usually set when the key is created; can be omitted if empty.
    • resource: configuration related to job submission.
    • number_node: number of nodes used by the job.
    • cpu_per_node: number of CPU cores used per node.
    • gpu_per_node: number of GPU cards used per node.
    • kwargs: optional parameters, depending on what each scheduling system supports. See the official documentation for details.
    • custom_gpu_line: custom GPU submission directives, which can be adapted to the required syntax; they start with #BSUB (LSF) or #SBATCH (Slurm) depending on the job management system. The example on this page uses a MIG instance (1g.10gb) on gpu2.
    • custom_flags: other flags to be used, e.g. walltime or job name settings.
    • queue_name: name of the queue the job is submitted to.
    • group_size: number of tasks bundled into one job.
    • if_cuda_multi_devices: whether a task is allowed to run on multiple cards; defaults to True. On Zeus it is recommended to set it to False.
    • para_deg: number of tasks running on the same card at the same time; defaults to 1.
    • module_list: modules to load; can be omitted.
    • module_unload_list: modules to unload; can be omitted.
    • source_list: paths of scripts to source; can be omitted.
    • envs: environment variables to set; can be omitted.

    Login settings

    If the server uses password login, add the keyword password after username and provide the password. The value must be enclosed in quotation marks!

    Once all the input files are ready, the dpgen task can be submitted with the following command:

    dpgen run param.json machine.json

    Submitting the job

    If submitting on 191, dpgen has to be installed on the server by yourself; see the official GitHub for details. In general, running the following command is enough:

    pip install --user dpgen\n

    Fixing abnormal Slurm status queries

    If you run into the following error, it is very likely because Slurm temporarily failed to return the job status. Old versions of DPDispatcher did not handle such fluctuations well and exit directly:

    RuntimeError: status command squeue fails to execute.job_id:13544 \nerror message:squeue: error: Invalid user for SlurmUser slurm, ignored\nsqueue: fatal: Unable to process configuration file\n

    This part has been adjusted in newer versions, but since earlier versions contain a serious bug when copying empty folders, please make sure your DPDispatcher version is 0.5.6 or above.

    pip install --upgrade --user dpdispatcher\n

    Support

    DP-GEN versions 0.11 and above have removed support for the old dispatcher, and migrating to DPDispatcher is recommended. To avoid compatibility issues, the old-style input is still kept below; please take care to tell them apart.

    machine_old.json
    {\n  \"train\": [\n    {\n      \"machine\": {\n        \"machine_type\": \"slurm\",\n        \"hostname\": \"123.45.67.89\",\n        \"port\": 22,\n        \"username\": \"kmr\",\n        \"work_path\": \"/home/kmr/pt-oh/train\"\n      },\n      \"resources\": {\n        \"node_gpu\": 1,\n        \"numb_node\": 1,\n        \"task_per_node\": 1,\n        \"partition\": \"large\",\n        \"exclude_list\": [],\n        \"source_list\": [],\n        \"module_list\": [\n            \"deepmd/2.1\"\n        ],\n        \"time_limit\": \"23:0:0\"\n      },\n      \"python_path\": \"/share/apps/deepmd/2.1/bin/python\"\n    }\n  ],\n  \"model_devi\": [\n    {\n      \"machine\": {\n        \"machine_type\": \"slurm\",\n        \"hostname\": \"123.45.67.89\",\n        \"port\": 22,\n        \"username\": \"kmr\",\n        \"work_path\": \"/home/kmr/pt-oh/dpmd\"\n      },\n      \"resources\": {\n        \"node_gpu\": 1,\n        \"numb_node\": 1,\n        \"task_per_node\": 1,\n        \"partition\": \"large\",\n        \"exclude_list\": [],\n        \"source_list\": [],\n        \"module_list\": [\n            \"deepmd/2.1\"\n        ],\n        \"time_limit\": \"23:0:0\"\n      },\n      \"command\": \"lmp_mpi\",\n      \"group_size\": 80\n    }\n  ],\n  \"fp\": [\n    {\n      \"machine\": {\n        \"machine_type\": \"slurm\",\n        \"hostname\": \"123.45.67.90\",\n        \"port\": 6666,\n        \"username\": \"kmr\",\n        \"work_path\": \"/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh/labelling\"\n      },\n      \"resources\": {\n        \"cvasp\": false,\n        \"task_per_node\": 28,\n        \"numb_node\": 1,\n        \"node_cpu\": 28,\n        \"exclude_list\": [],\n        \"with_mpi\": true,\n        \"source_list\": [\n        ],\n        \"module_list\": [\n            \"intel/17.5.239\",\n            \"mpi/intel/17.5.239\",\n            \"cp2k/6.1\"\n        ],\n        \"time_limit\": \"12:00:00\",\n        \"partition\": \"medium\",\n        \"_comment\": \"that's Bel\"\n      },\n      \"command\": \"cp2k.popt input.inp\",\n      \"group_size\": 50 \n    }\n  ]\n}\n
    "},{"location":"wiki/software_usage/DP-GEN/#_5","title":"\u8bad\u7ec3\u96c6\u6536\u96c6","text":"

    DP-GEN\u4ee3\u7801\u8fed\u4ee3\u751f\u6210\u7684\u8bad\u7ec3\u96c6\u662f\u5206\u6563\u50a8\u5b58\u7684\u3002\u53ef\u4ee5\u7528DP-GEN\u81ea\u5e26\u7684collect\u51fd\u6570\u8fdb\u884c\u6570\u636e\u6536\u96c6\u3002

    \u9996\u5148\u53ef\u4ee5\u4f7f\u7528dpgen collect -h \u67e5\u770b\u4f7f\u7528\u8bf4\u660e

    \u5e38\u7528\u7528\u6cd5\u662f

    dpgen collect JOB_DIR OUTPUT_DIR -p param.json\n

    JOB_DIR\u5c31\u662fDP-GEN\u7684\u8f93\u51fa\u76ee\u5f55\uff0c\u5305\u542b\u6709iter.0000*\u4e00\u7cfb\u5217\u7684\u76ee\u5f55\u3002OUTPUT_DIR\u5c31\u662f\u6536\u96c6\u7684\u6570\u636e\u51c6\u5907\u653e\u5230\u54ea\u3002param.json\u5c31\u662f\u8fd0\u884cDP-GEN\u8dd1\u7684param\u6587\u4ef6\u3002

    \u4f8b\u5982\uff1a

    dpgen collect ./ ./collect -p param-ruo2.json\n

    \u4ee5\u4e0a\u547d\u4ee4\u4f1a\u628a\u5f53\u524d\u6587\u4ef6\u5939\u7684DP-GEN\u6570\u636e\u6536\u96c6\u597d\u653e\u5165collect\u76ee\u5f55\u91cc\u3002

    init.000  init.001  sys.000  sys.001\n

    init.*\u662f\u521d\u59cb\u8bad\u7ec3\u96c6\uff0csys.*\u662f\u540e\u6765DP-GEN\u751f\u6210\u7684\u8bad\u7ec3\u96c6\uff0c\u6309\u7167param\u7684sys\u5206\u7c7b\u3002
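    The collected directories can then be fed to DeePMD-kit training as the \"systems\" list. Below is a minimal Python sketch (the collect path and the input.json file name are hypothetical) that gathers the sub-directories and writes them into the training section of an input file.

    import glob
    import json

    # collect all data directories produced by `dpgen collect`
    systems = sorted(glob.glob('collect/*'))

    # insert them into an existing DeePMD-kit input file
    with open('input.json') as f:
        config = json.load(f)

    config['training']['training_data']['systems'] = systems

    with open('input.json', 'w') as f:
        json.dump(config, f, indent=4)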

    "},{"location":"wiki/software_usage/DP-GEN/#bonus","title":"Bonus\uff01","text":""},{"location":"wiki/software_usage/DP-GEN/#_6","title":"\u5e38\u89c1\u62a5\u9519\u95ee\u9898\uff08\u6b22\u8fce\u8865\u5145&\u4fee\u6b63\uff09","text":"
    • ... expecting value ...

    \u53ef\u80fd\u662f\u6570\u7ec4\u6216\u8005\u5b57\u5178\u672b\u5c3e\u591a\u5199\u4e86\u9017\u53f7

    • ERROR: lost atoms ...

    \u53ef\u80fd\u662fLammps\u7b97model_devi\u7684\u65f6\u5019\u56e0\u4e3a\u52bf\u51fd\u6570\u592a\u5dee\u5bfc\u81f4\u6709\u539f\u5b50\u91cd\u5408\u800c\u62a5\u9519\u3002\u53ef\u4ee5\u624b\u52a8\u5728\u5bf9\u5e94\u7684\u5355\u6761\u8f68\u8ff9\u7684input.lammps\u4e2d\u52a0\u5165

      thermo_modify   lost ignore flush yes\n

    \u7136\u540e\u5728\u4e0a\u4e00\u7ea7\u6587\u4ef6\u5939\u4e0b\u9762\u624b\u52a8\u63d0\u4ea4\u4efb\u52a1

      bsub<*.sub\n
    - AssertionError

    \u67d0\u4e2a\u5355\u70b9\u80fd\u8ba1\u7b97\u4e2d\u65ad\u540e\u91cd\u65b0\u5f00\u59cb\uff0c\u5bfc\u81f4cp2k\u7684output\u4e2d\u6709\u91cd\u53e0\u3002\u53ef\u4ee5\u572802.fp\u6587\u4ef6\u5939\u4e0b\u7528\u4ee5\u4e0b\u811a\u672c\u8fdb\u884c\u68c0\u67e5\uff1a

    import dpdata\nimport glob\nl = glob.glob(\"task.002*\")\nl.sort()\nstc = dpdata.LabeledSystem(l[0]+'/output',fmt='cp2k/output')\nfor i in l[1:]:\n    print(i)\n    stc += dpdata.LabeledSystem(i+'/output',fmt='cp2k/output')\n

    Here task.002.* means iterating over the labeled configurations of system 002. If different systems have the same number of atoms, all configurations can also be checked at once with task.00*.

    • If you find that the model deviation is very large from the very beginning and the structures in the test set are scrambled, it may be because \"shuffle_poscar\": true is set in the param file. This option randomly shuffles the lines of the original POSCAR of the test set and runs the model deviation test on the shuffled structures. It is mainly meant for shuffling alloy structures; for interfaces or covalently bonded systems (such as semiconductors), however, randomly shuffling the atoms turns the interface or semiconductor structure into a meaningless mess, so we do not need to (and must not) shuffle. Please set in the param file:
      ...\n\"shuffle_poscar\": false\n...\n
    "},{"location":"wiki/software_usage/DP-GEN/#script-from-xyz-to-poscar","title":"script from xyz to POSCAR","text":"

    from ase.io import iread, write\nimport ase.build\n\nfor j in range(2):\n    i=0\n    for atoms in iread('./traj_'+str(j)+'.xyz', format='xyz'):\n        atoms.set_cell([11.246, 11.246, 35.94,90,90,90])\n        i=i+1\n        if i%20==0:\n            atoms=ase.build.sort(atoms)\n            ase.io.write('POSCAR_'+str(j)+'_'+str(int(i/20)-1), atoms, format='vasp',vasp5=True)\n
    Or call the write function from ase.io.vasp:

    def write_vasp(filename, atoms, label=None, direct=False, sort=None,\nsymbol_count=None, long_format=True, vasp5=False,\nignore_constraints=False):\n
    "},{"location":"wiki/software_usage/DeePMD-kit/","title":"DeePMD-kit 2.x \u4f7f\u7528\u5165\u95e8","text":""},{"location":"wiki/software_usage/DeePMD-kit/#_1","title":"\u7b80\u4ecb","text":"

    DeePMD-kit\u662f\u4e00\u4e2a\u8bad\u7ec3\u795e\u7ecf\u7f51\u7edc\u52bf\u80fd(Machine Learning Potential)\u7684\u4ee3\u7801\u5305\u3002\u8be5\u5305\u4e3b\u8981\u7531\u5f20\u6797\u5cf0\uff08\u666e\u6797\u65af\u987f\u5927\u5b66\uff09\uff0c\u738b\u6db5\uff08\u5317\u4eac\u5e94\u7528\u7269\u7406\u4e0e\u8ba1\u7b97\u6570\u5b66\u7814\u7a76\u6240\uff09\u5f00\u53d1\u3002\u9ec4\u5251\u5174\u548c\u5e84\u6c38\u658c\u66fe\u7ecf\u77ed\u65f6\u95f4\u53c2\u4e0e\u5f00\u53d1\u3002\u5982\u6709\u95ee\u9898\uff0c\u53ef\u4ee5\u5411\u4ed6\u4eec\u8be2\u95ee\u3002

    Danger

    \u6211\u4eec\u5df2\u7ecf\u820d\u5f03\u4e861.x\u7248\u672c\u7684\u6559\u7a0b\u3002

    \u4ee5\u4e0b\u4e3a\u53c2\u8003\u4fe1\u606f:

    • \u5b98\u7f51
    • \u5b98\u65b9\u6587\u6863
    • \u5b89\u88c5\u65b9\u6cd5: Installation Guide

    Warning

    \u6b64\u9875\u9762\u4ec5\u9650\u63d0\u4f9b\u8d21\u732e\u8005\u5bf9\u4e8e\u8be5\u8f6f\u4ef6\u7684\u7406\u89e3\uff0c\u5982\u6709\u4efb\u4f55\u95ee\u9898\u8bf7\u8054\u7cfb\u8d21\u732e\u8005

    "},{"location":"wiki/software_usage/DeePMD-kit/#_2","title":"\u7b2c\u4e00\u6b21\u5c1d\u8bd5","text":""},{"location":"wiki/software_usage/DeePMD-kit/#_3","title":"\u8fd0\u884c\u7b2c\u4e00\u6b21\u673a\u5668\u5b66\u4e60","text":"

    \u5982\u679c\u4f60\u6b63\u5728\u4f7f\u7528 Zeus \u96c6\u7fa4\uff0c\u8bf7\u4f7f\u7528 slurm \u811a\u672c\u6765\u63d0\u4ea4 DeePMD-kit \u4efb\u52a1\u3002

    \u8bf7\u4ece Github \u4e0b\u8f7d DeePMD-kit \u7684\u4ee3\u7801\uff0c\u6211\u4eec\u5c06\u4f1a\u4f7f\u7528\u91cc\u9762\u7684\u6c34\u6a21\u578b\u505a\u4e3a\u4f8b\u5b50\u3002

    git clone https://github.com/deepmodeling/deepmd-kit.git\n

    \u9996\u5148\u8fdb\u5165\u542b\u6709\u6c34\u6a21\u578b\u7684\u4f8b\u5b50\u7684\u76ee\u5f55

    cd <deepmd repositoy>/examples/water/se_e2_a/\n

    \u4f60\u4f1a\u770b\u5230input.json\u6587\u4ef6\uff0c\u8fd9\u662fDeePMD-kit\u4f7f\u7528\u7684\u8f93\u5165\u6587\u4ef6\u3002\u73b0\u5728\u590d\u5236/data/share/base/script/deepmd.lsf\u5230\u5f53\u524d\u6587\u4ef6\u5939\uff0c\u5e76\u4e14\u4fee\u6539\u5b83\u3002

    cp /data/share/base/script/deepmd.lsf ./\nvim deepmd.lsf\n

    Warning

    \u5982\u679c\u8c03\u7528\u7684\u662f1.0\u7684\u7248\u672c\uff0c\u9700\u8981\u5728learning_rate\u4e0b\u52a0\u5165decay_rate\u5173\u952e\u8bcd\uff0c\u4e00\u822c\u8bbe\u4e3a0.95.

    \u4f60\u73b0\u5728\u4ec5\u9700\u8981\u4fee\u6539 slurm \u811a\u672c\u4e2d\u7684\u8f93\u5165\u6587\u4ef6\u540d\u79f0\u5373\u53ef\u3002\u628a\u811a\u672c\u4e2d\u7684input.json\u66ff\u6362\u6210water_se_a.json\u3002

    #!/bin/bash\n\n#BSUB -q gpu\n#BSUB -W 24:00\n#BSUB -J train\n#BSUB -o %J.stdout\n#BSUB -e %J.stderr\n#BSUB -n 8\n#BSUB -R \"span[ptile=8]\"\n# ============================================\n# modify the number of cores to use\n# according to the number of GPU you select\n# for example, 8 cores for one GPU card\n# while there are 32 cores in total\n# ============================================\n\n# add modulefiles\nmodule add deepmd/2.2.7\n\n# automatic select the gpu\nsource /data/share/base/script/find_gpu.sh\n\ndp train input.json -l train.log\n

    Submit the job with the following commands:

    #submit your job\nbsub < deepmd.lsf\n#check your job by\nbjobs \n

    While the job is running, the following files are generated in the current directory:

    • train.log: the training log
    • lcurve.out: the learning curve of the training
    • model.ckpt.data-00000-of-00001, model.ckpt.index, checkpoint, model.ckpt.meta: these are the training checkpoint files

    Very good! You have successfully started your first machine learning training!

    "},{"location":"wiki/software_usage/DeePMD-kit/#_4","title":"\u6d4f\u89c8\u8f93\u51fa\u6587\u4ef6","text":"

    \u4f7f\u7528 less \u547d\u4ee4\u6765\u6d4f\u89c8\u8f93\u51fa\u6587\u4ef6

    less train.log\n

    \u4f60\u5c06\u4f1a\u770b\u5230\u5982\u4e0b\u5185\u5bb9

    # DEEPMD: initialize model from scratch\n# DEEPMD: start training at lr 1.00e-03 (== 1.00e-03), final lr will be 3.51e-08\n2019-12-07 00:03:49.659876: I tensorflow/stream_executor/platform/default/dso_loader.cc:42] Successfully opened dynamic library libcublas.so.10.0\n# DEEPMD: batch     100 training time 5.95 s, testing time 0.18 s\n# DEEPMD: batch     200 training time 4.58 s, testing time 0.20 s\n# DEEPMD: batch     300 training time 4.56 s, testing time 0.14 s\n# DEEPMD: batch     400 training time 4.49 s, testing time 0.13 s\n# DEEPMD: batch     500 training time 4.60 s, testing time 0.14 s\n# DEEPMD: batch     600 training time 4.61 s, testing time 0.15 s\n# DEEPMD: batch     700 training time 4.43 s, testing time 0.18 s\n# DEEPMD: batch     800 training time 4.59 s, testing time 0.13 s\n# DEEPMD: batch     900 training time 4.41 s, testing time 0.17 s\n# DEEPMD: batch    1000 training time 4.66 s, testing time 0.11 s\n# DEEPMD: saved checkpoint model.ckpt\n# DEEPMD: batch    1100 training time 4.45 s, testing time 0.15 s\n# DEEPMD: batch    1200 training time 4.37 s, testing time 0.14 s\n

    The number after batch indicates how many batches of data have been fed into the training. The display interval of this number, i.e. 100, is set by \"disp_freq\": 100 in the input file.

    Now have a look at your learning curve lcurve.out:

    less lcurve.out\n

    You will see:

    #  step      rmse_val    rmse_trn    rmse_e_val  rmse_e_trn    rmse_f_val  rmse_f_trn         lr\n      0      1.69e+01    1.58e+01      1.52e+00    5.69e-01      5.35e-01    5.00e-01    1.0e-03\n   1000      4.74e+00    4.68e+00      3.88e-02    4.02e-01      1.50e-01    1.48e-01    1.0e-03\n   2000      5.06e+00    3.93e+00      1.86e-01    1.54e-01      1.60e-01    1.24e-01    1.0e-03\n   3000      4.73e+00    4.34e+00      9.08e-02    3.90e-01      1.49e-01    1.37e-01    1.0e-03\n   4000      4.65e+00    6.09e+00      2.24e-01    1.92e-01      1.47e-01    1.93e-01    1.0e-03\n   5000      3.84e+00    3.25e+00      5.26e-02    2.40e-02      1.25e-01    1.06e-01    9.4e-04\n   6000      4.17e+00    2.78e+00      6.35e-02    3.89e-02      1.36e-01    9.03e-02    9.4e-04\n   7000      3.24e+00    3.00e+00      5.55e-02    8.58e-03      1.05e-01    9.76e-02    9.4e-04\n   8000      2.97e+00    2.83e+00      2.97e-02    2.46e-02      9.68e-02    9.22e-02    9.4e-04\n   9000      1.01e+01    6.92e+00      1.36e-01    1.89e-01      3.28e-01    2.25e-01    9.4e-04\n  10000      3.73e+00    3.39e+00      4.38e-02    3.23e-02      1.25e-01    1.14e-01    8.9e-04\n  11000      3.51e+00    2.76e+00      1.31e-01    3.47e-01      1.17e-01    8.98e-02    8.9e-04\n  12000      2.59e+00    2.89e+00      1.35e-01    1.18e-01      8.57e-02    9.65e-02    8.9e-04\n  13000      5.65e+00    4.68e+00      3.08e-01    3.28e-01      1.88e-01    1.55e-01    8.9e-04\n

    These numbers show how large the prediction error of the current machine learning model is. rmse_e_trn is the error of the energies predicted by the model on the training set, and rmse_e_val is the corresponding error on the validation (test) set. rmse_f_val and rmse_f_trn have the same meaning, but for the forces. You can plot them with the Matplotlib Python package.
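    As an illustration, below is a minimal Python sketch that plots the force errors from lcurve.out with Matplotlib; the column indices follow the header shown above.

    import numpy as np
    import matplotlib.pyplot as plt

    # columns: step, rmse_val, rmse_trn, rmse_e_val, rmse_e_trn, rmse_f_val, rmse_f_trn, lr
    data = np.loadtxt('lcurve.out')

    plt.plot(data[:, 0], data[:, 5], label='rmse_f_val')
    plt.plot(data[:, 0], data[:, 6], label='rmse_f_trn')
    plt.xlabel('training step')
    plt.ylabel('force RMSE')
    plt.yscale('log')
    plt.legend()
    plt.savefig('lcurve.png')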

    "},{"location":"wiki/software_usage/DeePMD-kit/#_5","title":"\u4f7f\u7528\u8fdb\u9636","text":""},{"location":"wiki/software_usage/DeePMD-kit/#_6","title":"\u51c6\u5907\u8bad\u7ec3\u6570\u636e","text":"

    \u524d\u534a\u90e8\u5206\u4ec5\u4ec5\u662f\u8ba9\u4f60\u8fd0\u884cDeePMD-kit\u8fdb\u884c\u8bad\u7ec3\u3002\u4e3a\u4e86\u8bad\u7ec3\u4e00\u4e2a\u9488\u5bf9\u4f60\u7684\u4f53\u7cfb\u7684\u6a21\u578b\uff0c\u4f60\u9700\u8981\u81ea\u5df1\u6765\u51c6\u5907\u6570\u636e\u3002\u8fd9\u4e9b\u6570\u636e\u90fd\u662f\u7b2c\u4e00\u6027\u539f\u7406\u8ba1\u7b97\u5f97\u5230\u7684\u6570\u636e\u3002\u8fd9\u4e9b\u6570\u636e\u53ef\u4ee5\u662f\u5355\u70b9\u80fd\u8ba1\u7b97\u5f97\u5230\u7684\u6570\u636e\uff0c\u6216\u8005\u662f\u5206\u5b50\u52a8\u529b\u5b66\u6a21\u62df\u5f97\u5230\u7684\u6570\u636e\u3002\u4f5c\u4e3a\u6570\u636e\u96c6\u9700\u8981\u7684\u6570\u636e\u6709\uff1a

    • \u4f53\u7cfb\u7684\u7ed3\u6784\u6587\u4ef6\uff1acoord.npy
    • \u4f53\u7cfb\u7684\u7ed3\u6784\u6587\u4ef6\u5bf9\u5e94\u7684\u5143\u7d20\u6807\u8bb0\uff1atype.raw
    • \u4f53\u7cfb\u7684\u7ed3\u6784\u6587\u4ef6\u5bf9\u5e94\u7684\u80fd\u91cf\uff1aenergy.npy
    • \u4f53\u7cfb\u7684\u7ed3\u6784\u6587\u4ef6\u5bf9\u5e94\u7684\u529b\uff1aforce.npy
    • \u4f53\u7cfb\u7684\u7ed3\u6784\u6587\u4ef6\u5bf9\u5e94\u7684\u6676\u80de\u5927\u5c0f\uff0c\u5982\u679c\u662f\u975e\u5468\u671f\u6027\u4f53\u7cfb\uff0c\u8bf7\u5728\u8bad\u7ec3\u6587\u4ef6\u91cc\u51c6\u5907\u4e00\u4e2a\u8d85\u5927\u5468\u671f\u8fb9\u754c\u6761\u4ef6\uff1abox.npy

    \u4ee3\u7801\u5757\u91cc\u7684\u6587\u4ef6\u540d\u4e3aDeePMD-kit\u4f7f\u7528\u7684\u547d\u540d\u3002npy\u540e\u7f00\u4e3aPython\u7684numpy\u4ee3\u7801\u5305\u751f\u6210\u7684\u6587\u4ef6\uff0c\u8bf7\u5728\u6b64\u4e4b\u524d\u5b66\u4e60numpy\u3002\u5982\u679c\u4f60\u4f7f\u7528cp2k\u5f97\u5230\u6570\u636e\uff0c\u4f60\u4f1a\u6709 *pos-1.xyz \u548c *frc-1.xyz \u6587\u4ef6\u3002\u4f60\u53ef\u4ee5\u4f7f\u7528\u5e2e\u52a9\u7684\u811a\u672c\u8f6c\u5316\u6210DeePMD-kit\u7684\u6570\u636e\u96c6\u683c\u5f0f\u3002
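    As an illustration, here is a minimal Python sketch that uses the dpdata package (assuming its cp2k/aimd_output format fits your output layout; the directory names are hypothetical) to convert a CP2K AIMD run into the DeePMD-kit npy format.

    import dpdata

    # read a CP2K AIMD output directory (containing the *pos-1.xyz / *frc-1.xyz files)
    system = dpdata.LabeledSystem('cp2k_aimd_dir', fmt='cp2k/aimd_output')

    # write the data in DeePMD-kit npy format, 1000 frames per set.* directory
    system.to_deepmd_npy('data_0', set_size=1000)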

    Now let us look at the training data format of DeePMD-kit. The dataset of the water model we trained before is stored in <deepmd repository>/examples/water/data/data_0. Let us look at the directory structure of the dataset:

    # directory structre for training data\n.\n\u251c\u2500\u2500 data_0\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 set.000\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 box.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 coord.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 energy.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 force.npy\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 type.raw\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 type_map.raw\n\u251c\u2500\u2500 data_1\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 set.000\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 box.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 coord.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 energy.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 force.npy\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 set.001\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 box.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 coord.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 energy.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 force.npy\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 type.raw\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 type_map.raw\n\u251c\u2500\u2500 data_2\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 set.000\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 box.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 coord.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 energy.npy\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 force.npy\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 type.raw\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 type_map.raw\n\u2514\u2500\u2500 data_3\n    \u251c\u2500\u2500 set.000\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 box.npy\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 coord.npy\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 energy.npy\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 force.npy\n    \u251c\u2500\u2500 type.raw\n    \u2514\u2500\u2500 type_map.raw\n

    Obviously, we see the type.raw file and a number of directories starting with set. The type.raw file records the element information of the system. If you open it, you will find that it only contains a series of numbers. These numbers correspond to the \"type_map\":[\"O\",\"H\"] entry in water_se_a.json: 0 stands for O and 1 stands for H, according to their positions in [\"O\",\"H\"], where the first position is 0.

    0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n

    The information stored in box.npy, coord.npy, energy.npy and force.npy has been described above. The only thing to note is that each of these files stores one large matrix. If we have Y structures and each structure contains X atoms, the shapes of the matrices in box.npy, coord.npy, energy.npy and force.npy are (Y, 9), (Y, X*3), (Y, 1) and (Y, X*3) respectively.
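    The following minimal Python sketch (the data path data_0/set.000 is taken from the example above) loads such a set and prints the array shapes together with the element mapping from type.raw.

    import numpy as np

    # element types of the whole system, e.g. 0 -> O, 1 -> H as defined by type_map
    types = np.loadtxt('data_0/type.raw', dtype=int)
    type_map = ['O', 'H']
    print('first atoms:', [type_map[t] for t in types[:4]], '...')

    # per-frame arrays; Y frames, X atoms per frame
    for name in ['box', 'coord', 'energy', 'force']:
        arr = np.load(f'data_0/set.000/{name}.npy')
        print(name, arr.shape)  # e.g. box: (Y, 9), coord/force: (Y, X*3)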

    "},{"location":"wiki/software_usage/DeePMD-kit/#_7","title":"\u8bbe\u7f6e\u4f60\u7684\u8f93\u5165\u6587\u4ef6","text":"

    \u8f93\u5165\u6587\u4ef6\u662fjson\u6587\u4ef6\u3002\u4f60\u53ef\u4ee5\u4f7f\u7528\u4e4b\u524d\u6211\u4eec\u7684json\u6587\u4ef6\u8fdb\u884c\u7ec6\u5fae\u6539\u52a8\u5c31\u6295\u5165\u5230\u81ea\u5df1\u4f53\u7cfb\u7684\u8bad\u7ec3\u4e2d\u3002\u8fd9\u4e9b\u9700\u8981\u4fee\u6539\u7684\u5173\u952e\u8bcd\u5982\u4e0b\uff1a

    • type\": \"se_a\": \u8bbe\u7f6e\u63cf\u8ff0\u7b26\uff08descriptor\uff09\u7c7b\u578b\u3002\u4e00\u822c\u4f7f\u7528se_a\u3002
    • \"sel\": [46, 92]: \u8bbe\u7f6e\u6bcf\u4e2a\u539f\u5b50\u7684\u622a\u65ad\u534a\u5f84\u5185\u6240\u62e5\u6709\u7684\u6700\u5927\u539f\u5b50\u6570\u3002\u6ce8\u610f\u8fd9\u91cc\u7684\u4e24\u4e2a\u6570\u5b5746\uff0c92\u5206\u522b\u5bf9\u5e94\u7684\u662fO\u539f\u5b50\u548cH\u539f\u5b50\u3002\u4e0e\u4f60\u5728type_map\u91cc\u8bbe\u7f6e\u7684\u5143\u7d20\u7c7b\u578b\u662f\u76f8\u5bf9\u5e94\u7684\u3002

    \"descriptor\" :{\n         \"type\":     \"se_a\",\n         \"sel\":      [46, 92],\n         \"rcut_smth\":    0.50,\n         \"rcut\":     6.00,\n         \"neuron\":       [25, 50, 100],\n         \"resnet_dt\":    false,\n         \"axis_neuron\":  16,\n         \"seed\":     1,\n         \"_comment\":     \" that's all\"\n     },\n
    \u5728\"training\"\u7684\"training_data\"\u4e0b - \"systems\": [\"../data/data_0/\", \"../data/data_1/\", \"../data/data_2/\"]: \u8bbe\u7f6e\u5305\u542b\u8bad\u7ec3\u6570\u636e\u7684\u76ee\u5f55\u3002 - \"batch_size\": auto, \u8fd9\u4e2a\u4f1a\u6839\u636e\u4f53\u7cfb\u539f\u5b50\u6570\u8fdb\u884c\u5206\u914d\uff0c\u4e0d\u8fc7\u6211\u4eec\u81ea\u5df1\u901a\u5e38\u8bbe\u7f6e\u4e3a1\uff0c\u56e0\u4e3a\u4f53\u7cfb\u539f\u5b50\u6570\u6709400-800\u4e2a\u5de6\u53f3\u3002

        \"training_data\": {\n        \"systems\":      [\"../data/data_0/\", \"../data/data_1/\", \"../data/data_2/\"],\n        \"batch_size\":   \"auto\",\n        \"_comment\":     \"that's all\"\n    }\n
    \u5728\"training\"\u7684\"validation_data\"\u4e0b - \"systems\": [\"../data/data_3\"]: \u8bbe\u7f6e\u5305\u542b\u6d4b\u8bd5\u6570\u636e\u7684\u76ee\u5f55\u3002 - \"batch_size\": 1, \u8fd9\u4e2a\u4f1a\u6839\u636e\u4f53\u7cfb\u539f\u5b50\u6570\u8fdb\u884c\u5206\u914d\uff0c\u4e0d\u8fc7\u6211\u4eec\u81ea\u5df1\u901a\u5e38\u8bbe\u7f6e\u4e3a1\uff0c\u56e0\u4e3a\u4f53\u7cfb\u539f\u5b50\u6570\u6709400-800\u4e2a\u5de6\u53f3\u3002 - \"numb_btch\": 3 , \u6bcf\u6b21\u8fed\u4ee3\u4e2d\uff0c\u6d4b\u8bd5\u7684\u7ed3\u6784\u6570\u91cf\u4e3abatch_size\u4e58\u4ee5numb_btch\u3002 - \u66f4\u591a\u53c2\u6570\u8bf4\u660e\uff0c\u8bf7\u53c2\u8003\u5b98\u65b9\u6587\u6863\uff1ahttps://deepmd.readthedocs.io/en/latest/train-input.html

    Warning

    Remember: training should be run on the cluster; please submit it with an LSF script.

    "},{"location":"wiki/software_usage/DeePMD-kit/#_8","title":"\u5f00\u59cb\u4f60\u7684\u8bad\u7ec3","text":"

    Start the training with the following command:

    dp train input.json\n

    Warning

    Remember: training should be run on the cluster; please submit it with a Slurm script.

    "},{"location":"wiki/software_usage/DeePMD-kit/#_9","title":"\u91cd\u542f\u4f60\u7684\u8bad\u7ec3","text":"

    Restart with the following command:

    dp train input.json --restart model.ckpt\n

    Warning

    Remember: training should be run on the cluster; please submit it with a Slurm script.

    "},{"location":"wiki/software_usage/DeePMD-kit/#md","title":"\u4f7f\u7528\u751f\u6210\u7684\u52bf\u80fd\u51fd\u6570\u8fdb\u884c\u5206\u5b50\u52a8\u529b\u5b66(MD)\u6a21\u62df","text":"

    Once training is finished, we need to freeze a model out of the checkpoint files (model.ckpt*).

    The model can be frozen with the following command:

    dp freeze\n

    You will obtain a *.pb file. With this file you can run molecular dynamics simulations in LAMMPS, ASE, CP2K and other packages.
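
    For example, the frozen model can be attached to an ASE Atoms object through the DP calculator bundled with recent DeePMD-kit versions. A minimal sketch, assuming graph.pb is the file produced by dp freeze and using a placeholder water geometry (atom types are mapped from the chemical symbols via the model's type map):

    from ase import Atoms\nfrom deepmd.calculator import DP\n\n# a single water molecule in a 10 A cubic box, just a placeholder structure\nwater = Atoms('OH2',\n              positions=[[0.0, 0.0, 0.0], [0.0, 0.757, 0.587], [0.0, -0.757, 0.587]],\n              cell=[10.0, 10.0, 10.0],\n              pbc=True)\nwater.calc = DP(model='graph.pb')   # graph.pb is the frozen model from 'dp freeze'\nprint(water.get_potential_energy())\nprint(water.get_forces())\n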

    "},{"location":"wiki/software_usage/DeePMD-kit/#production","title":"\u5229\u7528\u538b\u7f29\u6a21\u578b\u8fdb\u884c\u4ea7\u51fa(Production)","text":"

    MD simulations with a machine-learning potential *.pb file are already very fast, but there is still room for improvement. First, the potential must be trained with DeePMD-kit version 2.0 or later to obtain the *.pb file. Potentials trained with version 1.2/1.3 are not a problem either: the following command converts an old-format model to the new format. For example, to convert from version 1.2:

    dp convert-from 1.2 -i old_frozen_model.pb -o new_frozen_model.pb\n

    Note on compatibility

    For the compatibility of existing potentials, please refer to the official documentation. DeePMD-kit currently supports converting models from v0.12, v1.0, v1.1, v1.2 and v1.3 to the new version.

    It is recommended to back up the original training folder and work on a copy. Compression is then done with the following command (the folder should contain the corresponding input.json file and the checkpoint files):

    module load deepmd/2.0-cuda11.3\ndp compress -i normal-model.pb -o compressed-model.pb -l compress.log\n

    Scope of applicability

    Note that model compression only applies to certain models, such as se_e2_a, se_e3, se_e2_r and hybrid models built from them.

    For other models, such as the se_attn model (DPA-1), model compression is not yet supported and may raise an error.

    Also note that a compressed model gains its speed by fitting the embedding net with fifth-order polynomials; this change has almost no effect on prediction accuracy, but in practice it does sacrifice some accuracy. When using compression, be sure to check whether the default parameters are suitable for your system, e.g. whether an error drift appears, and adjust the parameters accordingly, such as the number of steps used in the fit (--step).

    "},{"location":"wiki/software_usage/DeePMD-kit/#_10","title":"\u538b\u7f29\u6a21\u578b\u4e0e\u539f\u59cb\u6a21\u578b\u5bf9\u6bd4","text":"

    Benchmarked on a 2080Ti GPU with 11 GB of memory.

    | System | Number of atoms | Before compression (ns/day) | After compression (ns/day) | Speedup |\n| --- | --- | --- | --- | --- |\n| LiGePS | 5000 | 0.806 | 3.569 | 4.42 |\n| SnO2/water interface | 6021 | 0.059 | 0.355 | 6.01 |\n| SnO2/water interface | 5352 | 0.067 | 0.382 | 5.70 |\n| SnO2/water interface | 2676 | 0.132 | 0.738 | 5.59 |\n| SnO2/water interface | 1338 | 0.261 | 1.367 | 5.23 |\n| SnO2/water interface | 669 | 0.501 | 2.236 | 4.46 |\n| LiGePS | 400 | 7.461 | 23.992 | 3.21 |\n| Cu13 | 13 | 51.268 | 65.944 | 1.28 |

    SnO2/water interface: the original model could handle at most 6021 atoms on this card, while the compressed model can handle up to 54189 atoms.

    "},{"location":"wiki/software_usage/DeePMD-kit/#trouble-shooting","title":"Trouble Shooting","text":""},{"location":"wiki/software_usage/DeePMD-kit/#warning-loc-idx-out-of-lower-bound","title":"warning: loc idx out of lower bound","text":"

    Solution: https://github.com/deepmodeling/deepmd-kit/issues/21

    "},{"location":"wiki/software_usage/DeePMD-kit/#valueerror-nodedef-missing-attr-t-from","title":"ValueError: NodeDef missing attr 'T' from ...","text":"

    This error is often raised when a model trained with deepmd/1.2 is used for a LAMMPS job with a newer deepmd-kit (> v1.3), for example:

    • error: Not found: No attr named 'T' in NodeDef when running lammps

    However, this error has also been observed when compressing a v1.3 model. Use the following command:

    dp compress ${input} --checkpoint-folder ${ckpt} 1.3-model.pb -o compressed-model.pb -l compress.log\n

    Here ${input} and ${ckpt} are, respectively, the path to the input script of the corresponding model and its checkpoint directory. In this example we only copied the model to be compressed into the working folder and specified the input-script path and checkpoint directory manually. Why this raises the 'ValueError' has not yet been figured out.

    We therefore recommend backing up the original training folder and running the compression job in a copy of it.

    "},{"location":"wiki/software_usage/DeePMD-kit/#extra-support","title":"Extra Support","text":""},{"location":"wiki/software_usage/DeePMD-kit/#script-for-convertion-from-cp2k-xyz-to-numpy-set","title":"Script for convertion from cp2k xyz to numpy set","text":"
    from ase.io import read\nimport numpy as np\nimport os, sys\nimport glob\nimport shutil\n\n\n#############################\n# USER INPUT PARAMETER HERE #\n#############################\n\n# input data path here, string, this directory should contains\n#   ./data/*frc-1.xyz ./data/*pos-1.xyz\ndata_path = \"./data\"\n\n#input the number of atom in system\natom_num = 189\n\n#input cell paramter here\ncell = [[10.0,0,0],[0,10.0,0],[0,0,10.0]]\n\n# conversion unit here, modify if you need\nau2eV = 2.72113838565563E+01\nau2A = 5.29177208590000E-01\n\n\n####################\n# START OF PROGRAM #\n####################\n\ndef xyz2npy(pos, atom_num, output, unit_convertion=1.0):\n    total = np.empty((0,atom_num*3), float)\n    for single_pos in pos:\n        tmp=single_pos.get_positions()\n        tmp=np.reshape(tmp,(1,atom_num*3))\n        total = np.concatenate((total,tmp), axis=0)\n    total = total * unit_convertion\n    np.save(output, total)\n\ndef energy2npy(pos, output, unit_convertion=1.0):\n     total = np.empty((0), float)\n     for single_pos in pos:\n         tmp=single_pos.info.pop('E')\n         tmp=np.array(tmp,dtype=\"float\")\n         tmp=np.reshape(tmp,1)\n         total = np.concatenate((total,tmp), axis=0)\n     total = total * unit_convertion\n     np.save(output,total)\n\ndef cell2npy(pos, output, cell, unit_convertion=1.0):\n    total = np.empty((0,9),float)\n    frame_num = len(pos)\n    cell = np.array(cell, dtype=\"float\")\n    cell = np.reshape(cell, (1,9))\n    for frame in range(frame_num):\n        total = np.concatenate((total,cell),axis=0)\n    total = total * unit_convertion\n    np.save(output,total)\n\ndef type_raw(single_pos, output):\n    element = single_pos.get_chemical_symbols()\n    element = np.array(element)\n    tmp, indice = np.unique(element, return_inverse=True)\n    np.savetxt(output, indice, fmt='%s',newline=' ')\n\n\n# read the pos and frc\ndata_path = os.path.abspath(data_path)\npos_path = os.path.join(data_path, \"*pos-1.xyz\")\nfrc_path = os.path.join(data_path, \"*frc-1.xyz\")\n#print(data_path)\npos_path = glob.glob(pos_path)[0]\nfrc_path = glob.glob(frc_path)[0]\n#print(pos_path)\n#print(frc_path)\npos = read(pos_path, index = \":\" )\nfrc = read(frc_path, index = \":\" )\n\n# numpy path\nset_path = os.path.join(data_path, \"set.000\")\nif os.path.isdir(set_path):\n    print(\"detect directory exists\\n now remove it\")\n    shutil.rmtree(set_path)\n    os.mkdir(set_path)\nelse:\n    print(\"detect directory doesn't exist\\n now create it\")\n    os.mkdir(set_path)\ntype_path = os.path.join(data_path, \"type.raw\")\ncoord_path = os.path.join(set_path, \"coord.npy\")\nforce_path = os.path.join(set_path, \"force.npy\")\nbox_path = os.path.join(set_path, \"box.npy\")\nenergy_path = os.path.join(set_path, \"energy.npy\")\n\n\n#tranforrmation\nxyz2npy(pos, atom_num, coord_path)\nxyz2npy(frc, atom_num, force_path, au2eV/au2A)\nenergy2npy(pos, energy_path, au2eV)\ncell2npy(pos, box_path, cell)\ntype_raw(pos[0], type_path)\n
    "},{"location":"wiki/software_usage/DeePMD-kit/#deepmd-kit-20","title":"\u5347\u7ea7\u5230DeePMD-kit 2.0","text":"

    The official release of DeePMD-kit 2.0 is now available; it brings many improvements over the old version, and model compression is a feature of the official release. DeePMD-kit 2.0.3 is currently installed on our cluster.

    "},{"location":"wiki/software_usage/DeePMD-kit/#_11","title":"\u8f93\u5165\u6587\u4ef6","text":"

    Compared with 1.x, DeePMD-kit 2.0 makes some changes to the input file. An example DeePMD-kit 2.0 input file is given below:

    {\n    \"_comment\": \" model parameters\",\n    \"model\": {\n        \"type_map\": [\n            \"O\",\n            \"H\"\n        ],\n        \"descriptor\": {\n            \"type\": \"se_e2_a\",\n            \"sel\": [\n                46,\n                92\n            ],\n            \"rcut_smth\": 0.50,\n            \"rcut\": 6.00,\n            \"neuron\": [\n                25,\n                50,\n                100\n            ],\n            \"resnet_dt\": false,\n            \"axis_neuron\": 16,\n            \"seed\": 1,\n            \"_comment\": \" that's all\"\n        },\n        \"fitting_net\": {\n            \"neuron\": [\n                240,\n                240,\n                240\n            ],\n            \"resnet_dt\": true,\n            \"seed\": 1,\n            \"_comment\": \" that's all\"\n        },\n        \"_comment\": \" that's all\"\n    },\n    \"learning_rate\": {\n        \"type\": \"exp\",\n        \"decay_steps\": 5000,\n        \"start_lr\": 0.001,\n        \"stop_lr\": 3.51e-8,\n        \"_comment\": \"that's all\"\n    },\n    \"loss\": {\n        \"type\": \"ener\",\n        \"start_pref_e\": 0.02,\n        \"limit_pref_e\": 1,\n        \"start_pref_f\": 1000,\n        \"limit_pref_f\": 1,\n        \"start_pref_v\": 0,\n        \"limit_pref_v\": 0,\n        \"_comment\": \" that's all\"\n    },\n    \"training\": {\n        \"training_data\": {\n            \"systems\": [\n                \"../data/data_0/\",\n                \"../data/data_1/\",\n                \"../data/data_2/\"\n            ],\n            \"batch_size\": \"auto\",\n            \"_comment\": \"that's all\"\n        },\n        \"validation_data\": {\n            \"systems\": [\n                \"../data/data_3\"\n            ],\n            \"batch_size\": 1,\n            \"numb_btch\": 3,\n            \"_comment\": \"that's all\"\n        },\n        \"numb_steps\": 1000000,\n        \"seed\": 10,\n        \"disp_file\": \"lcurve.out\",\n        \"disp_freq\": 100,\n        \"save_freq\": 1000,\n        \"_comment\": \"that's all\"\n    },\n    \"_comment\": \"that's all\"\n}\n

    DeePMD-kit 2.0 adds support for a validation set: users can designate a data set as the validation set, and the model's error on that set is reported during training. Compared with the old version, the meaning of most input-file parameters is unchanged; apart from how data sets are defined, the majority of parameters keep the same meaning.

    A few points to note are listed below:

    1. The training data sets are no longer written directly under training but under its sub-key training_data, in the following format:
      \"training_data\": {\n         \"systems\": [\n             \"../data/data_0/\",\n             \"../data/data_1/\",\n             \"../data/data_2/\"\n         ],\n         \"batch_size\": \"auto\"\n     }\n
      By default, at each training step DeePMD-kit randomly picks structures from the data sets; how many structures are picked is determined by batch_size, and in this case data from each system are used with equal probability. If you want to control the weight of each system, use auto_prob; its options are listed below.
      • prob_uniform: all systems are weighted equally.
      • prob_sys_size: each system is weighted according to its size.
      • prob_sys_size: can also be written in the extended form sidx_0:eidx_0:w_0;sidx_1:eidx_1:w_1;... . Here sidx_i and eidx_i mark the start and end of the i-th group of systems (same rules as Python slicing) and w_i is the weight of that group; within a group, each system is again weighted by its size. The value of batch_size can be set by hand; as a rule of thumb it is chosen such that batch_size times the number of atoms is <= 32. The new version also supports automatic setting: \"auto\" applies exactly this rule, while \"auto:N\" applies the rule batch_size times the number of atoms <= N.
    2. save_ckpt, load_ckpt, decay_rate and similar parameters are obsolete; if you migrate from 1.x, delete them, otherwise an error will be raised.
    3. n_neuron has been renamed to neuron and stop_batch to numb_steps; remember to update them. Correspondingly, the decay rate is now determined by start_lr and stop_lr.
    4. The RMSE values of the test data have been removed from lcurve.out, so old plotting scripts need to be adapted to the reduced number of columns (the energy RMSE is in column 3 and the force RMSE in column 4). If a validation set is specified, the RMSE on the validation set is also written. A minimal plotting sketch is given right after this list.
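
    For reference, a minimal sketch of such a plotting script, assuming the column layout described in point 4 (no validation set; 1-based column 3 = energy RMSE, column 4 = force RMSE):

    import numpy as np\nimport matplotlib.pyplot as plt\n\ndata = np.loadtxt('lcurve.out')           # lines starting with '#' are skipped automatically\nstep = data[:, 0]\nplt.loglog(step, data[:, 2], label='energy RMSE (training)')   # 3rd column\nplt.loglog(step, data[:, 3], label='force RMSE (training)')    # 4th column\nplt.xlabel('training step')\nplt.ylabel('RMSE')\nplt.legend()\nplt.savefig('lcurve.png')\n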

    For more details, please refer to the official documentation.

    "},{"location":"wiki/software_usage/MDAnalysis/","title":"MDAnalysis \u8f6f\u4ef6\u5305\u7684\u4f7f\u7528","text":""},{"location":"wiki/software_usage/MDAnalysis/#mdanalysis_1","title":"\u6211\u662f\u5426\u9700\u8981\u4f7f\u7528MDAnalysis","text":"

    MDAnalysis is a Python package for processing molecular dynamics trajectories. Its most outstanding strength is its comprehensive trajectory IO, which handles the output formats of most common MD codes. At the same time, its IO philosophy makes it particularly well suited for frame-by-frame statistical analysis of large trajectory files. The package ships with many built-in MD analysis methods, so routine analyses such as the radial distribution function (RDF), water number density, and hydrogen-bond analysis are easy to carry out. Beyond the built-in methods, users can also define their own analyses with MDAnalysis.

    [Built-in analyses](https://docs.mdanalysis.org/stable/documentation_pages/analysis_modules.html)

    [How to DIY your own analysis](https://userguide.mdanalysis.org/stable/examples/analysis/custom_trajectory_analysis.html)

    MDAnalysis is a very good fit if you need the following kinds of analysis:

    • Statistical analysis of MD: the same operation has to be applied to every single frame and accumulated over the whole trajectory, for example computing the distance between atom A and atom B in every frame.

    • Distance calculations in periodic systems: efficient, fast distance library functions that account for PBC as soon as you supply the cell parameters [a, b, c, alpha, beta, gamma].

    "},{"location":"wiki/software_usage/MDAnalysis/#io","title":"IO \u7406\u5ff5","text":""},{"location":"wiki/software_usage/MDAnalysis/#1","title":"1. \u521d\u59cb\u5316","text":"

    MDAnalysis abstracts the trajectory file, topology information, etc. into a Universe class. For example, an xyz trajectory can be initialized as follows:

    import numpy as np\nfrom MDAnalysis import Universe\n\nxyzfile = \"./tio2-water.xyz\"\nu = Universe(xyzfile)\nu.dimensions = np.array([10, 10, 10, 90, 90, 90])    # assign cell parameters\n

    Initializing a Universe instance u this way does not actually read the whole file. At this stage you can already use u to select a subset of atoms and obtain an AtomGroup object. For example,

    ag      = u.atoms        # select all atoms\nxyz     = ag.positions   # get the coordinates of these atoms\nelement = ag.elements    # the element labels for these atoms\n

    selects all atoms into one AtomGroup object. MDAnalysis also supports fancier selection syntax, similar to VMD's; see the MDAnalysis selection-language documentation. In the author's experience, however, this selection language does not work well for the systems we study, and doing such selections with ASE is more convenient.
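
    As a simple illustration of such a selection outside the MDAnalysis selection language, a plain NumPy boolean mask over the element array also works. A sketch, continuing from the Universe u created above (the element symbol 'O' is just an example):

    import numpy as np\n\nag = u.atoms\no_mask = (ag.elements == 'O')        # boolean array, one entry per atom\no_atoms = ag[o_mask]                 # AtomGroup containing only the O atoms\nprint(len(o_atoms), o_atoms.positions.shape)\n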

    "},{"location":"wiki/software_usage/MDAnalysis/#2","title":"2. \u8f68\u8ff9\u7684\u8bfb\u53d6","text":"

    After initializing a Universe, you can manually trigger the reading of the trajectory as follows:

    print(u.trajectory)                 # reading the trajectory\nn_frames = u.trajectory.n_frames    # get the number of frames of your traj\nu.trajectory.ts.dt = 0.0005         # set dt to 0.0005 ps, otherwise you will get a warning \n

    Otherwise MDAnalysis will not read the file until an analysis is actually run.

    In fact, even in the reading step above MDAnalysis does not load the trajectory into memory; it only records where each frame starts in the file. Take the familiar xyz format as an example:

    100                                 <- frame start\nTIMESTEP: 0\n*.*****    *.*****    *.*****\n*.*****    *.*****    *.*****\n              \u00b7\n              \u00b7\n              \u00b7\n              \u00b7\n*.*****    *.*****    *.*****\n100                                 <- frame start\nTIMESTEP: 2\n*.*****    *.*****    *.*****\n

    MDAnalysis walks through the whole file stream once and stores the position of each frame's start in u.trajectory._offsets.

    file stream:   |----frame 0----|----frame 1----|----frame 2----|  ......  |----frame N----|\n                   ^               ^               ^                          ^\n                   offset(0)       offset(1)       offset(2)        ...       offset(N)\n\nu.trajectory._offsets  --->  array([<_offsets(0)>, <_offsets(1)>, <_offsets(2)>, ..., <_offsets(N)>])\n

    With the positions of these frame starts in hand, MDAnalysis can read the data of any frame at will. For example, to read the coordinates of frame 70 you can do:

    >>> print(u.trajectory)\n>>> ag = u.atoms\n>>> print(u.trajectory.ts)\n< Timestep 0 with unit cell dimensions None >\n>>> for ii in range(69):\n...     u.trajectory.next()\n>>> print(u.trajectory.ts) \n< Timestep 69 with unit cell dimensions None >\n>>> xyz70 = ag.atoms\n>>> u.trajectory.rewind()                       \n< Timestep 0 with unit cell dimensions None >\n

    As you can see, u.trajectory is essentially an iterator: calling u.trajectory.next() advances it to the next frame, and the coordinates of that frame are updated in atomgroup.positions. In practice you do not need to call these low-level next and rewind methods yourself when analyzing with MDAnalysis; these tedious steps are already wrapped for you.

    You can also jump to frame 70 directly by indexing:

    >>> print(u.trajectory[70])\n< Timestep 70 with unit cell dimensions None >\n

    Moreover, precisely because u.trajectory is an iterator (its parent class ProtoReader defines __iter__ to return an iterator object and __next__ to drive the iteration, with __next__ returning what next() produces), you can iterate over a slice of the trajectory instead of going frame by frame:

    >>> for ts in u.trajectory[10:10000:20]:    # \u4ece\u7b2c10\u5e27\u523010000\u5e27\u6bcf20\u6b65\u53d6\u4e00\u5e27\n...     print(ts.frame)\n10 30 50 70 90 110 130 150 ...\n

    To summarize, the way MDAnalysis reads trajectories has the following advantages:

    • What is actually read are the offsets, i.e. the positions of the frame starts, which is only N integers. Unlike ASE, which instantiates N Atoms objects (holding the coordinates of the whole trajectory) and therefore uses a lot of memory, the MDAnalysis IO approach has a small memory footprint and loops faster.

    • Once the offsets have been read, the Universe object can be saved (see below), so the trajectory file does not have to be traversed again. If you later come up with a new analysis, this saves you the time of re-scanning the whole file.

    "},{"location":"wiki/software_usage/MDAnalysis/#universe","title":"\u4fdd\u5b58\u4e00\u4e2aUniverse\u5b9e\u4f8b","text":"

    Suppose you have a trajectory file traj.xyz. You can save the Universe as follows, so that the frame offsets do not have to be rebuilt when you analyze the trajectory a second time.

    >>> import pickle\n>>> from MDAnalysis import Universe\n\n>>> xyzfile = \"/path/to/traj.xyz\"     # !!! Use an absolute path. It's more robust.\n>>> outuni  = \"./traj.uni\"\n>>> u = Universe(xyzfile)\n>>> print(u.trajectory)               # This will take some time\n<XYZReader /path/to/traj.xyz with 100 frames of 3240 atoms>\n>>> with open(outuni, 'wb') as f:\n...    pickle.dump(u, f)\n

    It is recommended to use an absolute path when initializing the Universe; then you can copy traj.uni to any location and still analyze the trajectory from there. For the second round of analysis you can load a Universe directly like this:

    >>> with open(outuni, 'rb') as f:\n...     v = pickle.load(f) \n>>> print(v.trajectory)\n<XYZReader /path/to/traj.xyz with 100 frames of 3240 atoms>\n

    In the author's experience, scanning the frame offsets of a 6 GB xyz trajectory on our <fat> node takes about 3 minutes.

    "},{"location":"wiki/software_usage/MDAnalysis/#_1","title":"\u8ddd\u79bb\u8ba1\u7b97\u5e93\u51fd\u6570","text":"

    MDAnalysis has an excellent low-level distance library, MDAnalysis.lib.distances: the core routines are written in C and wrapped in Python (see the lib.distances API). Its strength is computing interatomic distances under periodic boundary conditions (PBC), and it is well documented. It is also independent of the Universe and Analysis classes: you only need to supply atomic coordinates, the box, and a cutoff to obtain distances, angles, and so on.

    Below is a coordination-number counter the author wrote around the capped_distance method of this library.

    import numpy as np\nfrom MDAnalysis.lib.distances import capped_distance\n\n\ndef count_cn(atoms1, atoms2, cutoff_hi, cutoff_lo=None, cell=None):\n    \"\"\"count the coordination number (CN) of atoms1 (center atoms), where atoms2 are the coordinating atoms. This function counts the CN within the range cutoff_lo < d < cutoff_hi, where d is the distance between atoms1 and atoms2. The minimum image convention is applied if cell is not None.\n\n    Args:\n        atoms1 (numpy.ndarray): Array with shape (N, 3), where N is the number of center atoms. 'atoms1' are the positions of the center atoms.\n        atoms2 (numpy.ndarray): Array with shape (M, 3), where M is the number of coordinating atoms. 'atoms2' are the positions of the coordinating atoms.\n        cutoff_hi (float): Max cutoff for calculating the coordination number.\n        cutoff_lo (float or None, optional): Min cutoff for calculating the coordination number. Defaults to None.\n        cell (numpy.ndarray, optional): Array with shape (6,), Array([a, b, c, alpha, beta, gamma]). Simulation cell parameters. If it is not None, the CN calculation uses the minimum image convention. Defaults to None.\n\n    Returns:\n        results: Array with shape (N,), CN of each atom in atoms1\n    \"\"\"\n    pairs, _ = capped_distance(reference=atoms1,\n                               configuration=atoms2,\n                               max_cutoff=cutoff_hi,\n                               min_cutoff=cutoff_lo,\n                               box=cell)\n    _minlength = atoms1.shape[0]\n    results = np.bincount(pairs[:, 0], minlength=_minlength)\n    return results\n
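
    A small usage sketch of count_cn (random coordinates in a hypothetical 10 A cubic box, purely for illustration):

    import numpy as np\n\ncell = np.array([10.0, 10.0, 10.0, 90.0, 90.0, 90.0])    # hypothetical cubic box\ncenters = np.random.uniform(0.0, 10.0, size=(20, 3))     # e.g. 20 center atoms\nneighbors = np.random.uniform(0.0, 10.0, size=(60, 3))   # e.g. 60 coordinating atoms\ncn = count_cn(centers, neighbors, cutoff_hi=2.6, cell=cell)\nprint(cn.shape, cn)   # (20,) -> coordination number of each center atom\n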

    ASE's ase.geometry module also provides similar low-level routines, but in the author's opinion the experience is not as good as MDAnalysis.lib.distances (slower, less documentation).

    Below is a benchmark of the distance matrix between two groups of 100 atoms each; the result is a 100x100 numpy array. MDAnalysis.lib.distances turns out to be about 15 times faster, so when tens of thousands of such calculations are needed, using the ASE routines will hurt your efficiency.

    >>> import numpy as np\n>>> from ase.geometry import get_distances\n>>> from MDAnalysis.lib.distances import distance_array\n                       \u00b7\n                       \u00b7\n                       \u00b7\n>>> print(xyz1.shape, xyz2.shape)\n(100, 3) (100, 3)\n>>> print(cell)\n[[50.5123      0.          0.        ]\n [ 5.05820546 13.34921731  0.        ]\n [ 0.          0.         47.8433    ]]\n>>> print(cellpar)\n[50.5123 14.2754 47.8433 90.     90.     69.2476]\n\nIn[1]: %%timeit\n...    dmatrix_mda = distance_array(xyz1, xyz2, box=cellpar)\n1.03 ms \u00b1 5.11 \u00b5s per loop (mean \u00b1 std. dev. of 7 runs, 1,000 loops each)\n\nIn[2]: %%timeit\n...    vec, dmatrix_ase = get_distances(xyz1, xyz2, cell=cell, pbc=True)\n16.6 ms \u00b1 133 \u00b5s per loop (mean \u00b1 std. dev. of 7 runs, 100 loops each)\n
    "},{"location":"wiki/software_usage/MDAnalysis/#_2","title":"\u6ce8\u610f\uff1a\u5982\u679c\u4f60\u5728\u5904\u7406\u975e\u6b63\u4ea4\u7684\u6a21\u62df\u76d2\u5b50","text":"

    Note that in the distance-calculation examples above we passed the simulation cell to MDAnalysis.lib.distances as the cell parameters [a, b, c, alpha, beta, gamma]. Internally, these cell parameters are first converted to a 3x3 box matrix before distances are computed. If your box is not orthogonal, you should first check that the cell parameters you provide are converted to the correct 3x3 matrix before relying on this library, otherwise you may get wrong results. This is the Python conversion routine they use.
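
    A quick way to perform this check is through MDAnalysis.lib.mdamath.triclinic_vectors, which exposes that internal conversion. A sketch, reusing the triclinic cell parameters from the benchmark above as an example:

    import numpy as np\nfrom MDAnalysis.lib.mdamath import triclinic_vectors\n\ncellpar = np.array([50.5123, 14.2754, 47.8433, 90.0, 90.0, 69.2476])\nbox = triclinic_vectors(cellpar)   # the 3x3 matrix MDAnalysis will actually use\nprint(box)\n# compare this with the 3x3 cell matrix you used to set up the simulation\n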

    "},{"location":"wiki/software_usage/MDAnalysis/#_3","title":"\u590d\u6742\u7684\u8f68\u8ff9\u5206\u6790\u2014\u2014\u7b80\u8981\u4ecb\u7ecd\u9762\u5bf9\u5bf9\u8c61\u7f16\u7a0b\u7684\u65b9\u6cd5","text":"

    In real free-energy calculations in catalysis, the quantities involved in the analysis often go beyond a single MD trajectory file. For enhanced-sampling trajectories, for example, there is also a COLVAR file containing the bias potential, whose *bias column is the key quantity for recovering the weight of each structure in the true phase space.

    At the same time, a purely procedural programming style quickly runs into trouble when many repetitive steps are involved: too many variables and an overly complicated flow. For instance, when the same chemical system is sampled by MD at several temperatures, a procedural analysis script either needs a large number of variables to hold the extracted data or has to add extra dimensions to existing arrays. With complex data processing it then becomes easy to lose track of the workflow and hard to locate problems. This is where object-oriented programming helps: the way we solve the problem shifts from being process-oriented to being object-oriented. The object can be anything, say a file or a path, and to extract the information we want from that object we define methods that operate on it.

    Below we use an example to sketch the object-oriented approach:

    Problem description

    From an enhanced-sampling trajectory we want to extract, for one atom type (A), the distribution of its coordination number with respect to another atom type (B), together with the structural features of the local environment at each coordination number, and then use the enhanced-sampling weights to analyze the results more rigorously. We first need to load the molecular structures from the trajectory file, compute the coordination number for a given environment, extract the coordinates of the coordinating atoms, and later compute structural descriptors of the local environment (such as bond-angle and dihedral distributions). For each of these structures we also need to pull the corresponding bias value from the COLVAR file for the subsequent reweighting.

    Thanks to the powerful tooling in MDAnalysis, such complex problems can be handled quickly. Following the relevant sections of the pages linked at the beginning of this article, we first initialize the class with the coordinate file and the COLVAR file (this step loads the raw information; it could also be loaded inside individual methods, but information shared by several methods is most transparently and conveniently loaded in __init__):

    import numpy as np\nfrom MDAnalysis import Universe\nfrom MDAnalysis.analysis.base import AnalysisBase\nfrom MDAnalysis.lib.distances import capped_distance, distance_array, calc_angles\n\n\nclass A_center_analysis(AnalysisBase):\n    def __init__(self, lmpfile, COLVAR_file, verbose=True):\n        u = Universe(lmpfile, topology_format=\"LAMMPSDUMP\")\n        u.select_atoms(\"type 1\").masses = 114\n        u.select_atoms(\"type 2\").masses = 514\n        u.trajectory.ts.dt = 0.0005\n        self.cell = u.dimensions\n        self.bias = np.loadtxt(COLVAR_file, skiprows=1, usecols=-1)\n\n        assert u.trajectory.n_frames == len(self.bias)\n\n        self.atomgroup = u.select_atoms(\"all\")\n        super(A_center_analysis, self).__init__(self.atomgroup.universe.trajectory, verbose=verbose)\n
    Here the super() call invokes the constructor of the parent class AnalysisBase, so that the variables it sets up can be used conveniently later on.

    Next we define the result variables in _prepare. The original example used a single self.result variable for everything, which is not very transparent to work with later, so here we define a few separate ones.

        def _prepare(self):\n        self.cn_2 = np.array([])        \n        self.angle_2 = np.array([])\n        self.bias_2 = np.array([])\n
    We can then define the actual analysis methods and store the quantities of interest in suitable class attributes (this is the step that turns the raw information into a set of attributes of the object):
    def _append(self, cn, data):\n        assert data.shape == (cn+1, 3)\n        if cn == 2:\n            self.cn_2 = np.append(self.cn_2, data)\n            self.cn_2 = self.cn_2.reshape(-1,3,3)\n\n            BAB_angle = calc_angles(data[1],data[0],data[2], box=self.cell)\n            self.angle_2 = np.append(self.angle_2, BAB_angle)\n            self.bias_2 = np.append(self.bias_2, self.bias[self.atomgroup.ts.frame])    # extract the bias value corresponding to this structure\n        else:\n            pass\n\n    def _single_frame(self):\n        A_coord = self.atomgroup.select_atoms(\"type 1\").positions\n        B_coord = self.atomgroup.select_atoms(\"type 2\").positions\n\n        pairs, _ = capped_distance(reference=A_coord,configuration=B_coord,\n                                   max_cutoff=2.6,min_cutoff=None,\n                                   box=self.cell)\n        _minlength = A_coord.shape[0]\n        cn_results = np.bincount(pairs[:, 0], minlength=_minlength)\n\n        for A_cn in range(2,5):\n            A_centers = A_coord[cn_results == A_cn]\n            for A_center in A_centers:\n                A_B_map = distance_array(A_center, B_coord, box=self.cell)[0]\n                coordinated_B_coord = B_coord[np.argsort(A_B_map)[:A_cn]]    # the A_cn nearest B atoms\n                self._append(A_cn, np.vstack((A_center, coordinated_B_coord)))\n
    The _single_frame() method is the main routine that gets looped over during the run; self.atomgroup inside it can be regarded as the data of a single frame, and the subsequent processing applies to that frame. The run() method of the parent class takes care of the iteration automatically; you only need to specify the range and the stride.

    The extra _append() method separates the code that appends to the result variables from the main analysis code, which makes it easier to extend later (for example, extra processing of the coordinates with coordination number 3 can be added directly in _append() without touching the code that extracts the information).

    Afterwards, we only need to instantiate the analysis class and call run:

    >>> lmpfile = \"300.lammpstrj\"\n>>> a_300 = A_center_analysis(lmpfile, \"COLVAR\")\n>>> a_300.run(start=200000, stop=-1, step=100)\n>>> print(a_300.bias_2.shape, a_300.angle_2.shape, a_300.cn_2.shape)\n(20472,) (20472,) (20472, 3, 3)\n
    The matching sizes of the output arrays are what we expect: each structure is paired with its bias value. For the subsequent distribution analysis we can estimate the probability density with a weighted histogram (a more rigorous approach would be Gaussian kernel density estimation):
    >>> plt.hist(a_300.angle_2*180/np.pi, weights=np.exp(beta*a_300.bias_2), bins=100, density=True)\n
    Similarly, the program can be wrapped one level further up, treating the results of the same system at different temperatures as objects of another class, which makes the output even clearer. Such a wrapper typically requires a unified file-naming scheme and a formatted directory layout, but the efficiency gain in day-to-day work is usually substantial.

    "},{"location":"wiki/software_usage/Tips_for_LaTeX/","title":"Tips for paper writing with LaTeX","text":""},{"location":"wiki/software_usage/Tips_for_LaTeX/#cross-referece","title":"cross referece","text":"

    What should we do if we want to cite the figures or tables in the supplementary material? Use the xr package!

    Firstly, put the following into the preamble of the SI:

    %Number supplementary material with an S\n\\renewcommand{\\thepage}{S\\arabic{page}}\n\\renewcommand{\\thesection}{S\\arabic{section}} \n\\renewcommand{\\thetable}{S\\arabic{table}} \n\\renewcommand{\\thefigure}{S\\arabic{figure}}\n\\renewcommand{\\theequation}{S\\arabic{equation}}\n

    Then, you can refer to the Figures with Figure Sxxx in your SI file. To cite them in your main text, you can use \\ref, by adding the following to the main file:

    %%Crossreferencing to the SI\n\\usepackage{xr}\n\\externaldocument[SI-]{<path to folder in which you have the SI>}\n

    Now you can reference figures in the SI as

    \\ref{SI-<label you gave the figure in the SI>}\n

    Be cautious: You need to recompile both the paper and the SI after doing so.

    For Overleaf users, please see here.

    Thanks to Dr. Katharina Doblhoff-Dier at Leiden University for the suggestion.

    "},{"location":"wiki/software_usage/default_version/","title":"\u8f6f\u4ef6\u9ed8\u8ba4\u7248\u672c\u63a8\u8350","text":"

    Many packages on the cluster have been compiled in several versions. Because of updates to the hardware/software platform, versions, and environments, the commonly used packages are sorted out below together with the recommended version of each.

    The Zeus cluster manages software environments with modules; an environment usually has to be loaded before use, e.g. module load vasp/5.4.4 loads the environment required to run VASP 5.4.4. The recommended-version notes below therefore list the <module name> used on Zeus; in practice, complete it yourself as module load <module name>.

    Note that if environments loaded in ~/.bashrc or ~/.bash_profile conflict with the environments required below, you can add a module purge line to your submission script to unload them and avoid conflicts.

    Note: CentOS 7 ships GCC 4.9.4, Python 2.7, and Python 3.6 by default, so wherever the environments below are not explicitly loaded, the default environment is used.

    | Software | Recommended version | Command | Environments to load | Notes |\n| --- | --- | --- | --- | --- |\n| VASP | vasp/5.4.4 | regular calculations: vasp_std; Gamma-point only: vasp_gam | intel/17.5.239 mpi/intel/2017.5.239 | CPU parallel calculation |\n| CP2K | cp2k/7.1 | with OpenMP: cp2k_psmp; without OpenMP: cp2k_popt | gcc/5.5.0 intel/17.5.239 mpi/intel/2017.5.239 | CPU parallel calculation |\n| DeePMD-kit | deepmd/2.0-cuda11.3 | training: dp; running MD: lmp_mpi | cuda/11.3 gcc/7.4.0 intel/17.5.239 mpi/intel/2017.5.239 | GPU-accelerated potential training; the bundled LAMMPS version is 20201029 |"},{"location":"wiki/software_usage/experience_of_dpmd_and_dpgen/","title":"Experience with DPMD and DPGEN","text":""},{"location":"wiki/software_usage/experience_of_dpmd_and_dpgen/#dpmd-trainjson","title":"DPMD train.json parameter settings and understanding","text":""},{"location":"wiki/software_usage/experience_of_dpmd_and_dpgen/#dp-kit","title":"dp-kit installation","text":"
    • If you have a local GPU, the all-in-one dp-kit offline package (about 1 GB in total) is recommended; install it by executing the shell installer. This is convenient for local development and testing.
    "},{"location":"wiki/software_usage/experience_of_dpmd_and_dpgen/#deeppotential","title":"DeepPotential","text":"
    1. An intuitive picture of sel_a: the more frequently an atom type occurs, the larger its sel_a should be; sel_a corresponds to the maximum number of atoms of that type that can be found around any atom taken as the center.
    2. The neural-network and ResNet sizes are usually left unchanged; when training several potentials at the same time, change the random seed.
    3. A potential intended for production use needs to be well trained ("long training"); commonly used settings are:
    \"learning_rate\" - \"decay_steps\"\uff1a20000,\n\"stop_batch\": 400000, # \u4f7f\u7528200000 \u6b65\u4e5f\u5927\u81f4\u6ca1\u6709\u95ee\u9898\u3002\n
    "},{"location":"wiki/software_usage/experience_of_dpmd_and_dpgen/#dpgen","title":"DPGEN \u4f7f\u7528","text":"
    1. After submitting a training job, keep track of its progress. Sometimes the GPU cannot be allocated after submission (occupied by another program, or for other reasons) and the training output becomes "nan"; in that case resubmit and make sure the job actually gets the GPU.
    2. A short training run on a V100 card usually takes 4-8 hours; a long training run takes about 10 times as long. In theory, the training time of the DPMD method grows linearly with the number of element types (M*N, with M the number of atoms and N the number of types).
    3. Set type.raw correctly for the training data. Pay special attention when preparing the initial data: make sure the element order and the type indices are correct.
    4. Test the k-points: DPGEN uses KSPACING and KGAMMA in the VASP INCAR to determine the k-point mesh. The energy should usually be converged to 1 meV/atom and the force components to below 5 meV/A.
    5. The exploration step of DPGEN samples configurations by MD; the number of exploration steps generally only needs to grow to 10000-20000 over the iterations. Increasing the number of random MD starting points is usually more efficient than increasing the number of exploration steps. This is the most critical stage: when designing the exploration strategy, make sure the explored systems and the sampled space resemble those of the intended application.
    6. You can make DPGEN stop with an error by modifying the corresponding settings in machine.json, which is useful for data analysis and inspection, e.g. set a wrong port/IP so that the workflow halts at a given step.
    7. If a potential was trained with an older version, a newer version can restart from the checkpoint, run another 2000 steps, and then freeze the model (the version gap must not be too large).
    8. Neural networks are powerful fitters: even inconsistent data (e.g. different k-point settings) can be fitted with very small energy/force errors. Therefore, always check the quality of the potential on test systems relevant to the problem you are studying, and make sure the DFT input data have been generated with thoroughly tested calculation parameters; a small sketch of such a check is given after this list.
    9. If lcurve.out shows NaN after submission, the likely cause is that memory or the GPU was not allocated correctly; restart the job.
    10. dp restart/freeze must be run from the same path; if the folder has been moved or renamed, you can edit the checkpoint file to point to the model path.
    11. Evaluating four models simultaneously during MD does not slow it down (as long as GPU memory is not exhausted).
    12. When running MD with several models, old versions used the average of the models, while new versions (>1.0) use the value of the first potential.
    13. Visualize the training results of every iteration: the learning curve (how the training error decreases with batches), the distribution of the model deviation, and the convergence and structural correctness of the single-point calculations; analyze the results of each iteration.
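
    As a sketch of the model-quality check mentioned in point 8, the frozen model can be evaluated on a few test structures through the DeepPot Python interface and compared with the DFT reference (graph.pb and the test_data paths are assumptions, following the data format described earlier):

    import numpy as np\nfrom deepmd.infer import DeepPot\n\ndp = DeepPot('graph.pb')                            # frozen model from 'dp freeze'\ncoord = np.load('test_data/set.000/coord.npy')      # shape (nframes, natoms*3)\nbox = np.load('test_data/set.000/box.npy')          # shape (nframes, 9)\ne_ref = np.load('test_data/set.000/energy.npy')     # DFT reference energies\nf_ref = np.load('test_data/set.000/force.npy')      # DFT reference forces\natype = np.loadtxt('test_data/type.raw', dtype=int).tolist()\n\ne, f, v = dp.eval(coord, box, atype)\nnatoms = len(atype)\nprint('energy RMSE per atom:', np.sqrt(np.mean((e.reshape(-1) - e_ref.reshape(-1))**2)) / natoms)\nprint('force RMSE:', np.sqrt(np.mean((f.reshape(f.shape[0], -1) - f_ref)**2)))\n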
    "},{"location":"wiki/software_usage/experience_of_dpmd_and_dpgen/#dft","title":"DFT\u5355\u70b9\u80fd\u8ba1\u7b97\u7ecf\u9a8c","text":"
    • The k-points usually have the largest influence on a system; different k-point settings must be tested, and the number of k-points directly determines the computational cost
    • VASP is good at small systems with many k-points run in parallel; large systems with few k-points become noticeably slow. This can be controlled with KSPACING, see the script below
    from pymatgen.core import Structure  # on very old pymatgen use: from pymatgen import Structure\nfrom math import pi\nimport numpy as np\nimport pandas as pd\nstc = Structure.from_file('POSCAR')\na,b,c = stc.lattice.abc\n# CASTEP and VASP define KSPACING differently; they differ by a constant factor of 2*pi\nkspacing_range = np.linspace(0.1, 0.6, 21)\nkpoint_a = np.ceil( 2*pi/kspacing_range/a).astype('int')\nkpoint_b = np.ceil( 2*pi/kspacing_range/b).astype('int')\nkpoint_c = np.ceil( 2*pi/kspacing_range/c).astype('int')\n\ndf = pd.DataFrame({'kspacing': kspacing_range, 'a': kpoint_a, 'b': kpoint_b, 'c': kpoint_c})\nprint(df) # number of k-points along each axis for every kspacing value\n
    • The main INCAR parameters are
      • ENCUT (usually 600/650 to guarantee quality; its effect on the speed is not significant);
      • ISMEAR=0 (the tetrahedron method with Blöchl corrections, ISMEAR=-5, needs at least 4 k-points and sometimes cannot be used; tests show the energy/force differences between the two are below 1e-3, and ISMEAR=0 is cheaper)
      • spin can have a very large effect on the system; one brute-force approach is to directly provide a good initial guess (with the help of a script),
      • LASPH can be considered; it improves the accuracy at very little extra cost.
      • Turn LWAVE and LCHARG off to avoid wasting computing time and storage space.
    • The testing strategy should be: first pick the most expensive settings, then increase the accuracy and check convergence; afterwards use this as the reference and lower the individual parameters one by one. Reduce the computational cost only on the basis of a reliable accuracy.
    from ase.io import read\nat = read('OUTCAR')\nref = read('ref/OUTCAR') # reference (most accurate) calculation\ndE = ref.get_potential_energy() - at.get_potential_energy() # dE is usually below 10 meV\ndEperAtom = dE/len(ref) # should be below 1 meV/atom\ndF = ref.get_forces() - at.get_forces()\nprint(dF.max(), dF.min()) # should be below 5 meV/A, ideally below 1 meV/A\n
    1. LREAL = Auto: for large systems real-space projection is recommended (Auto selects it by default), and for GPU runs it is mandatory. Because of the different integration scheme, the real-space evaluation introduces a systematic error of 1~2 meV/atom.
    2. Any structure output by VASP can be added to the training set as long as its electronic steps are converged. Note that adding wrong structures (with huge absolute energies) will prevent the training error from going down.
    3. If a VASP calculation uses only a single k-point, use vasp_gam; compared with vasp_std it saves 1/6 - 1/3 of the time.
    "},{"location":"wiki/software_usage/experience_of_dpmd_and_dpgen/#_1","title":"\u6587\u4ef6\u7a7a\u95f4\u7ba1\u7406","text":"

    As the simulated time and system size grow, the disk space taken up by the stored files becomes very large. When storing files, note: 1. Keep the necessary input and output files, including the initial structure (data.lmp), the run settings (input.lammps), the run output (log) and the trajectory (traj). 2. The following compression scheme is recommended:

    zip -9r -y data.zip data/   # maximum compression level; keep relative file paths in the archive\n

    npz compression can also be used; compared with plain zip it improves the compression ratio by roughly 5%.

    import numpy as np\ndata = ...\ndata = data.astype('float32') # float32 keeps enough precision for coordinates/forces etc.\nnp.savez_compressed('data.npz', data=data)\ndata = np.load('data.npz')['data']  # reload\n
    "},{"location":"wiki/software_usage/n2p2/","title":"n2p2 Usage Guide","text":""},{"location":"wiki/software_usage/n2p2/#short-introduction","title":"Short Introduction","text":"

    This repository provides ready-to-use software for high-dimensional neural network potentials in computational physics and chemistry.

    The following links are for your information:

    • Repository
    • Original Methodology: J. Behler and M. Parrinello, Phys. Rev. Lett. 98, 146401 (2007)
    • Paper for This code: Singraber, A.; Morawietz, T.; Behler, J.; Dellago, C. , J. Chem. Theory Comput. 2019 15 (5), 3075-3092
    • To install this code: see the Installation Guide

    Warning

    This page only reflects the author's experience and understanding. If you find any mistakes or unclear parts, please report an issue

    "},{"location":"wiki/software_usage/n2p2/#basic-principle","title":"Basic Principle","text":"

    The n2p2 software is based on neural network fitting. For details on neural networks (NN), please refer to [here].

    The extra work done by Behler and Parrinello is to build a link between the potential energy surface and the NN.

    First, they decompose the total energy into atomic energies (\\(E^{atom}\\)). \\(E^{atom}\\) is not the energy of a neutral atom in vacuum as seen in quantum chemistry textbooks; it is simply the decomposition of the total energy into contributions from every atom, as expressed by the following equation: $$ E_{tot}=\\sum_i {E_i^{atom}} $$ where i runs over the atom indices in the system.

    "},{"location":"wiki/software_usage/n2p2/#usage-in-cluster","title":"Usage in Cluster","text":"

    n2p2 is installed on Cluster51. Use the command module load n2p2/2.0.0 to load it; after that all of the n2p2 executables are available. An LSF script is provided at /share/base/script/n2p2.lsf. An explanation of the LSF script is put here

    "},{"location":"wiki/software_usage/n2p2/#training-procedure","title":"Training Procedure","text":""},{"location":"wiki/software_usage/n2p2/#overview","title":"Overview","text":"

    The core program of n2p2 is nnp-train. This command becomes available after module load n2p2/2.0.0. Entering the directory with the prepared files and typing nnp-train is all that is needed. For an MPI run, just type mpirun nnp-train. The input files for nnp-train include:

    • input.nn: input setup for training
    • input.data: input training set for training procedure.
    • scaling.data: scaling data from data set (you will obtain this from nnp-scaling)

    Example input file is in the github repository <n2p2 root>/examples/nnp-train

    "},{"location":"wiki/software_usage/n2p2/#file-inputdata","title":"File: input.data","text":"

    See input.data format here

    Python script for converting cp2k xyz output to input.data

     from ase.io import read\n import os\n\n # data_path: directory that contains forces.xyz and coords.xyz\n data_path = \"./test_data\"\n data_path = os.path.abspath(data_path)\n\n # input cell parameter here, a 3x3 list\n cell = [[10., 0., 0. ], [0., 10., 0.], [0., 0., 10.]]\n\n # read coords and forces; forces.xyz is parsed as coordinates,\n # so the \"positions\" of frc actually hold the force components\n pos_path= os.path.join(data_path, \"coords.xyz\")\n frc_path= os.path.join(data_path, \"forces.xyz\")\n pos = read(pos_path, index = \":\")\n frc = read(frc_path, index = \":\")\n\n out_path = os.path.join(data_path, \"input.data\")\n fw = open(out_path, \"w\")\n for frame_idx in range(len(pos)):\n     fw.write(\"begin\\n\")\n     for i in range(3):\n         fw.write(\"lattice{:10.4f}{:10.4f}{:10.4f}\\n\".format(cell[i][0], cell[i][1], cell[i][2]))\n     for atom in zip(pos[frame_idx], frc[frame_idx]):\n         fw.write(\"atom{:12.5f}{:12.5f}{:12.5f}\".format(atom[0].position[0], atom[0].position[1], atom[0].position[2]))\n         fw.write(\"{:3}\".format(atom[0].symbol))\n         fw.write(\"{:10.4f}{:10.4f}\".format(0.0, 0.0))\n         fw.write(\"{:12.5f}{:12.5f}{:12.5f}\\n\".format(atom[1].position[0], atom[1].position[1], atom[1].position[2]))\n     fw.write(\"energy{:20.4f}\\n\".format(pos[frame_idx].info['E']))\n     fw.write(\"charge{:20.4f}\\n\".format(0.0))\n     fw.write(\"end\\n\")\n fw.close()\n
    "},{"location":"wiki/software_usage/n2p2/#nnp-scaling","title":"nnp-scaling","text":"

    nnp-scaling must be executed before nnp-train in order to obtain the file scaling.data. Only two input files are needed:

    • input.nn
    • input.data

    An example input file is in the github repository <n2p2 root>/examples/nnp-scaling. One point is worth noting: the random_seed keyword in input.nn is followed by a number that initializes the pseudo-random number generator. As the name implies, these numbers are not truly random: the sequence depends entirely on the seed (more precisely, starting from the same seed always yields exactly the same sequence). Therefore, if you want a different random initialization of the NN parameters, set a different random seed, as the short example below illustrates.
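    A short illustration of this behaviour (using NumPy's generator as a stand-in for the pseudo-random code used by n2p2): identical seeds reproduce identical sequences, so only a different seed gives a genuinely different starting point.

    import numpy as np\n\nrng_a = np.random.default_rng(seed=12345)\nrng_b = np.random.default_rng(seed=12345)\nrng_c = np.random.default_rng(seed=54321)\n\nprint(np.allclose(rng_a.normal(size=5), rng_b.normal(size=5)))  # True: same seed, same 'random' numbers\nprint(np.allclose(np.random.default_rng(12345).normal(size=5), rng_c.normal(size=5)))  # False: different seed\n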

    "},{"location":"wiki/software_usage/vmd/","title":"VMD \u4f7f\u7528\u8bf4\u660e","text":""},{"location":"wiki/software_usage/vmd/#vmd_1","title":"VMD\u4ecb\u7ecd","text":"

    VMD is a molecular visualization program, mainly used to inspect molecular dynamics trajectories.

    Official website: http://www.ks.uiuc.edu/Research/vmd/

    "},{"location":"wiki/software_usage/vmd/#vmd_2","title":"VMD\u5b89\u88c5","text":""},{"location":"wiki/software_usage/vmd/#linux-windows","title":"Linux \u548c Windows","text":"

    Simply follow the official website; nothing else needs special attention.

    "},{"location":"wiki/software_usage/vmd/#macos-catalina","title":"MacOS Catalina\u7248\u672c\u4ee5\u4e0a","text":"

    Since Apple no longer supports 32-bit software, a 64-bit build of VMD is required.

    A pre-compiled build can be downloaded from here: https://www.ks.uiuc.edu/Research/vmd/mailing_list/vmd-l/31222.html

    "},{"location":"wiki/software_usage/vmd/#vmd_3","title":"\u4f7f\u7528\u96c6\u7fa4\u7684VMD\u8fdb\u884c\u8fdc\u7a0b\u67e5\u770b","text":"

    VMD/1.9.3 is now installed on both the 51 and 52 clusters.

    To use it:

    module load vmd/1.9.3\n

    Then use vmd just as you would on your local machine.

    "},{"location":"wiki/software_usage/vmd/#vmd_4","title":"\u96c6\u7fa4\u6253\u5f00vmd\u62a5\u9519","text":"

    If you encounter the error

    XRequest.149: BadMatch (invalid parameter attributes) 0xa00105\nXRequest.149: GLXBadContext 0xa00001\n

    first check on the cluster with

    glxinfo\nglxgears\n

    If you get the errors

    name of display: localhost:24.0\nlibGL error: No matching fbConfigs or visuals found\nlibGL error: failed to load driver: swrast\nX Error of failed request:  GLXBadContext\n  Major opcode of failed request:  149 (GLX)\n  Minor opcode of failed request:  6 (X_GLXIsDirect)\n  Serial number of failed request:  23\n  Current serial number in output stream:  22\n

    and

    libGL error: No matching fbConfigs or visuals found\nlibGL error: failed to load driver: swrast\nX Error of failed request:  BadValue (integer parameter out of range for operation)\n  Major opcode of failed request:  149 (GLX)\n  Minor opcode of failed request:  3 (X_GLXCreateContext)\n  Value in failed request:  0x0\n  Serial number of failed request:  28\n  Current serial number in output stream:  30\n

    then quit **XQuartz** in the terminal of your **local Mac/iMac** and enter the following in the local terminal:

    defaults write org.macosforge.xquartz.X11 enable_iglx -bool true \n

    This solves the problem.

    Ref: https://www.ks.uiuc.edu/Research/vmd/mailing_list/vmd-l/28494.html

    "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/","title":"ECINT \u7684\u4f7f\u7528","text":""},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#_1","title":"\u5b89\u88c5\u4e0e\u914d\u7f6e","text":"

    \u5728\u4f7f\u7528 ECINT \u524d\uff0c\u9700\u5b89\u88c5\u5e76\u914d\u7f6e\u597d aiida-core \u4e0e aiida \u63d2\u4ef6\uff0c\u4e0d\u8fc7\u4e5f\u53ef\u4ee5\u5728 hydrogen \u4e0a\u4f53\u9a8c\u5df2\u914d\u7f6e\u597d\u7684\u73af\u5883

    "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#hydrogen","title":"\u5982\u4f55\u8fdb\u5165 hydrogen","text":"
    1. \u8054\u7cfb\u96c6\u7fa4\u7ba1\u7406\u5458\uff0c\u5c06\u4f60\u7684\u516c\u94a5\u653e\u5230 hydrogen \u4e0a

    2. \u5728\u6d77\u6d0b\u697c\u7f51\u7edc\u73af\u5883\u4e0b\uff0c\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u53ef\u8fdb\u5165 hydrogen

    ssh -p 8099 chenglab@10.24.3.144\n

    \u5efa\u8bae\u5728\u7528\u5de5\u4f5c\u6d41\u65f6\uff0c\u5148\u5728 ~/users \u4e0b\u5efa\u7acb\u4e00\u4e2a\u4ee5\u81ea\u5df1\u540d\u5b57\u547d\u540d\u7684\u5de5\u4f5c\u76ee\u5f55\uff0cusers/public.data \u4e3a 51/52 \u7684 /public.data

    "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#_2","title":"\u8f93\u5165\u6587\u4ef6","text":"

    \u5728\u60f3\u8981\u8fd0\u884c\u5de5\u4f5c\u6d41\u7684\u5de5\u4f5c\u8def\u5f84\u4e0b\u51c6\u5907\u4e00\u4e2a .json \u8f93\u5165\u6587\u4ef6\uff0c\u793a\u4f8b\u5982\u4e0b (\u8981\u7528 \"\uff0c\u800c\u4e0d\u662f '):

    {\n  \"workflow\": \"NebWorkChain\",\n  \"webhook\": \"https://oapi.dingtalk.com/robot/send?access_token=xxxxxx\",\n  \"resdir\": \"results\",\n  \"structure\": [\"ethane_1_opt.xyz\", \"ethane_s1.xyz\", \"ethane_ts.xyz\", \"ethane_s2.xyz\"],\n  \"cell\": [12, 12, 12],\n  \"metadata\": {\n    \"kind_section\": {\n      \"BASIS_SET\": \"TZV2P-GTH\",\n      \"POTENTIAL\": \"GTH-PBE\"\n    }\n  }\n}\n

    \u6216\u8005\u4e5f\u53ef\u4ee5\u7528 .yaml \u8f93\u5165\u6587\u4ef6\uff0c\u793a\u4f8b\u5982\u4e0b (- \u548c ethane_1_opt.xyz \u4e4b\u95f4\u4e0d\u8981\u6f0f\u4e86\u7a7a\u683c):

    workflow: NebWorkChain\nwebhook: https://oapi.dingtalk.com/robot/send?access_token=xxxxxx\nresdir: results\nstructure:\n  - ethane_1_opt.xyz\n  - ethane_s1.xyz\n  - ethane_ts.xyz\n  - ethane_s2.xyz\ncell: [12, 12, 12]\nmetadata:\n  kind_section:\n    BASIS_SET: TZV2P-GTH\n    POTENTIAL: GTH-PBE\n

    \u66f4\u591a\u8f93\u5165\u7684\u4f8b\u5b50\u5728 https://github.com/chenggroup/ecint/tree/develop/example

    "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#_3","title":"\u5404\u5173\u952e\u8bcd\u89e3\u91ca","text":"
    • workflow (\u5fc5\u586b): workflow \u7684\u540d\u5b57\uff0c\u5177\u4f53\u53ef\u9009\u7684\u8bf7\u89c1\u53ef\u9009\u7528\u7684 workflow \u90e8\u5206

    • webhook (\u9009\u586b): \u9489\u9489\u673a\u5668\u4eba webhook\uff0c\u5f53\u5de5\u4f5c\u6d41\u5b8c\u6210\u65f6\u60f3\u8981\u5373\u65f6\u6536\u5230\u9489\u9489\u63d0\u9192\u65f6\u53ef\u8bbe\u7f6e\uff0c\u5426\u5219\u53ef\u4e0d\u7528

    • resdir (\u9009\u586b, default: \u5f53\u524d\u6240\u5728\u8def\u5f84): \u7ed3\u679c\u6587\u4ef6\u7684\u50a8\u5b58\u8def\u5f84

    • structure/structures_folder (\u5fc5\u586b\u5176\u4e2d\u4e4b\u4e00): \u4ec5\u8f93\u5165\u4e00\u4e2a\u7ed3\u6784\u65f6\uff0cstructure \u4e3a\u7ed3\u6784\u6587\u4ef6\u7684\u8def\u5f84 (\u975e\u5217\u8868)\uff0c\u5bf9\u4e8e neb \u8fd9\u79cd\u9700\u8981\u591a\u4e2a\u8f93\u5165\u7ed3\u6784\u7684\uff0cstructure \u4e3a\u7ed3\u6784\u6587\u4ef6\u8def\u5f84\u7684\u5217\u8868\u3002\u5982\u679c\u6279\u91cf\u8fdb\u884c\u8ba1\u7b97\uff0c\u5219\u628a\u6279\u91cf\u7684\u7ed3\u6784\u6240\u5728\u6587\u4ef6\u5939\u52a0\u5165 structures_folder (\u6682\u4e0d\u652f\u6301 neb)

    • cell (\u9009\u586b): \u8bbe\u7f6e\u4e86 cell \u540e\u4f1a\u6539\u53d8\u90a3\u4e9b\u7ed3\u6784\u4e2d\u4e0d\u5305\u542b cell \u4fe1\u606f\u7684 cell\u3002\u5982\u679c\u7528\u7684\u662f .xyz \u683c\u5f0f\uff0c\u4e00\u822c\u9700\u8981\u8bbe\u7f6e cell (\u56e0\u4e3a .xyz \u4e00\u822c\u4e0d\u5305\u542b cell \u7684\u4fe1\u606f)\uff0c.cif or POSCAR(.vasp) \u5219\u4e0d\u9700\u8981\u8bbe\u7f6e\u3002cell \u7684\u683c\u5f0f\u4e0e ase \u4e2d\u7684 cell \u683c\u5f0f\u4fdd\u6301\u4e00\u81f4\uff0c\u5982 [12, 12, 12] \u6216 [[12, 0, 0], [0, 12, 0], [0, 0, 12]] \u6216 [12, 12, 12, 90, 90, 90]

    • metadata (\u9009\u586b):

    \u4ee5\u4e0b\u53c2\u6570\u53ef\u4e0d\u586b\uff0c\u5bf9\u4e8e\u4e0d\u540c\u7684 workflow \u5747\u6709\u4e0d\u540c\u7684\u9ed8\u8ba4\u503c

    • config: \u53ef\u4ee5\u4e3a dict, .json, .yaml\uff0c\u8868\u793a cp2k \u8f93\u5165\u53c2\u6570\u7684\u57fa\u672c\u8bbe\u7f6e\uff0c\u4ee5 dict \u7684\u5f62\u5f0f\u6765\u8868\u793a cp2k \u8f93\u5165\uff0c\u4e00\u4e9b\u7ec6\u81f4\u7684\u8bbe\u7f6e\uff0c\u5982\u8ba1\u7b97\u7cbe\u5ea6\uff0c\u53ef\u5728\u6b64\u5904\u4fee\u6539\uff0c\u4e5f\u53ef\u901a\u8fc7 cp2k \u8f93\u5165\u6587\u4ef6\u8fdb\u884c\u8f6c\u5316\u3002\u65e0\u7279\u6b8a\u9700\u6c42\u53ef\u4e0d\u66f4\u6539\u3002config \u7684\u793a\u4f8b\u5982\u4e0b:

    • kind_section: \u914d\u7f6e BASIS_SET \u548c POTENTIAL \u7684\u57fa\u672c\u4fe1\u606f\uff0c\u53ef\u4ee5\u6709\u56db\u79cd\u8f93\u5165\u5f62\u5f0f

      \u82e5\u8bbe\u7f6e\u4e86 kind_section \u7684\u8bdd\uff0c\u9700\u540c\u65f6\u8bbe\u7f6e BASIS_SET \u4e0e POTENTIAL\u3002\u5982\u679c\u6309\u5143\u7d20\u6765\u6307\u5b9a\u4e86 BASIS_SET \u6216 POTENTIAL \u7684\u8bdd\uff0c\u9700\u8981\u6307\u5b9a\u6240\u6709\u5143\u7d20\u7684\u8bbe\u7f6e\u3002\u8bbe\u7f6e\u6bd4\u8f83\u590d\u6742\u7684\u8bdd\u63a8\u8350\u4ee5\u6587\u4ef6\u7684\u65b9\u5f0f (\u4e0b\u9762\u7684\u7b2c\u56db\u79cd\u65b9\u6cd5) \u6765\u5f15\u7528 kind_section

      • ```python # .json \"kind_section\": {\"BASIS_SET\": \"TZV2P-GTH\", \"POTENTIAL\": \"GTH-PBE\"}

      # or .yaml kind_section: BASIS_SET: TZV2P-GTH POTENTIAL: GTH-PBE ```

      • ```python # .json \"kind_section\": {\"H\": {\"BASIS_SET\": \"TZV2P-GTH\", \"POTENTIAL\": \"GTH-PBE\"}, \"O\": {\"BASIS_SET\": \"TZV2P-GTH\", \"POTENTIAL\": \"GTH-PBE\"}, ...}

      # or .yaml kind_section: H: BASIS_SET: TZV2P-GTH POTENTIAL: GTH-PBE O: BASIS_SET: TZV2P-GTH POTENTIAL: GTH-PBE ... ```

      • ```python # .json \"kind_section\": [{\"\": \"H\", \"BASIS_SET\": \"TZV2P-GTH\", \"POTENTIAL\": \"GTH-PBE\"}, {\"\": \"O\", \"BASIS_SET\": \"TZV2P-GTH\", \"POTENTIAL\": \"GTH-PBE\"}, ...]

      # or .yaml kind_section: - _: H BASIS_SET: TZV2P-GTH POTENTIAL: GTH-PBE - _: O BASIS_SET: TZV2P-GTH POTENTIAL: GTH-PBE ... ```

      • ```python # <> example kind_section: H: BASIS_SET: TZV2P-GTH POTENTIAL: GTH-PBE O: BASIS_SET: TZV2P-GTH POTENTIAL: GTH-PBE ...

        # .json \"kind_section\": \"<>\" # YOUR_KIND_SECTION_FILE can be .json or .yaml

        # or .yaml kind_section: <> # .json or .yaml ```

      • machine: \u9009\u62e9\u914d\u7f6e\u597d\u7684\u670d\u52a1\u5668 (\u76ee\u524d\u4ec5\u652f\u6301 cp2k@aiida_test) \u4ee5\u53ca\u914d\u7f6e\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5

        // example\n{\n    \"code@computer\": \"cp2k@aiida_test\",\n    \"nnode\": 2,\n    \"queue\": \"medium\"\n}\n
        • code@computer: \u914d\u7f6e\u597d\u7684 aiida \u670d\u52a1\u5668 (\u76ee\u524d\u4ec5\u652f\u6301 cp2k@aiida_test)
        • nnode/nprocs/n (\u9009\u586b\u5176\u4e2d\u4e4b\u4e00): \u4f7f\u7528\u670d\u52a1\u5668\u8282\u70b9\u6570/\u4f7f\u7528\u670d\u52a1\u5668\u6838\u6570/\u4f7f\u7528\u670d\u52a1\u5668\u6838\u6570
        • walltime/max_wallclock_seconds/w (\u9009\u586b\u5176\u4e2d\u4e4b\u4e00): \u5f3a\u5236\u7ec8\u6b62\u8ba1\u7b97\u65f6\u95f4\uff0c\u5355\u4f4d s
        • queue/queue_name/q (\u9009\u586b\u5176\u4e2d\u4e4b\u4e00): \u670d\u52a1\u5668\u961f\u5217\u540d
        • ptile: \u6bcf\u8282\u70b9\u81f3\u5c11\u9700\u4f7f\u7528\u7684\u6838\u6570\uff0c\u9ed8\u8ba4\u503c\u4e3a\u6bcf\u8282\u70b9\u7684\u6838\u6570
      • ...: some parameters for special workflow

      • subdata (\u9009\u586b):

      • \u7528\u4e8e\u4fee\u6539\u591a\u6b65\u5de5\u4f5c\u6d41\u4e2d\uff0c\u6bcf\u6b65\u5de5\u4f5c\u6d41\u7684 config, kind_section, machine, \u5176\u8bbe\u7f6e\u4f1a\u8986\u76d6\u6389 metada \u4e2d\u7684\u76f8\u5173\u8bbe\u7f6e\u3002

        e.g. NebWorkChain \u7531\u4e09\u90e8\u5206\u7ec4\u6210: geoopt, neb, frequency. \u82e5\u8f93\u5165\u5982\u4e0b:

        workflow: NebWorkChain\nwebhook: https://oapi.dingtalk.com/robot/send?access_token=xxx  # your own webhook\nresdir: results_yaml\nstructure:\n  - ethane_1_opt.xyz\n  - ethane_s1.xyz\n  - ethane_ts.xyz\n  - ethane_s2.xyz\ncell:\n  - [12, 0, 0]\n  - [0, 12, 0]\n  - [0, 0, 12]\nmetadata:\n  kind_section:\n    BASIS_SET: DZVP-MOLOPT-SR-GTH\n    POTENTIAL: GTH-PBE\nsubdata:\n  geoopt:\n    kind_section:\n      BASIS_SET: TZV2P-MOLOPT-GTH\n      POTENTIAL: GTH-PBE\n

        \u5219 geoopt \u90e8\u5206\u7684 kind_section \u4f1a\u88ab\u66f4\u65b0\u4e3a {\"BASIS_SET\": \"TZV2P-MOLOPT-GTH\", \"POTENTIAL\": \"GTH-PBE\"} \uff0c\u800c neb \u4e0e frequency \u90e8\u5206\u7684 kind_section \u5219\u4e0e metadata \u4e2d\u7684\u4fdd\u6301\u4e00\u81f4\u3002

        • <>:
          • config: \u89c1 metadata
          • kind_section: \u89c1 metadata
          • machine: \u89c1 metadata
        • <>:
          • config
          • kind_section
          • machine
        • ...
        • "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#workflow","title":"\u53ef\u9009\u7528\u7684 workflow","text":"

          \u8f93\u51fa\u7684\u57fa\u672c\u4fe1\u606f\u5728 results.dat \u4e2d\uff0c\u4ee5\u4e0b workflow \u4e2d\u4ec5\u8bf4\u660e\u9664\u4e86 results.dat \u5916\u7684\u8f93\u51fa\u6587\u4ef6

          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#energysingleworkchain","title":"EnergySingleWorkChain","text":"

          Just single point energy

          • \u8f93\u5165\u9ed8\u8ba4\u503c:
          • config: energy.json
          • kind_section: {\"BASIS_SET\": \"DZVP-MOLOPT-SR-GTH\", \"POTENTIAL\": \"GTH-PBE\"}
          • machine: {\"code@computer\": \"cp2k@aiida_test\", \"nnode\": 1, \"walltime\": 12 * 60 * 60, \"queue\": \"medium\"}
          • \u5176\u4ed6\u8f93\u51fa:
          • \u5305\u542b\u80fd\u91cf\u4fe1\u606f\u7684\u7ed3\u6784: coords.xyz
          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#geooptsingleworkchain","title":"GeooptSingleWorkChain","text":"

          Just geometry optimization

          • \u8f93\u5165\u9ed8\u8ba4\u503c:
          • config: geoopt.json
          • kind_section: {\"BASIS_SET\": \"DZVP-MOLOPT-SR-GTH\", \"POTENTIAL\": \"GTH-PBE\"}
          • machine: {\"code@computer\": \"cp2k@aiida_test\", \"nnode\": 1, \"walltime\": 12 * 60 * 60, \"queue\": \"medium\"}
          • \u5176\u4ed6\u8f93\u51fa:
          • \u7ed3\u6784\u4f18\u5316\u5b8c\u540e\u7684\u7ed3\u6784: structure_geoopt.xyz
          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#nebsingleworkchain","title":"NebSingleWorkChain","text":"

          Just CI-NEB

          • \u8f93\u5165\u9ed8\u8ba4\u503c:
          • config: neb.json
          • kind_section: {\"BASIS_SET\": \"DZVP-MOLOPT-SR-GTH\", \"POTENTIAL\": \"GTH-PBE\"}
          • machine: {\"code@computer\": \"cp2k@aiida_test\", \"nnode\": number_of_replica, \"queue\": \"large\"}
          • \u5176\u4ed6\u8f93\u51fa:
          • \u5305\u542b\u59cb\u7ec8\u6001\u53ca\u4e2d\u95f4\u6001\u7684 trajectory: images_traj.xyz
          • \u52bf\u80fd\u66f2\u7ebf: potential_energy_curve.png
          • \u8fc7\u6e21\u6001\u7ed3\u6784: transition_state.xyz
          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#frequencysingleworkchain","title":"FrequencySingleWorkChain","text":"

          Just vibrational analysis

          • \u8f93\u5165\u9ed8\u8ba4\u503c:
          • config: frequency.json
          • kind_section: {\"BASIS_SET\": \"DZVP-MOLOPT-SR-GTH\", \"POTENTIAL\": \"GTH-PBE\"}
          • machine: {\"code@computer\": \"cp2k@aiida_test\", \"nnode\": 4, \"queue\": \"large\"}
          • \u5176\u4ed6\u8f93\u51fa:
          • \u632f\u52a8\u9891\u7387\u7684\u503c: frequency.txt
          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#nebworkchain","title":"NebWorkChain","text":"

          Geoopt for initial and final states \u2192 NEB \u2192 Vibrational analysis

          • \u8f93\u5165\u9ed8\u8ba4\u503c:
          • geoopt: {default value in GeooptSingleWorkChain}
          • neb: {default value in NebSingleWorkChain}
          • frequency: {default value in FrequencySingleWorkChain}
          • \u5176\u4ed6\u8f93\u51fa:
          • all outputs of GeooptSingleWorkChain, NebSingleWorkChain and FrequencySingleWorkChain
          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#cp2k-input-config","title":"CP2K input \u8f6c config","text":"

          \u4f7f\u7528\u5de5\u5177 inp2config \u53ef\u5c06 cp2k \u8f93\u5165\u6587\u4ef6\u8f6c\u6210 config \u6240\u9700\u7684\u5f62\u5f0f, <<CP2K_INP>> \u4e3a cp2k \u8f93\u5165\u6587\u4ef6\u8def\u5f84 <<CONFIG>> \u4e3a\u8f93\u51fa\u7684 config \u6587\u4ef6\u8def\u5f84\uff0c\u540e\u7f00\u4e3a .json/.yaml:

          inp2config <<CP2K_INP>> <<CONFIG>>\n# e.g.\ninp2config input.inp config.yaml\n

          \u8981\u6839\u636e cp2k \u8f93\u5165\u6587\u4ef6\u4e00\u5e76\u751f\u6210 kind_section \u7684\u8f93\u5165\u8bbe\u7f6e, <<KIND_SECTION>> \u4e3a\u8f93\u51fa\u7684 kind_section \u8def\u5f84\uff0c\u540e\u7f00\u4e3a .json/.yaml:

          inp2config <<CP2K_INP>> <<CONFIG>> -k <<KIND_SECTIOn>>\n# e.g.\ninp2config input.inp config.yaml -k kind_section.yaml\n
          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#_4","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

          \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u5373\u53ef\u63d0\u4ea4\u5de5\u4f5c\u6d41\uff0c<<YOUR_INPUT_FILE>> \u4e3a .json \u6216 .yaml \u8f93\u5165\u6587\u4ef6\u7684\u8def\u5f84\uff0c\u7f3a\u7701\u503c\u4e3a\u5f53\u524d\u8def\u5f84\u4e0b\u7684 ecint.json

          ecrun <<YOUR_INPUT_FILE>>\n
          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#_5","title":"\u63a8\u9001","text":"

          \u8ba1\u7b97\u5b8c\u6210\u7684\u622a\u56fe\u5982\u4e0b:

          \u8ba1\u7b97\u51fa\u9519\u7684\u622a\u56fe\u5982\u4e0b:

          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#_6","title":"\u5e38\u89c1\u9519\u8bef","text":""},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#_7","title":"\u8bfb\u53d6\u7ed3\u6784\u6587\u4ef6\u9519\u8bef","text":"
            File \"xxx/lib/python3.7/site-packages/ase/io/formats.py\", line 599, in read\n    io = ioformats[format]\nKeyError: 'coord'\n

          Cause: the file extension cannot be recognized

          Solution: pay attention to the file extension and use a correct one, e.g. .xyz or .cif; for POSCAR both POSCAR and .vasp work

          "},{"location":"wiki/software_usage/ECINT%20Tutorial/user/#xyz","title":"\u8bfb\u53d6 xyz \u9519\u8bef","text":"
          ase.io.extxyz.XYZError: ase.io.extxyz: Expected xyz header but got: invalid literal for int() with base 10: ...\n

          Cause: the xyz file format is wrong. The first line of an xyz file is the total number of atoms, the second line is a comment line (it may be empty), and the coordinates only start from the third line

          Solution: if the file starts directly with coordinates, prepend a line with the number of atoms (e.g. 180) and an empty line, as sketched below
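          A minimal sketch of this fix, assuming a hypothetical input file bare.xyz that starts directly with coordinate lines:

          # prepend the atom count and an empty comment line to a bare coordinate file\nwith open('bare.xyz') as fin:          # hypothetical input that starts with coordinates\n    coord_lines = fin.read().splitlines()\n\nwith open('fixed.xyz', 'w') as fout:\n    print(len(coord_lines), file=fout)   # first line: number of atoms\n    print('', file=fout)                 # second line: (empty) comment line\n    print('\\n'.join(coord_lines), file=fout)\n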

          "},{"location":"wiki/software_usage/cp2k/cp2k-constrainedmd/","title":"CP2K: Constrained MD","text":""},{"location":"wiki/software_usage/cp2k/cp2k-constrainedmd/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
          • Setting up constrained MD in CP2K
          • Computing reaction free energies with the Potential of Mean Force method
          "},{"location":"wiki/software_usage/cp2k/cp2k-constrainedmd/#_2","title":"\u5b66\u4e60\u8d44\u6599","text":"
          • CP2K official manual: Section CONSTRAINT
          • CP2K official example: NaCl dissociation energy
          • Sun, J.-J., Cheng, J. Solid-to-liquid phase transitions of sub-nanometer clusters enhance chemical transformation. Nature Communication, 10, 5400 (2019).
          "},{"location":"wiki/software_usage/cp2k/cp2k-constrainedmd/#cp2k-constrained-md_1","title":"CP2K Constrained MD \u8bbe\u7f6e","text":"

          CP2K can output the Lagrange multipliers generated while the constraint is applied; their statistical average is the Potential of Mean Force (PMF) at that value of the reaction coordinate, and integrating the PMF along the reaction coordinate gives the reaction free energy. MLMD makes high-accuracy, long-time-scale simulations feasible and is therefore well suited to computing free energies of chemically reacting systems. Here we can combine a DeePMD potential with constrained MD simulations; a small post-processing sketch is given below.
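          A minimal post-processing sketch of this scheme, assuming several constrained MD runs at different TARGET values with the time-averaged Lagrange multiplier (mean force) collected from each run; the numbers are placeholders, and the sign convention of the multiplier should be checked against a known case before use:

          import numpy as np\nfrom scipy.integrate import cumulative_trapezoid\n\n# reaction coordinate and time-averaged constraint force from each run,\n# both already converted to atomic units (placeholder values)\nxi         = np.array([3.40, 3.78, 4.16, 4.54, 4.91])         # Bohr\nmean_force = np.array([0.012, 0.008, 0.003, -0.001, -0.003])  # Hartree/Bohr\n\n# free energy profile by integrating the mean force along the coordinate\nfree_energy = -cumulative_trapezoid(mean_force, xi, initial=0.0)\nprint(free_energy * 627.509)  # Hartree -> kcal/mol (factor from the appendix table below)\n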

          First define the Collective Variable (CV); here we choose to constrain the distance between two atoms:

          &FORCE_EVAL\n   ...\n   &SUBSYS\n      ...\n      &COLVAR\n         &DISTANCE\n            ATOMS 225 226\n         &END DISTANCE\n      &END COLVAR\n      ...\n   &END SUBSYS\n   ...\n&END FORCE_EVAL\n

          Here 225 and 226 are the indices of the two atoms whose distance is to be constrained. Note that atom indices in CP2K start from 1.

          Then define the target value of the constrained distance:

          &MOTION\n   &CONSTRAINT\n      &COLLECTIVE\n         COLVAR 1\n         INTERMOLECULAR .TRUE.\n         TARGET 3.4015070391941524 # (1)!\n      &END COLLECTIVE\n      &LAGRANGE_MULTIPLIERS ON\n         COMMON_ITERATION_LEVELS 10000000 # (2)!\n      &END LAGRANGE_MULTIPLIERS\n   &END CONSTRAINT\n   ...\n&MOTION\n
          1. Set the target value of the distance between the two atoms; note that the unit here is a.u.
          2. The default value is 1; to avoid an overly long log file, set this to a value larger than the total number of steps

          Note that the unit of TARGET here is a.u.; convert the usual units (such as \u00c5 ) into atomic units, for example with the one-line conversion below.
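          For example, the TARGET value in the snippet above corresponds to 1.80 \u00c5; a one-line conversion using the Angstrom-to-Bohr factor from the appendix table below:

          ANGSTROM_TO_BOHR = 1.88972613288564   # [Angstrom] -> [Bohr], see the constants table below\ntarget_angstrom = 1.80\nprint(target_angstrom * ANGSTROM_TO_BOHR)  # 3.4015070..., the value used for TARGET above\n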

          "},{"location":"wiki/software_usage/cp2k/cp2k-constrainedmd/#_3","title":"\u9644\u5f55\uff1a\u7269\u7406\u5e38\u6570\u548c\u5355\u4f4d\u6362\u7b97","text":"
          *** Fundamental physical constants (SI units) ***\n\n *** Literature: B. J. Mohr and B. N. Taylor,\n ***             CODATA recommended values of the fundamental physical\n ***             constants: 2006, Web Version 5.1\n ***             http://physics.nist.gov/constants\n\n Speed of light in vacuum [m/s]                             2.99792458000000E+08\n Magnetic constant or permeability of vacuum [N/A**2]       1.25663706143592E-06\n Electric constant or permittivity of vacuum [F/m]          8.85418781762039E-12\n Planck constant (h) [J*s]                                  6.62606896000000E-34\n Planck constant (h-bar) [J*s]                              1.05457162825177E-34\n Elementary charge [C]                                      1.60217648700000E-19\n Electron mass [kg]                                         9.10938215000000E-31\n Electron g factor [ ]                                     -2.00231930436220E+00\n Proton mass [kg]                                           1.67262163700000E-27\n Fine-structure constant                                    7.29735253760000E-03\n Rydberg constant [1/m]                                     1.09737315685270E+07\n Avogadro constant [1/mol]                                  6.02214179000000E+23\n Boltzmann constant [J/K]                                   1.38065040000000E-23\n Atomic mass unit [kg]                                      1.66053878200000E-27\n Bohr radius [m]                                            5.29177208590000E-11\n\n *** Conversion factors ***\n\n [u] -> [a.u.]                                              1.82288848426455E+03\n [Angstrom] -> [Bohr] = [a.u.]                              1.88972613288564E+00\n [a.u.] = [Bohr] -> [Angstrom]                              5.29177208590000E-01\n [a.u.] -> [s]                                              2.41888432650478E-17\n [a.u.] -> [fs]                                             2.41888432650478E-02\n [a.u.] -> [J]                                              4.35974393937059E-18\n [a.u.] -> [N]                                              8.23872205491840E-08\n [a.u.] -> [K]                                              3.15774647902944E+05\n [a.u.] -> [kJ/mol]                                         2.62549961709828E+03\n [a.u.] -> [kcal/mol]                                       6.27509468713739E+02\n [a.u.] -> [Pa]                                             2.94210107994716E+13\n [a.u.] -> [bar]                                            2.94210107994716E+08\n [a.u.] -> [atm]                                            2.90362800883016E+08\n [a.u.] -> [eV]                                             2.72113838565563E+01\n [a.u.] -> [Hz]                                             6.57968392072181E+15\n [a.u.] -> [1/cm] (wave numbers)                            2.19474631370540E+05\n [a.u./Bohr**2] -> [1/cm]                                   5.14048714338585E+03\n
          "},{"location":"wiki/software_usage/cp2k/cp2k-deepmd/","title":"CP2K: DeePMD-kit\u63d2\u4ef6","text":""},{"location":"wiki/software_usage/cp2k/cp2k-deepmd/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
          • Call DeePMD-kit from CP2K to run MLMD simulations
          • Parameter settings for constrained MD
          "},{"location":"wiki/software_usage/cp2k/cp2k-deepmd/#_2","title":"\u5b66\u4e60\u8d44\u6599","text":"

          CP2K official manual:

          • Section DEEPMD
          "},{"location":"wiki/software_usage/cp2k/cp2k-deepmd/#_3","title":"\u9002\u7528\u7248\u672c","text":"

          The following tutorial applies to recent CP2K versions with DeePMD support. On the Zeus cluster, cp2k/2024.2-dev and deepmd/2.2.7 (built without MPI and DFT support) can run this tutorial.

          Note that the job script for cp2k/2024.2-dev should load the modules as follows:

          module load gcc/9.3.0\nmodule load intel/17.5.239\nmodule load cuda/11.8\nmodule load mpi/openmpi/4.1.6-gcc\nmodule load cp2k/2024.2-dev\n
          "},{"location":"wiki/software_usage/cp2k/cp2k-deepmd/#cp2k-md-section","title":"CP2K MD Section \u7684\u8f93\u5165\u6587\u4ef6","text":"

          Please familiarize yourself with the CP2K input file syntax first; see:

          • CP2K: energy and force calculations
          • CP2K: REFTRAJ calculations along an existing MD trajectory.

          Since MLMD usually requires nanosecond or even longer simulations, the output files can become excessively large without proper configuration, so we make the following adjustments under GLOBAL:

          &GLOBAL\n   PROJECT pmf # (1)!\n   RUN_TYPE MD\n   PRINT_LEVEL SILENT # (2)!\n   WALLTIME 95:00:00 # (3)!\n&END GLOBAL\n
          1. Change this according to your own project name; it determines the names of the output files
          2. When running DeePMD, be sure to set this to SILENT to prevent the output file from becoming too large
          3. Recommended to be slightly shorter than the job walltime so that the trajectory is not truncated

          Then we configure the force-field parameters as follows:

          &FORCE_EVAL\n   METHOD FIST\n   &MM\n      &FORCEFIELD\n         &NONBONDED\n            &DEEPMD\n               ATOMS C O Pt\n               ATOMS_DEEPMD_TYPE 0 1 2 # (1)!\n               POT_FILE_NAME ../graph.000.pb\n            &END DEEPMD\n         &END NONBONDED\n         IGNORE_MISSING_CRITICAL_PARAMS .TRUE. # (2)!\n      &END FORCEFIELD\n      &POISSON\n         &EWALD\n            EWALD_TYPE none\n         &END EWALD\n      &END POISSON\n   &END MM\n   ...\n&END FORCE_EVAL\n
          1. Corresponds to the element list: the index of each element in the type_map
          2. Please keep this line so that undefined parameters are ignored

          Usually the MLMD trajectory does not need to be written every step, so the output intervals are set as follows:

          &MOTION\n   ...\n   &MD\n      ...\n      &PRINT\n         &ENERGY\n            &EACH\n               MD 100 # (1)!\n            &END EACH\n         &END ENERGY\n      &END PRINT\n   &END MD\n   &PRINT\n      &CELL\n         &EACH\n            MD 100 # (2)!\n         &END EACH\n      &END CELL\n      &FORCES\n         &EACH\n            MD 100 # (3)!\n         &END EACH\n      &END FORCES\n      &RESTART_HISTORY\n         &EACH\n            MD 200000 # (4)!\n         &END EACH\n      &END RESTART_HISTORY\n      &TRAJECTORY\n         &EACH\n            MD 100 # (5)!\n         &END EACH\n      &END TRAJECTORY\n   &END PRINT\n&END MOTION\n
          1. Sets the output frequency of the ener file; usually keep it consistent with the structure trajectory
          2. Sets the output frequency of the cell parameters; if the cell does not change this part can be omitted
          3. Sets the output frequency of the force trajectory; usually keep it consistent with the structure trajectory
          4. Sets the output frequency of the restart files; it can be estimated from the walltime and the total number of steps (see the sketch after this list)
          5. Sets the output frequency of the structure trajectory
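          A back-of-the-envelope sketch for note 4, assuming you have measured roughly how long one MD step takes from a short test run (the numbers here are placeholders):

          # estimate how many steps fit into the walltime, then pick a RESTART_HISTORY\n# interval so only a few restart files are written per job\nwalltime_hours   = 95       # from &GLOBAL / WALLTIME\nseconds_per_step = 0.05     # measured from a short test run (placeholder)\n\nsteps_per_job = int(walltime_hours * 3600 / seconds_per_step)\nprint(steps_per_job)        # ~6.8 million steps with these numbers\nprint(steps_per_job // 3)   # e.g. write a restart roughly 3 times per job\n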
          "},{"location":"wiki/software_usage/cp2k/cp2k-dft%2Bu/","title":"CP2K: DFT+U","text":""},{"location":"wiki/software_usage/cp2k/cp2k-dft%2Bu/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
          • Reading material

          • Basic principles of DFT+U

          • DFT+U settings in CP2K

          • Inspecting electron occupations with DFT+U

          "},{"location":"wiki/software_usage/cp2k/cp2k-dft%2Bu/#_2","title":"\u5b66\u4e60\u8d44\u6599","text":"

          Dudarev, S. L., Manh, D. N., & Sutton, A. P. (1997). Effect of Mott-Hubbard correlations on the electronic structure and structural stability of uranium dioxide. Philosophical Magazine B: Physics of Condensed Matter; Statistical Mechanics, Electronic, Optical and Magnetic Properties, 75(5), 613\u2013628..

          Dudarev, S. L., Botton, G. A., Savrasov, S. Y., Humphreys, C. J., & Sutton, A. P. (1998). Electron-energy-loss spectra and the structural stability of nickel oxide: An LSDA+U study. Physical Review B, 57(3), 1505\u20131509. .

          Himmetoglu, B.; Floris, A.; de Gironcoli, S.; Cococcioni, M. Hubbard-Corrected DFT Energy Functionals: The LDA+U Description of Correlated Systems. International Journal of Quantum Chemistry 2013, 114 (1), 14\u201349..

          "},{"location":"wiki/software_usage/cp2k/cp2k-dft%2Bu/#dftu","title":"DFT+U\u57fa\u672c\u539f\u7406","text":"

          DFT tends to delocalize the electrons, which is why it describes metallic solids rather well. For oxides of the transition-metal series, e.g. Fe2O3, CoO, Co3O4 or NiO, the transition metals still carry d electrons. In the solid these d electrons are rather localized, and localized around the transition-metal ions; plain DFT then no longer describes such localized electrons well. This can be remedied by increasing the electrostatic repulsion (U) between the d electrons.

          "},{"location":"wiki/software_usage/cp2k/cp2k-dft%2Bu/#cp2k-dftu_1","title":"CP2K DFT+U\u8bbe\u7f6e","text":"

          Under CP2K_INPUT / FORCE_EVAL / DFT:

          PLUS_U_METHOD MULLIKEN\n
          MULLIKEN_CHARGES is not recommended; the LOWDIN method seems more accurate but could not compute FORCES before CP2K v8.2, from which version forces are available, see [this discussion](https://groups.google.com/g/cp2k/c/BuIOSWDqJTc/m/fSL89NZaAgAJ) for details

          Under CP2K_INPUT / FORCE_EVAL / SUBSYS / KIND / DFT_PLUS_U:

          Add the following settings to the KIND section of the element to which +U should be applied

          &DFT_PLUS_U\n    # orbital angular momentum: 0 = s, 1 = p, 2 = d, 3 = f\n    L 2 \n    # effective U value; remember to write [eV], otherwise it defaults to atomic units\n    U_MINUS_J [eV]  3 \n&END DFT_PLUS_U\n
          "},{"location":"wiki/software_usage/cp2k/cp2k-dft%2Bu/#dftu_1","title":"DFT+U \u67e5\u770b\u7535\u5b50\u5360\u636e\u6001","text":"

          If we want to know the occupation of, for example, the d orbitals of the corresponding atoms after applying +U, the following settings print it into the output.

          Under CP2K_INPUT / FORCE_EVAL / DFT / PRINT / PLUS_U:

          &PLUS_U MEDIUM\n    ADD_LAST NUMERIC\n&END PLUS_U\n

          You will get output like the following

            DFT+U occupations of spin 1 for the atoms of atomic kind 3: Fe1\n\n    Atom   Shell       d-2     d-1      d0     d+1     d+2   Trace\n      37       1     1.068   1.088   1.047   1.093   1.069   5.365\n      37       2     0.008   0.008   0.011   0.007   0.009   0.043\n           Total     1.076   1.096   1.058   1.100   1.077   5.408\n\n      38       1     1.064   1.102   1.047   1.089   1.086   5.388\n      38       2     0.009   0.007   0.011   0.009   0.008   0.044\n           Total     1.073   1.109   1.058   1.097   1.094   5.432\n
          If you want to see the occupations of atoms without +U, you can assign a very small U value, e.g. 1e-20, to the corresponding atoms.

          "},{"location":"wiki/software_usage/cp2k/cp2k-e-f/","title":"CP2K\u80fd\u91cf\u4e0e\u529b\u7684\u8ba1\u7b97","text":""},{"location":"wiki/software_usage/cp2k/cp2k-e-f/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
          • \u8ba4\u8bc6CP2K\u7684\u57fa\u7840\u8f93\u5165\u6587\u4ef6

          • \u8ba4\u8bc6CP2K\u8f93\u5165\u6587\u4ef6\u7684\u4e3b\u8981\u90e8\u5206

          • \u8fd0\u884c\u8ba1\u7b97

          \u8ba1\u7b97\u6587\u4ef6\u4e0b\u8f7d

          \u672c\u6559\u7a0b\u6539\u7f16\u81eaCP2K\u539f\u6559\u7a0b\uff0c\u4ee5\u534a\u5bfc\u4f53\u548cOT\u4e3a\u4f8b\u5b50\uff0c\u66f4\u7b26\u5408\u7ec4\u5185\u9700\u6c42\u3002

          "},{"location":"wiki/software_usage/cp2k/cp2k-e-f/#cp2k_1","title":"\u8ba4\u8bc6CP2K\u7684\u57fa\u7840\u8f93\u5165\u6587\u4ef6","text":"

          \u539f\u5219\u4e0a\u6765\u8bf4CP2K\u7684\u8f93\u5165\u6587\u4ef6\u53ea\u9700\u8981\u4e09\u4e2a\uff0c\u4e00\u4e2a\u662f\u8f93\u5165\u53c2\u6570\u7684\u8bbe\u7f6e\u6587\u4ef6input.inp\uff0c\u4e00\u4e2a\u662f\u8d5d\u52bf\u7684\u53c2\u6570\u6587\u4ef6GTH_POTENTIALS\uff0c\u4e00\u4e2a\u662f\u57fa\u7ec4\u7684\u53c2\u6570\u6587\u4ef6BASIS_SET\u3002

          \u5728\u96c6\u7fa4\u4e0a\uff0c\u7ba1\u7406\u5458\u5df2\u7ecf\u628aGTH_POTENTIALS\u548cBASIS_SET\u653e\u7f6e\u5728\u7279\u5b9a\u6587\u4ef6\u5939\uff0c\u5e76\u4e14\u4f7f\u7528\u7279\u6b8a\u7684\u94fe\u63a5\u65b9\u6cd5\u53ef\u4ee5\u8ba9CP2K\u7a0b\u5e8f\u81ea\u52a8\u5bfb\u627e\u5230\u3002\u56e0\u6b64\u5728\u540e\u6587\u4e2d\u6d89\u53ca\u5230\u8d5d\u52bf\u548c\u57fa\u7ec4\u7684\u90e8\u5206\u53ef\u4ee5\u76f4\u63a5\u586b\u5199\u5bf9\u5e94\u7684\u6587\u4ef6\u540d\u79f0\u3002

          "},{"location":"wiki/software_usage/cp2k/cp2k-e-f/#cp2k_2","title":"\u8ba4\u8bc6CP2K\u8f93\u5165\u6587\u4ef6\u7684\u4e3b\u8981\u90e8\u5206","text":"

          \u73b0\u5728\u8ba9\u6211\u4eec\u6253\u5f00input.inp

          CP2K\u7684\u8f93\u5165\u6587\u4ef6\u4e3b\u8981\u5305\u542b\u4e24\u4e2aSECTION.

          • \"GLOBAL\": \u4e00\u4e9b\u8ba9CP2K\u8dd1\u8d77\u6765\u7684\u901a\u7528\u9009\u9879\uff0c\u6bd4\u5982\u4efb\u52a1\u540d\u79f0\uff0c\u4efb\u52a1\u7c7b\u578b\u3002
          • \"FORCE_EVAL\": \u5305\u542b\u4e86\u6240\u6709\u8ddf\u6c42\u89e3\u539f\u5b50\u7684\u529b\u6709\u5173\u7684\u53c2\u6570\u8bbe\u7f6e\uff0c\u4e5f\u5305\u62ec\u4e86\u539f\u5b50\u7684\u5750\u6807\u4fe1\u606f

          \u73b0\u5728\u6211\u4eec\u5148\u770bGLOBAL

           &GLOBAL\n   PROJECT Universality\n   RUN_TYPE ENERGY_FORCE\n   PRINT_LEVEL\n &END GLOBAL\n

          \u5f53\u8981\u8ba1\u7b97\u4f53\u7cfb\u7684\u529b\u548c\u80fd\u91cf\u65f6\uff0c\u6211\u4eec\u5fc5\u987b\u5728RUN_TYPE\u4e2d\u5bf9\u8ba1\u7b97\u7684\u7c7b\u578b\u8fdb\u884c\u6307\u5b9a\u3002\u6bd4\u5982RUN_TYPE ENERGY_FORCE\u5c31\u662f\u5bf9\u5f53\u524d\u7684\u4f53\u7cfb\u8fdb\u884c\u529b\u548c\u80fd\u91cf\u7684\u8ba1\u7b97\u3002\u5176\u4ed6\u7c7b\u578b\u7684\u8ba1\u7b97\u53ef\u4ee5\u5728CP2K\u624b\u518c\u91cc\u627e\u5230\u3002

          PROJECT\u5b9a\u4e49\u4e86\u8fd9\u4e2a\u8ba1\u7b97\u7684\u9879\u76ee\u540d\u79f0\uff0c\u901a\u5e38\u88ab\u7528\u6765\u547d\u540d\u4e00\u4e9b\u8f93\u51fa\u6587\u4ef6\u3002

          PRINT_LEVEL\u5b9a\u4e49\u4e86CP2K output\u6587\u4ef6\u91cc\u8f93\u51fa\u4fe1\u606f\u91cf\u7684\u5927\u5c0f\u3002

          \u73b0\u5728\u6211\u4eec\u63a5\u7740\u770bFORCE_EVAL

          METHOD Quickstep\n

          METHOD Quickstep\u8868\u660e\u9009\u62e9\u4e86\u4f7f\u7528\u5bc6\u5ea6\u6cdb\u51fd\u7406\u8bba(Density Functional Theory)\u4e2d\u7684GPW\u65b9\u6cd5\u8fdb\u884c\u8ba1\u7b97\u539f\u5b50\u53d7\u529b\u3002

             &SUBSYS\n     &CELL\n       ABC [angstrom]    4.593 4.593 2.959\n     &END CELL\n     &COORD\n @include rutile.xyz\n     &END COORD\n     &KIND O\n       BASIS_SET DZVP-MOLOPT-SR-GTH\n       POTENTIAL GTH-PBE-q6\n     &END KIND\n     &KIND Ti\n       BASIS_SET DZVP-MOLOPT-SR-GTH\n       POTENTIAL GTH-PBE-q12\n     &END KIND\n   &END SUBSYS\n

          Subsection SUBSYS\u5b9a\u4e49\u4e86\u6a21\u62df\u7684\u6676\u80de\u5927\u5c0f(ABC\u6676\u80de\u957f\u5ea6\u89d2\u5ea6\u7b49)\u548c\u539f\u5b50\u5750\u6807\u7684\u521d\u59cb\u7ed3\u6784. \u6709\u5173\u4e8e@include\u7684\u7528\u6cd5\uff0c\u8bf7\u53c2\u8003\u8fd9\u91cc

          Subsection KIND \u5b9a\u4e49\u4e86\u8ba1\u7b97\u4e2d\u51fa\u73b0\u7684\u5143\u7d20\u3002\u5bf9\u4e8e\u6bcf\u4e00\u79cd\u5143\u7d20\u5fc5\u987b\u8981\u6709\u4e00\u4e2a\u5bf9\u5e94\u7684KIND Section. \u7136\u540e\u5728KIND\u91cc\u9762\u5b9a\u4e49\u5b83\u7684\u57fa\u7ec4(BASIS_SET)\u548c\u8d5d\u52bf(POTENTIAL)\u3002

          BASIS_SET\u548cPOTENTIAL\u7684\u540d\u79f0\u4e00\u5b9a\u8981\u5bf9\u5e94\u5230\u57fa\u7ec4\u6587\u4ef6\u91cc\u548c\u8d5d\u52bf\u6587\u4ef6\u91cc\u5b58\u5728\u7684\u6761\u76ee\u3002

           O GTH-PBE-q6 GTH-PBE\n     2    4\n      0.24455430    2   -16.66721480     2.48731132\n     2\n      0.22095592    1    18.33745811\n      0.21133247    0\n

          Subsection CELL \u5b9a\u4e49\u4e86\u6a21\u62df\u4e2d\u7684\u6676\u80de\u5927\u5c0f\u3002 \u6b64\u4f8b\u5b50\u4e2d\uff0cABC\u6307\u7684\u662f\u6676\u80de\u7684\u8fb9\u957f\u3002\u5982\u4e0d\u989d\u5916\u6307\u5b9a\u89d2\u5ea6\uff0c\u9ed8\u8ba4\u4e3a90, 90, 90\u5ea6\u3002[angstrom]\u662f\u6307\u5b9a\u957f\u5ea6\u5355\u4f4d\u3002

          Subsection COORD\u5b9a\u4e49\u521d\u59cb\u7684\u539f\u5b50\u5750\u6807\u3002 \u539f\u5b50\u4f4d\u7f6e\u7684\u9ed8\u8ba4\u683c\u5f0f\u4e3a

          <ATOM_KIND> X Y Z\n

          X Y Z \u4e3a\u7b1b\u5361\u5c14\u5750\u6807\uff0c\u5355\u4f4d\u4e3aAngstrom\u3002\u5982\u679c\u6dfb\u52a0SCALED .TRUE.\uff0c\u4fbf\u662f\u5206\u6570\u5750\u6807\u3002

          Subsection DFT \u63a7\u5236\u4e86\u6240\u6709\u8ddfDFT\u8ba1\u7b97\u6709\u5173\u7684\u7ec6\u8282\u3002\u8be5Subsection\u53ea\u6709\u5f53\u4f60\u628amethod\u9009\u62e9\u4e3aquickstep\u65f6\u624d\u4f1a\u8d77\u4f5c\u7528\u3002

          BASIS_SET_FILE_NAME  BASIS_SET\nPOTENTIAL_FILE_NAME  GTH_POTENTIALS\n

          BASIS_SET_FILE_NAME\u548cPOTENTIAL_FILE_NAME\u5b9a\u4e49\u4e86\u57fa\u7ec4\u548c\u8d5d\u52bf\u7684\u6587\u4ef6\u8def\u5f84\u3002\u7531\u4e8e\u7ba1\u7406\u5458\u5df2\u7ecf\u5728\u96c6\u7fa4\u4e0a\u8bbe\u7f6e\u597d\u4e86\u8def\u5f84\uff0c\u7528\u6237\u76f4\u63a5\u586b\u5199\u8fd9\u4e24\u4e2a\u6587\u4ef6\u540d\u5373\u53ef\u3002

          &QS\n  EPS_DEFAULT 1.0E-13\n&END QS\n

          SubsectionQS\u5305\u542b\u4e86\u4e00\u4e9b\u901a\u7528\u7684\u63a7\u5236\u53c2\u6570\u3002EPS_DEFAULT\u8bbe\u7f6e\u4e86\u6240\u6709quickstep\u4f1a\u7528\u5230\u7684\u9ed8\u8ba4\u5bb9\u5fcd\u5ea6\u3002

               &MGRID\n       CUTOFF 400\n       REL_CUTOFF 60\n     &END MGRID\n

          Subsection MGRID \u5b9a\u4e49\u4e86\u5982\u4f55\u4f7f\u7528quickstep\u4e2d\u7684\u79ef\u5206\u7f51\u683c\u3002quickstep\u4f7f\u7528\u4e86\u591a\u7f51\u683c\u65b9\u6cd5\u6765\u8868\u793a\u9ad8\u65af\u51fd\u6570\u3002\u6bd4\u8f83\u7a84\u548c\u5c16\u7684\u9ad8\u65af\u51fd\u6570\u4f1a\u88ab\u6295\u5f71\u5230\u66f4\u7cbe\u7ec6\u7684\u7f51\u683c\uff0c\u800c\u5bbd\u548c\u987a\u6ed1\u7684\u9ad8\u65af\u51fd\u6570\u5219\u76f8\u53cd\u3002\u5728\u8fd9\u4e2a\u4f8b\u5b50\u4e2d\uff0c\u6211\u4eec\u544a\u8bc9\u4ee3\u7801\u9700\u8981\u8bbe\u7f6e\u6700\u7cbe\u7ec6\u7684\u7f51\u683c\u4e3a400Ry\uff0c\u5e76\u4e14REL_CUTOFF\u4e3a60Ry\u3002\u5173\u4e8eCUTOFF\u548cREL_CUTOFF\u65b9\u9762\u8bf7\u9605\u8bfb

          Subsection XC

               &XC\n       &XC_FUNCTIONAL PBE\n       &END XC_FUNCTIONAL\n     &END XC\n

          \u8fd9\u91cc\u5b9a\u4e49\u4e86\u6211\u4eec\u60f3\u4f7f\u7528\u7684\u4ea4\u6362-\u5173\u8054\u5bc6\u5ea6\u6cdb\u51fd\uff0c\u5728\u8fd9\u4e2a\u4f8b\u5b50\u4e2d\u6211\u4eec\u9009\u62e9\u4e86PBE\u6cdb\u51fd\u3002P\u6cdb\u51fd\u8981\u4e0e\u57fa\u7ec4\u548c\u8d5d\u52bf\u7684\u9009\u62e9\u4e00\u81f4\u3002

               &SCF\n       SCF_GUESS ATOMIC\n       EPS_SCF 3.0E-7\n       MAX_SCF 50\n       &OUTER_SCF\n         EPS_SCF 3.0E-7\n         MAX_SCF 10\n       &END OUTER_SCF\n       &OT\n         MINIMIZER DIIS\n         PRECONDITIONER FULL_SINGLE_INVERSE\n       &END OT\n     &END SCF\n

          SCF_GUESS\u8bbe\u7f6e\u4e86\u5e94\u8be5\u5982\u4f55\u751f\u6210\u521d\u59cb\u7684\u5c1d\u8bd5\u7535\u5b50\u5bc6\u5ea6\u3002\u5728\u8fd9\u4e2a\u4f8b\u5b50\u4e2d\uff0c\u521d\u59cb\u5bc6\u5ea6\u662f\u7531\u539f\u5b50\u7535\u8377\u5bc6\u5ea6\u91cd\u53e0\u751f\u6210\u7684\u3002\u4e00\u4e2a\u597d\u7684\u7535\u5b50\u5bc6\u5ea6\u53ef\u4ee5\u5e2e\u52a9CP2K\u5feb\u901f\u5f97\u5230\u6536\u655b\u7ed3\u679c\u3002EPS_SCF\u8bbe\u7f6e\u4e86\u7535\u5b50\u5bc6\u5ea6\u5dee\u5f02\u7684\u5bb9\u5fcd\u5ea6\uff08\u6536\u655b\u7cbe\u5ea6\u8981\u6c42\uff09\u3002\u8fd9\u4e2a\u4f1a\u8986\u76d6EPS_DEFAULT\u8bbe\u7f6e\u7684\u503c\u3002MAX_SCF\u6307\u6700\u591a\u4f1a\u8fed\u4ee3\u591a\u5c11\u6b21\u3002

          Subsection OUTER_SCF\u8fd9\u91cc\u6682\u65f6\u5148\u4e0d\u591a\u4ecb\u7ecd\uff0c\u4f46\u662f\u4e00\u822c\u7cbe\u5ea6\u8bbe\u7f6e\u8981\u8ddf\u4ee5\u4e0a\u7684EPS_SCF\u4e00\u6837\u3002\u4ee5\u4e0a\u7684SCF\u4e3aINNER_SCF\u3002OUTER_SCF\u8bbe\u7f6eMAX_SCF \u4e3a10\u3002\u5728\u8ba1\u7b97\u4e2d\u5b9e\u9645\u4e0a\u4f1a\u8fed\u4ee3\u7684\u6b21\u6570\u662fINNER_SCF\u4e58\u4ee5OUTER_SCF\uff0c\u537350*10\uff0c500\u6b21\u3002

          Subsection OT\u662f\u5229\u7528Orbital Transformation\u7684\u65b9\u6cd5\u6765\u4f18\u5316\u6ce2\u51fd\u6570\u3002

          &PRINT\n  &FORCES ON\n  &END FORCES\n&END PRINT\n

          \u8fd9\u4e2asubsection\u53ef\u4ee5\u5728output\u91cc\u6253\u5370\u51fa\u4f53\u7cfb\u7684\u539f\u5b50\u53d7\u529b\u3002

          "},{"location":"wiki/software_usage/cp2k/cp2k-e-f/#_2","title":"\u8fd0\u884c\u8ba1\u7b97","text":"

          \u6b63\u5e38\u8fd0\u884cCP2K\u7684\u65b9\u6cd5\u4e3a

          mpirun -n 32 cp2k.popt input.inp > output & \n

          \u5728\u96c6\u7fa4\u4e0a\uff0c\u6211\u4eec\u4f7f\u7528lsf\u811a\u672c\u6587\u4ef6\u63d0\u4ea4\uff0c\u8fd9\u884c\u547d\u4ee4\u5df2\u7ecf\u5199\u5728\u4e86\u811a\u672c\u6587\u4ef6\u91cc\uff0c\u8bf7\u76f4\u63a5\u63d0\u4ea4\u3002

          "},{"location":"wiki/software_usage/cp2k/cp2k-e-f/#_3","title":"\u8f93\u51fa\u7ed3\u679c","text":"

          \u5728\u4efb\u52a1\u7ed3\u675f\u540e\uff0c\u4f60\u4f1a\u5f97\u5230\u5982\u4e0b\u6587\u4ef6

          • output
          • Universality-RESTART.wfn
          • Universality-RESTART.wfn.bak-1
          • Universality-RESTART.wfn.bak-2
          • Universality-RESTART.wfn.bak-3

          \u6587\u4ef6output\u5305\u542b\u4e86\u8ba1\u7b97\u7684\u4e3b\u8981\u8f93\u51fa\u3002Universality-RESTART.wfn\u662f\u8ba1\u7b97\u6700\u540e\u5f97\u5230\u6ce2\u51fd\u6570\u3002Universality-RESTART.wfn.bak-<n>\u8bb0\u5f55\u4e86\u6700\u540e\u7b2c\\<n>\u6b65\u524dSCF\u5f97\u5230\u7684\u6ce2\u51fd\u6570\u3002\u6b64\u4f8b\u4e2d\uff0cUniversality-RESTART.wfn.bak-1\u662fSCF\u6700\u540e\u4e00\u6b65\u7684\u6ce2\u51fd\u6570\u3002

          \u4f46\u4f60\u60f3\u8981\u5229\u7528\u6ce2\u51fd\u6570\u91cd\u542f\u8ba1\u7b97\u65f6\uff0c\u53ef\u4ee5\u6539\u4e3aSCF_GUESS RESTART

          \u4ed6\u4f1a\u81ea\u52a8\u4ece<PROJECT_NAME>-RESTART.wfn\u6587\u4ef6\u5f00\u59cb\u91cd\u542f\u8ba1\u7b97\u3002

          \u6211\u4eec\u73b0\u5728\u8be6\u7ec6\u770b\u4e00\u4e0boutput\u6587\u4ef6\u91cc\u7684\u90e8\u5206

           SCF WAVEFUNCTION OPTIMIZATION\n\n  ----------------------------------- OT ---------------------------------------\n  Minimizer      : DIIS                : direct inversion\n                                         in the iterative subspace\n                                         using   7 DIIS vectors\n                                         safer DIIS on\n  Preconditioner : FULL_SINGLE_INVERSE : inversion of\n                                         H + eS - 2*(Sc)(c^T*H*c+const)(Sc)^T\n  Precond_solver : DEFAULT\n  stepsize       :    0.08000000                  energy_gap     :    0.08000000\n  eps_taylor     :   0.10000E-15                  max_taylor     :             4\n  ----------------------------------- OT ---------------------------------------\n\n  Step     Update method      Time    Convergence         Total energy    Change\n  ------------------------------------------------------------------------------\n     1 OT DIIS     0.80E-01    0.5     0.15753643      -176.9839582002 -1.77E+02\n     2 OT DIIS     0.80E-01    0.8     0.09878604      -178.9306891883 -1.95E+00\n     3 OT DIIS     0.80E-01    0.8     0.04863529      -179.6564913758 -7.26E-01\n     4 OT DIIS     0.80E-01    0.8     0.03582212      -179.9871432342 -3.31E-01\n     5 OT DIIS     0.80E-01    0.8     0.02520552      -180.2247770848 -2.38E-01\n     6 OT DIIS     0.80E-01    0.8     0.01876959      -180.4037691134 -1.79E-01\n     7 OT DIIS     0.80E-01    0.8     0.01356216      -180.5257615047 -1.22E-01\n     8 OT DIIS     0.80E-01    0.8     0.01016476      -180.5867232155 -6.10E-02\n     9 OT DIIS     0.80E-01    0.8     0.00712662      -180.6348174041 -4.81E-02\n    10 OT DIIS     0.80E-01    0.8     0.00528671      -180.6543176954 -1.95E-02\n    11 OT DIIS     0.80E-01    0.8     0.00401555      -180.6682811925 -1.40E-02\n    12 OT DIIS     0.80E-01    0.8     0.00331228      -180.6769383021 -8.66E-03\n    13 OT DIIS     0.80E-01    0.8     0.00273633      -180.6824801501 -5.54E-03\n    14 OT DIIS     0.80E-01    0.8     0.00227705      -180.6858569326 -3.38E-03\n    15 OT DIIS     0.80E-01    0.8     0.00189452      -180.6891762522 -3.32E-03\n    16 OT DIIS     0.80E-01    0.8     0.00163117      -180.6913433711 -2.17E-03\n    17 OT DIIS     0.80E-01    0.8     0.00137647      -180.6931734207 -1.83E-03\n    18 OT DIIS     0.80E-01    0.8     0.00119961      -180.6942368984 -1.06E-03\n    19 OT DIIS     0.80E-01    0.9     0.00100873      -180.6952066209 -9.70E-04\n    20 OT DIIS     0.80E-01    0.8     0.00084472      -180.6960712607 -8.65E-04\n    21 OT DIIS     0.80E-01    0.9     0.00073811      -180.6966143834 -5.43E-04\n    22 OT DIIS     0.80E-01    0.8     0.00062100      -180.6969845494 -3.70E-04\n    23 OT DIIS     0.80E-01    0.8     0.00052079      -180.6972986282 -3.14E-04\n    24 OT DIIS     0.80E-01    0.8     0.00044814      -180.6975096788 -2.11E-04\n    25 OT DIIS     0.80E-01    0.8     0.00038815      -180.6976499085 -1.40E-04\n    26 OT DIIS     0.80E-01    0.8     0.00034010      -180.6977592686 -1.09E-04\n    27 OT DIIS     0.80E-01    0.8     0.00029429      -180.6978276824 -6.84E-05\n    28 OT DIIS     0.80E-01    0.8     0.00025218      -180.6979007896 -7.31E-05\n    29 OT DIIS     0.80E-01    0.8     0.00022927      -180.6979456455 -4.49E-05\n    30 OT DIIS     0.80E-01    0.8     0.00020201      -180.6979830729 -3.74E-05\n    31 OT DIIS     0.80E-01    0.8     0.00017896      -180.6980145219 -3.14E-05\n    32 OT DIIS     0.80E-01    0.8     0.00016066      
-180.6980416001 -2.71E-05\n    33 OT DIIS     0.80E-01    0.8     0.00014606      -180.6980603801 -1.88E-05\n    34 OT DIIS     0.80E-01    0.8     0.00012970      -180.6980811127 -2.07E-05\n    35 OT DIIS     0.80E-01    0.8     0.00011431      -180.6980956614 -1.45E-05\n    36 OT DIIS     0.80E-01    0.8     0.00009560      -180.6981114298 -1.58E-05\n    37 OT DIIS     0.80E-01    0.8     0.00008482      -180.6981210277 -9.60E-06\n    38 OT DIIS     0.80E-01    0.8     0.00007281      -180.6981278770 -6.85E-06\n    39 OT DIIS     0.80E-01    0.8     0.00006188      -180.6981329264 -5.05E-06\n    40 OT DIIS     0.80E-01    0.8     0.00005294      -180.6981368983 -3.97E-06\n    41 OT DIIS     0.80E-01    0.8     0.00004688      -180.6981391197 -2.22E-06\n    42 OT DIIS     0.80E-01    0.8     0.00004055      -180.6981410282 -1.91E-06\n    43 OT DIIS     0.80E-01    0.8     0.00003559      -180.6981421977 -1.17E-06\n    44 OT DIIS     0.80E-01    0.8     0.00003040      -180.6981432648 -1.07E-06\n    45 OT DIIS     0.80E-01    0.8     0.00002734      -180.6981439881 -7.23E-07\n    46 OT DIIS     0.80E-01    0.8     0.00002451      -180.6981445033 -5.15E-07\n    47 OT DIIS     0.80E-01    0.8     0.00002178      -180.6981449169 -4.14E-07\n    48 OT DIIS     0.80E-01    0.8     0.00001953      -180.6981452985 -3.82E-07\n    49 OT DIIS     0.80E-01    0.8     0.00001795      -180.6981455598 -2.61E-07\n    50 OT DIIS     0.80E-01    0.8     0.00001622      -180.6981458123 -2.52E-07\n\n  Leaving inner SCF loop after reaching    50 steps.\n\n\n  Electronic density on regular grids:        -47.9999999967        0.0000000033\n  Core density on regular grids:               48.0000000000       -0.0000000000\n  Total charge density on r-space grids:        0.0000000033\n  Total charge density g-space grids:           0.0000000033\n\n  Overlap energy of the core charge distribution:               0.00000000000007\n  Self energy of the core charge distribution:               -379.90298629198736\n  Core Hamiltonian energy:                                    102.12467948924306\n  Hartree energy:                                             125.99881317904760\n  Exchange-correlation energy:                                -28.91865218857406\n\n  Total energy:                                              -180.69814581227070\n\n  outer SCF iter =    1 RMS gradient =   0.16E-04 energy =       -180.6981458123\n

The above shows a calculation using the OT DIIS method. At this point 50 SCF iterations have been performed, and the convergence threshold has of course not yet been reached. The last line, outer SCF iter = 1, shows that one outer SCF loop contains one complete inner SCF loop.

           ATOMIC FORCES in [a.u.]\n\n # Atom   Kind   Element          X              Y              Z\n      1      1      Ti          0.00000026    -0.00000079     0.00000063\n      2      1      Ti          0.00000026    -0.00000027     0.00000004\n      3      2      O          -0.07002277     0.07002168    -0.00000018\n      4      2      O           0.07002184    -0.07002056     0.00000006\n      5      2      O           0.07002270     0.07002086    -0.00000083\n      6      2      O          -0.07002229    -0.07002093     0.00000028\n SUM OF ATOMIC FORCES           0.00000000    -0.00000000     0.00000000     0.00000000\n

The above shows the forces acting on the atoms. Some atoms experience forces that are not close to zero, indicating that the system is not yet at its optimal structure.

          "},{"location":"wiki/software_usage/cp2k/cp2k-geoopt/","title":"CP2K: \u7ed3\u6784\u548c\u6676\u80de\u4f18\u5316","text":""},{"location":"wiki/software_usage/cp2k/cp2k-geoopt/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
• Learning resources

• Basic principles

• CP2K geometry optimization setup

• CP2K geometry optimization issues

          "},{"location":"wiki/software_usage/cp2k/cp2k-geoopt/#_2","title":"\u5b66\u4e60\u8d44\u6599","text":"

          Slides

          "},{"location":"wiki/software_usage/cp2k/cp2k-geoopt/#_3","title":"\u57fa\u672c\u539f\u7406","text":"

Under construction; please refer to the official website for now.

          "},{"location":"wiki/software_usage/cp2k/cp2k-geoopt/#cp2k_1","title":"CP2K \u7ed3\u6784\u4f18\u5316\u8bbe\u7f6e","text":"

Geometry optimization

          &GLOBAL\nRUN_TYPE GEO_OPT\n&END GLOBAL\n
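A minimal MOTION block for the geometry optimization itself might look like the sketch below; the keyword values are illustrative only, not recommendations:

&MOTION
  &GEO_OPT
    OPTIMIZER BFGS    # BFGS for small/medium systems; LBFGS or CG for large ones
    MAX_ITER 200      # maximum number of optimization steps
    MAX_FORCE 4.5E-4  # convergence threshold on the largest force component (a.u.)
  &END GEO_OPT
&END MOTION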

Cell optimization

          &GLOBAL\nRUN_TYPE CELL_OPT\n&END GLOBAL\n

At the same time, set the OPTIMIZER and any constraints under MOTION:

          &MOTION\n  &CELL_OPT\n    OPTIMIZER LBFGS \n    KEEP_ANGLES\n    TYPE DIRECT_CELL_OPT\n  &END CELL_OPT\n&END MOTION\n

LBFGS is the usual choice for large systems, BFGS is intended for small systems, and CG is the most robust option.

KEEP_ANGLES keeps the cell angles fixed.

TYPE defaults to DIRECT_CELL_OPT, which optimizes the cell and the atomic positions inside it at the same time and is the fastest optimization scheme.

          "},{"location":"wiki/software_usage/cp2k/cp2k-geoopt/#cp2k_2","title":"CP2K \u7ed3\u6784\u4f18\u5316\u95ee\u9898","text":"

Cell optimization requires the STRESS TENSOR. The ANALYTICAL method is usually sufficient and is also the fastest. However, some functionals do not implement the corresponding analytical stress tensor, SCAN for example, in which case the NUMERICAL method can be used. From CP2K v8.2 onwards a metaGGA (including SCAN) stress tensor has been added, but only the kinetic-energy-density part is implemented, so optimizations can still go wrong, for reasons that remain unclear.
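For reference, the stress tensor method is selected at the FORCE_EVAL level; a minimal sketch (the choice of ANALYTICAL is illustrative):

&FORCE_EVAL
  METHOD Quickstep
  STRESS_TENSOR ANALYTICAL   # use NUMERICAL for functionals without an analytical stress tensor
  ...
&END FORCE_EVAL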

          "},{"location":"wiki/software_usage/cp2k/cp2k-hf/","title":"CP2K: \u6742\u5316\u6cdb\u51fd","text":""},{"location":"wiki/software_usage/cp2k/cp2k-hf/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
• Learning resources
• Basics of hybrid functionals
• Auxiliary basis sets for hybrid functionals
• CP2K hybrid functional setup
• Parameter testing and convergence
• Recommended ADMM basis sets for some elements
          "},{"location":"wiki/software_usage/cp2k/cp2k-hf/#_2","title":"\u5b66\u4e60\u8d44\u6599","text":"

          Slides: UCL DFT with Hybrid Functionals

          Slides: Hybrid Functional and ADMM

Official exercises

          "},{"location":"wiki/software_usage/cp2k/cp2k-hf/#_3","title":"\u6742\u5316\u6cdb\u51fd\u57fa\u672c\u539f\u7406","text":"

Under construction

          "},{"location":"wiki/software_usage/cp2k/cp2k-hf/#_4","title":"\u6742\u5316\u6cdb\u51fd\u8f85\u52a9\u57fa\u7ec4","text":"

Under construction

          "},{"location":"wiki/software_usage/cp2k/cp2k-hf/#cp2k_1","title":"CP2K\u6742\u5316\u6cdb\u51fd\u8bbe\u7f6e","text":"
              # BASIS Purification\n    BASIS_SET_FILE_NAME BASIS_ADMM_MOLOPT\n    BASIS_SET_FILE_NAME BASIS_ADMM\n    &AUXILIARY_DENSITY_MATRIX_METHOD\n      METHOD BASIS_PROJECTION\n      ADMM_PURIFICATION_METHOD MO_DIAG\n    &END AUXILIARY_DENSITY_MATRIX_METHOD\n    # KIND \u8bbe\u7f6e\u4f8b\u5b50\n    &KIND O\n      BASIS_SET DZVP-MOLOPT-SR-GTH\n      POTENTIAL GTH-PBE-q6\n      BASIS_SET AUX_FIT cFIT3\n    &END KIND\n
          # HSE06\u6cdb\u51fd\u90e8\u5206\n      &XC_FUNCTIONAL\n        &PBE\n          SCALE_X 0.0\n          SCALE_C 1.0\n        &END PBE\n        &XWPBE\n          SCALE_X -0.25\n          SCALE_X0 1.0\n          OMEGA 0.11\n        &END XWPBE\n      &END XC_FUNCTIONAL\n      &HF\n        &SCREENING\n          EPS_SCHWARZ 1.0E-6\n          SCREEN_ON_INITIAL_P FALSE\n        &END SCREENING\n        &INTERACTION_POTENTIAL\n          POTENTIAL_TYPE SHORTRANGE\n          OMEGA 0.11\n          T_C_G_DATA t_c_g.dat\n        &END INTERACTION_POTENTIAL\n        &MEMORY\n          MAX_MEMORY 10000\n          EPS_STORAGE_SCALING 0.1\n        &END MEMORY\n        # this depends on user\n        &PERIODIC\n          NUMBER_OF_SHELLS 0\n        &END PERIODIC\n        FRACTION 0.25\n      &END HF\n
          "},{"location":"wiki/software_usage/cp2k/cp2k-hf/#_5","title":"\u53c2\u6570\u7684\u6d4b\u8bd5\u548c\u6536\u655b","text":""},{"location":"wiki/software_usage/cp2k/cp2k-hf/#restart","title":"RESTART\u6ce2\u51fd\u6570","text":"

Always restart from wavefunctions optimized with the PBE functional for the same atomic structure; this saves a large amount of machine time (unless compute time is no object).

Before testing parameter convergence, **be sure** to set the number of SCF steps to 1; it is enough for the computed value to be converged with respect to the parameter being tested.

          &SCF\n      EPS_SCF 3.0E-7\n      MAX_SCF 1\n&END SCF\n
          "},{"location":"wiki/software_usage/cp2k/cp2k-hf/#eps_pgf_orb","title":"EPS_PGF_ORB\u7684\u6536\u655b","text":"

In a first calculation the user will encounter the following warning:

           *** WARNING in hfx_energy_potential.F:605 :: The Kohn Sham matrix is not  ***\n *** 100% occupied. This may result in incorrect Hartree-Fock results. Try ***\n *** to decrease EPS_PGF_ORB and EPS_FILTER_MATRIX in the QS section. For  ***\n *** more information see FAQ: https://www.cp2k.org/faq:hfx_eps_warning    ***\n

This happens because CP2K screens out, according to certain threshold values, the four-center two-electron integrals that do not need to be computed, which effectively reduces the cost of the Hartree-Fock exchange matrix. If too many integrals are screened out, the computed Hartree-Fock results become inaccurate; that is the origin of this warning.

The screening threshold is controlled by the EPS_PGF_ORB parameter: the smaller it is, the fewer integrals are screened out and the more faithful the HF result. In practice this warning usually does not disappear, even if the user sets an extremely small value such as 1.0E-20.

We can pick a suitable value by comparing the energy convergence for different values of EPS_PGF_ORB.

EPS_PGF_ORB    Energy (a.u.)              Error vs. previous (a.u.)
1.0E-13        -8402.872803898026177
1.0E-15        -8402.872803587537419      -3.1E-07
1.0E-17        -8402.872803510470476      -7.7E-08

The SCF convergence threshold is normally 3.0E-7, and the energy error should stay below this level, so it is best for the energy to be converged to better than 1.0E-7. We therefore choose 1.0E-15 as the value of EPS_PGF_ORB.
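The parameter itself is set in the QS section; a minimal sketch using the value chosen above:

&DFT
  &QS
    EPS_PGF_ORB 1.0E-15
  &END QS
&END DFT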

          "},{"location":"wiki/software_usage/cp2k/cp2k-hf/#admm","title":"ADMM\u57fa\u7ec4\u7684\u6536\u655b","text":"

The convergence of the ADMM auxiliary basis set is analogous to that of EPS_PGF_ORB. For the same element CP2K provides several auxiliary basis sets, e.g. cFIT10, cFIT11, cFIT12, and so on. The test procedure is to increase the ADMM basis set step by step. The energy error must be normalized per atom; keeping it on the order of 1 meV/atom is usually good enough.

Taking the SrTiO3 system as an example:

ADMM basis for Ti   Energy (a.u.)              Error vs. previous (meV/atom)   Number of atoms
cFIT10              -9062.291421862293646                                       368
cFIT11              -9062.255359275355659      -2.6                             368
cFIT12              -9062.260056088771307       0.3                             368
cFIT13              -9062.210205928951837      -3.6                             368

In this case choosing **cFIT10** or **cFIT11** is sufficient.
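The chosen auxiliary basis then enters the corresponding KIND section, following the pattern shown earlier; the orbital basis and potential below are only illustrative:

&KIND Ti
  BASIS_SET DZVP-MOLOPT-SR-GTH
  POTENTIAL GTH-PBE-q12
  BASIS_SET AUX_FIT cFIT11
&END KIND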

          "},{"location":"wiki/software_usage/cp2k/cp2k-hf/#admm_1","title":"\u4e00\u4e9b\u5143\u7d20\u63a8\u8350\u7684ADMM","text":"

In the author's experience, the choice usually depends little on the system.

Element   ADMM basis set
O         cFIT3
H         cFIT3
Ti        cFIT11
Cd        cFIT10
Sn        cFIT9
Pb        cFIT9
Sr        cFIT9
Pt        cFIT10
Mg        cpFIT3
Ba        cFIT9
Na        cFIT3
Ta        cFIT10
"},{"location":"wiki/software_usage/cp2k/cp2k-hf/#warning","title":"\u5176\u4ed6Warning\u5904\u7406","text":"

Other warnings are mentioned in the official documentation on hybrid functional calculations.

Cutoff Radii Warning *** WARNING in hfx_types.F:1287 :: Periodic Hartree Fock calculation *** *** requested with use of a truncated or shortrange potential. The cutoff *** *** radius is larger than half the minimal cell dimension. This may lead *** *** to unphysical total energies. Reduce the cutoff radius in order to *** *** avoid possible problems. ***

This is because, under periodic boundary conditions, CP2K keeps only the short-range part of the HF exchange, while the long-range part is supplied by DFT exchange. A short-range length, i.e. the cutoff radius, is therefore required. There are three ways to deal with this warning:

• If you are using HSE06, ignore it, since this cutoff is determined by omega.
• Reduce CUTOFF_RADIUS if you are using PBE0-TC (see the sketch below).
• Use a larger periodic box.
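For the PBE0-TC case, the truncation radius is set in the interaction potential; a minimal sketch (the value 6.0 is only an example and must stay below half the smallest cell dimension):

&INTERACTION_POTENTIAL
  POTENTIAL_TYPE TRUNCATED
  CUTOFF_RADIUS 6.0        # truncation radius; check the manual for the default unit
  T_C_G_DATA t_c_g.dat
&END INTERACTION_POTENTIAL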

Reference

          "},{"location":"wiki/software_usage/cp2k/cp2k-neb/","title":"CP2K: Nudged Elastic Band","text":""},{"location":"wiki/software_usage/cp2k/cp2k-neb/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
• Learning resources

• NEB basics

• CP2K NEB setup

          "},{"location":"wiki/software_usage/cp2k/cp2k-neb/#_2","title":"\u5b66\u4e60\u8d44\u6599","text":"
          • Henkelman, G. & J\u00f3nsson, H. Improved tangent estimate in the nudged elastic band method for finding minimum energy paths and saddle points. J. Chem. Phys. 113, 9978\u20139985 (2000).

          • Henkelman, G., Uberuaga, B. P. & J\u00f3nsson, H. A climbing image nudged elastic band method for finding saddle points and minimum energy paths. J Chem Phys 113, 9901\u20139904 (2000).

          "},{"location":"wiki/software_usage/cp2k/cp2k-neb/#neb","title":"NEB \u57fa\u672c\u539f\u7406","text":"

Once the reactant and product structures are known, one can look for the minimum energy path (MEP) from reactant to product. For any structure on the MEP, the component of the atomic forces perpendicular to the MEP is zero. NEB is a method for finding the MEP. First, NEB sets up a series of structures (called images or replicas) between the reactant and the product. Neighboring images are connected by spring forces, forming an elastic-band-like construction. Each image feels the true force perpendicular to the MEP plus the spring force parallel to the MEP; minimizing the forces on this band yields the MEP.

          "},{"location":"wiki/software_usage/cp2k/cp2k-neb/#cp2k-neb","title":"CP2K NEB\u8bbe\u7f6e","text":"

First set RUN_TYPE to BAND:

          &GLOBAL\n    RUN_TYPE BAND\n&END GLOBAL\n

Next comes the MOTION section:

&MOTION\n    &BAND\n        # when submitting the job, the total number of CPUs is NPROC_REP*NUMBER_OF_REPLICA\n        NPROC_REP 24 # how many CPUs are used for one image\n        NUMBER_OF_REPLICA 8 # how many images to create; this number includes the initial and final structures\n        BAND_TYPE CI-NEB # use the Climbing Image NEB method; see SEC. IV of the reference for details\n        K_SPRING 0.05 # spring constant; in theory the spring strength does not affect the optimized result\n        &CONVERGENCE_CONTROL # similar to geometry optimization\n            MAX_FORCE 0.0030\n            RMS_FORCE 0.0050\n            MAX_DR 0.002\n            RMS_DR 0.005\n        &END CONVERGENCE_CONTROL\n        ROTATE_FRAMES F\n        ALIGN_FRAMES F\n        &CI_NEB \n            NSTEPS_IT  2 # before switching to CI, a number of ordinary NEB steps are run; this sets that number\n        &END CI_NEB\n        &OPTIMIZE_BAND\n            OPT_TYPE DIIS\n            &DIIS\n                NO_LS T\n                MAX_STEPS 1000\n                N_DIIS 3\n            &END DIIS\n        &END OPTIMIZE_BAND\n        &REPLICA # coordinates of the initial structure\n            &COORD\n            @include init.xyz # first method: contains only xyz coordinates, no element symbols\n            &END COORD\n        &END REPLICA\n        &REPLICA # coordinates of the final structure\n            &COORD\n            @include fin.xyz # contains only xyz coordinates, no element symbols\n            &END COORD\n        &END REPLICA\n        &REPLICA # another replica\n            COORD_FILE_NAME ./tr7.xyz # second method: a normal xyz file\n        &END REPLICA\n        &PROGRAM_RUN_INFO # prints the distances between the replicas\n            INITIAL_CONFIGURATION_INFO\n        &END\n    &END BAND\n&END MOTION\n
Note that if you define only two REPLICA sections, fewer than your NUMBER_OF_REPLICA, the remaining replica structures will be generated by CP2K itself. If the number of REPLICA sections you define equals NUMBER_OF_REPLICA, CP2K will not generate any replica structures automatically.

          "},{"location":"wiki/software_usage/cp2k/cp2k-neb/#neb_1","title":"\u91cd\u65b0\u542f\u52a8NEB","text":"

Add an EXT_RESTART section to the cp2k input file, and replace xxx-1.restart with the name of your actual restart file.

          &EXT_RESTART\n  RESTART_BAND\n  RESTART_FILE_NAME   xxx-1.restart\n&END\n
At the same time, we can reuse the previous wavefunctions (RESTART); simply set, under FORCE_EVAL/DFT/SCF,
          SCF_GUESS RESTART\n
and that is all. Suppose your PROJECT name is water (see GLOBAL/PROJECT) and your NUMBER_OF_REPLICA is 8; then the following files will have been generated:
          water-BAND01-RESTART.wfn\nwater-BAND02-RESTART.wfn\nwater-BAND03-RESTART.wfn\nwater-BAND04-RESTART.wfn\nwater-BAND05-RESTART.wfn\nwater-BAND06-RESTART.wfn\nwater-BAND07-RESTART.wfn\nwater-BAND08-RESTART.wfn\n
The number after BAND is the index of the replica. On restart these wavefunctions are read automatically. If the wavefunctions were generated in some other way or prepared in advance, you can also rename them to match the pattern above and then start the NEB.

          "},{"location":"wiki/software_usage/cp2k/cp2k-reftraj/","title":"\u6839\u636e\u5df2\u6709\u8f68\u8ff9\u8fd0\u884cCP2K\u5206\u5b50\u52a8\u529b\u5b66\u8ba1\u7b97","text":""},{"location":"wiki/software_usage/cp2k/cp2k-reftraj/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
• Input files for CP2K molecular dynamics calculations

• How to run calculations along an existing CP2K trajectory

          "},{"location":"wiki/software_usage/cp2k/cp2k-reftraj/#_2","title":"\u5b66\u4e60\u8d44\u6599","text":"

CP2K official manual: Section MD

CP2K official tutorial: AIMD of bulk liquid water

          "},{"location":"wiki/software_usage/cp2k/cp2k-reftraj/#cp2k-md-section","title":"CP2K MD Section \u7684\u8f93\u5165\u6587\u4ef6","text":"

Please first get familiar with the CP2K input file syntax; see CP2K: Calculation of energy and forces.

A CP2K input file is made up of different SECTIONs, and each SECTION can contain SUBSECTIONs and KEYWORDs; sections and keywords at every level are written as upper-case English words. The syntax of an input file looks like this:

&SECTION\n  &SUBSECTION\n  ...\n  &END SUBSECTION\n  KEYWORD1 <value>\n  KEYWORD2 <value>\n  ...\n&END SECTION\n

To run an MD calculation with CP2K, configure the GLOBAL and FORCE_EVAL parts introduced in CP2K: Calculation of energy and forces according to the needs of your system, and change the RUN_TYPE keyword in the GLOBAL section to MD.

&GLOBAL\n  ...\n  RUN_TYPE MD   # <---- to run an MD job, set RUN_TYPE to MD\n&END GLOBAL\n

In addition, the following must also be written into the input file input.inp:

• MOTION: describes how the nuclei are evolved (e.g. MD) and controls which data are output.

SECTION in input.inp. This section defines a set of tools connected with the motion of the nuclei.

• MD: contains the basic parameters of a molecular dynamics simulation, such as the ensemble, the temperature, the time step, and the total number of steps.

SUBSECTION in MOTION. This section defines the whole set of parameters needed to perform an MD run.

          "},{"location":"wiki/software_usage/cp2k/cp2k-reftraj/#motion","title":"\u4e00\u4e2a\u7b80\u5355\u7684 MOTION \u90e8\u5206\u7684\u4f8b\u5b50","text":"
          &MOTION \n  &MD\n    ENSEMBLE NVE\n    STEPS 10\n    TIMESTEP 0.5\n    TEMPERATURE 300.0\n  &END MD\n  &PRINT\n    &CELL\n      &EACH\n        MD 1\n      &END EACH\n    &END CELL\n    &FORCES\n      &EACH\n        MD 1\n      &END EACH\n    &END FORCES\n    &TRAJECTORY\n      &EACH\n        MD 1\n      &END EACH\n    &END TRAJECTORY\n    &VELOCITIES\n      &EACH\n        MD 1\n      &END EACH\n    &END VELOCITIES\n  &END PRINT\n&END MOTION\n

The example above is straightforward; read line by line, it is literally the MD parameter setup. Note that in the PRINT part, &EACH MD 1 &END EACH controls how often MD output is printed: 1 means one output per MD step, and setting it to 3 means output every three steps. The default MD output frequency in EACH is 1.

          Warning

For ease of analysis, the output frequency of CELL should be kept the same as that of TRAJECTORY.
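For example, if the trajectory is written every 5 MD steps, the cell should be written at the same interval; a sketch (the value 5 is arbitrary):

&PRINT
  &CELL
    &EACH
      MD 5
    &END EACH
  &END CELL
  &TRAJECTORY
    &EACH
      MD 5
    &END EACH
  &END TRAJECTORY
&END PRINT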

          "},{"location":"wiki/software_usage/cp2k/cp2k-reftraj/#md","title":"\u6839\u636e\u5df2\u6709\u8f68\u8ff9\u8fdb\u884cMD\u8ba1\u7b97","text":"

Sometimes we need to run calculations over an existing MD trajectory, for example:

• Recomputing, at higher accuracy, an MD trajectory generated with a machine learning potential

• Changing parameters in the FORCE_EVAL part to improve the accuracy of the energies and forces along an existing trajectory

• ……

We can add a REFTRAJ section under the MD section of the CP2K input file to run calculations on an existing trajectory.

Taking TiO2 as an example, prepare the following in the job submission directory:

tree\n.\n\u251c\u2500\u2500 cp2k.lsf                    <---- cp2k job submission script (/data/share/base/scripts/cp2k.lsf) \n\u251c\u2500\u2500 input.inp               <---- cp2k input file\n\u251c\u2500\u2500 reftraj.xyz       <---- the existing trajectory\n\u2514\u2500\u2500 rutile.xyz          <---- may be a single frame taken from the trajectory\n\n0 directories, 4 files\n

Here rutile.xyz is the atomic coordinate file of the cell specified in the SUBSYS part of input.inp; a single frame taken from the existing trajectory can be used directly.

For this task, write the following in the MOTION part:

          &MOTION\n  &MD\n    &REFTRAJ\n      TRAJ_FILE_NAME reftraj.xyz\n      EVAL_ENERGY_FORCES .TRUE.\n      EVAL_FORCES .TRUE.\n      FIRST_SNAPSHOT 1\n      LAST_SNAPSHOT 50\n      STRIDE 1\n    &END REFTRAJ\n    ...\n  &END MD\n  &PRINT\n    ...\n  &END PRINT\n

The TRAJ_FILE_NAME keyword designates reftraj.xyz in the current folder as the trajectory to be calculated.

Note that logical values are assigned to keywords in CP2K input files with .TRUE. or .FALSE., and the defaults of EVAL_ENERGY_FORCES and EVAL_FORCES are .FALSE.; to compute energies and forces these two keywords must therefore be set explicitly.

The keywords FIRST_SNAPSHOT, LAST_SNAPSHOT and STRIDE specify which structures in reftraj.xyz are calculated: from frame FIRST_SNAPSHOT to frame LAST_SNAPSHOT of the existing trajectory, one calculation every STRIDE frames. In this example reftraj.xyz contains 50 frames, so the settings above run from frame 1 to frame 50 with a stride of 1, i.e. the energy and forces of every structure in the existing trajectory are computed.
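As an illustration of these keywords, to recompute only every 5th frame of the same 50-frame trajectory one could instead write (values illustrative):

&REFTRAJ
  TRAJ_FILE_NAME reftraj.xyz
  EVAL_ENERGY_FORCES .TRUE.
  EVAL_FORCES .TRUE.
  FIRST_SNAPSHOT 1
  LAST_SNAPSHOT 50
  STRIDE 5
&END REFTRAJ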

          "},{"location":"wiki/software_usage/cp2k/cp2k-scan/","title":"CP2K: SCAN\u6cdb\u51fd","text":""},{"location":"wiki/software_usage/cp2k/cp2k-scan/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
• Learning resources

• SCAN basics

• CP2K SCAN functional setup

• Issues with the SCAN functional in CP2K

          "},{"location":"wiki/software_usage/cp2k/cp2k-scan/#_2","title":"\u5b66\u4e60\u8d44\u6599","text":"

          Sun, J., Remsing, R. C., Zhang, Y., Sun, Z., Ruzsinszky, A., Peng, H., \u2026 Perdew, J. P. (2016). Accurate first-principles structures and energies of diversely bonded systems from an efficient density functional. Nature Chemistry, 8(9), 831\u2013836. https://doi.org/10.1038/nchem.2535

          Sun, J., Remsing, R. C., Zhang, Y., Sun, Z., Ruzsinszky, A., Peng, H., \u2026 Perdew, J. P. (2015). SCAN: An Efficient Density Functional Yielding Accurate Structures and Energies of Diversely-Bonded Materials, 1\u201319. Retrieved from http://arxiv.org/abs/1511.01089

          "},{"location":"wiki/software_usage/cp2k/cp2k-scan/#scan","title":"SCAN\u57fa\u672c\u539f\u7406","text":"

The SCAN functional belongs to the meta-GGA class, which includes ingredients beyond the density gradient (such as the kinetic energy density). In recent years SCAN has been used more and more in calculations on water, and the band structures it gives for **bulk** semiconductors are also fairly accurate.

          "},{"location":"wiki/software_usage/cp2k/cp2k-scan/#cp2k-scan_1","title":"CP2K SCAN\u6cdb\u51fd\u8bbe\u7f6e","text":"

The SCAN functional is not shipped in the CP2K source itself; it is actually taken from the libxc library. Only CP2K versions above 4.1, built with the corresponding libxc library, can use the SCAN functional.

          &XC_FUNCTIONAL\n     &LIBXC\n        FUNCTIONAL MGGA_X_SCAN\n     &END LIBXC\n     &LIBXC\n        FUNCTIONAL MGGA_C_SCAN\n     &END LIBXC\n&END XC_FUNCTIONAL\n

The SCAN functional has its own matching set of pseudopotentials, kept in Hutter's GitHub repository.

For details see the following link:

          https://github.com/juerghutter/GTH/blob/master/SCAN/POTENTIAL

A copy of the SCAN pseudopotentials has been placed on the main cluster under the name GTH-SCAN-POTENTIAL.

It is enough to set the following in the cp2k input file:

          POTENTIAL_FILE_NAME GTH-SCAN-POTENTIAL\n
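Each KIND must then reference an entry in that file. The label below (GTH-SCAN-q6 for O) is only a plausible example and should be checked against the actual entries in GTH-SCAN-POTENTIAL; the orbital basis is likewise illustrative:

&KIND O
  BASIS_SET DZVP-MOLOPT-SR-GTH
  POTENTIAL GTH-SCAN-q6   # hypothetical label; verify against GTH-SCAN-POTENTIAL
&END KIND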
          "},{"location":"wiki/software_usage/cp2k/cp2k-scan/#cp2k-scan_2","title":"CP2K SCAN\u6cdb\u51fd\u7684\u95ee\u9898","text":"

The SCAN functional seems very hard to converge for systems containing large vacuum regions. So far the author has tried a hematite slab model and an SrTiO3 model, and neither converged normally. For other opinions see the Google forum; if you have any suggestions, please contact the author promptly.

          "},{"location":"wiki/software_usage/cp2k/cp2k-slab/","title":"CP2K: Slab\u8ba1\u7b97","text":""},{"location":"wiki/software_usage/cp2k/cp2k-slab/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
• What a slab model is

• CP2K dipole correction

• Other ways to remove periodicity

          "},{"location":"wiki/software_usage/cp2k/cp2k-slab/#slab","title":"\u4ec0\u4e48\u662fSlab\u6a21\u578b","text":"

A slab model is a way of computing a solid surface under three-dimensional periodic boundary conditions. The z direction is usually chosen as the surface normal, i.e. half of the model along z is vacuum (no atoms) and the other half is the solid. A typical slab model is shown below:

          "},{"location":"wiki/software_usage/cp2k/cp2k-slab/#cp2k","title":"CP2K \u5076\u6781\u77eb\u6b63","text":"

Although a slab model represents a surface, along z it is really an alternation of solid-vacuum-solid-vacuum-... If the slab model we build is asymmetric along z, it develops a dipole along z. The dipole creates an electrostatic potential, which in turn couples the model to its periodic images, so the computed total energy and forces no longer correspond to the real situation. We therefore need a way to correct this spurious electrostatic effect.

A common approach is the dipole correction: an extremely narrow dipole layer of opposite orientation is inserted in the vacuum region, so that the dipole produced by the solid slab and the compensating dipole in the vacuum cancel each other, and the electrostatic interaction between the model and its images is removed.

The setup is as follows:

Enable the following under FORCE_EVAL/DFT:

          SURFACE_DIPOLE_CORRECTION .TRUE.\n
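If the surface normal is not along z, the direction of the correction can also be set explicitly; a minimal sketch inside the DFT section:

&DFT
  SURFACE_DIPOLE_CORRECTION .TRUE.
  SURF_DIP_DIR Z    # direction of the surface normal (X, Y or Z)
  ...
&END DFT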
          "},{"location":"wiki/software_usage/cp2k/cp2k-slab/#_2","title":"\u5176\u4ed6\u53bb\u9664\u5468\u671f\u6027\u7684\u65b9\u5f0f","text":"

The surface dipole correction only removes the effect along the z direction. To remove the periodicity of the box in the other directions as well, the following alternative settings can be used.

Under FORCE_EVAL/SUBSYS/CELL:

          PERIODIC NONE\n

Under FORCE_EVAL/DFT/POISSON:

PERIODIC NONE\nPOISSON_SOLVER MT # other solvers may also work; the author has only tried MT\n
          "},{"location":"wiki/software_usage/cp2k/cp2k-tools/","title":"CP2K","text":"

cp2k comes with many convenient tools that help us script our workflows and save time.

          "},{"location":"wiki/software_usage/cp2k/cp2k-tools/#pycp2k","title":"PYCP2K: \u811a\u672c\u5316\u8f93\u5165\u6587\u4ef6\u751f\u6210\u5de5\u5177","text":"

It is used mainly from Python and lets cp2k input generation be integrated into Python. See the link here for usage details.

Note that it only ships with support for cp2k versions up to v5.1. If you use a newer version such as v7.1, you can generate the corresponding package yourself; see the Manual installation part of the pycp2k GitHub README. On our cluster, to generate the xml file, first module load cp2k/7.1 and then run cp2k.popt --xml to obtain the xml file; the rest follows the Manual installation instructions.

          "},{"location":"wiki/software_usage/cp2k/cp2k-zpe/","title":"CP2K: ZPE(Zero-point energy)","text":""},{"location":"wiki/software_usage/cp2k/cp2k-zpe/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
• ZPE basics

• CP2K frequency calculation setup

• Checking CP2K frequency results

• Computing the ZPE

• Caveats

          "},{"location":"wiki/software_usage/cp2k/cp2k-zpe/#zpe","title":"ZPE\u57fa\u672c\u539f\u7406","text":"

The zero-point energy (ZPE) is the lowest possible energy a quantum mechanical system can have; the corresponding state is called the ground state. All quantum mechanical systems have a zero-point energy. Unlike in classical mechanics, a quantum system keeps fluctuating in its lowest-energy state, as described by the Heisenberg uncertainty principle.

The zero-point vibrational energy enters when we compute the Gibbs free energy (\(G=E_{DFT}+ZPE-TS^\circ\)); it is given by

\(ZPE=\sum_{i=1}^{3N}\frac{\hbar\omega_i}{2}\)

so we need CP2K to compute the vibrational frequencies \(\omega_i\).

          Boyer, T. H. Quantum Energy and Long-Range Forces. Ann. Phys 1970, 56, 474\u2013503.

          Girod, M.; Grammaticos, B. The Zero-Point Energy Correction and Its Effect on Nuclear Dynamics. Nucl. Physics, Sect. A 1979, 330 (1), 40\u201352. https://doi.org/10.1016/0375-9474(79)90535-9.

          "},{"location":"wiki/software_usage/cp2k/cp2k-zpe/#cp2k-frequence","title":"CP2K Frequence\u8bbe\u7f6e","text":"
1. Set CP2K INPUT / GLOBAL / RUN_TYPE:

          RUN_TYPE  VIBRATIONAL_ANALYSIS\n
2. Under CP2K INPUT / VIBRATIONAL_ANALYSIS:

&VIBRATIONAL_ANALYSIS\n  NPROC_REP 192  # total cores = nodes * cores per node (usually the same as the core count in the cp2k.lsf submission script)\n  DX 0.02\n  FULLY_PERIODIC\n  &PRINT\n    &MOLDEN_VIB\n    &END\n    &CARTESIAN_EIGS\n    &END\n    &PROGRAM_RUN_INFO\n      &EACH\n        REPLICA_EVAL 1\n      &END\n    &END\n  &END PRINT\n&END VIBRATIONAL_ANALYSIS\n
3. Under CP2K INPUT / MOTION:
&MOTION\n  &CONSTRAINT\n    &FIXED_ATOMS\n      LIST 1..320 # indices of the atoms that are kept fixed (not involved in the vibrations)\n    &END\n  &END\n&END MOTION\n
          "},{"location":"wiki/software_usage/cp2k/cp2k-zpe/#cp2k-frequency","title":"CP2K Frequency\u8ba1\u7b97\u7ed3\u679c\u68c0\u67e5","text":"

When the calculation finishes normally, a project-VIBRATIONS-1.mol file is written; its [FREQ] block contains the computed frequencies (unit: \(cm^{-1}\)).

          [FREQ]\n      204.783042\n      296.784083\n      379.892297\n      414.559665\n      913.554709\n     3650.225071\n

When searching for a transition state, e.g. during a CP2K NEB calculation, the transition state's frequency list contains an imaginary frequency, which appears as a negative value:

          [FREQ]\n     -150.004617\n       76.011787\n       90.652110\n      105.659737\n      114.363774\n      118.342870\n      125.738357\n      \u2026\u2026\n
          "},{"location":"wiki/software_usage/cp2k/cp2k-zpe/#zpe_1","title":"ZPE\u6c42\u89e3","text":"

          \\(ZPE=\\sum_{i=0}^{3N}\\frac{\\hbar\\omega_i}{2}\\)

          CP2K\u8ba1\u7b97\u5f97\u5230\u7684Frequence\u662f\u6ce2\u957f\u7684\u5012\u6570\\(\\frac{1}{\\lambda}\\),\u5355\u4f4d\u4e3a\\(cm^{-1}\\),\u6839\u636e\\(\\frac{1}{\\omega}=\\frac{\\lambda}{c}\\)\u53ef\u4ee5\u8ba1\u7b97\u5f97\u5230\u632f\u52a8\u9891\u7387\\(\\omega\\)\uff1b

          N\u5bf9\u5e94\u8ba1\u7b97\u7684\u539f\u5b50\u4e2a\u6570\u3002
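As a worked example (a sketch, not part of the original text), using the sample [FREQ] values above, \(E_i=hc\,\tilde{\nu}_i\), and 1 \(cm^{-1}\approx1.2398\times10^{-4}\) eV:

\(ZPE=\frac{hc}{2}\sum_i\tilde{\nu}_i\approx\frac{1}{2}\times5859.8\ cm^{-1}\times1.2398\times10^{-4}\ \mathrm{eV}/cm^{-1}\approx0.36\ \mathrm{eV}\approx0.013\ \mathrm{Hartree}\)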

          "},{"location":"wiki/software_usage/cp2k/cp2k-zpe/#_2","title":"\u6ce8\u610f\u4e8b\u9879","text":"

(1) Because of the PBC setup, the CP2K frequency results do not contain the translational frequencies; whether rotational frequencies are included depends on the state of the system (CONSTRAINT). The vibrational frequencies are usually much larger than the rotational ones.

(2) When computing the frequencies of a single molecule in vacuum, remove the periodicity of the box in all directions; a \(20Å\times20Å\times20Å\) box is usually a good size for testing.

(3) When computing the frequencies of a stable structure with CP2K, several imaginary frequencies often appear as well. This is a known issue when GTH pseudopotentials are used in CP2K; for details see (https://groups.google.com/forum/?fromgroups#!topic/cp2k/DVCV0epl7Wo)

There are four possible remedies:

a. Use NLCC pseudopotentials (http://arxiv.org/abs/1212.6011). The NLCC set is however quite incomplete: only the elements B through Cl are available, and only for the PBE functional.

b. Increase the CUTOFF; use 600 Ry or more.

c. Use the smoothing options in the XC_GRID section; not recommended.

d. Use USE_FINER_GRID in the XC_GRID section; with this keyword the precision of the XC grid is raised to 4*CUTOFF.

          "},{"location":"wiki/software_usage/cp2k/cp2k/","title":"CP2K \u5165\u95e8","text":""},{"location":"wiki/software_usage/cp2k/cp2k/#_1","title":"\u5b66\u4e60\u76ee\u6807","text":"
• Setting the CP2K environment variables
• Writing CP2K input files
• Checking CP2K input files
• Single-point energy calculations
• Geometry optimization
• Molecular dynamics
          "},{"location":"wiki/software_usage/cp2k/cp2k/#cp2k_1","title":"CP2K\u7684\u7279\u8272","text":"

CP2K uses plane-wave and Gaussian basis sets at the same time, so it can describe long-range interactions in Fourier space and the localized wavefunctions in real space. Running molecular dynamics (MD) with CP2K is therefore very efficient. CP2K works with a single k-point, the so-called gamma approximation, so early versions had no k-point settings; k-point sampling was added only in recent years, and only for single-point energy calculations.

          "},{"location":"wiki/software_usage/cp2k/cp2k/#cp2k_2","title":"\u8bbe\u7f6eCP2K\u73af\u5883\u53d8\u91cf","text":""},{"location":"wiki/software_usage/cp2k/cp2k/#basispseudopotential","title":"\u54ea\u91cc\u83b7\u53d6Basis\u548cPseudoPotential\u6587\u4ef6","text":"

          Github

          "},{"location":"wiki/software_usage/cp2k/cp2k/#_2","title":"\u7701\u7565\u8def\u5f84","text":"

CP2K needs pseudopotential and basis set files. Suppose these files all live in the directory /somewhere/basis/. You can set the environment variable CP2K_DATA_DIR so that CP2K finds them on its own.

Open your ~/.bashrc file and add the following line:

          export CP2K_DATA_DIR=/somewhere/basis/\n

Afterwards you can refer to pseudopotential and basis set files simply by name, without spelling out the path, as sketched below.
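For example (a sketch; the file names are those distributed with CP2K in cp2k/data):

&DFT
  BASIS_SET_FILE_NAME BASIS_MOLOPT
  POTENTIAL_FILE_NAME GTH_POTENTIALS
  ...
&END DFT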

          "},{"location":"wiki/software_usage/cp2k/cp2k/#cp2k_3","title":"\u4e66\u5199CP2K\u8f93\u5165\u6587\u4ef6","text":"

There are many examples of writing CP2K input files on the CP2K website; please go and study them yourself.

Besides the plain SECTION/VALUE syntax, CP2K also provides simple mechanisms such as variables and conditionals; see the CP2K input reference manual for details.

          "},{"location":"wiki/software_usage/cp2k/cp2k/#_3","title":"\u4ec0\u4e48\u662f\u597d\u7684\u8f93\u5165\u6587\u4ef6\u4e60\u60ef?","text":"

CP2K input files have many parameters, and we often either write them from scratch or take someone else's input and modify it, which easily leads to typos or wrong settings. If the job is rejected after submission, the queueing time is wasted. The author therefore has a few suggestions:

1. Use cp2k.popt -c input.inp to check the syntax of the input file
2. Use comments (#) to annotate the settings in the input file
3. Use variables and conditionals to switch CP2K features on and off easily
          #a good example of input file\n#set variable and condition to open/close section in CP2K\n#if variable is 0 in condition, it is false, otherwise it is true\n@SET HSE06 0\n\n########## This part is HSE06 ##########\n@IF ${HSE06}\n            &XC_FUNCTIONAL\n                &PBE\n                    SCALE_X 0.0\n                    SCALE_C 1.0\n                &END PBE\n                &XWPBE\n                    SCALE_X -0.25\n                    SCALE_X0 1.0\n                    OMEGA 0.11\n                &END XWPBE\n            &END XC_FUNCTIONAL\n            &HF\n                &SCREENING\n                    EPS_SCHWARZ 1.0E-6\n                    SCREEN_ON_INITIAL_P FALSE\n                &END SCREENING\n                &INTERACTION_POTENTIAL\n                    POTENTIAL_TYPE SHORTRANGE\n                    OMEGA 0.11\n                    T_C_G_DATA t_c_g.dat\n                &END INTERACTION_POTENTIAL\n                &MEMORY\n                    MAX_MEMORY 10000\n                    EPS_STORAGE_SCALING 0.1\n                &END MEMORY\n                &PERIODIC\n                     NUMBER_OF_SHELLS 0\n                &END PERIODIC\n                FRACTION 0.25\n            &END HF\n@ENDIF\n

          Warning

          "},{"location":"wiki/software_usage/cp2k/cp2k/#input","title":"\u6ce8\u91ca\u8981\u5355\u72ec\u5360\u4e00\u884c\uff0c\u4ee3\u7801\u548c\u6ce8\u91ca\u6df7\u5408\u4f1a\u5bfc\u81f4input\u8bfb\u5165\u9519\u8bef","text":""},{"location":"wiki/software_usage/cp2k/cp2k/#cp2k_4","title":"\u68c0\u67e5CP2K\u8f93\u5165\u6587\u4ef6","text":"

On the server, CP2K is made available with module load cp2k/<version>. After loading, the cp2k.popt command, the main CP2K executable, can be used.

A CP2K calculation is run as:

          cp2k.popt input.inp > output\n

On the server, of course, the command has to be executed through a submission script.

Since CP2K input files are often fairly large, typos and syntax errors happen frequently. To avoid having a job bounced back after submission, you can check the input first with:

          cp2k.popt -c input.inp\n

          Warning

cp2k.popt -c only checks for syntax errors; errors that occur at run time will not be caught.

          "},{"location":"wiki/software_usage/cp2k/cp2k/#_4","title":"\u5355\u70b9\u80fd\u8ba1\u7b97","text":"

See the example on the official website: calculating energies and forces with CP2K

See the example on the official website: converging CUTOFF and REL_CUTOFF in CP2K

          "},{"location":"wiki/software_usage/cp2k/cp2k/#_5","title":"\u7ed3\u6784\u4f18\u5316","text":"

Under construction

          "},{"location":"wiki/software_usage/cp2k/cp2k/#_6","title":"\u5206\u5b50\u52a8\u529b\u5b66","text":"

Under construction

          "},{"location":"wiki/software_usage/cp2k/cp2k/#cp2k_5","title":"CP2K\u7684\u4e00\u4e9b\u5e38\u7528\u5de5\u5177","text":"

CP2K Vim input plugin

          "},{"location":"wiki/teamwork/archive_rules/","title":"\u5982\u4f55\u5f52\u6863/\u6574\u7406\u9879\u76ee\u6587\u4ef6","text":""},{"location":"wiki/teamwork/archive_rules/#_2","title":"\u6570\u636e\u6574\u7406\u7684\u5fc5\u8981\u6027","text":"

          \u4e3a\u4e86\u80fd\u8ba9\u63a5\u6536\u9879\u76ee\u7684\u4eba\uff0c\u4ee5\u53ca\u7ec4\u91cc\u5176\u4ed6\u4eba\u7684\u6570\u636e\u80fd\u591f\u76f8\u4e92\u53c2\u8003\uff0c\u907f\u514d\u4e0d\u5fc5\u8981\u7684\u91cd\u590d\u8ba1\u7b97\u548c\u6d6a\u8d39\u3002\u6211\u4e0e\u4e91\u9708\u603b\u7ed3\u4e86\u4e00\u4e9b\u7b80\u5355\u7684\u6574\u7406\u89c4\u5219\u3002

          "},{"location":"wiki/teamwork/archive_rules/#_3","title":"\u6570\u636e\u6574\u7406\u7684\u89c4\u5219","text":""},{"location":"wiki/teamwork/archive_rules/#1","title":"\u89c4\u52191:","text":"

          \u4ee5\u9879\u76ee\u540d\u79f0\u547d\u540d\u5927\u6587\u4ef6\u5939\u3002\u4f8b\uff1aSnO2110\u9762\u7684\u673a\u5668\u5b66\u4e60

          SnO2110-ML #\u9879\u76ee\u6587\u4ef6\u540d\n
          "},{"location":"wiki/teamwork/archive_rules/#2","title":"\u89c4\u52192:","text":"

          \u4ee5 \u6570\u5b57 \u4f5c\u4e3a\u76ee\u5f55\u540d\u524d\u7f00\uff0c\u4ee5\u4e0b \u5212\u7ebf\u547d\u540d\u6cd5 \u6765\u7ed9\u76ee\u5f55\u547d\u540d\u3002

          \u56e0\u4e3a\u8ba1\u7b97\u5fc5\u5b9a\u4f34\u968f\u7740 \u76ee\u7684\uff0c\u6240\u4ee5\u76ee\u5f55\u540d\u4ee5\u8ba1\u7b97\u7684 \u76ee\u7684 \u6765\u547d\u540d\u3002

          \u6570\u5b57 \u53ef\u4ee5\u4f7f\u76ee\u5f55\u6309\u7167\u81ea\u5df1\u7684\u610f\u5fd7\u6765\u6392\u5e8f\uff0c \u4e0b\u5212\u7ebf\u547d\u540d\u6cd5 \u53ef\u4ee5\u6709\u6548\u7684\u9605\u8bfb\u3002\u4f8b\uff1a

          ./SnO2110-ML\n\u251c\u2500\u2500 00.train_set #\u653e\u8bad\u7ec3\u96c6\n\u251c\u2500\u2500 01.train_set_test #\u505a\u8bad\u7ec3\u96c6\u6d4b\u8bd5\n\u251c\u2500\u2500 02.DP_Pots #\u653e\u673a\u5668\u5b66\u4e60\u52bf\u80fd\n\u251c\u2500\u2500 03.dissociation #\u8ba1\u7b97\u89e3\u79bb\u5ea6\n\u251c\u2500\u2500 04.surface_tension #\u8ba1\u7b97\u8868\u9762\u5f20\u529b\n

Note: directories one more level down need not follow the scheme above; just use underscore naming wherever possible.

          "},{"location":"wiki/teamwork/archive_rules/#3","title":"\u89c4\u52193:","text":"

          \u5bf9\u4e8e \u4f5c\u56fe\u7c7b\u7684\u76ee\u5f55\uff0c\u8981\u4fdd\u7559\u4f5c\u56fe\u7684 \u6570\u636e\uff0c\u539f\u59cb\u811a\u672c \u548c \u4f5c\u51fa\u6765\u7684\u56fe\u3002\u4f8b\uff1a

          01.train_set_test\n\u251c\u2500\u2500 TrainSetEnergy.pdf #\u4f5c\u51fa\u6765\u7684\u56fe\n\u251c\u2500\u2500 TrainSetForce.png #\u4f5c\u51fa\u6765\u7684\u56fe\n\u251c\u2500\u2500 TrainingSetError.py #\u5904\u7406\u4f5c\u56fe\u7684\u811a\u672c \u53ef\u4ee5\u76f4\u63a5\u8fd0\u884c\uff01\n\u251c\u2500\u2500 e.out #\u4f5c\u56fe\u7684\u539f\u59cb\u6570\u636e\n\u2514\u2500\u2500 f.out #\u4f5c\u56fe\u7684\u539f\u59cb\u6570\u636e\n

          \u5bf9\u4e8e \u8ba1\u7b97\u7c7b\u7684\u76ee\u5f55\uff0c\u8981\u4fdd\u7559 \u5fc5\u8981\u7684\u8f93\u51fa\u6587\u4ef6 \u548c \u8f93\u5165\u6587\u4ef6\u3002\u4f8b\uff1a

          02.DP_Pots #\u653e\u673a\u5668\u5b66\u4e60\u52bf\u80fd\n\u251c\u2500\u2500 v1.0 #\u7248\u672c\u53f7\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 graph.000.pb #\u52bf\u80fd\u51fd\u6570\uff0c\u8f93\u51fa\u6587\u4ef6\u7684\u4e00\u79cd\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 graph.001.pb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 graph.002.pb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 graph.003.pb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 input.000.json #\u5bf9\u5e94\u7684\u8f93\u5165\u6587\u4ef6\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 input.001.json\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 input.002.json\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 input.003.json\n\u251c\u2500\u2500 v1.2\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 graph.000.pb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 graph.001.pb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 graph.002.pb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 graph.003.pb\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 input.000.json\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 input.001.json\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 input.002.json\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 input.003.json\n\u2514\u2500\u2500 v1.3\n    \u251c\u2500\u2500 README\n    \u251c\u2500\u2500 graph.000.pb\n    \u251c\u2500\u2500 graph.001.pb\n    \u251c\u2500\u2500 graph.002.pb\n    \u2514\u2500\u2500 graph.003.pb\n
          "},{"location":"wiki/teamwork/archive_rules/#4","title":"\u89c4\u52194:","text":"

          \u5728\u6587\u4ef6\u5939\u91cc\u653e\u5165\u5fc5\u8981\u7684\u8bf4\u660e\u6587\u4ef6\uff0c\u4f8b\u5982 README

          \u2514\u2500\u2500 v1.3\n    \u251c\u2500\u2500 README #\u5fc5\u8981\u7684\u8bf4\u660e\u6587\u4ef6\uff0c\u63a8\u8350\u4f7f\u7528markdown\u8bed\u8a00\u4e66\u5199\n    \u251c\u2500\u2500 graph.000.pb\n    \u251c\u2500\u2500 graph.001.pb\n    \u251c\u2500\u2500 graph.002.pb\n    \u2514\u2500\u2500 graph.003.pb\n
          # README\n converted from v1.2 pot\n compress input use that v1.2 training input\n
          "},{"location":"wiki/teamwork/git_usage/","title":"Git \u57fa\u672c\u4f7f\u7528\u6559\u7a0b","text":"

          Git\u662f\u76ee\u524d\u4e16\u754c\u4e0a\u6700\u5148\u8fdb\u7684\u5206\u5e03\u5f0f\u7248\u672c\u63a7\u5236\u7cfb\u7edf\uff08\u6ca1\u6709\u4e4b\u4e00\uff09\u2014\u2014 \u5ed6\u96ea\u5cf0

          \u7248\u672c\u63a7\u5236\u7cfb\u7edf\u53ef\u4ee5\u5e2e\u52a9\u7528\u6237\u5feb\u901f\u8bc6\u522b\u3001\u6574\u7406\u9879\u76ee\u7684\u4fee\u6539\u7b49\uff0c\u907f\u514d\u51fa\u73b0\u8bf8\u5982 \"\u65b0\u5efa\u6587\u672c\u6587\u4ef6_by\u6d69\u4e8c-\u7b2c19\u7248_\u4fee\u6539190810-v114.514 - \u526f\u672c(9).txt\" \u7b49\u4ee4\u4eba\u8840\u538b\u4e0a\u5347\u3001\u5455\u5410\u4e0d\u6b62\u7684\u60c5\u51b5\u3002

          Git\u4f5c\u4e3a\u5f00\u6e90\u793e\u533a\u5e38\u7528\u7684\u7248\u672c\u63a7\u5236\u7cfb\u7edf\uff0c\u6709\u7740\u5f3a\u5927\u7684\u529f\u80fd\uff0c\u53ef\u4ee5\u5e2e\u52a9\u7528\u6237\u7ba1\u7406\u4ee5\u6587\u672c\uff08\u5982\u4ee3\u7801\u7b49\uff09\u4e3a\u4e3b\u7684\u9879\u76ee\u3002\u5f53\u7136\u5bf9\u4e8c\u8fdb\u5236\u6587\u4ef6\uff0c\u4f8b\u5982docx\u3001pptx\u7b49\uff0cGit\u7684\u652f\u6301\u5c1a\u4e0d\u591f\u5b8c\u5584\uff0c\u52a0\u4e0a\u670d\u52a1\u5668\u4f17\u6240\u5468\u77e5\u7684\u539f\u56e0\uff0c\u56e0\u800c\u4e0d\u5efa\u8bae\u628aGithub\u5f53\u6210\u7f51\u76d8\u4f7f\u7528\u3002

Machine-learning and automation work in the group is gradually increasing, and so are the occasions that call for code sharing and collaboration. On the other hand, writing papers in markup languages such as LaTeX is, in essence, also the handling of text files. Since getting started with Git still has a certain threshold, an introduction to the basic commands is needed. Hence this piece, which collects some commonly used Git operations; limited by space and by the author's ability, it may have gaps, and corrections are welcome.

          \u672c\u6587\u5c06\u957f\u671f\u66f4\u65b0\uff0c\u4e0d\u5b9a\u671f\u6536\u5f55\u4e00\u4e9b\u5c0f\u6545\u4e8b\u5c0fTrick\u3002

          "},{"location":"wiki/teamwork/git_usage/#_1","title":"\u9879\u76ee\u521b\u5efa","text":""},{"location":"wiki/teamwork/git_usage/#github","title":"\u57fa\u4e8e Github \u521b\u5efa\u9879\u76ee","text":"

          \u9996\u5148\u6ce8\u518c Github \u8d26\u53f7\uff0c\u8fd9\u91cc\u4e0d\u4f5c\u8d58\u8ff0\u3002

          \u63d0\u793a

          \u82e5\u6b63\u5728\u9605\u8bfb\u672c\u6587\u7684\u8bfb\u8005\u662f\u5728\u6821\u5e08\u751f\uff0c\u53ef\u901a\u8fc7 Github \u5b98\u65b9\u6e20\u9053 \u7533\u8bf7\u6210\u4e3a\u6821\u56ed\u4e13\u4e1a\u7528\u6237\uff08Campus Expert\uff09\uff0c\u4ece\u800c\u53ef\u4ee5\u514d\u8d39\u4f7f\u7528\uff08\u767d\u5ad6\uff09\u4e13\u4e1a\u7248\u7279\u6027\uff0c\u5e76\u4eab\u53d7\u4e00\u7cfb\u5217\u4f18\u60e0\uff08\u5305\u62ecPycharm\u4e13\u4e1a\u7248\u7b49\uff0c\u8be6\u89c1\u5b98\u7f51\u4ecb\u7ecd\uff09\u3002\u5f53\u7136\u8fd9\u4e0d\u5f71\u54cd\u6211\u4eec\u540e\u6587\u7684\u64cd\u4f5c\uff0c\u8bfb\u8005\u53ef\u4ee5\u7a0d\u540e\u7533\u8bf7\u3002\u6ce8\u610f\u7533\u8bf7\u7684IP\u9700\u8981\u4f4d\u4e8e\u6821\u56ed\u7f51\u73af\u5883\u5185\uff0c\u5e76\u4e14\u6700\u597d\u4fdd\u8bc1IP\u5b9a\u4f4d\u5728\u6821\u533a\u8303\u56f4\u5185\u4ee5\u514d\u51fa\u73b0\u9519\u8bef\u8bc6\u522b\u5bfc\u81f4\u7533\u8bf7\u5931\u8d25\u3002\u4f8b\u5982\u53a6\u95e8\u5927\u5b66\u66fe\u5448\u594e\u697c\u4e0d\u4f4d\u4e8eGithub\u8ba4\u53ef\u7684\u6821\u533a\u8303\u56f4\u5185\uff0c\u8bf7\u6700\u597d\u5230\u5316\u5b66\u697c\u3001\u5362\u5609\u9521\u697c\u3001\u56fe\u4e66\u9986\u7b49\u5730\u7533\u8bf7\u3002\u7533\u8bf7\u65f6\u53ef\u80fd\u9700\u8981\u63d0\u4f9b\u5b66\u6821\u90ae\u7bb1\u3001\u5b66\u751f\u5361\u7167\u7247\u4fe1\u606f\u7b49\uff0c\u8bf7\u6309\u7167\u76f8\u5e94\u63d0\u793a\u64cd\u4f5c\u3002

          \u5b8c\u6210\u6ce8\u518c\u5230\u8fbe\u9996\u9875\uff0c\u4fbf\u53ef\u4ee5\u770b\u5230\u5982\u56fe\u7684\u6309\u94ae\uff0c\u70b9\u51fb\"New\"\u5373\u53ef\u521b\u5efa\u4e00\u4e2a\u4ed3\u5e93\uff08Repository\uff09\u3002

          \u968f\u540e\u4fbf\u51fa\u73b0\u5982\u4e0b\u56fe\u7684\u754c\u9762\uff0c\u53ef\u4ee5\u9009\u62e9\u8bbe\u7f6e\u8be5\u4ed3\u5e93\u7684\u5f52\u5c5e\uff08Owner\uff09\u3001\u540d\u79f0\uff08Repository name\uff09\u3001\u8bf4\u660e\uff08Description\uff09\u3001\u6743\u9650\u7b49\u3002\u9700\u8981\u8bf4\u660e\u7684\u662f\uff0c\u516c\u5171\u4ed3\u5e93\uff08Public\uff09\u7684\u5185\u5bb9\u4efb\u4f55\u4eba\u90fd\u80fd\u770b\u5230\uff0c\u4f46\u63d0\u4ea4\uff08Push\uff09\u9700\u8981\u8bbe\u7f6e\u6743\u9650\uff1b\u800c\u79c1\u6709\u4ed3\u5e93\uff08Private\uff09\u7684\u8bbf\u95ee\u6743\u9650\u53d6\u51b3\u4e8e\u5f52\u5c5e\u8005\uff0c\u82e5\u4e3a\u4e2a\u4eba\u4ed3\u5e93\u9ed8\u8ba4\u4ec5\u81ea\u5df1\u53ef\u89c1\uff0c\u82e5\u4e3a\u7ec4\u7ec7\uff08Organization\uff09\u5219\u4ec5\u8be5\u7ec4\u7ec7\u6210\u5458\u53ef\u89c1\u3002

          \u5c3d\u7ba1\u76f4\u63a5\u70b9\u51fb\u201cCreate repository\u201d\u6211\u4eec\u4fbf\u53ef\u4ee5\u5feb\u901f\u521b\u5efa\u4e00\u4e2a\u4ed3\u5e93\uff0c\u8fd9\u91cc\u63a8\u8350\u6839\u636e\u60c5\u51b5\u9009\u62e9\u662f\u5426\u8981\u521b\u5efa\u8bf4\u660e\u6587\u6863\uff08README file\uff09\u3001\u5ffd\u7565\u4fe1\u606f\uff08.gitignore\uff09\u4ee5\u53ca\u5f00\u6e90\u534f\u8bae\uff08License\uff09\u3002\u5173\u4e8e\u5f00\u6e90\u534f\u8bae\u7684\u8bf4\u660e\uff0c\u8bf7\u70b9\u51fb\"Learn more\"\uff0c\u8fd9\u91cc\u9650\u4e8e\u7bc7\u5e45\u539f\u56e0\u4e0d\u8fc7\u591a\u63cf\u8ff0\u3002

          \u9700\u8981\u8bf4\u660e\u7684\u662f.gitignore\uff0c\u5982\u56fe\u6240\u793a\uff0c\u53ef\u4ee5\u770b\u5230 Github \u63d0\u4f9b\u4e86\u591a\u79cd\u6a21\u677f\u4f9b\u9009\u62e9\uff0c\u4f8b\u5982\u9700\u8981\u521b\u5efa\u7684\u9879\u76ee\u4ee5Python\u4ee3\u7801\u4e3a\u4e3b\uff0c\u5219\u53ef\u4ee5\u9009\u62e9Python\u3002\u5219\u4ed3\u5e93\u521b\u5efa\u540e\uff0cGit\u5c06\u4e0d\u518d\u8ffd\u8e2a\u6587\u4ef6\u5939\u4e0b\u53ef\u80fd\u5b58\u5728\u7684\u65e5\u5fd7\u6587\u4ef6\u3001\u9884\u7f16\u8bd1\u6587\u4ef6\uff08\u5982.pyc\uff09\u3001Jupyter Notebook\u7f13\u5b58\u7b49\uff0c\u8fd9\u5bf9\u4e8e\u4fdd\u6301\u5de5\u4f5c\u533a\u548c\u4fee\u6539\u4fe1\u606f\u7684\u6e05\u6670\u6709\u5f88\u5927\u5e2e\u52a9\u3002\u5f53\u7136\uff0c\u8fd9\u91cc\u7684\u6a21\u677f\u53ef\u80fd\u65e0\u6cd5\u5305\u542b\u6240\u6709\u9700\u6c42\uff0c\u6545\u4e5f\u53ef\u4ee5\u5148\u521b\u5efa\u4ed3\u5e93\u518d\u6dfb\u52a0\u3002

          \u4e3a\u4e86\u5408\u4f5c\u7684\u5feb\u6377\u3001\u9632\u6b62\u5728\u63d0\u4ea4\u65f6\u628a\u8fc7\u591a\u65e0\u7528\u6587\u4ef6\u63d0\u4ea4\u5230Git\u4ed3\u5e93\u4e2d\uff0c\u5f3a\u70c8\u63a8\u8350\u5728\u9879\u76ee\u521b\u5efa\u4e4b\u521d\u5c31\u5efa\u7acb.gitignore\u6587\u4ef6\u3002\u540e\u6587\u5c06\u66f4\u52a0\u8be6\u7ec6\u5730\u4ecb\u7ecd\u8fd9\u4e00\u6587\u4ef6\u7684\u7528\u6cd5\u3002

          "},{"location":"wiki/teamwork/git_usage/#_2","title":"\u8fdc\u7a0b\u2194\ufe0e\u672c\u5730","text":"

          \u5728Github\u4e0a\u521b\u5efa\u9879\u76ee\u540e\uff0c\u4e0b\u4e00\u4e2a\u5173\u5fc3\u7684\u8bae\u9898\u81ea\u7136\u662f\uff0c\u5982\u4f55\u628a\u672c\u5730\u7684\u4ee3\u7801\u4e0a\u4f20\u5230\u8fdc\u7a0b\u3002

          \u76f8\u4fe1\u4e0d\u5c11\u4eba\u5df2\u7ecf\u5bf9\u4e0a\u56fe\u4e2d\u7684\u6309\u94ae \"Add file\" \u8dc3\u8dc3\u6b32\u8bd5\u4e86\uff0c\u70b9\u51fb\u5373\u53ef\u770b\u5230\u4e24\u4e2a\u9009\u9879\uff0c\u5373\u521b\u5efa\u6587\u4ef6\u548c\u4e0a\u4f20\u6587\u4ef6\u3002\u524d\u8005\u53ef\u4ee5\u63d0\u4f9b\u4e00\u4e2a\u6587\u672c\u6846\u8f93\u5165\u4f60\u60f3\u8981\u5efa\u7acb\u7684\u6587\u5b57\uff0c\u540e\u8005\u5219\u63d0\u4f9b\u4e86\u4e00\u4e2a\u533a\u57df\u53ef\u4ee5\u901a\u8fc7\u6d4f\u89c8\u5668\u62d6\u52a8\u6587\u4ef6\u624b\u52a8\u4e0a\u4f20\u6216\u8005\u6253\u5f00\u8d44\u6e90\u7ba1\u7406\u5668\u9009\u62e9\u8981\u4e0a\u4f20\u7684\u6587\u4ef6\u3002\u4f46\u5f53\u6587\u4ef6\u8f83\u591a\u3001\u8f83\u5927\u65f6\uff0c\u8fd9\u4e24\u79cd\u65b9\u6cd5\u4fbf\u663e\u5f97\u4e0d\u591f\u4fbf\u6377\u3002\u56e0\u6b64\u8fd9\u91cc\u6211\u4eec\u4ece Git \u547d\u4ee4\u884c\u51fa\u53d1\uff0c\u4ecb\u7ecd\u66f4\u5e38\u7528\u7684\u63d0\u4ea4\u65b9\u5f0f\u3002

          \u5b9e\u9645\u4e0a Github \u4ec5\u4ec5\u662f\u4e16\u754c\u6700\u5927\u7684 Git \u8fdc\u7a0b\u9879\u76ee\u7ba1\u7406\u5e73\u53f0\uff0cGit \u672c\u8eab\u5219\u4e0d\u4f9d\u8d56\u4e8e Github \u5b58\u5728\uff0c\u56e0\u6b64\u6211\u4eec\u5728\u672c\u5730\u5373\u53ef\u8ffd\u8e2a\u6587\u4ef6\u7684\u4fee\u6539\uff0c\u8fdb\u884c\u7248\u672c\u63a7\u5236\u3002Git\u5728\u672c\u5730\u7684\u5b89\u88c5\u975e\u5e38\u7b80\u5355\uff0c\u7528\u6237\u53ef\u4ee5\u53c2\u7167\u5ed6\u96ea\u5cf0\u8001\u5e08\u7684\u6559\u7a0b\u8fdb\u884c\u3002\u5728\u5b89\u88c5\u7684\u6700\u540e\uff0c\u7528\u6237\u9700\u8981\u8bbe\u7f6e\u81ea\u5df1\u7684\u4fe1\u606f\uff0c\u5373\u7528\u6237\u540d\u548c\u5bc6\u7801\u3002\u4e3a\u4e86\u4f7f\u5728\u8fdc\u7a0b\u7684\u7528\u6237\u4fe1\u606f\u548c\u672c\u5730\u4fdd\u6301\u4e00\u81f4\uff0c\u901a\u5e38\u4e0eGithub\u7684\u7528\u6237\u540d\u548c\u6ce8\u518c\u90ae\u7bb1\u4fdd\u6301\u4e00\u81f4\u3002

          git config --global user.name \"Your Name\"\ngit config --global user.email \"email@example.com\"\n

          \u6ce8\u610fgit config\u547d\u4ee4\u7684--global\u53c2\u6570\uff0c\u7528\u4e86\u8fd9\u4e2a\u53c2\u6570\uff0c\u8868\u793a\u4f60\u8fd9\u53f0\u673a\u5668\u4e0a\u6240\u6709\u7684Git\u4ed3\u5e93\u90fd\u4f1a\u4f7f\u7528\u8fd9\u4e2a\u914d\u7f6e\uff0c\u5f53\u7136\u4e5f\u53ef\u4ee5\u5bf9\u67d0\u4e2a\u4ed3\u5e93\u6307\u5b9a\u4e0d\u540c\u7684\u7528\u6237\u540d\u548cEmail\u5730\u5740\uff0c\u5373\u53bb\u6389--global\u3002
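例如(示意,其中的用户名和邮箱均为假设值),若只想对某一个仓库使用不同的身份,可在该仓库目录内执行不带 --global 的配置,并用 git config --list 检查当前生效的配置:

cd Test
git config user.name "Lab Account"
git config user.email "lab@example.com"
git config --list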

          \u5728\u8fdc\u7a0b\u521b\u5efa\u4ed3\u5e93\u540e\uff0c\u6211\u4eec\u4fbf\u53ef\u4ee5\u628a\u8fdc\u7a0b\u7684\u4ed3\u5e93\u62c9\u53d6\uff08Pull\uff09\u5230\u672c\u5730\u3002\u70b9\u51fb\u7eff\u8272\u7684Code\u6309\u94ae\uff0c\u5373\u53ef\u770b\u5230\u5982\u56fe\u7684\u5bf9\u8bdd\u6846\uff0c\u70b9\u51fb\u6587\u672c\u6846\u53f3\u4fa7\u7684\u6309\u94ae\u590d\u5236\u94fe\u63a5\u3002

          \u82e5\u5728\u672c\u5730\u67d0\u4e2a\u76ee\u5f55\u4e0b\uff0c\u8f93\u5165\u5982\u4e0b\u547d\u4ee4\uff1a

          git clone https://github.com/chenggroup/Test.git\n

          \u5373\u53ef\u5c06\u8fdc\u7a0b\u4ed3\u5e93\u62c9\u53d6\u5230\u672c\u5730\uff0c\u5e76\u521b\u5efa\u4e00\u4e2aTest\u76ee\u5f55\u7528\u4e8e\u5b58\u653e\u6587\u4ef6\u3002

          \u5148\u522b\u6025\u7740\u8f93\u5165\u4e0a\u9762\u7684\u547d\u4ee4\u3002\u7531\u4e8e\u5b89\u5168\u6027\u539f\u56e0\uff0cGithub\u5b98\u65b9\u4ece2021\u5e748\u6708\u8d77\u5173\u95ed\u4e86\u901a\u8fc7HTTPS\u534f\u8bae\u76f4\u63a5\u4e0a\u4f20\u63d0\u4ea4\u7684\u529f\u80fd\uff0c\u56e0\u6b64\u8981\u60f3\u4ece\u672c\u5730\u5411\u8fdc\u7a0b\u4e0a\u4f20\u63d0\u4ea4\uff0c\u9700\u8981\u4f7f\u7528SSH\u534f\u8bae\uff0c\u56e0\u6b64\u6211\u4eec\u9700\u8981\u8fdb\u884c\u989d\u5916\u914d\u7f6e\uff0c\u8bf7\u53c2\u8003\u5ed6\u96ea\u5cf0\u8001\u5e08\u7684\u6559\u7a0b\u64cd\u4f5c\u3002

          \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u5373\u53ef\u7528SSH\u9762\u677f\u91cc\u7684\u94fe\u63a5\u6765\u514b\u9686\uff08Clone\uff09\u8fdc\u7a0b\u4ed3\u5e93\u5230\u672c\u5730\uff1a

          git clone git@github.com:chenggroup/Test.git\n

          \u6ce8\u610f git clone \u540e\u7684\u94fe\u63a5\u8981\u4fee\u6539\u4e3a\u4f60\u590d\u5236\u7684\u94fe\u63a5\u3002

          \u968f\u540e cd Test \u8fdb\u5165\u672c\u5730\u4ed3\u5e93\uff0c\u4fbf\u53ef\u4ee5\u5bf9\u672c\u5730\u4ed3\u5e93\u8fdb\u884c\u7f16\u8f91\u3002\u8fd9\u91cc\u6211\u4eec\u7528Vim\u521b\u5efa\u4e00\u4e2a\u6587\u4ef6\uff0c\u4e3a\u6f14\u793a\u64cd\u4f5c\u65b9\u4fbf\uff0c\u6587\u4ef6\u540d\u5047\u8bbe\u662ffirst_commit.txt\uff1a

          vim first_commit.txt\n

          \u5728\u6587\u4ef6\u4e2d\u8fdb\u884c\u4e00\u4e9b\u7f16\u8f91\uff0c\u4f8b\u5982\u8f93\u5165\uff1a

          test\n2021\nfirst commit\n

          \u5982\u679c\u5c1a\u4e0d\u719f\u6089 Vim \u7684\u64cd\u4f5c\uff0c\u8bf7\u53c2\u8003Linux\u5feb\u901f\u57fa\u7840\u5165\u95e8\u3002

          \u4fdd\u5b58\u5e76\u9000\u51fa\uff0c\u8f93\u5165git status\uff0c\u53ef\u4ee5\u770b\u5230\u5df2\u7ecf\u76d1\u6d4b\u5230\u5c1a\u672a\u63d0\u4ea4\u7684\u66f4\u6539\uff1a

          $ git status\nOn branch master\nYour branch is up to date with 'origin/master'.\n\nUntracked files:\n  (use \"git add <file>...\" to include in what will be committed)\n    first_commit.txt\n\nnothing added to commit but untracked files present (use \"git add\" to track)\n

          \u6ce8\u610f\u8fd9\u91cc\u63d0\u5230\uff0c\u6211\u4eec\u6b63\u5904\u4e8emaster\u5206\u652f\u4e0a\uff0c\u5e76\u4e0e\u8fdc\u7a0b\u7684origin/master\u5206\u652f\u4fdd\u6301\u4e00\u81f4\u3002\u8f93\u5165

          git add .\n

          \u5373\u53ef\u5c06\u5f53\u524d\u76ee\u5f55\u4e0b\u4fee\u6539\u7684\u6587\u4ef6\u6dfb\u52a0\u5230\u6682\u5b58\u533a\uff0c\u53ef\u4f9b\u63d0\u4ea4\u3002\u56e0\u6b64\u8f93\u5165\uff1a

          git commit -m \"some description\"\n

          \u5373\u53ef\u751f\u6210\u4e00\u4e2a\u63d0\u4ea4\uff0c\u5305\u542b\u4e86\u4e0a\u8ff0\u6587\u4ef6\u7684\u4fee\u6539\u3002\u8fd9\u91ccsome description\u53ef\u4ee5\u53c2\u7167\u81ea\u5df1\u7684\u7f16\u8f91\u8fdb\u884c\u4fee\u6539\u3002

          \u4f46\u4e0a\u8ff0\u6b65\u9aa4\u4ec5\u4ec5\u662f\u63d0\u4ea4\u5230\u672c\u5730\u7684Git\u4ed3\u5e93\uff0c\u8981\u60f3\u548c\u8fdc\u7a0b\u540c\u6b65\uff0c\u5219\u9700\u8981\uff1a

          git push origin\n

          \u5c06\u672c\u5730\u7684\u66f4\u6539\u63d0\u4ea4\u5230\u8fdc\u7a0b\u5bf9\u5e94\u7684\u5206\u652f\uff0c\u5373\u4e0a\u8ff0\u7684origin/master\uff0c\u8f93\u51fa\u5982\u4e0b\uff1a

          $ git push origin\nEnumerating objects: 4, done.\nCounting objects: 100% (4/4), done.\nDelta compression using up to 4 threads\nCompressing objects: 100% (2/2), done.\nWriting objects: 100% (3/3), 309 bytes | 309.00 KiB/s, done.\nTotal 3 (delta 0), reused 0 (delta 0)\nTo github.com:chenggroup/Test.git\n   26c6605..d964d89  master -> master\n

          \u56de\u5230\u8fdc\u7a0b\u9875\u9762\u5c31\u4f1a\u53d1\u73b0\uff0c\u6211\u4eec\u5df2\u7ecf\u63d0\u4ea4\u6210\u529f\u3002

          \u70b9\u51fb\u8fdb\u5165\uff0c\u5185\u5bb9\u548c\u672c\u5730\u4e00\u81f4\uff1a

          \u4ece\u800c\u6211\u4eec\u53ef\u4ee5\u628a\u672c\u5730\u4ed3\u5e93\u7684\u4fee\u6539\u540c\u6b65\u5230\u8fdc\u7a0b\u3002\u5728git commit\u4e4b\u524d\uff0c\u5b9e\u9645\u4e0a\u4efb\u4f55\u4fee\u6539\u90fd\u53ef\u4ee5\u6dfb\u52a0\u5230\u6682\u5b58\u533a\u4e2d\uff0c\u4f46\u8fd9\u91cc\u9700\u8981\u6ce8\u610f\u53ef\u4ee5\u88abTrack\u7684\u6587\u4ef6\u662f\u5426\u662f\u81ea\u5df1\u60f3\u8981\u7684\uff0c\u800c\u4e0d\u8981\u65e0\u8111git add .\u751a\u81f3git add *\uff0c\u4ee5\u514d\u8ffd\u8e2a\u5230\u4e00\u4e9b\u201c\u4e0d\u901f\u4e4b\u5ba2\u201d\u3002
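一个示意的做法(文件名沿用上文的 first_commit.txt):只添加确定要提交的文件,并在提交前用 git status 和 git diff --staged 检查暂存区的内容:

git add first_commit.txt
git status
git diff --staged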

          "},{"location":"wiki/teamwork/git_usage/#_3","title":"\u9879\u76ee\u7ef4\u62a4","text":""},{"location":"wiki/teamwork/git_usage/#_4","title":"\u5206\u652f","text":"

          \u5982\u679c\u9879\u76ee\u672c\u8eab\u5185\u5bb9\u8f83\u591a\uff0c\u4e14\u7531\u591a\u4e2a\u4eba\u7ef4\u62a4\uff0c\u5c06\u6240\u6709\u63d0\u4ea4\u90fd\u653e\u5230\u540c\u4e00\u6761\u65f6\u95f4\u7ebf\u4e0a\uff0c\u5c31\u4f1a\u5f62\u6210\u975e\u5e38\u957f\u7684\u4fee\u6539\uff0c\u4e0d\u5229\u4e8e\u6bcf\u4e2a\u4eba\u8ffd\u8e2a\u81ea\u5df1\u7684\u4fee\u6539\u3002\u5e76\u4e14\u6709\u65f6\u4f1a\u5e0c\u671b\u5728\u91cd\u6784\u7684\u540c\u65f6\uff0c\u4fdd\u6301\u4e3b\u7ebf\u5b8c\u6574\u6027\u3002\u8fd9\u4e00\u9700\u6c42\u53ef\u7531Git\u8f7b\u677e\u89e3\u51b3\u3002

          Git\u652f\u6301\u521b\u5efa\u5206\u652f\uff08Branch\uff09\uff0c\u5373\u53ef\u4ee5\u4ece\u4e3b\u7ebf\u5206\u652f\u51fa\u4e00\u4e2a\u72ec\u7acb\u7684Branch\uff0c\u5e76\u5728\u8be5Branch\u4fee\u6539\uff0c\u901a\u8fc7\u540e\u518d\u5408\u5e76\uff08Merge\uff09\u5230\u4e3b\u7ebf\u4e0a\u3002\u8fd9\u6837\uff0c\u4fbf\u53ef\u4ee5\u5728\u4e0d\u5e72\u6d89\u4e3b\u7ebf\u7684\u60c5\u51b5\u5bf9\u5206\u652f\u8fdb\u884c\u7ef4\u62a4\u548c\u4fee\u6539\u3002\u5e76\u4e14\u6bcf\u4e2a\u4eba\u90fd\u53ef\u4ee5\u521b\u5efa\u81ea\u5df1\u7684\u72ec\u7acb\u5206\u652f\uff0c\u4ece\u800c\u907f\u514d\u5404\u81ea\u7684\u4fee\u6539\u4e4b\u95f4\u51fa\u73b0\u51b2\u7a81\uff0c\u5bfc\u81f4\u6df7\u4e71\u3002

          \u5207\u6362\u5206\u652f\u7684\u547d\u4ee4\u5982\u4e0b\uff1a

          git checkout -b devel\n

          \u82e5\u672c\u5730\u4e4b\u524d\u4e0d\u5b58\u5728devel\u5206\u652f\uff0c\u5219\u53ef\u7531\u5f53\u524d\u5206\u652f\u51fa\u53d1\u521b\u5efa\u4e00\u4e2a\u3002\u8fd9\u6837\u7684\u5b9e\u73b0\u65b9\u5f0f\u5c31\u5982\u540c\u4ece\u5f53\u524d\u5730\u94c1\u7ad9\u6362\u4e58\u5230\u53e6\u4e00\u6761\u5730\u94c1\u7ebf\u8def\uff0c\u518d\u7ee7\u7eed\u4e58\u5750\u3002\u4e4b\u540e\u7684\u6240\u6709\u4fee\u6539\u4fbf\u4f53\u73b0\u5728devel\u5206\u652f\u4e0a\u3002

          \u5f53\u4fee\u6539\u7684\u4ee3\u7801\u6d4b\u8bd5\u5b8c\u5584\uff0c\u6211\u4eec\u4fbf\u53ef\u4ee5\u628a\u652f\u7ebf\u4ee3\u7801\u5408\u5e76\u5230\u4e3b\u7ebf\u4e0a\uff0c\u5373\u5728\u6362\u4e58\u7ebf\u8def\u7684\u5730\u94c1\u7ad9\u4fee\u5efa\u4e00\u4e2a\u6362\u4e58\u7ad9\uff0c\u4e0e\u4e3b\u7ebf\u6362\u4e58\uff0c\u5e76\u4fdd\u7559\u4e4b\u524d\u7684\u6240\u6709\u4fee\u6539\u3002\u547d\u4ee4\u5982\u4e0b\uff1a

          git checkout master\ngit merge devel\n
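合并完成后,通常可以删除已经不再需要的分支(示意,分支名沿用上文的 devel;第二条命令仅在该分支已经推送到远程时才需要):

git branch -d devel
git push origin --delete devel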

          \u5173\u4e8e\u5206\u652f\u7ba1\u7406\uff0c\u66f4\u8be6\u7ec6\u7684\u4ecb\u7ecd\uff0c\u53ef\u4ee5\u53c2\u8003\u5ed6\u96ea\u5cf0\u7684\u6559\u7a0b\u3002

          "},{"location":"wiki/teamwork/git_usage/#pull-request","title":"\u62c9\u53d6\u8bf7\u6c42\uff08Pull Request\uff09","text":"

          \u7c7b\u4f3c\u4e8e\u5206\u652f\u7684\u5b9e\u73b0\uff0c\u5bf9\u516c\u5f00\u5728Github\u4e0a\u7684\u8fdc\u7a0b\u9879\u76ee\uff0c\u53ef\u4ee5\u7531\u5f53\u524d\u9879\u76ee\u51fa\u53d1\uff0c\u5efa\u7acb\u9879\u76ee\u7684\u590d\u523b\uff08Fork\uff09\u3002\u590d\u523b\u51fa\u7684\u9879\u76ee\u53ef\u4ee5\u770b\u4f5c\u662f\u4e3b\u9879\u76ee\u7684\u5206\u652f\uff0c\u5e76\u4fdd\u7559\u4e86\u521d\u59cb\u9879\u76ee\u7684\u76f8\u5e94\u5206\u652f\u3002

          Fork\u7684\u9879\u76ee\u4ecd\u662f\u8fdc\u7a0b\u9879\u76ee\uff0c\u56e0\u800c\u53ef\u4ee5Clone\u5230\u672c\u5730\u4f5c\u8fdb\u4e00\u6b65\u4fee\u6539\uff0c\u5e76\u53ef\u4ee5\u4e0e\u672c\u5730\u540c\u6b65\u4ece\u800c\u66f4\u65b0\u8fdc\u7a0b\u7684Fork\u9879\u76ee\uff0c\u800c\u539f\u59cb\u9879\u76ee\u4fdd\u6301\u4e0d\u53d8\uff08\u5e76\u4e14\u5f88\u53ef\u80fd\u4e5f\u6ca1\u6743\u9650\u6539\u53d8\uff09\u3002

          \u6b64\u65f6\uff0c\u8981\u60f3\u5411\u539f\u59cb\u9879\u76ee\u63d0\u4ea4\u81ea\u5df1\u7684\u4fee\u6539\uff0c\u5219\u9700\u8981\u521b\u5efa\u62c9\u53d6\u8bf7\u6c42\uff08Pull request\uff0c\u7b80\u5199\u4e3aPR\uff09\u3002\u70b9\u51fb\u9875\u9762\u4e0a\u7684\"Contribute\"\uff0c\u70b9\u51fb\"Open pull request\"\u5373\u53ef\u521b\u5efaPR\u3002

          \u968f\u540e\uff0c\u4fbf\u53ef\u4ee5\u6307\u5b9a\u4eceFork\u9879\u76ee\u7684\u67d0\u4e2a\u5206\u652f\u63d0\u4ea4PR\u5230\u539f\u59cb\u9879\u76ee\u7684\u67d0\u4e2a\u5206\u652f\u3002\u4f8b\u5982\u56fe\u4e2d\u662f\u4ece\u81ea\u5df1\u7684devel\u5230\u539f\u59cb\u7684master\u5206\u652f\u3002\u5728\u4e0b\u65b9\u7684\u6587\u672c\u6846\u4e2d\u53ef\u4ee5\u8f93\u5165\u81ea\u5df1\u7684\u4fee\u6539\u53ca\u5bf9\u5e94\u7684\u63cf\u8ff0\uff0c\u4fbf\u4e8e\u539f\u59cb\u9879\u76ee\u7684\u7ef4\u62a4\u8005\u5ba1\u6838\u3001\u5904\u7406\u3001\u5408\u5e76PR\u3002

          \u9875\u9762\u5411\u4e0b\u7ffb\uff0c\u53ef\u4ee5\u770b\u5230\u81ea\u5df1\u7684\u5386\u53f2\u63d0\u4ea4\uff0c\u4ee5\u53ca\u4fee\u6539\u7684\u6587\u4ef6\u7b49\u3002\u6ce8\u610f\u5728\u521b\u5efaPR\u524d\uff0c\u8bf7\u52a1\u5fc5\u67e5\u770b\u672c\u6b21PR\u76f8\u6bd4\u539f\u59cb\u6587\u4ef6\u4fee\u6539\u4e86\u54ea\u4e9b\uff0c\u4ee5\u514d\u4e00\u4e9b\u4e0d\u5e0c\u671b\u4e0a\u4f20\u7684\u5185\u5bb9\u6df7\u8fdb\u6765\uff0c\u7ed9\u5ba1\u6838\u4eba\u5458\u5e26\u6765\u56f0\u6270\uff0c\u4e0d\u5229\u4e8e\u6293\u4f4f\u771f\u6b63\u6838\u5fc3\u7684\u4fee\u6539\u3002\u3001

          \u63d0\u4ea4PR\u4ee5\u540e\uff0c\u5ba1\u6838\u4eba\u5458\u53ef\u80fd\u4f1a\u63d0\u51fa\u4e00\u4e9b\u5efa\u8bae\uff0c\u751a\u81f3\u662f\u4fee\u6539\u610f\u89c1\u3002\u82e5\u63d0\u4ea4\u5230\u5bf9\u5e94\u7684\u590d\u523b\u5206\u652f\uff0c\u5219\u76f8\u5e94\u7684\u4fee\u6539\u4e5f\u4f1a\u540c\u6b65\u5230PR\u4e2d\uff0c\u56e0\u6b64\u4e0d\u9700\u8981\u989d\u5916\u63d0\u4ea4\u4fee\u6539\u8bf7\u6c42\u3002

          "},{"location":"wiki/teamwork/git_usage/#issue","title":"\u521b\u5efa\u8bae\u9898\uff08Issue\uff09","text":"

          \u5f53\u53d1\u73b0\u4ee3\u7801\u53ef\u80fd\u5b58\u5728BUG\u6216\u8005\u81ea\u5df1\u6709\u4e00\u4e9b\u7591\u95ee\u9700\u8981\u7ef4\u62a4\u8005\u56de\u7b54\u65f6\uff0c\u6291\u6216\u662f\u6709\u4e00\u4e9b\u60f3\u8981\u5f00\u53d1\u8005\u5b9e\u73b0\u7684\u65b0\u529f\u80fd\uff0c\u7528\u6237\u4e5f\u53ef\u4ee5\u5728\u539f\u59cb\u9879\u76ee\u4e2d\u521b\u5efa\u8bae\u9898\uff08Issue\uff09\uff0c\u7528\u5c3d\u53ef\u80fd\u7b80\u6d01\u7684\u8bed\u8a00\u63cf\u8ff0\u81ea\u5df1\u9047\u5230\u7684\u95ee\u9898\uff0c\u6216\u81ea\u5df1\u7684\u9700\u6c42\u3002\u4e00\u4e9b\u6d41\u884c\u7684\u9879\u76ee\u53ef\u80fd\u4f1a\u63d0\u4f9bIssue\u6a21\u677f\uff0c\u8bf7\u6309\u7167\u6a21\u677f\u63d0\u793a\u586b\u5199\uff0c\u63d0\u9ad8\u89e3\u51b3\u95ee\u9898\u7684\u6548\u7387\uff0c\u65b9\u4fbf\u5f00\u53d1\u8005\u5bf9\u5e94\u4fee\u590dBUG\u6216\u8005\u5f00\u53d1\u7279\u6027\u3002

          \u5982\u679c\u4f60\u770b\u5230\u76f8\u5173\u7684Issue\uff0c\u800c\u6070\u597d\u4f60\u7684\u4fee\u6539\u53ef\u4ee5\u4e3a\u4e4b\u63d0\u4f9b\u5e2e\u52a9\uff0c\u4e5f\u53ef\u4ee5\u63d0\u4ea4PR\uff0c\u5e76\u5728PR\u7684\u63cf\u8ff0\u4e2d\u7528#<ID>\u8fde\u63a5\u5230\u5bf9\u5e94\u7684Issue\uff0c\u4fbf\u4e8e\u63d0\u95ee\u8005\u540c\u6b65\u4f60\u7684\u4fee\u6539\u3002

          "},{"location":"wiki/teamwork/git_usage/#gitignore","title":".gitignore \u6587\u4ef6","text":"

          \u5f00\u53d1\u8005\u5e38\u5e38\u9700\u8981\u5728\u9879\u76ee\u6587\u4ef6\u5939\u4e0b\u8c03\u8bd5\uff0c\u800c\u8bba\u6587\u64b0\u7a3f\u4eba\u5e38\u5e38\u9700\u8981\u7f16\u8bd1 LaTex \u9879\u76ee\u4ea7\u751f PDF \u4f9b\u9884\u89c8\u3002\u8fd9\u4e9b\u8fc7\u7a0b\uff0c\u90fd\u53ef\u80fd\u4ea7\u751f\u4e00\u4e9b\u65e5\u5fd7\u3001\u7f13\u5b58\u3001\u8f93\u51fa\u7b49\u6587\u4ef6\uff0c\u4e00\u4e9b\u751a\u81f3\u662f\u4e8c\u8fdb\u5236\u6587\u4ef6\u3002\u5728\u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0cGit \u4f1a\u76d1\u6d4b\u9879\u76ee\u76ee\u5f55\u4e0b\u7684\u6240\u6709\u6587\u4ef6\uff0c\u5982\u679cgit add .\uff0c\u5219\u4f1a\u5168\u90e8\u52a0\u5165\u5230\u6682\u5b58\u533a\u3002\u82e5\u5728git commit\u65f6\u4ecd\u672a\u53d1\u73b0\u95ee\u9898\uff0c\u8fd9\u4e9b\u6587\u4ef6\u5c31\u4f1a\u4e00\u5e76\u88ab\u4ed3\u5e93\u8ffd\u8e2a\u3002\u5f53\u4e0a\u4f20\u5230\u8fdc\u7a0b\u4ed3\u5e93\uff0c\u6709\u6743\u9650\u67e5\u770b\u8fd9\u4e9b\u9879\u76ee\u7684\u4eba\u4fbf\u4f1a\u5728Github\u6216\u8005\u5176\u4ed6\u5730\u65b9\u770b\u5230\u8fd9\u4e9b\u6587\u4ef6\uff0c\u8840\u538b\u53ef\u80fd\u4f1a\u65e0\u6cd5\u6291\u5236\u5730\u6025\u901f\u4e0a\u5347\u2026\u2026

          \u4e3a\u4e86\u907f\u514d\u8fd9\u79cd\u60c5\u51b5\uff0c\u4fbf\u9700\u8981\u6709\u529e\u6cd5\u62d2\u7edd\u8ffd\u8e2a\u8fd9\u4e9b\u6587\u4ef6\u3002Git\u63d0\u4f9b\u7684\u89e3\u51b3\u65b9\u6848\u4fbf\u662f\u521b\u5efa\u4e00\u4e2a.gitignore\u6587\u4ef6\uff0c\u8bb0\u5f55\u8fd9\u4e9b\u5e0c\u671b\u88ab\u5ffd\u7565\u7684\u6587\u4ef6\u6216\u76ee\u5f55\u3002\u5176\u683c\u5f0f\u5982\u4e0b\u6240\u793a\uff0c\u5373\u628a\u5e0c\u671b\u5ffd\u7565\u6216\u8005\u6392\u9664\u7684\u6587\u4ef6\u52a0\u5165\u5176\u4e2d\u3002

          # \u6392\u9664\u7279\u5b9a\u6587\u4ef6\ntext.txt\n\n# \u6392\u9664tmp\u4e0b\u7684\u6240\u6709\u6587\u4ef6\ntmp/*\n\n# \u6392\u9664\u6240\u6709.\u5f00\u5934\u7684\u9690\u85cf\u6587\u4ef6\n.*\n\n# \u6392\u9664\u6240\u6709.class\u6587\u4ef6\n*.class\n\n# \u4e0d\u6392\u9664.gitignore\u548cApp.class\n!.gitignore\n!App.class\n

          \u53ef\u4ee5\u60f3\u50cf\uff0c\u5982\u679c\u6240\u6709\u89c4\u5219\u90fd\u624b\u52a8\u7f16\u5199\uff0c\u5bf9\u4e8e\u7ef4\u62a4\u8005\u53ef\u80fd\u4f1a\u6709\u56f0\u6270\u3002\u56e0\u6b64\uff0cGithub\u4e0a\u4ea6\u6709\u7ef4\u62a4\u4e00\u4e2a.gitignore\u6587\u4ef6\u7684\u4ed3\u5e93\uff08github/gitignore: A collection of useful .gitignore templates\uff09\uff0c\u7528\u6237\u53ea\u9700\u8981\u6839\u636e\u81ea\u5df1\u7684\u9700\u6c42\u4ece\u4e2d\u9009\u53d6\u76f8\u5e94\u7684\u5ffd\u7565\u4fe1\u606f\uff0c\u52a0\u5165\u5230\u672c\u5730\u7684.gitignore\u5373\u53ef\u3002\u6ce8\u610f\uff0c\u8be5\u4ed3\u5e93\u7684\u6839\u76ee\u5f55\u4e0b\u653e\u7f6e\u7684\u662f\u4e00\u4e9b\u5e38\u7528\u8bed\u8a00\u73af\u5883\uff0c\u800c\u4e00\u4e9b\u7f16\u8f91\u5668\u6216IDE\u540c\u6837\u4f1a\u4ea7\u751f\u7f13\u5b58\u6587\u4ef6\uff0c\u8fd9\u4e9b\u6a21\u677f\u89c1\u4e8eglobal\u4e0b\u3002\u5b9e\u9645\u4e0a\uff0c\u4eceGithub\u521b\u5efa\u7684\u4ed3\u5e93\u4fbf\u662f\u4ece\u8fd9\u4e2a\u4ed3\u5e93\u4e2d\u62c9\u53d6.gitignore\u7684\u6a21\u677f\u3002

          \u4f46\u662f\uff0c\u5f88\u591a\u610f\u8bc6\u5230\u81ea\u5df1\u9700\u8981.gitignore\u7684\u7528\u6237\u5f80\u5f80\u662f\u7ecf\u5386\u4e86\u8840\u538b\u7684\u4e0a\u5347\uff0c\u60f3\u8981\u4ea1\u7f8a\u8865\u7262\u7684\u3002\u5373\u5df2\u7ecf\u628a\u8bf8\u5982\u65e5\u5fd7\u6587\u4ef6\u4e00\u7c7b\u7684\u6587\u4ef6\u63d0\u4ea4\u5230\u8fdc\u7a0b\u4ed3\u5e93\u4e2d\uff0c\u751a\u81f3\u5728clone\u65f6\u624d\u53d1\u73b0\u95ee\u9898\u3002\u4e00\u4e2a\u6bd4\u8f83\u5feb\u901f\u7684\u89e3\u51b3\u65b9\u6848\u4fbf\u662f\uff0c\u5728\u5efa\u7acb.gitignore\u540e\uff0c\u76f4\u63a5\u8fd0\u884c\uff1a

          git rm -r --cached .\ngit add .\n

          \u76f8\u5f53\u4e8e\u4ece\u5934\u5f00\u59cb\uff0c\u76f4\u63a5\u5c06\u4e0d\u5e0c\u671b\u7ee7\u7eedtrack\u7684\u6587\u4ef6\u6807\u8bb0\u4e3a\u5220\u9664\uff0c\u4ece\u800c\u5728\u63d0\u4ea4\u4e0a\u5f7b\u5e95\u5ffd\u7565\u8fd9\u4e9b\u6587\u4ef6\u7684\u5b58\u5728\uff0c\u4f46\u540c\u65f6\u4e0d\u5220\u9664\u539f\u59cb\u6587\u4ef6\u3002\u4f46\u8fd9\u4e9b\u6587\u4ef6\u7684\u8bb0\u5f55\u4ecd\u5b58\u5728\u4e8e\u8fdc\u7a0b\u3002
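一个完整的示意流程(假设 .gitignore 已经写好),把上述操作落实为一次提交并同步到远程:

git rm -r --cached .
git add .
git commit -m "apply .gitignore"
git push origin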

          \u53e6\u4e00\u79cd\u601d\u8def\u5219\u662f\u5229\u7528git update-index --assume-unchanged <file>\u547d\u4ee4\uff0c\u5ffd\u7565\u6389\u8be5\u6587\u4ef6\u7684\u66f4\u6539\uff0c\u4f46\u4ecd\u4fdd\u7559\u4e86\u6587\u4ef6\u672c\u8eab\u3002\u603b\u4e4b\uff0c\u8fd9\u4e24\u79cd\u65b9\u6cd5\u90fd\u65e0\u6cd5\u4ece\u6839\u672c\u4e0a\u89e3\u51b3\u5df2\u7ecf\u63d0\u4ea4\u5230\u8fdc\u7a0b\u7684\u6587\u4ef6\uff0c\u56e0\u6b64\u8fd8\u662f\u63a8\u8350\u5728git init\u4e4b\u521d\u5c31\u5199\u597d.gitignore\uff0c\u6216\u5229\u7528 Github \u81ea\u5e26\u7684\u6a21\u677f\u3002

          "},{"location":"wiki/teamwork/tutorial_rules/","title":"\u5982\u4f55\u7ec4\u7ec7\u57f9\u8bad","text":"
          1. \u4e00\u5b9a\u8981\u5728\u57f9\u8bad\u548c\u5c55\u793a**\u524d**\u628a\u5e7b\u706f\u7247\u548c\u57f9\u8bad\u6750\u6599\u53d1\u9001\u7ed9\u5b66\u5458

          2. \u57f9\u8bad\u6750\u6599\u8bf7\u9075\u5faa\u4ee5\u4e0b\u683c\u5f0f

          3. \u4e3b\u9898

          4. \u76ee\u6807\u548c\u6b64\u6b21\u57f9\u8bad\u7684\u6536\u76ca

            --- \u4f8b\uff1a1.\u7406\u89e3\u5de5\u4f5c\u6d41 2.\u5b66\u4e60\u5982\u4f55\u81ea\u52a8\u5316\u5de5\u4f5c\u6d41 3.\u5b66\u4e60\u5982\u4f55\u901a\u8fc7airflow\u53ef\u89c6\u5316\u5de5\u4f5c\u6d41

          5. \u63d0\u524d\u5e2e\u53c2\u4e0e\u8005/\u5b66\u5458\u51c6\u5907

            --- \u6240\u9700\u7684\u80cc\u666f\u77e5\u8bc6\u548c\u63d0\u4f9b\u5fc5\u8981\u7684\u5f15\u5bfc - \u94fe\u63a5\uff0c\u4e66\u7c4d\uff0c\u5fc5\u8bfb\u6587\u7ae0\u7b49

            --- \u5176\u4ed6\u9700\u8981\u5728\u57f9\u8bad\u524d\u505a\u597d\u7684\u51c6\u5907

  --- \u4f8b\uff1a1.\u5b89\u88c5PyCharm, Jupyter, python3.9\u7b49 2.\u5b89\u88c5\u548c\u9a8c\u8bc1\u6240\u9700\u7684\u5305\uff08airflow\uff09

          6. \u57f9\u8bad\u5185\u5bb9\u7684\u65f6\u95f4\u5b89\u6392

            --- \u4f8b\uff1a

            1. \u4ecb\u7ecd\u5de5\u4f5c\u6d41(10\u5206\u949f)
            2. \u4ecb\u7ecdaiida\u548cairflow(20\u5206\u949f)
            3. \u7ec3\u4e60\u5de5\u4f5c\u6d41\u548c\u53ef\u89c6\u5316\u5de5\u4f5c\u6d41(50\u5206\u949f)
            4. \u7b54\u7591(10\u5206\u949f)
          7. \u786e\u4fdd\u4f60\u8db3\u591f\u65e9\u5730\u53d1\u9001\u7684\u5e7b\u706f\u7247\u548c\u57f9\u8bad\u6750\u6599\u3002\u7559\u4e0b\u5145\u8db3\u7684\u65f6\u95f4\u7ed9\u5b66\u5458\u5b8c\u6210\u51c6\u5907\u7684\u4efb\u52a1\u3002

          "},{"location":"wiki/teamwork/tutorial_rules/#trainingpresentaion-guideline","title":"Training/Presentaion Guideline","text":"
          1. Always send slides and agenda BEFORE presentation and training

          2. Follow the agenda format as below:

            a. Topic

            b. Objective and benefit of training

            \u2014\u2014 e.g. 1. Understand the workflow 2. Learn how to automate the workflow 3. Learn how to visualize the workflow via the package 'airflow'

            c. Participant's preparation

            \u2014\u2014 State the required background knowledge and provide the necessary guidance \u2014 links, books, must-read papers, etc.

            \u2014\u2014 State the preparation that the participants need to complete before attending the training

            \u2014\u2014 e.g. 1. Install an IDE such as PyCharm, Jupyter, Python 3.9, etc. 2. Install and validate the required packages (airflow)

            d. Items with time slot

            \u2014\u2014 e.g.

          3. Introduce workflow (10 minutes)

          4. Introduce AiiDA and Airflow (20 minutes)

          5. Practice workflow and visualize via 'airflow' (50 minutes)

          6. Q&A (10 minutes)

          7. Make sure you send slides and agenda early and leave plenty of time for the participants to complete the preparation tasks.

          "},{"location":"en/","title":"Welcome to ChengGroup Wiki","text":"

          This is the group wiki of chenggroup at XMU, an academic research group. Visitors are welcome to have a look around, and any suggestions are appreciated. This wiki is mainly written in Simplified Chinese.

          "},{"location":"en/wiki/book_recommendation/","title":"Recommended Book List","text":""},{"location":"en/wiki/book_recommendation/#basic-theory-series","title":"Basic Theory Series","text":"
          1. Quantum Chemistry

          Quantum mechanics fundamentals tailored for chemistry students, with detailed calculations.

          1. Modern Quantum Chemistry

          A classic textbook on quantum chemistry, with a highlight on the Hartree-Fock theory.

          1. The Electronic Structure and Chemistry of Solids

          Qualitative solid-state physics suitable for chemistry students, providing a basic understanding of solid-state physics.

          1. Statistical Mechanics

          A comprehensive introduction to statistical mechanics with detailed formula derivations. Serious study will provide a relatively deep understanding of statistical mechanics.

          1. Second Quantized Approach to Quantum Chemistry

          As stated in the preface of this book, it is designed to introduce chemists (non-physicists) to the language of second quantization. If you want to delve into second quantization in detail for literature reading, I highly recommend this book. As the author says, you will only appreciate the beauty of second quantization once you start using it.

          "},{"location":"en/wiki/book_recommendation/#programming-series","title":"Programming Series","text":"
          1. Linux Command Line and Shell Scripting Bible

          From beginners to intermediate, this book provides excellent explanations and comprehensiveness. Interacting with the Unix kernel using bash and shell is the foundation for all programming, especially if you want to enjoy programming on Linux or MacOS.

          1. Python Crash Course

          A systematic introduction to Python programming, focusing on the data presentation part in the project section.

          1. Numerical Python

          A textbook on using Python in data science, covering numpy, scipy, and matplotlib.

          1. Fortran for Scientists & Engineers

          An introduction to Fortran with a solid foundation. The examples are straightforward, comprehensive, and can be completed in about 20 days. The summaries and code references at the end of each chapter are well-detailed. Additionally, there is a tutorial on Fortran/C interoperability in the appendix.

          "}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..1d2fafb4 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,983 @@ + + + + https://wiki.cheng-group.net/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/news/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/book_recommendation/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/miscellaneous/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/cluster_usage/cluster_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/cluster_usage/conda/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/cluster_usage/gpu_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/cluster_usage/jupyter/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/cluster_usage/notification_for_hpc/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/cluster_usage/pack_backup/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/cluster_usage/ssh_note/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/cluster_usage/tensorboard/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/cluster_usage/vscode_remote/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/deprecated/deepmd-kit_installation_104/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/deprecated/lsf_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/deprecated/mig_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/how_to_edit/howtodo/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/how_to_edit/howtousenews/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/ase/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/basis_pps/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/dpgen/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/dpmd/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/linux/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/presentation/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/python_numpy/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/qc_dft/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/read_papers/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/toc/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/new_comers/workflow/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/question_under_carpet/chemical_computing/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/skills/QS4writing/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/skills/research_skill/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_development/lammps/installation/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_development/lammps/plugin/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_development/lammps/quick_start/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_installation/cp2k-7.1/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_installation/gcc/ + 2024-08-06 + daily + + + + + 
https://wiki.cheng-group.net/wiki/software_installation/install_from_src_in_conda/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_installation/softwares/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_installation/deepmd-kit/deepmd-kit_installation_ikkem/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/DP-GEN/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/DeePMD-kit/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/MDAnalysis/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/Tips_for_LaTeX/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/default_version/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/experience_of_dpmd_and_dpgen/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/i-pi/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/n2p2/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/vmd/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/ECINT%20Tutorial/user/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-constrainedmd/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-deepmd/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-dft%2Bu/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-e-f/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-geoopt/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-hf/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-neb/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-reftraj/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-scan/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-slab/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-tools/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k-zpe/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/software_usage/cp2k/cp2k/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/teamwork/archive_rules/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/teamwork/git_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/wiki/teamwork/tutorial_rules/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/news/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/book_recommendation/ + 2024-08-06 + daily + + + + + 
https://wiki.cheng-group.net/en/wiki/miscellaneous/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/cluster_usage/cluster_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/cluster_usage/conda/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/cluster_usage/gpu_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/cluster_usage/jupyter/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/cluster_usage/notification_for_hpc/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/cluster_usage/pack_backup/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/cluster_usage/ssh_note/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/cluster_usage/tensorboard/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/cluster_usage/vscode_remote/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/deprecated/deepmd-kit_installation_104/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/deprecated/lsf_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/deprecated/mig_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/how_to_edit/howtodo/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/how_to_edit/howtousenews/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/ase/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/basis_pps/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/dpgen/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/dpmd/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/linux/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/presentation/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/python_numpy/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/qc_dft/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/read_papers/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/toc/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/new_comers/workflow/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/question_under_carpet/chemical_computing/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/skills/QS4writing/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/skills/research_skill/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_development/lammps/installation/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_development/lammps/plugin/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_development/lammps/quick_start/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_installation/cp2k-7.1/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_installation/gcc/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_installation/install_from_src_in_conda/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_installation/softwares/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/ + 2024-08-06 + daily + + + + + 
https://wiki.cheng-group.net/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_51/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_ikkem/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_new/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_installation/deepmd-kit/deepmd-kit_installation_pc/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/DP-GEN/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/DeePMD-kit/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/MDAnalysis/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/Tips_for_LaTeX/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/default_version/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/experience_of_dpmd_and_dpgen/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/i-pi/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/n2p2/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/vmd/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/ECINT%20Tutorial/user/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-constrainedmd/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-deepmd/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-dft%2Bu/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-e-f/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-geoopt/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-hf/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-neb/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-reftraj/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-scan/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-slab/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-tools/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k-zpe/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/software_usage/cp2k/cp2k/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/teamwork/archive_rules/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/teamwork/git_usage/ + 2024-08-06 + daily + + + + + https://wiki.cheng-group.net/en/wiki/teamwork/tutorial_rules/ + 2024-08-06 + daily + + + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 00000000..a7ca9ad9 Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/wiki/book_recommendation/index.html b/wiki/book_recommendation/index.html new file mode 100644 index 00000000..12ec07a9 --- /dev/null +++ b/wiki/book_recommendation/index.html @@ -0,0 +1,2857 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 书籍推荐 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + 

          推荐书籍一览

          +

          基础理论系列

          +
            +
          1. +

            Quantum Chemistry

            +

            面向化学学生的量子力学基础,推算比较详细。

            +
          2. +
          3. +

            Modern Quantum Chemistry

            +

            经典量子化学教材,Hartree-Fock理论是全书最出彩的地方。

            +
          4. +
          5. +

            The Electronic Structure and Chemistry of Solids

            +

            定性的固体物理,适合化学学生阅读,对固体物理有简单的了解。

            +
          6. +
          7. +

            Statistical Mechanics

            +

            系统而全的物理向统计力学入门,公式推导详细,认真看会对统计力学有相对深度的理解

            +
          8. +
          9. +

            Second Quantized Approach to Quantum Chemistry

            +

            正如这本书的前言所说,本书是为了简单引导化学家(非物理专业的人士)熟悉二次量子化这门语言。如果你在文献阅读中想详细了解二次量子化,个人十分推荐这本书。正如作者所说,二次量子化的美只有你开始使用了你才会欣赏到。

            +
          10. +
          +

          编程系列

          +
            +
          1. +

            Linux Command Line and Shell Scripting Bible

            +

            从入门到中级,讲解和全面性来看都是不错的书。使用bash和shell跟Unix内核进行交互是所有编程开始的基础(如果你想用Linux或MacOS快乐的编程的话)

            +
          2. +
          3. +

            Python Crash Course

            +

            Python编程的系统入门书,project章节只需要看数据展示部分。

            +
          4. +
          5. +

            Numerical Python

            +

            有关于数据科学中使用python的text book,numpy,scipy,matplotlib都有包括

            +
          6. +
          7. +

            Fortran for Scientists & Engineers

            +

            Fortran入门+基础,例子简单明了、全面,20天左右可做完全书,每个章节后面的总结和代码参考很完善。另外,关于与C/C++的接口的教程见附录 Fortran/C Interoperability。

            +
          8. +
          +

          写作系列

          +
            +
          1. +

            How to Write a Lot

            +

            打碎不写作的借口

            +
          2. +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/cluster_usage/cluster_usage/index.html b/wiki/cluster_usage/cluster_usage/index.html new file mode 100644 index 00000000..d3f30f01 --- /dev/null +++ b/wiki/cluster_usage/cluster_usage/index.html @@ -0,0 +1,3272 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 计算集群使用说明 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

          计算集群使用说明

          +

          集群的基本概念

          +

          CPU/Core(核)的概念

          +

          CPU 是 Central Processing Unit 的缩写。比起全称,它的缩写更为大家所熟知。我们买电脑时都会看这个电脑拥有几个 CPU。CPU可以计算数字或者执行你的代码等等。每个CPU有多个计算核心(Core),调度系统可按照所需核心数对任务使用的资源进行分配,因而在实际使用中,我们常常用核代替CPU这个表述。

          +

          Memory(内存)的概念

          +

          内存(Memory)就是储存数据的地方。跟硬盘(disk)储存的数据不同,内存里的数据可以直接被 读取。跟你在硬盘里储存的数据类似,只是它被读取的速度更快。当执行程序时,有一些数据会先被读入内存,然后再执行计算。因此内存越大,被读入的数据也就越多,能够同时处理的数据也就越多,代码运行的时间会更短。

          +

          Node(节点)的概念

          +

          节点(Node)换个日常的说法就是你的电脑,比如一台台式机或者笔记本电脑。它由若干个核和一个内存组成。因此可以把节点简单理解成日常见到的电脑(主机)。

          +

          HPC(集群/超级计算机/超算)的概念

          +

          HPC是High Performance Computing的缩写,又称为超级计算机、高性能集群等。它由若干个节点组成。实际使用中,这些节点会有不同的角色,通常包含登录节点、管理节点、计算节点等。登录节点顾名思义就是用来登录的节点,用户从自己的电脑可以登录到登录节点。计算节点是用来计算的节点,它们的唯一使命就是计算。管理节点比较特殊,用来管理计算节点,比如把某个计算任务分配给某几个计算节点来算。

          +

          Message Passing Interface(MPI)并行计算的概念

          +

          并行计算是若干个节点一起执行计算的意思。从节点的概念可以知道,一个节点的核数和内存肯定是有限的。比如,现有一个节点有24个核和32GB的内存,我们想执行一个用到48个核的计算,自然需要用到两个节点。问题是另一个节点上的24个核如何读取到第一个节点内存里的数据?这个时候就要用到MPI/并行计算了。MPI是消息传递接口(Message Passing Interface)的简称,是一种告诉节点如何跨节点读取内存的编程接口。也就是说这是计算机代码的一部分,我们常用的计算软件vasp和cp2k都已经实现了MPI,所以直接使用便可以。
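下面给出一个示意命令(进程数与输入、输出文件名均为假设值),在两个 24 核节点上以 48 个 MPI 进程运行 CP2K:

mpiexec.hydra -n 48 cp2k.popt input.inp >& output.log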

          +

          组内集群知识

          +

          本课题组使用 Zeus 计算集群提交计算任务进行计算模拟。Zeus 集群由两个登陆节点、一个管理节点、三个计算集群构成,每个计算集群包含多个计算节点(含六个 GPU 节点和一个大内存胖节点),其中 GPU 节点包括一个安装有 4 张 V100 的节点、一个安装有4张 A100 的节点和四个安装有 8 张 2080 Ti 的节点。

          +

          目前,所有 CPU 节点可以通过同一登陆节点进行提交,以下对集群使用的一些注意事项进行说明。关于 GPU 的使用,请参考使用集群上的GPU

          +

          使用上述集群之前,你必须拥有一个账号才能进行任务提交。申请账号请联系集群管理员。

          +

          创建密钥对

          +
          +

          Warning

          +

          新人必学

          +
          +

          ssh 是用来安全进行登录远程电脑的命令。使用后,有两种选择来验证登录

          +
            +
          1. 使用密码
          2. +
          3. 使用密钥
          4. +
          +

          第一种方法已经为大众所熟知,但是不安全,目前集群对新开账号原则上不提供登陆密码。因此我们采用密钥进行登录。

          +

          使用如下命令生成密钥:

          +
          ssh-keygen
          +
          +

          根据终端的提示进行操作(实际上你可能只需要不停按enter键)。默认情况下你会在~/.ssh目录中得到id_rsaid_rsa.pub文件,他们分别是 私钥公钥。创建好了之后请把 公钥 id_rsa.pub 文件发给服务器管理员。
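可以用下面的命令查看公钥内容(输出为一行以 ssh-rsa 开头的文本),复制后发送给管理员即可:

cat ~/.ssh/id_rsa.pub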

          +
          +

          Warning

          +

          私钥是登录集群的钥匙,请务必保管好这个文件,防止自己的电脑被入侵

          +
          +

          获取账号

          +

          集群只允许已经授权的用户进行登录。在从管理员处获得你的账号名和初始密码后, Linux 或 Mac 用户可直接从命令行登录集群,使用 ssh 命令即可。

          +
          $ ssh -p <port> username@ip_address
          +
          +

          请将 usernameip_address 替换为管理员提供的账号和IP地址,<port> 替换为端口号。
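为避免每次都输入端口和地址,可以在本地的 ~/.ssh/config 中为集群设置一个别名(以下主机别名、地址、端口等均为示例值,请替换为管理员提供的信息),之后直接 ssh zeus 即可登陆:

Host zeus
    HostName ip_address
    Port port
    User username
    IdentityFile ~/.ssh/id_rsa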

          +

          集群均采用 Linux 系统,因此不熟悉 Linux 基本操作的用户(例如查看文件、编辑文本、复制数据等)可以参考Linux快速基础入门,并熟悉这些操作。本文档假设用户有一定的 Linux 基础。

          +

          Windows 用户

          +

          对 Windows 用户来说,可以使用以下方法登陆集群。

          +
            +
          1. (Windows 10/11用户推荐)使用 WSL(Windows Subsystem for Linux)。WSL 是 Windows 10 新版的特性,可使得用户在 Windows 系统下运行命令行模式的 Ubuntu 或 OpenSUSE 等子系统。使用 WSL 的用户可直接参考 Linux 的使用方法进行操作。具体安装方式可以参考官方教程。 对于使用集群的大多数需求,WSL 1 即可满足,因此不一定需要升级到 WSL 2 。
          2. +
          +
          +
            +
          • 这种方法对于图形界面(VMD、GNUPlot)等支持较差,尚需要额外的步骤配置图形界面转发,这里限于篇幅原因暂不进行介绍。如有需要请参考这里
          • +
          • 目前 Windows 11 已经提供了对图形界面的直接支持(请参考),但需要使用 WSL 2。
          • +
          • 注意:由于代理机制原因,WSL 2 无法直接使用桌面端的 Easy Connect VPN服务,须设法进行端口转发。WSL 1 可以。也可以考虑使用 Easy Connect Docker 镜像,通过Socks代理访问SSH。
          • +
          +
          +
            +
          1. +

            使用 Git for Windows 客户端,其自带的 Git Bash 提供了对 SSH 的支持,体验接近原生 Bash,缺点是没有 SFTP 管理等功能。

            +
          2. +
          3. +

            使用 Xshell、PuTTY 等 SSH 客户端,Windows 10 以下的用户可使用这种方式。这类 SSH 客户端可以提供较完整的 SSH 功能。关于Putty的使用请参考

            +
          4. +
          5. +

            使用虚拟机安装 Linux。若不想安装 Linux 双系统可以选择使用这种方式。正确配置的虚拟机和真正使用 Linux 几乎无差别。但虚拟机启动时间长,且完全启动时占用系统资源较多。

            +
          6. +
          +

          目录结构

          +

          Zeus 集群具有如下的目录结构,为了保持统一性,请在/data/usernameusername请替换为自己的用户名)下做计算。

          +
          /data <--目前的数据盘(432TB大存储)
          +├── 51-data <--原51备份后的数据
          +│   ├── ...
          +│   ├── ...
          +│   └── username
          +├── 52-data <--原52备份后的数据
          +│   ├── ...
          +│   ├── ...
          +│   └── username
          +├── home <--Zeus(191)登陆后的home文件夹
          +│   ├── ...
          +│   ├── ...
          +│   └── username
          +├── ...
          +├── ...
          +└── username <--在这里解压数据、提交计算
          +
          +

          作业提交

          +

          计算节点、队列和脚本

          +

          通过sinfo命令可以看到,目前的集群包括51/52/53三个类别,分别为51/52/53计算集群,51/52/53集群的计算节点分别对应编号为c51-00x/c52-00x/c53-00x

          +
          PARTITION   AVAIL  TIMELIMIT  NODES  STATE NODELIST
          +gpu1           up   infinite      1   idle c51-g001
          +gpu2           up   infinite      1   idle c51-g002
          +gpu3           up   infinite      4   idle c51-m[001-004]
          +c51-small      up      20:00     33   idle c51-[001-011,013-034]
          +c51-medium     up   12:00:00     33   idle c51-[001-011,013-034]
          +c51-large      up 1-00:00:00     33   idle c51-[001-011,013-034]
          +c51-long       up 2-00:00:00     33   idle c51-[001-011,013-034]
          +c51-xlong      up 3-00:00:00     33   idle c51-[001-011,013-034]
          +c51-xlarge     up 1-00:00:00     33   idle c51-[001-011,013-034]
          +c51-exlong     up 7-00:00:00     33   idle c51-[001-011,013-034]
          +c52-small      up      20:00     40   idle c52-[001-040]
          +c52-medium     up   12:00:00     40   idle c52-[001-040]
          +c52-large      up 1-00:00:00     40   idle c52-[001-040]
          +c52-long       up 2-00:00:00     40   idle c52-[001-040]
          +c52-xlong      up 3-00:00:00     40   idle c52-[001-040]
          +c52-xlarge     up 1-00:00:00     40   idle c52-[001-040]
          +c52-exlong     up 7-00:00:00     40   idle c52-[001-040]
          +c53-small      up      20:00     34   idle c53-[001-034]
          +c53-medium     up   12:00:00     34   idle c53-[001-034]
          +c53-large      up 1-00:00:00     34   idle c53-[001-034]
          +c53-long       up 2-00:00:00     34   idle c53-[001-034]
          +c53-xlong      up 3-00:00:00     34   idle c53-[001-034]
          +c53-xlarge*    up 1-00:00:00     34   idle c53-[001-034]
          +
          +

          由于处理器核数不同,任务只能在具有相同核数的节点间并行,由此对不同集群的节点按照队列进行了分组,队列前缀分别为51-/52-/53-,其对应每个节点上的核数分别为24/28/32。通过sinfo命令可以看到当前集群上的队列及其使用情况。

          +

          现编号为c51-00x 的节点需通过队列c51-smallc51-mediumc51-large等等来进行提交,并设置核数为24的倍数(24,48,72等)以确定节点数,--ntasks-per-node=24使用节点的数量通过总核数除以每个节点核数的值来确定。 同理,若想使用编号为c52-00x 的节点,则队列名为c52-smallc52-mediumc52-large等等,核数为28的倍数(28,56,84等),--ntasks-per-node=28;若想使用编号为c53-00x 的节点,则队列名为c53-smallc53-mediumc53-large等等,核数为32的倍数(32,64,96等),--ntasks-per-node=32
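例如(示意),若想在 52 集群上使用 2 个节点共 56 核,提交脚本的头部可以写成:

#SBATCH -p c52-large
#SBATCH -N 2
#SBATCH --ntasks-per-node=28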

          +
          +

          GPU(Tesla V100节点)和胖节点仍按照51进行编组,编号分别为c51-g001c51-s001

          +
          +

          目前每个队列仍限制同时运行4个任务、队列内使用至多12个节点。新增全局任务限制,即三组队列总共使用核数不超过556,若超出此限制则任务会处于PEND状态。

          +

          提交脚本示例放在/data/share/base/scripts里面,软件统一安装在/data/share/apps下,目前安装了VASP 5.4.4、CP2K 7.1、Gaussian 16、Lammps、Gromacs、DeePMD-kit等。
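可以用下面的命令查看示例脚本,并用 Environment Modules 的标准命令 module avail 列出当前可加载的软件环境:

ls /data/share/base/scripts
module avail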

          +

          这里对作业提交脚本举例说明如下:

          +
          cp2k.slurm
          #!/bin/bash
          +
          +#SBATCH -J cp2k
          +#SBATCH -o cp2k.out.%j
          +#SBATCH -e cp2k.err.%j
          +#SBATCH -p c53-large
          +#SBATCH -N 2
          +#SBATCH --ntasks-per-node=32
          +#SBATCH --exclusive
          +#SBATCH --mem=8G
          +
          +# add modulefiles
          +ulimit -s unlimited
          +module load intel/17.5.239 mpi/intel/2017.5.239
          +module load gcc/5.5.0
          +module load cp2k/7.1
          +
+mpiexec.hydra cp2k.popt input.inp >& output_$SLURM_JOB_ID
          +
          +

          其中:

          +
            +
          • #SBATCH -p 队列名 用于指定作业提交的队列。
          • +
          • #SBATCH -t hh:mm:ss 用于指定任务所需的时间(Walltime),若运行超过hh:mm:ss,则任务会被管理系统杀死。对于不同类型的队列,Walltime上限有所不同。对small队列要求在20分钟以内,对medium要求在12小时以内,对largexlarge要求在24小时以内,对long要求在48小时以内,对xlong则在72小时以内。
          • +
          • #SBATCH --job-name=cp2k指定作业名称,一般按照实际计算来取名以方便查看完成情况。
          • +
          • #SBATCH -N 2指定作业提交的总节点数,#SBATCH --ntasks-per-node=32指定提交队列的每个节点上的CPU总核数,例如这里在53队列中选取2个节点进行并行计算,即使用了64个核。
          • +
          • #SBATCH --mem=8G 指定作业所需消耗的内存,例如这里限制作业占用内存为 8 GB。
          • +
          • module load xxx用于加载环境,保持/data/share/base/scripts示例中的写法即可。
          • +
          • mpiexec.hydra cp2k.popt input.inp >& output_$SLURM_JOB_ID是实际执行任务的命令。
          • +
          +

          可以看到,任务提交脚本实际上是一个具有特殊注释格式的 bash 脚本。因此在加载环境后,可以使用 bash 语法来设置环境变量、控制任务运行的路径、进行批处理等等。
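例如(示意,其中的目录名 run1、run2 为假设值),可以在同一个提交脚本中用 bash 循环依次处理多个计算目录:

for d in run1 run2; do
    cd $d
    mpiexec.hydra cp2k.popt input.inp >& output_$SLURM_JOB_ID
    cd ..
done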

          +
          +

          注意

          +

          对于 fat 和 gpu* 队列,请务必指定作业所需的内存不小于自己任务实际需要的内存大小!否则可能会因为其他人任务尚未结束而迟迟无法运行,或因为申请了过多的内存资源而使得其他任务无法提交。为了公平使用,请一定遵守上述规则。管理员会根据用户反馈直接清除未遵循规范的任务。

          +

          相应地,其他计算队列通常是一个任务独占若干节点,因而不需要设置内存,保持默认(占满)即可。考虑到 DFT 计算本身对内存有一定需求,请务必注意自己的设置符合实际情况,以免浪费宝贵的时间和机时。

          +
          +

          作业提交

          +

          若用户已经准备好相应计算的输入和提交脚本,则可以对任务进行提交。例如提交脚本文件名为cp2k.slurm,则提交命令为:

          +
          sbatch cp2k.slurm
          +
          +

          若提交成功,可以看到以下提示:

          +
          Submitted batch job 1360
          +
          +

          表示任务已经成功提交到节点上,编号为 1360

          +

          任务提交后,可以通过squeue -u <username>命令查看自己任务的运行情况,<username>即自己的用户名。

          +
JOBID  PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+ 1227 c52-medium      CoX     user  R    1:15:06      2 c52-[023,032]
+ 1133 c51-medium     Cu13     user  R    2:02:13      2 c51-[024,031]
+ 1360  c53-large     cp2k     user PD       0:00      2 (Resources)
          +
          +

其中 JOBID 即为任务编号,ST 表示状态,R 即为正在运行,而 PD 表示正在排队,可能是因为空余节点数不足。可以看到,1227 和 1133 号任务正在运行,分别使用了 2 个节点,刚刚提交的 1360 号任务则在排队。

          +

          如果想要停止或取消已经提交的任务,则使用命令:

          +
scancel 1360
          +
          +

scancel 命令执行后默认没有输出。一段时间后,通过 squeue 查看,若该任务已不在列表中,则说明任务已被取消。

          +
          +

          链接

          + +
          +

          登出集群

          +

          请在命令行中输入:

          +
          exit
          +
          +

          回车即可退出登陆。

          +

          校外访问

          +

          若为在校师生,可使用学校提供的 SSLVPN 登陆集群。

          +

          详细配置方法请参阅:SSLVPN 使用说明-厦门大学VPN专题网站

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/cluster_usage/conda/index.html b/wiki/cluster_usage/conda/index.html new file mode 100644 index 00000000..3657a010 --- /dev/null +++ b/wiki/cluster_usage/conda/index.html @@ -0,0 +1,2851 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Anaconda - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          Anaconda 使用指南

          +

          初始化设定

          +

          登录 HPC

          +

          module load miniconda/3
          +conda init bash
          +
这会自动修改你的 ~/.bashrc 文件。之后登出 HPC,再次登录。

          +

          打开你的~/.condarc文件

          +
          vim ~/.condarc
          +
          +

将以下内容放入你的~/.condarc里,并按照注释修改相应路径

          +
          channels:
          +  - defaults
          +ssl_verify: true
          +envs_dirs:
          +#modify, this is where your environment file in
          +  - /data/ch2_101/conda/env
          +pkgs_dirs:
          +#modify, this is where your package file in
          +  - /data/ch2_101/conda/pkgs
          +
          +

          退出文件

          +

          通过以下命令确认你的环境 +

          conda env list
          +

          +

          创建你自己的环境

          +

          创建你自己的环境,之后都启用自己的环境进行使用

          +
          conda create -n <your env name> python
          +
          +conda activate <your env name>
          +
          +

          修饰你的终端前缀

          +

用上述方法创建的环境会在你的终端命令行前加上一长串路径,例如:

          +
          (/Users/USER_NAME/research/data-science/PROJECT_NAME/envs) $
          +
          +

          可以用以下命令缩短前缀

          +
          conda config --set env_prompt '({name})'
          +
          +

          此命令会修改你的.condarc文件
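执行该命令后,你的 ~/.condarc 中会增加类似下面的一行(示意):

```
env_prompt: ({name})
```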

          +

          参考文献

          +

没有了,愉快地用conda进行数据处理吧!

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/cluster_usage/gpu_usage/index.html b/wiki/cluster_usage/gpu_usage/index.html new file mode 100644 index 00000000..3a79d10b --- /dev/null +++ b/wiki/cluster_usage/gpu_usage/index.html @@ -0,0 +1,3114 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 使用集群上的 GPU - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          使用集群上的 GPU

          +

          GPU 队列概况

          +

GPU节点调度采用Slurm调度系统进行管理。用户使用时,请在 191 节点上提交、管理任务。

          +

          目前课题组GPU有6个节点:

          +
            +
          • c51-g001: 节点上有 4 张 Tesla V100,采用队列名gpu1进行提交。
          • c51-g002: 节点上有 4 张 A100,采用队列名gpu2进行提交。其中有2张卡(0、1)为完整的A100 80G PCIe,另外两张(2、3)已各自切分为 7 个 MIG 计算实例。
          • c51-m001、c51-m002、c51-m003、c51-m004: 每个节点上有 8 张 2080 Ti,采用队列名gpu3进行提交。
          +

          6个节点均可联系管理员开通使用权限。

          +

          队列选择指导(供参考)

          +

          以下部分是一个简单的指导,仅供参考,请根据自己实际需要选用。

          +

          gpu3 队列上有32张 Nvidia 2080Ti 显卡,每张卡提供约11 GB显存。基本上平时对百原子级别 DeePMD 势函数的训练乃至MD都可以完成,故平时DP-GEN流程使用该队列进行计算即可。

          +

          gpu1 队列配置有4张 Nvidia Tesla V100 显卡,每张卡提供约32 GB显存,且提供完整的双精度加速支持,故适用于更大体系 DeePMD 的训练。对模型进行长训练时,也可使用此队列。同时,因其完整的双精度计算支持以及NV-LINK的引入,一些支持GPU加速的计算软件(如VASP 6.1+)也推荐在此节点上提交,并可用于多卡并行。

          +

          gpu2 队列配置有4张 Nvidia A100 显卡。其中两张卡为完整卡,每张提供80 GB显存,且提供完整的双精度加速支持,适用于需要更大体系 DeePMD 训练以及更大体系的GPU加速计算,也适用于更大Batch数据集的加载,例如需要内存较多的 NLP 模型。但注意A100未提供NV-LINK和NV-Switch,故请勿进行多卡并行计算,以免效率达不到预期。

          +

          同时,A100引入了MIG功能,可以将卡拆分为2-7个小型的GPU实例 (GI),每个GI可以独立运行GPU计算任务,速度相比在同一张卡上直接同时运行多个任务的情况下有明显提升,但相比单任务速度下降50%以内。目前,该节点配置为2张完整的80 GB卡(0-1号卡)和2张切分为7个GI的卡(2-3号卡),每个GI的速度大致与2080Ti相近且略强,故可以用于DP-GEN训练。通过Slurm调度系统可以控制使用完整的 A100 还是切分后的小卡。

          +

          提交任务至 GPU

          +
          +

由于嘉庚超算的投用,Slurm系统将得到广泛应用,且后者可以完整支持MIG等GPU硬件新特性,故此前计划逐步切换至Slurm调度。目前GPU的调度已经全部切换至Slurm。关于Slurm介绍的部分将在全面迁移后独立成一篇文档。

          +
          +

gpu1 和 gpu3 队列

          +

          常规使用gpu1队列和gpu3队列的示例脚本放在/data/share/base/scripts下,举例如下:

          +
          deepmd.sub
          #!/bin/bash
          +#SBATCH -N 1
          +#SBATCH --ntasks-per-node=1
          +#SBATCH -t 96:00:00
          +#SBATCH --partition=gpu3
          +#SBATCH --gres=gpu:1
          +#SBATCH --mem=8G
          +
          +# add modulefiles
          +module add deepmd/2.0-cuda11.3
          +
          +dp train input.json 1>> train.log 2>> train.err
          +dp freeze  1>> train.log 2>> train.log
          +
          +

          其中 -N 1表示使用1个节点,--ntasks-per-node=1 表示每个节点上使用1个CPU核,--partition=gpu3即表示提交任务到gpu3队列上,--gres=gpu:1即分配其中的1张卡给任务。gpu3中每个节点有8张2080Ti卡,因而上述命令组合起来即表示分配1个节点上的1个CPU核以及1张2080Ti卡用于计算。

          +

          若需要使用其他队列,只需将--partition的参数修改为对应的队列,即gpu1gpu3

          +
          +

          关于内存用量的说明

          +

注意 --mem=8G 表示内存消耗为 8 GB。目前集群设置了默认值,即在不写的情况下,每分配 1 张GPU卡可使用 16 GB 物理内存。若需要更多物理内存,请手动指定该值为更大的数值,以免任务由于超出默认内存限制或因为其他任务挤占、资源不足而被系统因 OOM (Out of Memory) 原因强制退出。例如:--mem=24G 即可分配每个任务使用24GB内存。目前 gpu1 和 gpu2 队列每个节点的总内存为 256 GB,gpu3 队列每个节点总内存为 128 GB,因而注意如果每个任务分配内存过大,可能会导致卡空置但没有足够的内存分配的问题。因此请务必根据自己的实际需要指定该参数以保证公平使用!

          +
          +

          gpu2队列

          +

          gpu2队列提供了2张完整A100 80G卡供大任务使用,以及2张分卡共14个实例供相对比较零散的任务使用。

          +

完整卡使用时,可参照 gpu1 和 gpu3 队列,将--gres的参数改为gpu:a100:1即可,其中1仍表示分配1张卡。
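例如(仅为示意,其余设置可参照上文 deepmd.sub,内存请按实际需要指定):

```bash
#SBATCH --partition=gpu2
#SBATCH --gres=gpu:a100:1
#SBATCH --mem=32G
```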

          +

          MIG 实例(即俗称的A100分卡、小卡)的使用脚本放在/data/share/base/scripts下,举例如下:

          +
          cp2k_mig.sub
          #!/bin/bash -l
          +#SBATCH --parsable
          +#SBATCH --nodes 1
          +#SBATCH --ntasks-per-node 1
          +#SBATCH --partition gpu2
          +#SBATCH --gres=gpu:1g.10gb:1
          +#SBATCH --time=96:00:00
          +#SBATCH --mem=4G
          +
          +module load deepmd/2.1
          +cp2k.ssmp -i input.inp 1>>output 2>>err.log
          +
          +

          其中--gres=gpu:1g.10gb:1即表示分配 1 个MIG实例给任务使用。

          +
          +

          注意

          +

          A100分配GPU的命令需要写明硬件类型,否则Slurm在分配资源时无法区分。

          +
          +

          关于Slurm作业管理系统

          +

          若用户已经准备好相应计算的输入和提交脚本,则可以对任务进行提交。例如提交脚本文件名为deepmd.sub,则提交命令为:

          +
          sbatch deepmd.sub
          +
          +

          若提交成功,可以看到以下提示:

          +
          Submitted batch job 630
          +
          +

          表示任务已经成功提交到节点上,编号为 630

          +

          任务提交后,可以通过squeue命令查看集群上任务的运行情况。

          +
          JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
          +  620      gpu2    100-2     user  R    5:47:46      1 c51-g002
          +  619      gpu2    150-2     user  R    7:19:49      1 c51-g002
          +  630      gpu3 deepmd.s    ypliu PD       0:00      1 (Resources)
          +  623      gpu3 deepmd.s     user  R       0:22      1 c51-m001
          +  625      gpu3    ec_dp     user  R      55:28      1 c51-m001
          +  610      gpu3 deepmd.s     user  R   19:04:13      1 c51-m003
          +  609      gpu3 deepmd.s     user  R   19:05:22      1 c51-m002
          +
          +

其中 JOBID 即为任务编号,ST 表示状态,R 即为正在运行,而 PD 表示正在排队,可能是因为空余卡数不足。可以看到,623 号任务正在运行,刚刚提交的 630 号任务则在排队。

          +

          如果想要停止或取消已经提交的任务,则使用命令:

          +
          scancel 630
          +
          +

          一段时间后,该任务即被杀死。

          +

          Slurm 与 LSF 命令对照表如下所示:

          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| LSF | Slurm | 描述 |
| --- | --- | --- |
| bsub < script_file | sbatch script_file | 提交任务,作业脚本名为 script_file |
| bkill 123 | scancel 123 | 取消任务,作业 ID 号为 123 |
| bjobs | squeue | 浏览当前用户提交的作业任务 |
| bqueues | sinfo 或 sinfo -s | 浏览当前节点和队列信息,'-s'命令表示简易输出 |
| bhosts | sinfo -N | 查看当前节点列表 |
| bjobs -l 123 | scontrol show job 123 | 查看 123 号任务的详细信息。若不指定任务号则输出当前所有任务信息 |
| bqueues -l queue | scontrol show partition queue | 查看队列名为 queue 的队列的详细信息。若不指定队列则返回当前所有可用队列的详细信息 |
| bhosts -l g001 | scontrol show node g001 | 查看节点名为 g001 的节点状态。若不指定节点则返回当前所有节点信息 |
| bpeek 123 | speek 123 * | 查看 123 号任务的标准输出 |
          +
          +

* speek 命令不是 Slurm 标准命令,仅适用于原 Metal 集群。
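在标准 Slurm 上,一个常见的替代做法(仅为示意)是直接查看作业的输出文件;未用 -o 指定时,默认文件名为 slurm-<作业号>.out,例如对上文 630 号任务:

```bash
tail -f slurm-630.out
```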

          +
          +

          作业提交脚本对照表:

          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| LSF | Slurm | 描述 |
| --- | --- | --- |
| #BSUB | #SBATCH | 前缀 |
| -q queue_name | -p queue_name 或 --partition=queue_name | 指定队列名称 |
| -n 64 | -n 64 | 指定使用 64 个核 |
| -- | -N 1 | 使用 1 个节点 |
| -W [hh:mm:ss] | -t [minutes] 或 -t [days-hh:mm:ss] | 指定最大使用时间 |
| -o file_name | -o file_name | 指定标准输出文件名 |
| -e file_name | -e file_name | 指定报错信息文件名 |
| -J job_name | --job-name=job_name | 作业名 |
| -M 128 | --mem-per-cpu=128M 或 --mem-per-cpu=1G | 限制内存使用量 |
| -R "span[ptile=16]" | --ntasks-per-node=16 | 指定每个节点使用的核数 |
          +

          通过 scontrol 命令可以方便地修改任务的队列、截止时间、排除节点等信息,使用方法类似于 LSF 系统的 bmod 命令,但使用上更加简洁。
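例如(仅为示意,630 为上文示例中的任务号),可以这样修改一个排队中任务的队列和时间限制:

```bash
scontrol update JobId=630 Partition=gpu1 TimeLimit=24:00:00
```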

          +
          +

          链接

          +

          更多使用教程和说明请参考:Slurm作业调度系统使用指南

          +
          +

          dpgen 提交 GPU 任务参数设置

          +

          请参考DP-GEN使用说明

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/cluster_usage/jupyter/index.html b/wiki/cluster_usage/jupyter/index.html new file mode 100644 index 00000000..a58284e1 --- /dev/null +++ b/wiki/cluster_usage/jupyter/index.html @@ -0,0 +1,2957 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Jupyter 系列使用指南 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          Jupyter 系列使用指南

          +

          Jupyter Notebook

          +

          转化 Jupyter Notebook 为 Python 脚本

          +
jupyter nbconvert --to python *.ipynb
          +
          +

          远程打开 Jupyter Notebook

          +

          Jupyter notebook 可以通过本地电脑的浏览器打开。但如果你想在远程电脑上(如集群)打开,怎么办?远程打开 Jupyter notebook 的好处就是可以不用下载数据,直接远程处理。但是由于集群并没有显示/输出装置,你需要通过其他方法来打开 Jupyter notebook。

          +

          远程打开的方法

          +
            +
          • 使用如下命令在集群上打开你的 jupyter notebook:
          • +
          +
          # 在远程集群运行如下命令
          +# <port number>由你自己决定,比如 9898
          +jupyter notebook --no-browser --port=<port number>
          +
          +
            +
          • 在你的本地的电脑使用如下命令:
          • +
          +
          # <port number>由你自己决定,比如 9898,是跟远程打开的端口对应。
          +ssh -N -f -L localhost:8888:localhost:<port number> username@your_remote_host_name
          +
          +

          ssh 登陆的命令可以查看这里进行简化.

          +
            +
          • 打开本地电脑的浏览器,输入localhost:8888。然后会弹出输入 password 或 token 的页面,你可以在集群上输入如下命令来查看:
          • +
          +
          #type this command in your remote computer, you can find token to enter remote notebook
          +jupyter notebook list
          +
          +

          利用空节点运行 Jupyter

          +

由于登陆节点资源十分有限,实际上不太建议在登陆节点上直接运行 Jupyter 服务。这里提供一种可能的方案:通过 LSF 启动 Jupyter 服务,实现远程调用。

          +

          首先在自己希望作为 Jupyter 根目录的文件夹下编辑提交脚本(例如jupyter.lsf):

          +
          #!/bin/bash
          +#BSUB -q fat
          +#BSUB -J deepmd
          +#BSUB -o %J.stdout
          +#BSUB -e %J.stderr
          +
          +# add modulefiles
          +source ~/.bashrc
          +
          +cat /etc/hosts | grep c51-s001
          +jupyter-lab --ip=0.0.0.0 --port=<port>
          +
          +

如上即使用了胖节点的 1 个核来开启任务,同时在任务输出中显示出胖节点所在的 IP 地址,请在提交后稍等片刻后通过 bpeek 命令查看(可能一开始是空的,稍后会有输出):

          +
          123.45.67.89 c51-s001 c51-s001.hpc.xmu
          +
          +

          假设输出为 123.45.67.89,则可在本地运行命令:

          +
          ssh -L <local_port>:123.45.67.89:<port> <username>@<ip_of_cluster>
          +
          +

          其中<local_port>为本地任意端口,<port>与作业脚本保持一致,其余部分与平时登陆命令保持一致,注意不要漏掉-p xxxx。此部分的说明请参考SSH 使用说明

          +

          在本地浏览器输入:localhost:<local_port>即可访问这一远程 Jupyter 服务。

          +

          此途径最大的好处是可以在 GPU 集群上运行,从而可以直接调用 GPU 卡。但请注意,需要在脚本中指定所需的 GPU 卡数。

          +
          #!/bin/bash
          +#BSUB -q gpu
          +#BSUB -W 24:00
          +#BSUB -J deepmd
          +#BSUB -o %J.stdout
          +#BSUB -e %J.stderr
          +#BSUB -n 8
          +#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=yes"
          +#BSUB -R "span[ptile=8]"
          +
          +# add modulefiles
          +source ~/.bashrc
          +
          +#dp train input.json 1>> train.log 2>> train.err
          +cat /etc/hosts
          +jupyter-lab --ip=0.0.0.0 --port=8888
          +
          +

如果想在 Jupyter 中调用虚拟环境(如myenv),需要在对应虚拟环境中安装 ipykernel,并注册该环境的 kernel。[参考资料]

          +
          # 激活虚拟环境 myenv
          +# 也可用 conda activate myenv
          +source activate myenv
          +conda install pip
          +conda install ipykernel
          +# 实际使用中需替换 myenv 和 "Python (myenv)"
          +python -m ipykernel install --user --name myenv --display-name "Python (myenv)"
          +
          +

          Jupyter Lab

          +

          Under construction

          +

          Jupyter Hub

          +

          Under construction

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/cluster_usage/notification_for_hpc/index.html b/wiki/cluster_usage/notification_for_hpc/index.html new file mode 100644 index 00000000..a1d3cd39 --- /dev/null +++ b/wiki/cluster_usage/notification_for_hpc/index.html @@ -0,0 +1,2891 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + 计算任务的推送 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          计算任务的推送

          +

          推送至钉钉

          +

对于计算节点可访问互联网的服务器,可以设置任务完成后推送到钉钉。效果如下:

          +

(推送效果截图)

          +

          申请钉钉机器人

          +
          +

          申请步骤需要在PC端钉钉操作

          +
          +

          首先需要申请一个钉钉机器人,并拿到 webhook ,步骤如下:

          +
            +
          1. 点击头像→机器人管理
          2. 添加 自定义 机器人
          +

(添加自定义机器人操作截图)

          +
            +
          3. 群组选择工作通知,安全设置中添加关键词 Job, info
          +

(安全设置操作截图)

          +
            +
          4. 复制机器人的 webhook
          +

          服务器上设置推送

          +

          在服务器提交脚本中加上 module load notification ,并在最后加上 dingtalk_notification WEBHOOK 即可实现推送至钉钉。示例脚本如下:

          +
          #!/bin/bash
          +#BSUB -J "test"
          +#BSUB -o %J.txt
          +#BSUB -e %J.txt
          +#BSUB -q large
          +#BSUB -n 2
          +#BSUB -W 12:00
          +
          +module load notification
          +
          +MPIRUN_COMMAND  # your command to run software
          +
          +dingtalk_notification https://oapi.dingtalk.com/robot/send?access_token=xxxx  # replace it by your webhook
          +
          +

其中 notification 的示例如下,请自行编辑modulefile文件(可参考此处),并替换 <YOUR_HPC_NAME> 和 <YOUR_IP> 的值:

          +
          #%Module
          +
          +set-alias    dingtalk_notification {
          +    curl $1 \
          +        -H 'Content-Type: application/json' \
          +        -d '{
          +            "msgtype": "markdown",
          +            "markdown": {
          +                "title":"Job Info",
          +                "text": "'"Job Info \\n
          +\\n
          +Job $LSB_JOBID is finished in **<YOUR_HPC_NAME>**! \\n
          +\\n
          +> Server ip: **<YOUR_IP>** \\n
          +> \\n
          +> Job id: **$LSB_JOBID** \\n
          +> \\n
          +> Job name: **$LSB_JOBNAME** \\n
          +> \\n
          +> Job queue: **$LSB_QUEUE** \\n
          +> \\n
          +> Job workdir: **$LS_EXECCWD** \\n"'"
          +            }
          +        }'
          +}
          +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/cluster_usage/pack_backup/index.html b/wiki/cluster_usage/pack_backup/index.html new file mode 100644 index 00000000..54648b3b --- /dev/null +++ b/wiki/cluster_usage/pack_backup/index.html @@ -0,0 +1,3094 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + 文件整理与备份攻略 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          文件整理与备份攻略

          +
          +

          本文将持续更新

          +
          +

          在实际科研工作中,我们时常会遇到文件整理的问题。比如利用 CP2K 通过 Constrained MD 计算 Potential Mean Force 时,会产生大量 Lagrange Multiplier 文件;使用 DP-GEN 训练势函数时,由于 Model Deviation 过程中会生成大量结构文件,每一轮每条轨迹都会有很多,便会使文件总数快速上升;同时,计算过程中会产生波函数、cube等文件,可能会占据大量的空间。如何高效整理这些文件也成为一个难题。

          +

          本文将给出一些具体的攻略,供大家参考使用。

          +

          一些常识

          +

Linux中文件储存上限与储存空间大小和储存数目都有关系。因此我们不仅需要关注储存空间(文件)大小,还需要关注文件的数目。例如DP-GEN产生的大量碎片化文件和cp2k的potential mean force 产生的大量Lagrange Multiplier文件都会影响文件储存。
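除了文件大小,还可以用 df 查看文件系统的 inode(文件数)用量(仅为示意):

```bash
# 查看当前目录所在文件系统的 inode 使用情况
df -ih .
```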

          +

          以下命令可以查看自己的文件/目录大小

          +
          # 查看当前目录的大小
          +du -sch .
          +# 查看某文件的大小
          +du -sch file_name
          +# 查看该目录下所有文件/目录大小
          +du -sch ./*
          +
          +

          文件打包与压缩

          +

          tar命令

          +

          对于结构复杂的目录,可以使用 tar 命令进行打包或压缩。

          +

          tar 命令支持压缩或解压缩,其使用方法大致如下:

          +
          打包与压缩: tar [-j|-z] [cv] [-f 创建的档名] filename... 
          +解压缩: tar [-j|-z] [xv] [-f 创建的档名] [-C 目录]
          +========
          +选项与参数:
          +-c  :创建打包文件,可搭配 -v 来察看过程中被打包的文件(夹)名(filename)
          +-x  :解打包或解压缩的功能,可以搭配 -C (大写) 在特定目录完成解压缩的操作。
          +-c, -x 不可同时使用,还请注意!
          +-z  :通过 gzip 进行压缩/解压缩:此时压缩文件名最好为 *.tar.gz  *.tgz
          +-j  :通过 bzip2 进行压缩/解压缩:此时压缩文件名最好为 *.tar.bz2
          +-v  :在压缩/解压缩的过程中,将正在处理的文件名显示出来。
          +-f filename:-f 后面要立刻接要被处理的档名!建议 -f 单独写一个选项罗!
          +-p  :保留备份数据的原本权限与属性,常用于备份(-c)重要的配置档。
          +--exclude=FILE:在压缩的过程中,不将文件名为 FILE 的文件打包。
          +--remove-files:打包后删除被打包的文件。
          +
          +

          比如希望在 /some/place 下打包/data/userX 下的所有文件(夹)为 userX_backup.tgz,便可以使用命令

          +
          tar -zcvf /some/place/userX_backup.tgz /data/userX/*
          +
          +

          使用tar命令将文件打包为 *.tgz 等压缩档的优点是可以保留软连接,适合用于结构复杂的目录,例如 DP-GEN 项目目录。

          +

          如果用户希望将打包后的文件直接删除以节省空间,则可以使用:

          +
          tar -zcvf /some/place/userX_backup.tgz /data/userX/* --remove-files
          +
          +

          这样在创建压缩档后,程序会删除 /data/userX/*

          +

          对于 DP-GEN 等文件数量非常多的任务,直接存储会占据大量的 inode 空间,从而出现明明磁盘空间够却无法写入的尴尬局面,因此可以对已经跑过的 iteration 进行主动打包以减少文件数量,节约 inode 数。同时如果需要进行磁盘级的备份、迁移,处理小文件的速度会大幅放缓,而处理大文件的读写速度反而可以达到硬盘读写速率或网络传输速率的上限。
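例如(仅为示意,假设 DP-GEN 各轮迭代目录名为 iter.000000、iter.000001 等),可以把已经完成的迭代逐个打包并删除原目录:

```bash
tar -zcvf iter.000000.tgz iter.000000 --remove-files
```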

          +

          文件删除

          +

          find命令

          +

          对于大量具有相似命名的文件,可以利用 find 命令进行索引和删除。

          +

          例如对当前目录下(./),想要查找 AuO 任务产生的所有的 cube 文件(假设命名均为AuO_*.cube),可以采用如下命令进行展示:

          +
find ./ -name "AuO_*.cube"
          +
          +

          如果想要将这些文件直接删除,还可以加入 -delete 命令:

          +
find ./ -name "AuO_*.cube" -delete
          +
          +
          +

          注意

          +

          注意 find 命令后的选项为 - 而非 --

          +
          +

          rsync命令

          +

          rsync 作为常用的文件传输与同步命令,实际上也可以用于将某一文件夹清空,对于有大量小文件的情况相比传统的 rm 命令会快很多。例如想要清空 /some/path 目录,可以先运行:

          +
          mkdir /tmp/empty
          +
          +

          然后运行:

          +

rsync --delete -rlptD /tmp/empty/ /some/path

          +

          常用软件的文件处理

          +

          cp2k

          +

          cp2k在计算中会产生大(量)文件,以下文件可以删除。

          +
            +
          • 波函数文件(.wfn):波函数文件储存DFT计算的轨道信息,常用于restart。但.wfn文件往往随着体系增大而迅速增大。如无必要(重要波函数),算完之后即可将其删除。
          • +
          • 网格文件(.cube):这类文件储存着三维空间信息,例如:静电势、分子轨道。大小中等(10MB左右)。按普通AIMD长度(60000步),每50步输出一个会有1200个.cube文件。累积下来空间不容小觑。如分析完毕,即可删除,或用压缩工具压缩,或用专业的bqbtool压缩。
          • +
          • 轨迹文件(.xyz): 分子动力学/结构优化输出的轨迹文件,包含普通轨迹文件,速度文件,力文件。普通AIMD长度输出的三个文件基本在1至2GB左右。如使用机器学习势函数会储存大量轨迹数据,常常会达到100GB左右。如分析完毕,即可删除,或用压缩工具压缩,或用专业的bqbtool压缩。
          • +
          • 态密度文件(.pdos): 体系的态密度文件,大小偏小,约为1至2MB左右一个文件,但一个体系会输出多个文件,因此差不多在6至8MB,与网格文件类似,大量积累后会产生空间占用。如分析完毕,即可删除,或用压缩工具压缩。
          • +
          +
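例如,对于上面提到的波函数文件,分析完毕后可以结合前文的 find 命令批量删除(仅为示意,假设文件均以 .wfn 结尾;删除前可先去掉 -delete 确认列表):

```bash
find . -name "*.wfn" -delete
```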

          压缩工具: bqbtool

          +

cp2k轨迹文件/网格文件如舍不得丢掉,可以采用bqbtool进行压缩。bqbtool是专门针对此类型文件开发的压缩工具,压缩率达到10%。

          +

          个人安装参考bqb手册,5152服务器上已经安装,使用命令如下:

          +
          # 压缩轨迹文件
          +bqbtool compress postraj xxx.xyz xxx.bqb
          +# 压缩cube文件, 可提前把cube文件按顺序cat到一个文件中。
          +bqbtool compress voltraj xxx.cube xxx.bqb
          +
          +

如果想将某个文件夹及其子文件夹中的所有文件都压缩,可以结合使用 find 和 bqbtool compress:

          +
          find . -name '*.cube' | while read line; do
          +  bqbtool compress voltraj $line $line.bqb
          +done
          +
          +

          批量压缩效果:

          +
          .
          +├── bqbtool.log
          +├── run.sh
          +├── test.000
          +│   ├── cp2k-TOTAL_DENSITY-1_0.cube
          +│   ├── cp2k-TOTAL_DENSITY-1_0.cube.bqb
          +│   ├── cp2k-v_hartree-1_0.cube
          +│   ├── cp2k-v_hartree-1_0.cube.bqb
          +│   └── test.002
          +│       ├── cp2k-TOTAL_DENSITY-1_0.cube
          +│       ├── cp2k-TOTAL_DENSITY-1_0.cube.bqb
          +│       ├── cp2k-v_hartree-1_0.cube
          +│       └── cp2k-v_hartree-1_0.cube.bqb
          +├── test.001
          +│   ├── cp2k-TOTAL_DENSITY-1_0.cube
          +│   ├── cp2k-TOTAL_DENSITY-1_0.cube.bqb
          +│   ├── cp2k-v_hartree-1_0.cube
          +│   └── cp2k-v_hartree-1_0.cube.bqb
          +└── test.002
          +    ├── cp2k-TOTAL_DENSITY-1_0.cube
          +    ├── cp2k-TOTAL_DENSITY-1_0.cube.bqb
          +    ├── cp2k-v_hartree-1_0.cube
          +    └── cp2k-v_hartree-1_0.cube.bqb
          +
          +

          集群打包要点

          +

          本次5152将进行迁移,文件的数目将会影响迁移速度。因此尽可能地把原本目录压缩成几个文件,可以提升迁移速度,例如:

          +
          -rw-rw-r-- 1 jyhu jyhu 668M Jan 15 17:58 1-CoO.tar.gz
          +-rw-rw-r-- 1 jyhu jyhu 559M Jan 15 15:40 2-ZIS.tar.gz
          +-rw-rw-r-- 1 jyhu jyhu 2.6G Jan 15 17:07 3-LiS@TiO2.tar.gz
          +-rw-rw-r-- 1 jyhu jyhu 2.8G Jan 15 15:53 4-Graphene.tar.gz
          +-rw-rw-r-- 1 jyhu jyhu 3.4M Jan 16 11:05 NEB.tar.gz
          +-rw-rw-r-- 1 jyhu jyhu 324M Jan 16 11:07 pKa-jqli.tar.gz
          +
          +

          打包方法可以采用tar压缩,参照以上部分

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/cluster_usage/ssh_note/index.html b/wiki/cluster_usage/ssh_note/index.html new file mode 100644 index 00000000..248c1231 --- /dev/null +++ b/wiki/cluster_usage/ssh_note/index.html @@ -0,0 +1,3430 @@ + + + + + + + + + + + + + + + + + + + + + + + + + SSH 与 SCP 使用入门 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          SSH 使用入门

          +

此入门仅介绍一些作者认为必要且实用的功能,完善的帮助手册可以通过命令 man ssh_config 和 man ssh 查看。

          +

          为便于说明,假设需要登陆的远程服务器IP为123.45.67.89,SSH 端口为 7696,用户名为kmr。

          +

          学习目标

          +
            +
          • 使用SSH登录服务器/集群
          • +
          • 使用SCP进行文件传输
          • +
          +

          可选目标

          +
            +
          • 使用ssh config文件进行SSH登录管理
          • +
          • 学会用跳板机进行SSH登录
          • +
          +

          创建密钥对

          +
          +

          Warning

          +

          新人必学

          +
          +

          ssh 是用来安全进行登录远程电脑的命令。使用后,有两种选择来验证登录

          +
            +
          1. 使用密码
          2. 使用密钥
          +

          第一种方法已经为大众所熟知,但是不安全。因此我们采用密钥进行登录。

          +

          使用如下命令生成密钥:

          +
          ssh-keygen
          +
          +

          根据终端的提示进行操作(实际上你可能只需要不停按enter键)。默认情况下你会在~/.ssh目录中得到id_rsaid_rsa.pub文件,他们分别是私钥和公钥。创建好了之后请把id_rsa.pub文件给服务器管理员。

          +
          +

          Warning

          +

          私钥是登录集群的钥匙,请务必保管好这个文件,防止自己的电脑被入侵

          +
          +

          使用SSH登录服务器

          +
          +

          Warning

          +

          新人必学

          +
          +

          若远程服务器已经放置了公钥,则可输入以下命令登陆服务器:

          +
          ssh -i <path to your private key> -p <port number> username@server_ip
          +
          +

          示例,假设密钥在本地的路径为 ~/.ssh/id_rsa

          +
          ssh -i ~/.ssh/id_rsa -p 7696 kmr@123.45.67.89
          +
          +

          -p 后指定的是端口。若省略不写,默认通过 22 端口与远程服务器进行连接。

          +

          默认情况下,id_rsaid_rsa.pub文件位于~/.ssh下,则-i 选项及其对应参数可以省略。

          +
          +

          Warning

          +

          计算集群只允许在校园网特定IP范围内直接登陆使用。

          +
          +

          使用SCP进行文件传输

          +

          SCP实际上是SSH+FTP的结合,如果配置好了SSH命令,可以使用以下命令来进行文件传输:

          +
          scp myserver:remote_file local_directory_path
          +scp local_directory_path myserver:remote_file
          +
          +

          比如需要把上文提到的远程服务器的文件/data/home/kmr/file传到本地 /some/local/place 目录下, +则使用命令:

          +
          scp -P 7696 kmr@123.45.67.89:/data/home/kmr/file /some/local/place
          +
          +

          从本地上传到远程则交换顺序:

          +
          scp -P 7696 /some/local/place/file kmr@123.45.67.89:/data/home/kmr/
          +
          +
          +

          Warning

          +

          注意 scp 指定端口的命令是大写的-P 而非小写的 -p,这是不同于 ssh 命令的一点。

          +
          +

          若所传文件为目录,则需要使用-r选项:

          +
          scp -r -P 7696 kmr@123.45.67.89:/data/home/kmr/directory /some/local/place
          +
          +
          +

          Tip

          +

          注意 scp 本身可以看作一个特殊的 ssh 命令,因此无论从远程还是本地传输文件都应在本地运行,只是参数的顺序决定了传输的方向。如果两个参数均写本地路径,则与 cp 命令的行为相近,但不可均写远程路径。

          +
          +

          zsh下 (比如macOS >=10.15版本的默认终端),不能直接使用通配符*批量传输文件,需要将包含*的字符串用单引号括起。
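例如(仅为示意,沿用上文的服务器与路径),在 zsh 下批量下载远程的 log 文件时需要这样写:

```bash
scp -P 7696 'kmr@123.45.67.89:/data/home/kmr/*.log' /some/local/place
```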

          +

可选:通过配置 config 优雅地使用 SSH

          +

          为了避免每次都输入一大串命令。 请使用vim编辑如下文件:

          +
          vim ~/.ssh/config
          +
          +
          +

          注意

          +

请注意修改该文件权限为 600 (即 -rw------- ),否则可能导致无法并行。类似地,如发现自己的任务交上去只能在一个节点上运行,也请检查 ~/.ssh 下各个文件的权限,注意只有公钥是 644 权限。

          +
          +

          我们可以把SSH命令的参数都储存在这个文件里。以下是语法示例文件:

          +
          Host myserver # (1)!
          +    User kmr # (2)!
          +    Hostname 123.45.67.89 # (3)!
          +    Port 7696 # (4)!
          +    IdentityFile ~/.ssh/id_rsa # (5)!
          +
          +
            +
          1. nickname for your cluster
          2. replacement of username in ssh
          3. replacement of cluster_ip in ssh
          4. replacement of -p <port number> in ssh
          5. replacement of -i <path to your private key> in ssh
          +

          保存上述文件,你就可以简单地使用如下命令登录:

          +
          ssh myserver
          +
          +

          此命令即相当于上文提到的ssh -i ~/.ssh/id_rsa -p 7696 kmr@123.45.67.89

          +

          加深理解

          +
          +

          Warning

          +

          该视频仅帮助理解SSH原理以及基本操作,视频中含有本笔记未要求的内容,但是大部分普通用户没有权限执行。

          +
          + + +

          在本地电脑显示服务器图像 (X11 Forwarding)

          +

使用终端登录服务器后没办法直接显示图形界面。有时候在服务器上使用画图软件时,可以通过X11 Forwarding功能将图像显示到本地电脑上。只需要在命令里加上-X或者-Y。

          +
          ssh -X -i <para.> -p <para.> username@server_ip
          +
          +

          在config文件中配置X11 Forwarding*

          +
          Host <hostnickname>
          +    ForwardX11 yes  # (1)!
          +    ForwardX11Trusted yes # (2)!
          +
          +
            +
          1. equivalent to -X
          2. equivalent to -Y (This option valid only if your ForwardX11 is set to yes!)
          +

          使用跳板机/代理进行远程登录

          +

          本组的服务器限制了登录的ip,即你只能在学校ip范围内进行登录。同时由于登录需要密钥,而密钥保存在办公室电脑上,因此登录就必须使用办公室电脑。因此,人不在办公室时就很难登录服务器。

          +

          解决方法就是,先在校园网环境下通过SSH登录到办公室电脑(仅自己的用户名密码即可),再通过办公室电脑登录到服务器。此时办公室电脑是作为*跳板*来使用的:

          +
          ssh username@proxy
          +ssh -p port_number -i key_file username@cluster191
          +
          +

          在config文件中配置跳板机*

          +

          打开 ~/.ssh/config,复制以下代码(注意去掉注释,否则可能会报错):

          +
          # nickname you set for your office computer
          +Host proxy
          +    # username you set for login
          +    User robinzhuang
          +    # IP address of your office computer, change the xxx to real one!
          +    Hostname 10.24.3.xxx
          +
          +# nickname for your cluster
          +Host myserver
          +    # username you set, change to real one!
          +    User kmr
          +    # IP for cluster, change to real one!
          +    Hostname 123.45.67.89
          +    # the key file location used in login 
          +    IdentityFile ~/.ssh/id_rsa
          +    # specify the port number, replace xx with real port!
          +    Port xx
          +    # use Host proxy as Jump Server
          +    ProxyJump proxy
          +
          +

可以发现,这其实是在直接登录课题组服务器配置基础上的一些改进:我们首先配置了从本地电脑登录到跳板机的设置,然后再配置经由跳板机登录服务器的设置。

          +
          +

          如果上述的 ProxyJump proxy 不起作用,可将其替换为 ProxyCommand ssh -o 'ForwardAgent yes' proxy "ssh-add ~/.ssh/id_rsa && nc %h %p" ,请用你的密钥的路径来代替上述的 ~/.ssh/id_rsa 部分。

          +
          +

完成以上配置后可以使用如下命令直接登录:

          +
          ssh myserver
          +
          +

          在config文件中转发端口*

          +

          有时,我们在服务器上部署了 jupyter notebook 等服务时,需要把远程的某个端口 (以下例子中为 8888 端口) 转发到本地的某个端口 (以下例子中为 9999 端口),使得在本地访问 https://localhost:9999 时也能访问远程的 jupyter notebook 服务。

          +
          Host myserver # (1)!
          +    User kmr # (2)!
          +    Hostname 123.45.67.89 # (3)!
          +    LocalForward 9999 localhost:8888 # (4)!
          +
          +
            +
          1. 为你的服务器取一个任意的昵称
          2. 请修改为真实的用户名
          3. 请修改为真实的IP
          4. localhost:8888 是相对于远端服务器的真实IP和端口,若不是 localhost,请替换为对应的IP和端口号
          +

          在使用跳板机的情况下使用X11 Forwarding

          +

          只需要在 ~/.ssh/config 中加入

          +
          Host * # (1)!
          +    ForwardX11Trusted yes
          +
          +
            +
          1. 对任意配置生效
          2. +
          +

          一份示例配置文件(config)

          +

          以下为 ~/.ssh/config 的一个示例,需要时可在这份示例文件上进行修改,必要修改的部分已在注释中标出,General config 可以直接照抄。注意须删掉文件中所有的注释。

          +
          # General config
          +Host *
          +    ForwardX11Trusted yes
          +    ForwardAgent yes
          +    AddKeysToAgent yes
          +    ServerAliveInterval 60
          +    ControlPersist yes
          +    ControlMaster auto
          +    ControlPath /tmp/%r@%h:%p
          +
          +# set proxy
          +# nickname for your Jump Server
          +Host nickname_proxy
          +    # IP for Jump Server (REPlACE IT!)
          +    Hostname 10.24.3.255
          +    # your username for Jump Server (REPlACE IT!)
          +    User chenglab
          +
          +# Host1 and host2
          +# nickname for your cluster
          +Host nickname_1
          +    Hostname 123.45.67.89
          +    # your host1 username (REPlACE IT!)
          +    User kmr1 
          +    LocalForward 8051 localhost:8888
          +# nickname for your cluster
          +Host nickname_2
          +    Hostname 123.45.67.90
          +    # your host2 username (REPlACE IT!)
          +    User kmr2
          +    LocalForward 8052 localhost:8888
          +
          +# set same parts for host1 and host2
          +# use your own nickname
          +Host nickname_1 nickname_2
          +    Port 7696
          +    # use your own nickname
          +    ProxyJump nickname_proxy
          +
          +

超纲的部分*

          +

          在配置文件中实现类似选择语句的功能,以下例子描述的是当网络环境随时变更时,连接同一台机器可能会需要访问不同IP时所采取的策略。

          +
          +

          此例子不建议初学者直接复制粘贴,其中需要替换的部分请根据具体应用场景来自行斟酌

          +
          +
          Host elements
          +    User chenglab
          +    Match host elements exec "nc -G 4 -z 10.24.3.144 %p"
          +        # Private net IP
          +        Hostname 10.24.3.144
          +    Match host elements
          +        # Public net IP
          +        Hostname xxx.xxx.xxx.xxx
          +        Port 6000
          +
          +

          常见问题

          +

SSH private key permissions are too open

          +

          The error message is

          +
          @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
          +@ WARNING: UNPROTECTED PRIVATE KEY FILE! @
          +@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
          +Permissions 0644 for '/home/me/.ssh/id_rsa_targethost' are too open.
          +It is recommended that your private key files are NOT accessible by others.
          +This private key will be ignored.
          +bad permissions: ignore key: /home/me/.ssh/id_rsa_targethost
          +
          +

This arises from the permissions of your private key (id_rsa) file.

          +

Use command ls -l to see your id_rsa permissions. If it is not -rw-------, you should change it to that! Use the following command:

          +
          chmod 600 ~/.ssh/id_rsa
          +
          +

          No xauth data; using fake authentication data for X11 forwarding.

          +

          The error message is

          +
          Warning: No xauth data; using fake authentication data for X11 forwarding.
          +
          +

This is because ssh can't find your xauth location. Usually, the location is /opt/X11/bin/xauth. Add this to your ssh config file:

          +
          Host *
          +    XAuthLocation /opt/X11/bin/xauth
          +
          +

          Remote host identification has changed!

          +

When the remote host has just been repaired, an error like the one below might be raised.

          +
          @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
          +@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!     @
          +@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
          +IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
          +Someone could be eavesdropping on you right now (man-in-the-middle attack)!
          +It is also possible that a host key has just been changed.
          +The fingerprint for the RSA key sent by the remote host is
          +51:82:00:1c:7e:6f:ac:ac:de:f1:53:08:1c:7d:55:68.
          +Please contact your system administrator.
          +Add correct host key in /Users/isaacalves/.ssh/known_hosts to get rid of this message.
          +Offending RSA key in /Users/isaacalves/.ssh/known_hosts:12
          +RSA host key for 104.131.16.158 has changed and you have requested strict checking.
          +Host key verification failed.
          +
          +

Take it easy, and just edit your /Users/isaacalves/.ssh/known_hosts file to remove the line with the IP address of the remote host in question. For some users, such as Ubuntu or Debian users, running ssh-keygen -R xxx might be necessary, as suggested in the error info.
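A minimal sketch (using the example IP from the message above): the offending entry can also be removed with ssh-keygen instead of editing the file by hand:

```bash
ssh-keygen -R 104.131.16.158
```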

          +

However, if no repair or upgrade has happened, a man-in-the-middle attack might be occurring. Just stop logging in and contact the cluster manager at once to make sure.

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/cluster_usage/tensorboard/index.html b/wiki/cluster_usage/tensorboard/index.html new file mode 100644 index 00000000..980ccb02 --- /dev/null +++ b/wiki/cluster_usage/tensorboard/index.html @@ -0,0 +1,2889 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 集群 TensorBoard 使用指南 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          集群 TensorBoard 使用指南

          +

          需求

          +

          基于 DeepMD-kit 或者 TensorFlow 的代码调试及训练数据实时监控。

          +

          TensorBoard 是什么

          +

          DeepMD-kit 官方教程

          +

          用法

          +

          DP 官方教程给出了在本地运行程序时的可视化,如果在服务器上运行,我们需要进行端口转发。

          +

          在计算节点上运行程序(推荐)

          +
          +

          以在 gpu3 队列运行 DeepMD-kit 训练程序为例,其他程序可对应替换。

          +
          +
            +
          1. 通过 lsf 脚本提交程序到计算节点 +
            #!/bin/bash
            +#BSUB -q gpu3
            +#BSUB -W 24:00
            +#BSUB -J type_map_0
            +#BSUB -o %J.stdout
            +#BSUB -e %J.stderr
            +#BSUB -n 4
            +#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=yes"
            +#BSUB -R "span[ptile=32]"
            +
            +# add modulefiles
            +module add deepmd/2.0-cuda11.3
            +
            +dp train input.json 1>> train.log 2>> train.err &
            +tensorboard --logdir=log --port=6006
            +
             如果想要实时查看训练过程中的数据,训练指令和 tensorboard 的运行指令需要同时运行,故采用 & 将训练指令放到后台运行。
            +

            --logdir指定 tensorboard 的 event 文件所在路径(在 json 文件中指定)。

            +

            --port指定 tensorboard 在服务器上运行的端口号(缺省默认为 6006)。

            +
            +
          2. +
          2. 查看计算节点 ip 地址:做法类似 jupyter notebook 教程,在登录节点命令行输入下面指令(将 c51-m002 替换为实际运行的节点)。
            cat /etc/hosts | grep c51-m002
            +
          4. +
          5. 将端口转发到本地 +
            ssh -NfL localhost:<local_port>:<remote_ip>:<port> <username>@<ip_of_cluster>
            +
          6. +
          +

          在登录节点上运行程序

          +
          +

          Warning

          +

          仅供短时间测试!长时间运行请使用计算节点!!

          +
          +

          在命令行中运行训练和 tensorboard 程序后,在本地执行

          +
          ssh -NfL <local_port>:localhost:<port> <username>@<ip_of_cluster>
          +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/cluster_usage/vscode_remote/index.html b/wiki/cluster_usage/vscode_remote/index.html new file mode 100644 index 00000000..13093d8a --- /dev/null +++ b/wiki/cluster_usage/vscode_remote/index.html @@ -0,0 +1,2838 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 在非登陆节点上使用VSCode - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          在非登陆节点上使用VSCode

          +

VSCode 通过 Remote 插件提供了强大的远程编辑能力,使得用户可以在远程获得接近本地的编辑体验。VSCode Server 原生基于 Node 和 Electron 技术,有着较高的内存等需求,但鉴于目前登陆节点的资源日渐捉襟见肘,这里提出一个方案,可以让用户较为方便地使用非登陆节点的资源开启 VSCode Remote。

          +

本文假设用户已经阅读过SSH 与 SCP 使用入门,特别是有关 config 文件的部分,并知晓集群的基础概况和调度系统使用方法。如未阅读,请先参阅上述两篇文字。

          +

          MacOS 和 Linux 用户

          +

          由于笔者目前使用的设备是 MacOS 操作系统(Linux情况类似),这里给出较完整的图文说明。

          +

首先用自己最顺手的方式打开并编辑 ~/.ssh/config 文件,参照这里的说明,增加登陆节点的配置信息:

          +
          .ssh/config
          Host <nickname>
          +    HostName <ip_of_zeus>
          +    Port <port>
          +    User <username>
          +
          +

请将<ip_of_zeus>、<port>、<username>替换为实际的IP地址、端口号以及用户名。<nickname>请替换为任意自己喜欢的昵称,但请注意,不要使用c5*的形式!否则会和下文冲突。

          +

          然后增加以下几行:

          +
          .ssh/config
          Host c5*
          +    User <username>
          +    ProxyCommand ssh -o ForwardAgent=yes <username>@<nickname> "nc -w 120 %h %p"
          +
          +

这里采用 c5* 作为前缀是为了在登陆节点上快速登陆到对应的计算节点。Zeus 集群上所有计算节点(含CPU、GPU、胖节点)均以 c5* 开头,具有类似 c5*-* 的形式,故这里采用如此写法。请根据集群的情况对应调整。

          +

          然后在集群上,运行以下命令,开启一个虚拟终端:

          +
          user@login01$ bsub -q fat -n 1 -Is bash
          +Job <xxx> is submitted to queue <fat>.
          +<<Waiting for dispatch ...>>
          +<<Starting on c51-s001>>
          +user@c51-s001:~$ 
          +
          +

注意 bsub 的附加命令请参照集群使用说明,Walltime 及队列情况仍需要参照相应说明进行设置。

          +

          然后,请打开一个VSCode窗口,并点击左下角的按钮,选择“Connect to Host”:

          +

          +

          输入虚拟终端所在的节点,例如上文中的输出 c51-s001:

          +

          +

          如果提示输入密码等信息,请按回车以继续

          +

          +

          等待安装 VSCode Server 即可。若以前曾配置过远程,会自动调用之前的服务。

          +

          Windows 用户

          +

          对于Windows用户,由于笔者暂时没有Windows设备,请参照此教程尝试,思路比较接近。本文即参考了该文章的实现。

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/deprecated/deepmd-kit_installation_104/index.html b/wiki/deprecated/deepmd-kit_installation_104/index.html new file mode 100644 index 00000000..e1e1d918 --- /dev/null +++ b/wiki/deprecated/deepmd-kit_installation_104/index.html @@ -0,0 +1,3008 @@ + + + + + + + + + + + + + + + + + + + + + DeePMD-kit安装教程1.0 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          + +
          + + + +
          +
          + + + + + + + +

          DeepMD-kit安装实战:服务器篇(旧版)

          +
          +

          本部分写于2019年11月,基于国重服务器环境进行安装,适用于Tensorflow版本低于1.13的情形。目前针对更高版本已经有新版教程,请移步。

          +
          +

          准备工作

          +

          首先准备必要的依赖。

          +

          检查可用的模块,并加载必要的模块:

          +
          module avail
          +module add cuda/9.2
          +module add gcc/4.9.4
          +# gcc>=4.9 required by dp_ipi, or it won't be built.
          +# For gcc-8.3 could not be supported, here we select a lower version.
          +
          +

          本教程推荐使用conda虚拟环境安装,故:

          +
          module add miniconda/3.7
          +conda create -n deepmd python=3.6
          +conda activate deepmd
          +
          +

          下载并编译nccl:

          +
          cd /some/nccl_download_path
          +git clone https://github.com/NVIDIA/nccl.git -b v2.4.8-1
          +cd nccl
          +make -j src.build --prefix="/some/nccl_install_path" NVCC_GENCODE="-gencode=arch=compute_70,code=sm_70"
          +
          +

          由于国重GPU节点不能直接联网,故使用登陆节点进行编译效率较高,但由于缺少必要的依赖libcuda.solibcuda.so.1(包含在GPU驱动中,登陆节点未安装),故采用stubs所带的库编译,并手动加入环境变量。

          +
          ln -s /share/cuda/9.2/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
          +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/share/cuda/9.2/lib64/stubs:/some/local/path
          +
          +

          在某个想要的路径下将tensorflow-1.12版本的源代码下载好:

          +
          cd /some/workspace
          +git clone https://github.com/tensorflow/tensorflow tensorflow -b r1.12 --depth=1
          +
          +

          下载好bazel安装包并运行,将所需的环境加入环境变量:

          +
          wget https://github.com/bazelbuild/bazel/releases/download/0.15.0/bazel-0.15.0-installer-linux-x86_64.sh
          +chmod +x bazel-0.15.0-installer-linux-x86_64.sh
          +./bazel-0.15.0-installer-linux-x86_64.sh --user
          +export PATH="$PATH:$HOME/bin"
          +
          +

          tensorflow编译

          +

          首先配置tensorflow的编译选项:

          +
          cd tensorflow/
          +./configure
          +
          +

          根据需要,提供正确的组件和路径:

          +
          Please specify the location of python. [Default is /xxx]:
          +
          +Found possible Python library paths:
          +  /xxx/python3.6/site-packages
          +Please input the desired Python library path to use. Default is [xxx/python3.6/site-packages]
          +
          +Do you wish to build TensorFlow with Apache Ignite support? [Y/n]: Y
          +
          +Do you wish to build TensorFlow with XLA JIT support? [Y/n]: Y
          +
          +Do you wish to build TensorFlow with OpenCL SYCL support? [y/N]: N
          +
          +Do you wish to build TensorFlow with ROCm support? [y/N]: N
          +
          +Do you wish to build TensorFlow with CUDA support? [y/N]: Y
          +
          +Please specify the CUDA SDK version you want to use. [Leave empty to default to CUDA 9.0]: 9.2
          +
          +Please specify the location where CUDA 9.2 toolkit is installed. Refer to README.md for more details. [Default is /usr/local/cuda]: /share/cuda/9.2
          +
          +Please specify the cuDNN version you want to use. [Leave empty to default to cuDNN 7]: 7
          +
          +Please specify the location where cuDNN 7 library is installed. Refer to README.md for more details. [Default is /usr/local/cuda-10.0]: /share/cuda/9.2
          +
          +Do you wish to build TensorFlow with TensorRT support? [y/N]: N
          +
          +Please specify the NCCL version you want to use. If NCCL 2.2 is not installed, then you can use version 1.3 that can be fetched automatically but it may have worse performance with multiple GPUs. [Default is 2.2]: 2.4.8
          +
          +Please specify the location where NCCL 2 library is installed. Refer to README.md for more details. [Default is /usr/local/cuda]:/some/nccl_install_path
          +
          +Please note that each additional compute capability significantly increases your build time and binary size. [Default is: 3.5,7.0] 6.1
          +
          +Do you want to use clang as CUDA compiler? [y/N]: N
          +
          +Please specify which gcc should be used by nvcc as the host compiler. [Default is /xxx/gcc]: 
          +
          +Do you wish to build TensorFlow with MPI support? [y/N]: N
          +
          +Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native]: -march=native
          +
          +Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]:N
          +
          +
          +

          注意

          +
            +
          1. CUDA需要写清是9.2版本,否则可能会找不到小版本的依赖库。
          2. +
          +

          然后运行编译,但由于该节点的版本较为非主流,建议自行编译tf的python interface以避免兼容性问题。

          +
          bazel build --config=opt --copt=-msse4.2 --copt=-mavx --copt=-mavx2 --copt=-mfma --local_resources 2048,.5,1.0 --config=cuda //tensorflow/tools/pip_package:build_pip_package --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}"
          +
          +

          由于目前节点支持主要的几种优化参数,故可以全部打开以加快运行速度。

          +

          为了他人的正常使用,建议主动限制在登陆节点上编译时的内存和CPU资源使用量。--local_resources 2048,.5,1.0这个设定可能有些保守,但可以保证不会占用过多资源(实测需要11个小时左右,但全程内存占用不超过2G且只使用了一个线程,若觉得太慢可以把中间的参数适当调高)。

          +
            +
          1. nccl和gcc的路径对应前面加载和编译的环境。
          2. +
          +
          +

          编译如果通过,则再运行以下命令编译c++ interface(实际上一步已经编译好所需的大部分依赖,这一步只是再封装成c++库):

          +
          bazel build -c opt --copt=-msse4.2 --copt=-mavx --copt=-mavx2 --copt=-mfma --config=cuda --verbose_failures //tensorflow:libtensorflow_cc.so --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}"
          +
          +

          这里可以先将tensorflow-python安装好。

          +
          ./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
          +pip install /tmp/tensorflow_pkg/tensorflow-version-tags.whl # depends on your version info
          +
          +

          然后,将进行一系列依赖的编译和安装。以防万一,建议首先安装依赖,方便起见,这里使用conda安装。

          +
          conda install automake autoconf libtool
          +
          +

          将cmake切换到新版本:

          +
          module add cmake/3.7.3
          +
          +

          指定tf-cc的目标路径为变量$tensorflow_root,并依次运行以下命令:

          +
          mkdir -p $tensorflow_root
          +mkdir /tmp/proto
          +sed -i 's;PROTOBUF_URL=.*;PROTOBUF_URL=\"https://mirror.bazel.build/github.com/google/protobuf/archive/v3.6.0.tar.gz\";g' tensorflow/contrib/makefile/download_dependencies.sh
          +tensorflow/contrib/makefile/download_dependencies.sh
          +cd tensorflow/contrib/makefile/downloads/protobuf/
          +./autogen.sh
          +./configure --prefix=/tmp/proto/
          +make
          +make install
          +mkdir /tmp/eigen
          +cd ../eigen
          +mkdir build_dir
          +cd build_dir
          +cmake -DCMAKE_INSTALL_PREFIX=/tmp/eigen/ ../
          +make install
          +mkdir /tmp/nsync
          +cd ../../nsync
          +mkdir build_dir
          +cd build_dir
          +cmake -DCMAKE_INSTALL_PREFIX=/tmp/nsync/ ../
          +make
          +make install
          +cd ../../absl
          +bazel build
          +mkdir -p $tensorflow_root/include/
          +rsync -avzh --include '*/' --include '*.h' --exclude '*' absl $tensorflow_root/include/
          +cd ../../../../..
          +mkdir $tensorflow_root/lib
          +cp bazel-bin/tensorflow/libtensorflow_cc.so $tensorflow_root/lib/
          +cp bazel-bin/tensorflow/libtensorflow_framework.so $tensorflow_root/lib/
          +cp /tmp/proto/lib/libprotobuf.a $tensorflow_root/lib/
          +cp /tmp/nsync/lib64/libnsync.a $tensorflow_root/lib/
          +mkdir -p $tensorflow_root/include/tensorflow
          +cp -r bazel-genfiles/* $tensorflow_root/include/
          +cp -r tensorflow/cc $tensorflow_root/include/tensorflow
          +cp -r tensorflow/core $tensorflow_root/include/tensorflow
          +cp -r third_party $tensorflow_root/include
          +cp -r /tmp/proto/include/* $tensorflow_root/include
          +cp -r /tmp/eigen/include/eigen3/* $tensorflow_root/include
          +cp -r /tmp/nsync/include/*h $tensorflow_root/include
          +cd $tensorflow_root/include
          +find . -name "*.cc" -type f -delete
          +rm -fr /tmp/proto /tmp/eigen /tmp/nsync
          +
          +

          以完成c++部分的编译。

          +

          DeePMD-kit安装(1.0+)

          +

          首先下载DeePMD-kit,并进入:

          +
          cd /some/workspace
          +git clone https://github.com/deepmodeling/deepmd-kit.git
          +cd deepmd-kit
          +deepmd_source_dir=`pwd`
          +
          +

          如果前面使用了module load gcc/4.9.4提供的高版本gcc(以4.9.4为例)进行编译,需要手动载入对应的环境变量供cmake识别正确的gcc版本。

          +
          export CC=/share/apps/gcc/4.9.4/bin/gcc
          +export CXX=/share/apps/gcc/4.9.4/bin/g++
          +
          +

          然后安装dpmd-py

          +
          pip install .
          +
          +
          +

          如果遇到no module named 'google'或者no module named 'absl'的报错,则可能存在版本bug,需要重新安装依赖。

          +
+pip install --upgrade protobuf
+pip install --upgrade absl-py
          +
          +
          +

          指定DeePMD-kit的目标路径为变量$deepmd_root,随后编译DeePMD-kit C++ Interface:

          +
          cd $deepmd_source_dir/source
          +mkdir build 
          +cd build
          +cmake -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
          +make
          +make install
          +
          +

          如果运行:

          +
          $ ls $deepmd_root/bin
          +dp_ipi
          +$ ls $deepmd_root/lib
          +libdeepmd_ipi.so  libdeepmd_op.so  libdeepmd.so
          +
          +

          得到上述的结果,说明编译成功(若cmake时检测到的是4.8或更低版本的gcc,则编译结果会缺少dp_ipilibdeepmd_ipi.so)。

          +

          LAMMPS DeePMD-kit 接口编译

          +

          首先编译接口:

          +
          cd $deepmd_source_dir/source/build
          +make lammps
          +
          +

          然后下载好稳定版的lammps,并解压:

          +
          cd /some/workspace
          +wget -c https://lammps.sandia.gov/tars/lammps-stable.tar.gz
          +tar xf lammps-stable.tar.gz
          +
          +

          若解压后得到目录名为lammps-31Mar17,则

          +
          cd lammps-31Mar17/src/
          +cp -r $deepmd_source_dir/source/build/USER-DEEPMD .
          +
          +

          打开deepmd module,并根据需要添加所需的模块,以fep为例:

          +
          make yes-user-deepmd
          +make yes-user-fep 
          +
          +

          载入需要的mpi库,并编译:

          +
          module load intel/15.0.6
          +module load mpi/intel/5.0.3.049
          +make mpi -j4
          +
          +

          得到可执行文件:lmp_mpi

          +

可将该文件复制到 $PATH 所包含的路径下,之后直接输入文件名即可运行。
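
+

例如(假设 ~/bin 已包含在 $PATH 中,若没有请先自行加入):

+
mkdir -p ~/bin
+cp lmp_mpi ~/bin/
+which lmp_mpi
+
+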

          +

          注意

          +

完成上述安装步骤后,若需要立即测试运行,必须将 stubs 提供的 libcuda.so 和 libcuda.so.1 从环境变量中移除,否则运行时会报错。

          +

也可以直接退出登录并重新登录,以避免出现该问题。
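
+

如果不想退出重新登录,也可以手动将 stubs 所在路径从 LD_LIBRARY_PATH 中移除(以下命令假设 stubs 目录名中含有 stubs 字样,仅作参考):

+
echo $LD_LIBRARY_PATH | tr ':' '\n' | grep stubs    # 先确认 stubs 路径是否仍在环境变量中
+export LD_LIBRARY_PATH=`echo $LD_LIBRARY_PATH | tr ':' '\n' | grep -v stubs | paste -sd: -`
+
+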

          +

          一些可能的坑

          +

          尽管上述过程应该已经绕过了大部分的坑,但仍不能保证100%安装运行成功。这里记录几种可能的报错的处理方案。

          +

          需要conda init

          +

这种情况已知可能发生在通过 lsf 脚本提交任务时,来源于 conda activate deepmd 这一步。具体原因尚不清楚,解决方案是手动载入所需的环境变量,推荐的做法是利用用户自定义 module。

          +

          首先,启用自定义module:

          +
          module load use.own
          +
          +

          然后运行module avail查看自定义脚本的文件位置,输出结果可能如下:

          +
          ----------- /share/base/modulefiles/compilers -----------
          +............
          +
          +------------- /usr/share/Modules/modulefiles ------------
          +dot         module-git  module-info modules     null        use.own
          +
          +------------ /data/home/someuser/privatemodules ------------
          +null
          +
          +

          显示/data/home/someuser/privatemodules是当前用户自定义模块的存放位置。

          +

          则创建路径,并进入:

          +
          mkdir -p /data/home/someuser/privatemodules
          +cd /data/home/someuser/privatemodules
          +
          +

          然后根据想要的名字创建文件或目录。

          +

          比如想以deepmd为模块名,且希望提供不同版本的支持,则可以:

          +
          mkdir deepmd
+cd deepmd
+vim 1.0
          +
          +

          编辑1.0文件:

          +
#%Module1.0
+
+# Help message
          +proc ModulesHelp { } {
          +    set nameversion [module-info name]
          +    regsub "/.*" $nameversion "" name
          +    regsub ".*/" $nameversion "" version
          +    puts stderr "\tLoads the $version $name environment"
          +}
          +
          +# Set variables
          +set nameversion [module-info name]
          +regsub "/.*" $nameversion "" name
          +regsub ".*/" $nameversion "" version
          +
+module-whatis "Miniconda, an alternative distribution for python 3.6"
          +
          +# set environment variables
          +
          +    setenv        PYTHONROOT    /data/home/someuser/anaconda3/envs/deepmd
          +
          +    prepend-path    PATH        $env(PYTHONROOT)/bin
          +    prepend-path    MANPATH        $env(PYTHONROOT)/share/man
          +    prepend-path    PYTHONPATH    $env(PYTHONROOT)/lib/python3.6/site-packages
          +
          +

          注意修改PYTHONROOT为正确的虚拟环境路径(可用conda env list查看),并且python3.6也要与实际使用的python版本一致。

          +

          这样,便可以通过module调用所需的虚拟环境。

          +
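
可以先在登录节点上验证模块是否生效(输出路径应与上文设置的虚拟环境一致,具体以实际为准):

+
module load use.own
+module load deepmd/1.0
+which python
+# 预期输出类似 /data/home/someuser/anaconda3/envs/deepmd/bin/python
+
+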

          使用时提交脚本可以这样写:

          +
          module load use.own
          +module load deepmd/1.0
          +
          + + + + + + + + + + + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/deprecated/lsf_usage/index.html b/wiki/deprecated/lsf_usage/index.html new file mode 100644 index 00000000..04757adb --- /dev/null +++ b/wiki/deprecated/lsf_usage/index.html @@ -0,0 +1,3144 @@ + + + + + + + + + + + + + + + + + + + + + Lsf usage - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          Lsf usage

          + +

          LSF 作业管理系统(新版,作为归档)

          +

          目前 LSF Suite 10.2 已在 Zeus 上部署测试,该版本包含了新版的 LSF 作业管理系统,因而可对 GPU 提供支持。

          +

          输入 lsload -gpu 即可查看集群当前可以使用的 GPU 数目:

          +
          HOST_NAME       status  ngpus  gpu_shared_avg_mut  gpu_shared_avg_ut  ngpus_physical
          +c51-g001            ok      4                  1%                 6%               4
          +c51-g002            ok      4                  0%                 6%               4
          +c51-m002            ok      8                  9%                68%               8
          +c51-m004            ok      8                 12%                89%               8
          +c51-m003            ok      8                  9%                72%               8
          +c51-m001            ok      8                 15%                72%               8
          +
          +

          输入 lsload -gpuload 则可以对 GPU 负载情况进行统计:

          +
          HOST_NAME       gpuid   gpu_model   gpu_mode  gpu_temp   gpu_ecc  gpu_ut  gpu_mut gpu_mtotal gpu_mused   gpu_pstate   gpu_status   gpu_error
          +c51-g001            0 TeslaV100_S        0.0       48C       0.0     26%       7%      31.7G      1.1G            0           ok           -
          +                    1 TeslaV100_S        0.0       38C       0.0      0%       0%      31.7G        0M            0           ok           -
          +                    2 TeslaV100_S        0.0       36C       0.0      0%       0%      31.7G        0M            0           ok           -
          +                    3 TeslaV100_S        0.0       37C       0.0      0%       0%      31.7G        0M            0           ok           -
          +c51-g002            0 A10080GBPCI        0.0       44C       0.0      8%       0%      79.3G     1020M            0           ok           -
          +                    1 A10080GBPCI        0.0       49C       0.0      8%       0%      79.3G     1020M            0           ok           -
          +                    2 A10080GBPCI        0.0       47C       0.0      8%       0%      79.3G     1020M            0           ok           -
          +                    3 A10080GBPCI        0.0       44C       0.0      0%       0%      79.3G      434M            0           ok           -
          +c51-m004            0 NVIDIAGeFor        0.0       64C       0.0     91%      13%      10.7G      1.5G            2           ok           -
          +                    1 NVIDIAGeFor        0.0       65C       0.0     89%      13%      10.7G      1.5G            2           ok           -
          +                    2 NVIDIAGeFor        0.0       60C       0.0     88%      12%      10.7G      1.5G            2           ok           -
          +                    3 NVIDIAGeFor        0.0       66C       0.0     89%      13%      10.7G      1.5G            2           ok           -
          +                    4 NVIDIAGeFor        0.0       69C       0.0     87%      13%      10.7G      1.5G            2           ok           -
          +                    5 NVIDIAGeFor        0.0       70C       0.0     91%      13%      10.7G      1.5G            2           ok           -
          +                    6 NVIDIAGeFor        0.0       65C       0.0     85%      12%      10.7G      1.5G            2           ok           -
          +                    7 NVIDIAGeFor        0.0       64C       0.0     87%      12%      10.7G      1.5G            2           ok           -
          +c51-m002            0 NVIDIAGeFor        0.0       58C       0.0     92%      14%      10.7G      1.5G            2           ok           -
          +                    1 NVIDIAGeFor        0.0       65C       0.0     86%      13%      10.7G      2.5G            2           ok           -
          +                    2 NVIDIAGeFor        0.0       56C       0.0     86%      13%      10.7G      2.5G            2           ok           -
          +                    3 NVIDIAGeFor        0.0       55C       0.0     63%       8%      10.7G      768M            2           ok           -
          +                    4 NVIDIAGeFor        0.0       51C       0.0     63%       8%      10.7G      768M            2           ok           -
          +                    5 NVIDIAGeFor        0.0       52C       0.0     68%       9%      10.7G      768M            2           ok           -
          +                    6 NVIDIAGeFor        0.0       54C       0.0     66%       8%      10.7G      768M            2           ok           -
          +                    7 NVIDIAGeFor        0.0       52C       0.0     39%       2%      10.7G      1.5G            2           ok           -
          +c51-m003            0 NVIDIAGeFor        0.0       55C       0.0     62%       8%      10.7G      768M            2           ok           -
          +                    1 NVIDIAGeFor        0.0       53C       0.0     64%       8%      10.7G      768M            2           ok           -
          +                    2 NVIDIAGeFor        0.0       51C       0.0     64%       8%      10.7G      768M            2           ok           -
          +                    3 NVIDIAGeFor        0.0       55C       0.0     62%       8%      10.7G      768M            2           ok           -
          +                    4 NVIDIAGeFor        0.0       55C       0.0     79%      10%      10.7G      768M            2           ok           -
          +                    5 NVIDIAGeFor        0.0       57C       0.0     79%      10%      10.7G      768M            2           ok           -
          +                    6 NVIDIAGeFor        0.0       54C       0.0     80%      10%      10.7G      768M            2           ok           -
          +                    7 NVIDIAGeFor        0.0       55C       0.0     80%      10%      10.7G      768M            2           ok           -
          +c51-m001            0 NVIDIAGeFor        0.0       62C       0.0     98%      21%      10.7G      1.7G            2           ok           -
          +                    1 NVIDIAGeFor        0.0       64C       0.0     98%      22%      10.7G      1.7G            2           ok           -
          +                    2 NVIDIAGeFor        0.0       58C       0.0     97%      21%      10.7G      1.7G            2           ok           -
          +                    3 NVIDIAGeFor        0.0       66C       0.0     93%      19%      10.7G      894M            2           ok           -
          +                    4 NVIDIAGeFor        0.0       69C       0.0     98%      21%      10.7G      1.7G            2           ok           -
          +                    5 NVIDIAGeFor        0.0       62C       0.0     98%      21%      10.7G      1.7G            2           ok           -
          +                    6 NVIDIAGeFor        0.0       25C       0.0      0%       0%      10.7G        0M            8           ok           -
          +                    7 NVIDIAGeFor        0.0       35C       0.0      0%       0%      10.7G        0M            8           ok           -
          +
          +

使用 GPU 资源时,需要对提交脚本进行相应修改,用 -gpu 选项申请 GPU 资源。

          +
          #!/bin/bash
          +
          +#BSUB -q gpu
          +#BSUB -W 24:00
          +#BSUB -J train
          +#BSUB -o %J.stdout
          +#BSUB -e %J.stderr
          +#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=no"
          +#BSUB -n 4
          +#BSUB -R "span[ptile=32]"
          +
          +module add deepmd/2.0b1
          +lmp_mpi -i input.lammps 1>> model_devi.log 2>> model_devi.log
          +
          +

其中 num=1 表示申请1张GPU卡,j_exclusive=no 表示允许和其他任务共存,-n 表示申请的CPU核数。使用V100时,请设置为不超过8的整数;使用A100时,请设置为不超过8的整数,若为开启MIG的情况,请参考A100拆分实例使用说明;使用2080Ti时,请设置为不超过4的整数。否则均可能会出现资源空闲但无法使用的情况。如希望独占一张卡,请使用 j_exclusive=yes。

          +
          +

          链接

          +

          使用新版 LSF 提交任务,不需要引入检测脚本或CUDA_VISIBLE_DEVICES控制使用的GPU。

          +
          +

          绑定CPU

          +

          对某些作业类型(如VASP),当使用GPU时,会希望CPU进程尽可能独立运行在所分配的核上,此时可通过设置 CPU 亲和性来控制所用的核数。示例如下:

          +
          #!/bin/bash
          +#
          +#BSUB -q gpu
          +#BSUB -W 12:00
          +#BSUB -J vasp
          +#BSUB -o vasp.%J.stdout
          +#BSUB -e vasp.%J.stderr
          +#BSUB -n 8
          +#BSUB -R "span[ptile=32]"
          +#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=no"
          +#BSUB -R "affinity[core(1,exclusive=(core,alljobs))]"
          +
          +# add modulefiles
          +module load vasp/6.1.0-openacc
          +mpirun -np 1 vasp_gam
          +
          +

其中,core(1,exclusive=(core,alljobs)) 表示为该作业分配 1 个核,且该核不与其他作业共享。注意括号中的核数需要根据实际使用的进程数指定:本例中 mpirun -np 的参数是 1,故写作 core(1,...)。

          +

          DP-GEN Slurm 系统提交方法

          +

          以训练步骤为例:

          +
          {
          +  "train": [
          +    {
          +      "machine": {
          +        "machine_type": "slurm",
          +        "hostname": "xx.xxx.xxx.xxx",
          +        "port": 22,
          +        "username": "chenglab",
          +        "work_path": "/home/chenglab/ypliu/dprun/train"
          +      },
          +      "resources": {
          +        "numb_gpu": 1,
          +        "numb_node": 1,
          +        "task_per_node": 2,
          +        "partition": "gpu",
          +        "exclude_list": [],
          +        "source_list": [],
          +        "module_list": [
          +            "deepmd/1.2"
          +        ],
          +        "time_limit": "96:0:0",
          +        "sleep": 20
          +      },
          +      "python_path": "/share/apps/deepmd/1.2/bin/python3.6"
          +    }
          +  ],
          +  ...
          +}
          +
          +

          若提交任务使用QoS设置,则可以在resources中增加qos项目,示例如下:

          +
          {
          +  "train": [
          +    {
          +      "machine": {
          +        "machine_type": "slurm",
          +        "hostname": "xx.xxx.xxx.xxx",
          +        "port": 22,
          +        "username": "chenglab",
          +        "work_path": "/home/chenglab/ypliu/dprun/train"
          +      },
          +      "resources": {
          +        "numb_gpu": 1,
          +        "numb_node": 1,
          +        "task_per_node": 2,
          +        "partition": "gpu",
          +        "exclude_list": [],
          +        "source_list": [],
          +        "module_list": [
          +            "deepmd/1.2"
          +        ],
          +        "time_limit": "96:0:0",
          +        "qos": "normal",
          +        "sleep": 20
          +      },
          +      "python_path": "/share/apps/deepmd/1.2/bin/python3.6"
          +    }
          +  ],
          +  ...
          +}
          +
          +

          LSF 作业管理系统(旧版)

          +
          +

目前旧版 LSF 系统(10.1.0.0)已不再适用,此部分仅作归档,不再更新,还请留意。新版说明请移步前文的新版部分。

          +
          +

          在GPU节点上,需要通过指定 CUDA_VISIBLE_DEVICES 来对任务进行管理。

          +
          #!/bin/bash
          +
          +#BSUB -q gpu
          +#BSUB -W 24:00
          +#BSUB -J test
          +#BSUB -o %J.stdout
          +#BSUB -e %J.stderr
          +#BSUB -n 4
          +
          +
          +

          lsf 提交脚本中需要包含 export CUDA_VISIBLE_DEVICES=X ,其中 X 数值需要根据具体节点的卡的使用情况确定。

          +
          +

使用者可以用 ssh <host> nvidia-smi 登录到对应节点(节点名为 <host>)检查 GPU 使用情况。示例如下:

          +

          $ ssh c51-g001 nvidia-smi
          +Wed Mar 10 12:59:01 2021
          ++-----------------------------------------------------------------------------+
          +| NVIDIA-SMI 460.32.03    Driver Version: 460.32.03    CUDA Version: 11.2     |
          +|-------------------------------+----------------------+----------------------+
          +| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
          +| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
          +|                               |                      |               MIG M. |
          +|===============================+======================+======================|
          +|   0  Tesla V100-SXM2...  Off  | 00000000:61:00.0 Off |                    0 |
          +| N/A   42C    P0    42W / 300W |      3MiB / 32510MiB |      0%      Default |
          +|                               |                      |                  N/A |
          ++-------------------------------+----------------------+----------------------+
          +|   1  Tesla V100-SXM2...  Off  | 00000000:62:00.0 Off |                    0 |
          +| N/A   43C    P0    44W / 300W |  31530MiB / 32510MiB |     62%      Default |
          +|                               |                      |                  N/A |
          ++-------------------------------+----------------------+----------------------+
          +|   2  Tesla V100-SXM2...  Off  | 00000000:89:00.0 Off |                    0 |
          +| N/A   43C    P0    45W / 300W |      3MiB / 32510MiB |      0%      Default |
          +|                               |                      |                  N/A |
          ++-------------------------------+----------------------+----------------------+
          +|   3  Tesla V100-SXM2...  Off  | 00000000:8A:00.0 Off |                    0 |
          +| N/A   43C    P0    47W / 300W |      3MiB / 32510MiB |      0%      Default |
          +|                               |                      |                  N/A |
          ++-------------------------------+----------------------+----------------------+
          +
          ++-----------------------------------------------------------------------------+
          +| Processes:                                                                  |
          +|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
          +|        ID   ID                                                   Usage      |
          +|=============================================================================|
          +|    1   N/A  N/A    127004      C   ...pps/deepmd/1.2/bin/python    31527MiB |
          ++-----------------------------------------------------------------------------+
          +
+表示目前该节点(c51-g001)上 1 号卡正在被进程号为 127004 的进程 ...pps/deepmd/1.2/bin/python 使用,占用显存为 31527 MB,GPU 利用率为 62%。

          +

          在 Zeus 集群使用 deepmd 的提交脚本示例如下(目前 large 队列未对用户最大提交任务数设限制,Walltime 也无时间限制):

          +
          #!/bin/bash
          +
          +#BSUB -q large
          +#BSUB -J train
          +#BSUB -o %J.stdout
          +#BSUB -e %J.stderr
          +#BSUB -n 4
          +
          +module add cuda/9.2
          +module add deepmd/1.0
          +export CUDA_VISIBLE_DEVICES=0
          +# decided by the specific usage of gpus
          +dp train input.json > train.log
          +
          +

          检测脚本

          +

Zeus 集群上预置了两个检测脚本,可针对不同需求判断各卡的使用情况。

          +

          可以使用检测脚本/share/base/tools/export_visible_devices来确定 $CUDA_VISIBLE_DEVICES 的值,示例如下:

          +
          #!/bin/bash
          +
          +#BSUB -q gpu
          +#BSUB -J train
          +#BSUB -o %J.stdout
          +#BSUB -e %J.stderr
          +#BSUB -n 4
          +
          +module add cuda/9.2
          +module add deepmd/1.0
          +source /share/base/scripts/export_visible_devices
          +
          +dp train input.json > train.log
          +
          +

          /share/base/tools/export_visible_devices 可以使用flag -t mem 控制显存识别下限,即使用显存若不超过 mem 的数值,则认为该卡未被使用。根据实际使用情况和经验,默认100 MB以下视为空卡,即可以向该卡提交任务。
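
+

例如,若希望把显存阈值调整为 200 MB,可在提交脚本中这样引入(数值仅为示意):

+
source /share/base/tools/export_visible_devices -t 200
+
+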

          +

          也可以使用检测脚本/share/base/tools/avail_gpu.sh来确定 $CUDA_VISIBLE_DEVICES 的值。/share/base/tools/avail_gpu.sh 可以使用flag -t util 控制显卡利用率可用上限,即使用显卡利用率若超过 util 的数值,则认为该卡被使用。目前脚本默认显卡利用率低于5%视为空卡,即可以向该卡提交任务。

          +

          任务优先级设置(QoS)(不可用)

          +

          默认情况下提交的任务Qos设置为normal,即填充在整个队列的末尾。如果任务比较紧急,可以向管理员报备申请使用emergency优先级,采用此优先级的任务默认排在队列顶。

          +

          使用方法如下,即在提交脚本中加入下行:

          +
          #SBATCH --qos emergency
          +
          +

          DP-GEN

          +

          以训练步骤为例:

          +
          {
          +  "train": [
          +    {
          +      "machine": {
          +        "machine_type": "lsf",
          +        "hostname": "xx.xxx.xxx.xxx",
          +        "port": 22,
          +        "username": "username",
          +        "password": "password",
          +        "work_path": "/some/remote/path"
          +      },
          +      "resources": {
          +        "node_cpu": 4,
          +        "numb_node": 1,
          +        "task_per_node": 4,
          +        "partition": "large",
          +        "exclude_list": [],
          +        "source_list": [
          +            "/share/base/scripts/export_visible_devices -t 100"
          +        ],
          +        "module_list": [
          +            "cuda/9.2",
          +            "deepmd/1.0"
          +                ],
          +        "time_limit": "96:0:0",
          +        "submit_wait_time": 20
          +      },
          +      "python_path": "/share/deepmd-1.0/bin/python3.6"
          +    }
          +  ],
          +  ......
          +}
          +
          +

          DP-GEN v1.0 API

          +
          +

          注意

          +

train 部分使用了对新版 LSF 提供支持的写法,即同时指定 gpu_usage 和 gpu_new_syntax 为 True,从而可在提交脚本中使用新版 LSF 的语法。

          model_devi部分使用的是旧版语法,且未指定GPU,但导入了检测脚本。

fp 部分使用的是针对 CPU 计算的语法。注意 mpiexec.hydra 需要显式写出。

          +
          +
          {
          +  "api_version": "1.0",
          +  "train": [
          +    {
          +      "command": "dp",
          +      "machine": {
          +        "batch_type": "LSF",
          +        "context_type": "SSHContext",
          +        "local_root": "./",
          +        "remote_root": "/data/tom/dprun/train",
          +        "remote_profile": {
          +            "hostname": "123.45.67.89",
          +            "username": "tom"
          +        }
          +      },
          +      "resources": {
          +        "number_node": 1,
          +        "cpu_per_node": 4,
          +        "gpu_per_node": 1,
          +        "queue_name": "gpu",
          +        "group_size": 1,
          +        "kwargs": {
          +          "gpu_usage": true,
          +          "gpu_new_syntax": true, 
          +          "gpu_exclusive": true
          +        },
          +        "custom_flags": [
          +          "#BSUB -J train",
          +          "#BSUB -W 24:00"
          +        ],
          +        "module_list": [
          +          "deepmd/2.0"
          +        ]
          +      }
          +    }
          +  ],
          +  "model_devi":[
          +    {
          +      "command": "lmp_mpi",
          +      "machine":{
          +        "batch_type": "LSF",
          +        "context_type": "SSHContext",
          +        "local_root": "./",
          +        "remote_root": "/data/jerry/dprun/md",
          +        "remote_profile": {
          +          "hostname": "198.76.54.32",
          +          "username": "jerry",
          +          "port": 6666
          +        }
          +      },
          +      "resources": {
          +        "number_node": 1,
          +        "cpu_per_node": 8,
          +        "gpu_per_node": 0,
          +        "queue_name": "gpu",
          +        "group_size": 5,
          +        "kwargs": {
          +          "gpu_usage": false
          +        },
          +        "custom_flags": [
          +          "#BSUB -J md",
          +          "#BSUB -W 24:00"
          +        ],
          +        "strategy": {"if_cuda_multi_devices": false},
          +        "para_deg": 2,
          +        "module_list": [
          +          "deepmd/2.0"
          +        ],
          +        "source_list": [
          +          "/share/base/tools/avail_gpu.sh"
          +        ]
          +      }
          +    }
          +  ],
          +  "fp":[
          +    {
          +      "command": "mpiexec.hydra -genvall vasp_gam",
          +      "machine":{
          +        "batch_type": "LSF",
          +        "context_type": "SSHContext",
          +        "local_root": "./",
          +        "remote_root": "/data/jerry/dprun/fp",
          +        "remote_profile": {
          +          "hostname": "198.76.54.32",
          +          "username": "jerry",
          +          "port": 6666
          +        }
          +      },
          +      "resources": {
          +        "number_node": 2,
          +        "cpu_per_node": 32,
          +        "gpu_per_node": 0,
          +        "kwargs": {
          +          "gpu_usage": false
          +        },
          +        "custom_flags": [
          +          "#BSUB -J label",
          +          "#BSUB -W 12:00"
          +        ],
          +        "queue_name": "medium",
          +        "group_size": 10,
          +        "module_list": [
          +          "intel/17.5.239",
          +          "mpi/intel/2017.5.239",
          +          "vasp/5.4.4"
          +        ]
          +      }
          +    }
          +  ]
          +}
          +
          + + + + + + + + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/deprecated/mig_usage/index.html b/wiki/deprecated/mig_usage/index.html new file mode 100644 index 00000000..88e308ca --- /dev/null +++ b/wiki/deprecated/mig_usage/index.html @@ -0,0 +1,2838 @@ + + + + + + + + + + + + + + + + + + + + + 使用A100切分的GPU实例 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          使用集群上的 GPU —— 使用A100切分的GPU实例

          +

          目前Zeus上已经部署了c51-g002节点,安装有4张Nvidia Tesla A100加速卡。Nvidia官方在A100发布后引入了Multi-Instance GPU(MIG)技术,可以将一张A100拆分为最多7个GPU实例(GPU Instance),在此基础上可以创建计算实例(Computing Instance)。

          +

拆分工作需要管理员权限,因而管理员已经事先将其中的 3 张卡各拆分为 7 个 GI 并创建了对应的 CI;加上未拆分的 1 张卡,目前 c51-g002 节点可以同时使用至多 22 个 GPU 实例。

          +

          受限于现有的调度系统,如果你希望使用Zeus上的A100来进行计算,请仔细阅读以下操作指引。

          +

          常规使用

          +

目前,c51-g002节点上的0号卡尚未开启MIG功能,因此使用上基本与V100一样。为了调度方便,请务必使用 j_exclusive=yes 选项以确保任务可以正确调度到0号卡。如果使用DP-GEN,请设置 gpu_exclusive 为 true。

          +
          +

          注意

          +

          不要心存侥幸设置j_exclusive=no,你会惊奇地发现任务可能被提交到其他卡上,因而无法尽情地享用80GB大显存。同时这也会使得其他人的任务被提交到0号卡上,从而产生干扰。

          +
          +

          由于A100仅支持CUDA 11.1以上版本,故请注意使用的软件版本。以DeePMD-kit为例,目前集群上只有deepmd/2.0-cuda11.3兼容,因此请务必注意势函数和使用的DeePMD的版本,以免出现报错。

          +

          以下给出示例提交脚本:

          +
          #!/bin/bash
          +#BSUB -q gpu2
          +#BSUB -W 24:00
          +#BSUB -J deepmd
          +#BSUB -o %J.stdout
          +#BSUB -e %J.stderr
          +#BSUB -n 11
          +#BSUB -gpu "num=1:mode=shared:mps=no:j_exclusive=yes"
          +#BSUB -R "span[ptile=11]"
          +
          +# add modulefiles
          +module add deepmd/2.0-cuda11.3
          +
          +dp train input.json 1>> train.log 2>> train.err
          +
          +

请参考 /data/share/base/scripts 下的示例脚本(带有 A100 标注、不带 MIG 后缀的版本),可复制粘贴使用。

          +

          这里设置-n 11是考虑到GI调度的要求,我们需要防止出现多于22个任务同时运行在A100上。

          +

          使用MIG切分的GI

          +

          受限于现有LSF调度系统,尚且无法直接完成对GI的调度。因此我们需要另辟蹊径,所幸j_exclusive=no的情况下可以让任务正确识别到开启了MIG的卡,但也仅限于此了。我们需要进一步让任务正确分配到空闲的CI上,而非默认的第一个(通常编号为7)。

          +
          +

          注意

          +

          不要心存侥幸设置j_exclusive=yes,你会惊奇地发现如果有人用了0号卡,你的任务会处于PEND状态,这是因为LSF认为其他卡均非空。

          +
          +
          +

          注意

          +

          也请不要参考LSF官方文档对于这里的说明,我们的版本不兼容MIG选项。

          +
          +

实际上,按照英伟达官方指导,若要手动使用某个 CI,需要将 CUDA_VISIBLE_DEVICES 指定为对应的 UUID。通过 ssh 登录到 c51-g002 节点上,运行以下命令:

          +
          nvidia-smi -L
          +
          +

          可以得到以下输出:

          +
          GPU 0: A100 80GB PCIe (UUID: GPU-558ce120-5b8b-16a1-87d4-ce157bba3e9d)
          +GPU 1: A100 80GB PCIe (UUID: GPU-162e30f5-cc45-efb9-1e81-19337f4919ce)
          +  MIG 1g.10gb Device 0: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/7/0)
          +  MIG 1g.10gb Device 1: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/8/0)
          +  MIG 1g.10gb Device 2: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/9/0)
          +  MIG 1g.10gb Device 3: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/11/0)
          +  MIG 1g.10gb Device 4: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/12/0)
          +  MIG 1g.10gb Device 5: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/13/0)
          +  MIG 1g.10gb Device 6: (UUID: MIG-GPU-162e30f5-cc45-efb9-1e81-19337f4919ce/14/0)
          +GPU 2: A100 80GB PCIe (UUID: GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747)
          +  MIG 1g.10gb Device 0: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/7/0)
          +  MIG 1g.10gb Device 1: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/8/0)
          +  MIG 1g.10gb Device 2: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/9/0)
          +  MIG 1g.10gb Device 3: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/10/0)
          +  MIG 1g.10gb Device 4: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/11/0)
          +  MIG 1g.10gb Device 5: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/12/0)
          +  MIG 1g.10gb Device 6: (UUID: MIG-GPU-b43c9a60-fe1a-73ec-06b5-59e6e8b25747/13/0)
          +GPU 3: A100 80GB PCIe (UUID: GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792)
          +  MIG 1g.10gb Device 0: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/7/0)
          +  MIG 1g.10gb Device 1: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/8/0)
          +  MIG 1g.10gb Device 2: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/9/0)
          +  MIG 1g.10gb Device 3: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/11/0)
          +  MIG 1g.10gb Device 4: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/12/0)
          +  MIG 1g.10gb Device 5: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/13/0)
          +  MIG 1g.10gb Device 6: (UUID: MIG-GPU-6fc20abf-dbd6-c875-17d0-8b5b579c9792/14/0)
          +
          +

          可以看到,1-3号GPU各自拥有了7个独立的MIG Device,各自的UUID列在括号里。

          +

          但是,如果你试图把任务直接交上去,并且手动指定一个UUID,则会发现很可能你的任务没有跑在想要的卡上,甚至在CPU上运行。这是因为LSF调度下,只有一张卡可见,因此只有该可见卡的UUID才有效。

          +

          因此,无论怎样,我们都需要一个脚本来监测自己目前位于哪张卡,该卡上有哪几个GI空闲。

          +

管理员提供了一个脚本,放置在/data/share/base/tools/mig_check.py,可输出当前可用的UUID。该脚本已经设置好执行环境,因而直接运行即可,不要用本地的Python环境来执行。以下给出一个示例提交脚本:

          +
          #BSUB -e %J.err
          +#BSUB -o %J.out
          +#BSUB -n 1
          +#BSUB -R 'span[ptile=1]'
          +#BSUB -q gpu2
          +#BSUB -gpu 'num=1:mode=shared:j_exclusive=no'
          +#BSUB -J train
          +#BSUB -W 24:00
          +
          +module load deepmd/2.0-cuda11.3
          +
          +export CUDA_VISIBLE_DEVICES=`/data/share/base/tools/mig_check.py`
          +
          +dp train input.json 1>> train.log 2>> train.err
          +
          +

请设置仅使用 1 个 CPU 核,以免节点上没有足够的 CPU 核数供其他任务提交。

          +

          如果使用新版DP-GEN或DPDispatcher来调度任务,请加入新的环境变量选项。以下给出一个resources部分的示例:

          +
          "resources": {
          +    "number_node": 1,
          +    "cpu_per_node": 1,
          +    "gpu_per_node": 1,
          +    "queue_name": "gpu2",
          +    "group_size": 1,
          +    "kwargs": {
          +      "gpu_usage": true,
          +      "gpu_new_syntax": true,
          +      "gpu_exclusive": false
          +    },
          +    "custom_flags": [
          +      "#BSUB -J train",
          +      "#BSUB -W 24:00"
          +    ],
          +    "strategy": {"if_cuda_multi_devices": false},
          +    "module_list": ["deepmd/2.0-cuda11.3"],
          +    "envs": {"CUDA_VISIBLE_DEVICES": "`/data/share/base/tools/mig_check.py`"},
          +    "wait_time": 60
          +}
          +
          +

请务必设置 gpu_exclusive 为 false 以确保任务正确提交到1-3号卡;请务必设置 if_cuda_multi_devices 为 false 以免自动写入 CUDA_VISIBLE_DEVICES。同时经过实践,30 s 的等待时间对于训练任务可能太短,可能需要 60 s。

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/how_to_edit/howtodo/index.html b/wiki/how_to_edit/howtodo/index.html new file mode 100644 index 00000000..cf08da51 --- /dev/null +++ b/wiki/how_to_edit/howtodo/index.html @@ -0,0 +1,3842 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 如何使用 Wiki - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          如何使用 Wiki

          +

          Wiki 书写使用 markdown 格式。本 wiki 使用 python-markdown 作为 markdown 的解释器,支持一些 markdown 的扩展语法。在本地编辑 markdown 文件时,推荐使用 VSCode

          +
          +

          Warning

          +

          Typora正式版已经收费,且测试版在某些系统环境已不可用。

          +
          +

          有任何问题可以在 https://github.com/chenggroup/chenggroup.github.io/issues 进行反馈。

          +
          +

          文档中带有 * 的部分可以略过。

          +
          +

          对某篇 wiki 内容有疑问

          +

请使用页面下方的评论区,登录 Github 账号后进行评论。该部分基于 giscus 构建,可以自动创建一个 discussion,从而提供方便的互动。此功能需要创建页面的贡献者手动开启。

          +

          如何上传 wiki

          +

如果还不会 markdown 语法,可以先看 markdown 语法部分。能被识别为 wiki 的 markdown 文件应在文件的开头插入 YAML Front Matter。把自己的 markdown 文档上传到 wiki 上有两种方案,本质都是在使用 Github:1. 上传文件至 Github 仓库(推荐);2. 由 wiki 网站 导向编辑页面。

          +

          上传文件至 github 仓库 (推荐)

          +

          推荐通过 pull requests 的方法来增加或修改 wiki 网站 上的 wiki。

          +

          1. Fork wiki 文档所在仓库

          +

          先 fork https://github.com/chenggroup/chenggroup.github.io ,然后进入 fork 成功后的仓库。

          + + +

          2. 创建新文件或上传本地文件

          + + +

          推荐在本地用 typora 等编辑器写好 markdown 后直接上传文件,文件请上传至 _wiki 目录 (master 分支)。也可以修改 fork 的仓库的 docs/wiki 下的文件,然后再提交 PR。
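
+

若习惯使用命令行,也可以通过 git 完成上述上传操作(以下仅为示意,yourname 为假设的 Github 用户名,文件名与存放目录请按实际情况调整):

+
git clone https://github.com/yourname/chenggroup.github.io.git
+cd chenggroup.github.io
+cp /path/to/your_doc.md docs/wiki/software_usage/
+git add docs/wiki/software_usage/your_doc.md
+git commit -m "add your_doc"
+git push origin master
+
+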

          +

          3. 设置导航

          +
          +

          Note

          +

          新增步骤

          +
          +

          在上传新的文档后,需要手动在仓库首级的 mkdocs.yml 中设置导航。

          +

          例如在软件使用中增加 VASP 使用教程的话(假设放在 docs/wiki/software_usage/vasp.md),且希望放在 CP2K 和 DP-GEN 之间,请在 nav 中增加如下内容:

          +
          nav:
          +  ...
          +  - Wikis:
          +      ...
          +      - 软件使用:
          +          ...
          +          - wiki/software_usage/Tips_for_LaTeX.md
          +          - CP2K:
          +              ...
          +          - wiki/software_usage/vasp.md # 新增导航
          +          - wiki/software_usage/DP-GEN.md
          +          ...
          +      ...
          +
          +

          4. 提交 PR

          + + +

          如何预览 wiki

          +

          预览 wiki 也有两种方案:1. 使用 typora 等实时渲染;2. 在本地启动 Mkdocs 服务。

          +

          通过 typora (注意已经收费)

          +

          使用 typora 编辑器可以很方便地实时渲染 markdown 文件。如果不使用本 wiki 中标注有 *wiki 扩展语法 ,则可以大体上认为 typora 所渲染出的文档与直接查看 wiki 网站 的文档相差无几,基本仅存在显示风格上的差异。但要注意需更改 typora 的一些设置(见后文),避免和 wiki 所使用的 markdown 扩展功能发生冲突。

          +

          修改 markdown 拓展语法设置

          +

          需要关闭上下标、高亮以及图表的功能。

          +

          Screen Shot 2019-11-08 at 21.21.10

          +

          修改数学公式设置

          +

          需要关闭数学公式自动添加序号的功能。

          +

          Screen Shot 2019-11-08 at 21.23.00

          +

          修改图像设置

          +

          需要把默认的无特殊操作改为通过 iPic 上传图片,不过在这之前需要 下载 iPic 。推荐在 iPic 偏好设置中开启压缩上传图片的选项,这样可以使 wiki 网页加载的速度更快。

          +

          image-20210602152924699

          +

          通过 Mkdocs 服务*

          +

          1. 下载网站源码至本地

          +
          git clone https://github.com/chenggroup/chenggroup.github.io.git
          +cd chenggroup.github.io
          +
          +

          2. 安装 mkdocs-material 和 必要的 mkdocs 插件

          +

          可参考 mkdocs-material 官方安装指南

          +
          pip install mkdocs-material \
          +    mkdocs-macros-plugin \
          +    mkdocs-static-i18n[material]
          +
          +

3. 启动 Mkdocs 服务

          +
          mkdocs serve
          +
          +

4. 编辑 wiki

          +

          把要预览的 wiki 移到 docs/wiki/ 目录下,或是直接编辑 docs/wiki/ 目录下的 markdown 文件。

          +

5. 预览 wiki

          +

          等待片刻,打开浏览器访问 http://127.0.0.1:8000

          +

          Markdown 语法

          +

          Markdown 是一种标记语言,和代码一样,可以用纯文本的形式来书写。其使用的常用标记符号不超过十个,可以让人专注于文字而不是排版,并且也可以方便地导出为 HTML、PDF 等格式。

          +

          基本语法

          +

          markdown-basic-gramma

          +
          +

⚠ 插入图片时切勿使用本地路径,否则在 wiki 上无法查看,具体请参考 Typora 插入图片设置

          +
          +

          可参考 markdown 教程练习 来学习基本语法。

          +
          +

          ⚠ 要引用同一篇 wiki 中的小标题(二至六级标题)可以通过 [sub title](#sub-title) 来引用。不过需要注意,要把小标题中的空格用 - 代替,所有大写字母改成小写,且忽略 . , & 等特殊符号。比如,用 [1. Fork wiki 文档所在仓库](#1-fork-wiki-文档所在仓库) 来表示 1. Fork wiki 文档所在仓库 。若有多个同名标题,以 title, tile-1, title-2 来区分。

          +
          +

          GFM 扩展语法

          +

          GFM(GitHub Flavored Markdown) 是 github 所使用的 markdown 扩展语法。

          +

          清单

          +
          - [ ] 未完成列表
          +- [x] 已完成列表
          +
          +
            +
          • 未完成列表
          • +
          • 已完成列表
          • +
          +

          表情

          +
          :eyeglasses: :+1:
          +
          +

          👓 👍

          +

          Wiki 扩展语法

          +

          标注 * 的部分可以不去注意

          +

          YAML Front Matter

          +
          加入标题
          +

          只有在 markdown 文件的头部加入 YAML Front Matter 部分,才能使你写的 wiki 展示在网页上。因此最简单的,请在 YAML Front Matter 中加入 title,如下所示:

          +
          ---
          +title: getting-started
          +---
          +
          +
          添加作者
          +

          YAML Front Matter 中加入 authors 即可添加作者,多个作者用 yaml 语法的列表表示:

          +
          ---
          +title: getting-started
          +authors: one author
          +---
          +
          +
          ---
          +title: getting-started
          +authors:
          +  - author1
          +  - author2
          +---
          +
          +
          开启评论功能
          +

          对创建页面的编辑者来说,通常情况下请开启评论功能、以便读者可以快速提交反馈或评论,即在 YAML Front Matter 部分增加一行:

          +
          ---
          +...
          +comments: true
          +---
          +
          +

          数学公式

          +

          数学公式可以用 LaTeX 语法来书写,两端用 $(一般用于行内公式) 或 $$(会使公式居中显示) 来标记,如 $E=mc^2$ 可表示 \(E=mc^2\)

          +
          $$
          +E[\rho] = T_s[\rho] + \int \mathrm{d}r\ v_{\rm ext}(r)\rho(r) + V_{H}[\rho] + E_{\rm xc}[\rho]
          +$$
          +
          +
\[ E[\rho] = T_s[\rho] + \int \mathrm{d}r\ v_{\rm ext}(r)\rho(r) + V_{H}[\rho] + E_{\rm xc}[\rho] \]
          +

          要表示多行公式,需要使用 aligned,并要在行尾部加 \\

          +
          $$
          +\begin{aligned} \dot{x} &= \sigma(y-x) \\
          +\dot{y} &= \rho x - y - xz \\
          +\dot{z} &= -\beta z + xy \end{aligned} 
          +$$
          +
          +
\[ \begin{aligned} \dot{x} &= \sigma(y-x) \\ \dot{y} &= \rho x - y - xz \\ \dot{z} &= -\beta z + xy \end{aligned} \]
          +

若要实现给公式编号等功能,可参照 LaTeX 的做法。

          +

          化学式与化学反应式

          +

此功能通过 LaTeX 的 mhchem 插件来实现,使用上与数学公式输入相近,都需要通过 $ 或 $$ 来标记。

          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| 源码 | 化学式与化学反应式 |
| --- | --- |
| $\ce{Mg(OH)2}$ | \(\ce{Mg(OH)2}\) |
| $\ce{CrO4^2-}$ | \(\ce{CrO4^2-}\) |
| $\ce{[Cu(NH3)4]^2+}$ | \(\ce{[Cu(NH3)4]^2+}\) |
| $\ce{CoCl2.6H2O}$ | \(\ce{CoCl2.6H2O}\) |
| $\ce{^{227}_{90}Th+}$ | \(\ce{^{227}_{90}Th+}\) |
| $\ce{C2H5-OH}$ | \(\ce{C2H5-OH}\) |
| $\ce{CH3CH=CH2}$ | \(\ce{CH3CH=CH2}\) |
| $\ce{HC#CH}$ | \(\ce{HC#CH}\) |
| $\ce{CaCO3 ->[900\,{}^{\circ}\mathrm{C}] CaO + CO2}$ | \(\ce{CaCO3 ->[900\,{}^{\circ}\mathrm{C}] CaO + CO2}\) |
| $\ce{H2PO4- <=>C[OH-][H+] H+ + HPO4^2-}$ | \(\ce{H2PO4- <=>C[OH-][H+] H+ + HPO4^2-}\) |
          +

          上下标

          +

          一般情况下可以用 <sup></sup> 表示上标,用 <sub></sub> 表示下标,如 支付宝TM 可用 支付宝<sup>TM</sup> 表示。

          +

          按钮*

          +
          [Subscribe to our newsletter](#){ .md-button }
          +
          + + +

default primary

          +

          提示*

          +
          !!! tldr "title"
          +    TLDR means too long, didn't read
          +
          +

          改变 tldr 即可使用不同的提示类型,比如

          +
          +

          Use tldr for this.

          +

          TLDR means too long, didn't read

          +
          +
          +

          Use tip for this.

          +

          This is a tip.

          +
          +
          +

          Use info for this.

          +

          This is a piece of information, or you can use todo.

          +
          +
          +

          Use question for this.

          +

          This is a question.

          +
          +
          +

          Use warning for this.

          +

          This is a warning

          +
          +
          +

          Use danger for this.

          +

          This alerts danger!

          +
          +
          +

          Use success for this.

          +

          This alerts success

          +
          +

          流程图

          +

          流程图可以用来表示工作流或者步骤等:

          +
          ``` mermaid
          +graph LR
          +  A[Start] --> B{Error?};
          +  B -->|Yes| C[Hmm...];
          +  C --> D[Debug];
          +  D --> B;
          +  B ---->|No| E[Yay!];
          +```
          +
          +
          graph LR
          +  A[Start] --> B{Error?};
          +  B -->|Yes| C[Hmm...];
          +  C --> D[Debug];
          +  D --> B;
          +  B ---->|No| E[Yay!];
          +

          引用本网站的其他 wiki

          +

          使用

          +
[title](relevant/path/to/file.md)
          +
          +

即可在 wiki 中引用本网站的其他 wiki。只需将 relevant/path/to/file.md 改成想要引用的 wiki 相对此文档的 相对路径。

          +

比如,要想引用 如何使用 wiki 这篇 wiki,则只需把 relevant/path/to/file.md 换成 ../how_to_edit/howtodo.md。

          +

          Screen Shot 2019-12-02 at 11.30.03

          +
          +

          Warning

          +

注意这里推荐使用相对路径:只要同级目录结构不变,链接即可正常工作;若上级目录结构发生变化,则需要对应更改路径。

          +
          +

          文档英文翻译

          +

          目前本 Wiki 采用 mkdocs-static-i18n 实现多语言支持,因而若需要编写翻译版本,仅需要在同一目录下增加一个后缀为 .en 的markdown文件。例如中文文档为 custom.md,则英文文档为 custom.en.md

          +

          注意请将导言区的 title 内容翻译为英文。

          +

          若涉及导航栏中自定义栏目的翻译,请在 mkdocs.yml 中增加。以下给出一个实例:

          +
          nav:
          +  - 主页: index.md
          +  - 分类1: 
          +      - topic1/index.md
          +      - topic1/item1.md
          +  - 分类2: topic2/index.md
          +
          +plugins:
          +  - i18n:
+      languages:
+        - locale: en
+          default: true
+          name: English
+        - locale: fr
+          name: Français
+          nav_translations:
+            主页: Home
+            分类1: Topic 1
+            分类2: Topic 2
          +
          +

          参考资料*

          +

          要使用更多功能,请参考mkdocs-material官方文档

          +

          当然,想要快速获得支持,也可以联系作者或者 Open an issue

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/how_to_edit/howtousenews/index.html b/wiki/how_to_edit/howtousenews/index.html new file mode 100644 index 00000000..aeef4e88 --- /dev/null +++ b/wiki/how_to_edit/howtousenews/index.html @@ -0,0 +1,2743 @@ + + + + + + + + + + + + + + + + + + + + + 如何发布 News - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          如何发布 News (致管理员)

          +

因为正在迁移到新的实现,此功能暂不启用。

          +

          服务器的一些信息或是其他零碎的信息可发布在 News 里。

          +

          上传文件

          +

          文件的格式

          +

          请上传 markdown 格式的文件,当然也支持一些 markdown 的拓展功能

          +

          文件的命名

          +

          文件以 YYYY-MM-dd-name.md 来命名,如 2019-11-01-welcome.md

          +
          +

          如果文件前缀的日期是个未来日期,则其不会在 News 页面上显示,不过当到了其日期之后则会自动出现在 News 页面上。

          +
          +

          设置 News 的摘要

          +

          在一级标题之下, <!--more--> 之上的内容会被当作摘要。进入 read more 之前会显示摘要。

          +

          设置 News 的分类

          +

          YAML Front Matter 处添加 tags 可更方便地按照某些标签来检索 News,tags 示例如下所示:

          +
          ---
          +tags:
          +  - HPCreview
          +  - HPCreport
          +---
          +
          +

          查看 News

          +

          进入 https://wiki.cheng-group.net//news 可查看所有 News,https://wiki.cheng-group.net//archive 可查看按时间分类的 News。

          + + + + + + + + + + + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/miscellaneous/index.html b/wiki/miscellaneous/index.html new file mode 100644 index 00000000..7e81423d --- /dev/null +++ b/wiki/miscellaneous/index.html @@ -0,0 +1,2950 @@ + + + + + + + + + + + + + + + + + + + + + + + 杂项(Miscellaneous) - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          Miscellaneous

          +

Put temporary or unclassified content here!

          +

          Run Process when you logout shell

          +

Every time you log in to the cluster, you may want to keep some commands running even after you log out of the shell. Unfortunately, these commands will stop as soon as you log out. How do you keep them running? The trick here is to use the commands nohup and &.

          +

nohup command &

          +

You just need to prepend nohup and append & to your command. Now, you can log out and have a nice sleep.
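
+

For example (train.py and the log file name below are just placeholders for a long-running job):

+
nohup python train.py > train.log 2>&1 &
+# the job keeps running after you log out; check on it later with:
+ps aux | grep train.py
+tail -f train.log
+
+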

          +

          删除 linux 下的符号链接(快捷方式)

          +

          Linux 系统下的符号链接,又称软链接,基本类似于 Windows 系统下的快捷方式。如果你已经接触过deepmd,你应该已经对见到过一些符号链接了。需要注意的一点是,符号链接本质上是一个 独立的文本文件,操作系统会将其解释为另一个文件或者路径(文件夹)。因此符号链接有如下两个性质:

          +
            +
          • +

            删除符号链接文件并不会影响原本的文件/路径(文件夹)

            +
          • +
          • +

            删除原始文件/路径后,符号链接仍然存在,但是链接会损坏,成为 “stale symbolic link”(字面意思)。

            +
          • +
          +

          在整理工作文件夹的时候,我们可能会需要删除符号链接,我们尤其需要注意路径符号链接的删除:

          +

          一个dp-gen的训练路径结构如下:

          +
          00.train/
          +├── 000
          +├── 001
          +├── 002
          +├── 003
          +├── data.init -> /data/rhbi/TiO2-ML/00.cp2k_md
          +├── data.iters
          +├── graph.000.pb -> 000/frozen_model.pb
          +├── graph.001.pb -> 001/frozen_model.pb
          +├── graph.002.pb -> 002/frozen_model.pb
          +├── graph.003.pb -> 003/frozen_model.pb
          +└── jr.json
          +
          +

          假设你想要删除和文件关联的软链接‘graph.000.pb’,输入 rm graph.000.pb,没有任何问题,你成功删除了这个文件。然而如果你想删除和一个文件夹相关的链接,data.init,你可能会不假思索地输入

          +
          rm data.init/
          +
          +

          这时候你会收到报错:

          +
          rm: cannot remove ‘data.init/’: Is a directory
          +
          +

再次强调,符号链接本质上是一个独立的文本文件。收到报错是因为 shell 的自动补全功能把‘data.init’识别为了一个路径,因此在末尾加上了斜杠‘/’;然而符号链接只是一个文本文件,此时系统认为不能用 rm 命令直接删除一个路径,所以报错。正确的解决方法是去掉斜杠,输入正确的命令即可成功删除链接:

          +
          rm data.init
          +
          +

          当然shell的自动补全和你使用的 shell 版本有关,有可能你的 shell 不会犯蠢直接加上‘/’,但是在删除链接的时候你需要额外注意,避免你的数据损失。
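
+

删除前,也可以先用 readlink 确认链接指向的目标;删除链接本身还可以使用 unlink 命令(以下沿用上文 data.init 的示例):

+
readlink data.init
+# 输出 /data/rhbi/TiO2-ML/00.cp2k_md,即该链接指向的原始路径
+unlink data.init
+
+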

          +
          +

          danger

          +

千万不要运行 ‘rm -rf data.init/*’,你会删除掉原路径下的所有文件!!!

          +
          +

          集群使用出错:/bin/sh^M: bad interpreter: No such file or directory

          +

          错误情况

          +

          /bin/sh^M: bad interpreter: No such file or directory

          +

          在集群上使用bsub提交作业后正常显示:

          +
          Job <1360> is submitted to queue <53-large>
          +
          +

但是用 bjobs 查看不到这个作业(可能先显示为排队 PEND 状态),随后显示 No unfinished job found。这时使用 ls 命令会看到,提交 .lsf 作业的目录下生成了输出和报错文件:1360.stdout、1360.stderr,说明作业已经运行结束(异常结束)。

          +

          错误原因

          +

使用 vim 查看 .stdout 和 .stderr 这两个文件,会发现在换行处出现很多 ^M 符号。究其原因,是 Windows 下编写的文件上传到 Linux 系统时换行符格式不一致(Windows 使用 CRLF,Linux 使用 LF)。

          +

          错误处理

          +

          方法一:参考linux下运行脚本报读取或^M错误处理 - 知乎 (zhihu.com)

          +

          方法二:用vim命令在集群上新建一个作业,然后把作业内容复制上去,再bsub提交作业即可
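
+

此外,也可以直接在集群上把脚本转换为 Unix 换行格式后再提交(以下假设脚本名为 job.lsf;若系统中没有 dos2unix,可用 sed 代替):

+
dos2unix job.lsf
+# 或者
+sed -i 's/\r$//' job.lsf
+
+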

          +

          Scrum Group

          +

          简单介绍

          +
            +
• scrum meeting 即每日例会;scrum 一词源自橄榄球运动,意为并列争球。scrum meeting 旨在通过例会的形式总结最近所做的工作,进行讨论和反思,并对未来短期内的工作进行规划和展望。
          • +
          +

          基本规则

          +
            +
          • 所有的学生根据所研究方向分为若干小组,每个小组由各自的 scrum master 管理,并由 scrum master 带领进行每周的汇报。
          • +
          • scrum meeting 每周进行两次,进行时间根据具体情况而定。
          • +
          • 所有的研究生和本科四年级学生除非有要事均需参加scrum meeting,如果有事不能参加的需向所在组的 scrum master 进行请假和汇报。
          • +
          • 如果当天老师繁忙,各个小组应该自行组织 scrum meeting。
          • +
          +

          例会内容

          +
            +
          • 汇报从上次 scrum meeting 到目前为止所做的工作内容,包括遇到的问题、新的发现或者存在的疑问等。
          • +
          +

          参考文件

          +
            +
          • 请参考以下文件(待更新)
          • +
          • https://www.scrumguides.org/scrum-guide.html
          • +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/ase/index.html b/wiki/new_comers/ase/index.html new file mode 100644 index 00000000..31879bbf --- /dev/null +++ b/wiki/new_comers/ase/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + ASE: 原子建模基础 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          ASE: 原子建模基础

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/basis_pps/index.html b/wiki/new_comers/basis_pps/index.html new file mode 100644 index 00000000..d97c48ca --- /dev/null +++ b/wiki/new_comers/basis_pps/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 密度泛函近似,基组与赝势 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          密度泛函近似,基组与赝势

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/dpgen/index.html b/wiki/new_comers/dpgen/index.html new file mode 100644 index 00000000..b8d5da92 --- /dev/null +++ b/wiki/new_comers/dpgen/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 深度势能生成器: DP-GEN - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          深度势能生成器: DP-GEN

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/dpmd/index.html b/wiki/new_comers/dpmd/index.html new file mode 100644 index 00000000..327520e7 --- /dev/null +++ b/wiki/new_comers/dpmd/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 机器学习: 理论与DeePMD-kit - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          机器学习: 理论与DeePMD-kit

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/linux/index.html b/wiki/new_comers/linux/index.html new file mode 100644 index 00000000..79cc86cb --- /dev/null +++ b/wiki/new_comers/linux/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Linux快速基础入门 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          Linux快速基础入门

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/presentation/index.html b/wiki/new_comers/presentation/index.html new file mode 100644 index 00000000..2805f684 --- /dev/null +++ b/wiki/new_comers/presentation/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 如何进行展示/Presentation - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          如何进行展示/Presentation

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/python_numpy/index.html b/wiki/new_comers/python_numpy/index.html new file mode 100644 index 00000000..465033cb --- /dev/null +++ b/wiki/new_comers/python_numpy/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Python 和 Numpy - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          Python 和 Numpy

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/qc_dft/index.html b/wiki/new_comers/qc_dft/index.html new file mode 100644 index 00000000..600bbb59 --- /dev/null +++ b/wiki/new_comers/qc_dft/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 量子化学与密度泛函理论 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          量子化学与密度泛函理论

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/read_papers/index.html b/wiki/new_comers/read_papers/index.html new file mode 100644 index 00000000..949721e0 --- /dev/null +++ b/wiki/new_comers/read_papers/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 如何阅读文献 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          如何阅读文献

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/toc/index.html b/wiki/new_comers/toc/index.html new file mode 100644 index 00000000..6d547812 --- /dev/null +++ b/wiki/new_comers/toc/index.html @@ -0,0 +1,2932 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 导览 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          新生入门教程

          +

          欢迎加入程俊课题组,每个人来到新环境都需要熟悉和学习规则,请各位新生按照以下清单顺序进行入组的准备。

          +

          个人座位

          +

          每位入学新生将分到一个座位和一台iMac电脑用于日常的科研。请大家先注册一个Apple ID, 然后寻找**课题组的集群管理员**,为你开通iMac电脑的账号。

          +

          集群与集群账号

          +

课题组配备有集群(超算)资源供科研使用,而集群是以**Linux**系统运行的。Linux与Windows类似,是另一种电脑操作系统,但操作以键盘命令为主。因此不熟悉**Linux**系统的同学,请先自己粗略学习一下(视频)Linux入门

          +

          要登陆集群,同样需要集群账号,请寻找**课题组的集群管理员**为你开通集群账号。

          +

登录集群**建议使用iMac的终端(terminal)**。这里的iMac,指的就是苹果电脑。由于苹果操作系统macOS和Linux都是从Unix系统衍生出来的,因此使用苹果系列电脑来登录集群最为方便。Windows系统的电脑则需要额外安装软件。

          +

          使用iMac登录集群只需要同时按住command+空格,就会跳出搜索框。在搜索框中输入terminal/终端,则会跳出终端应用。使用终端的SSH命令即可。SSH使用具体见下文。

          +

          为建立账号,需要生成SSH密钥。登录集群需要使用SSH操作。
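下面给出一个最简的示意(命令在iMac终端中执行,其中用户名与集群地址均为假设,请以管理员提供的信息为准):

```bash
# 生成SSH密钥对(一路回车即可,密钥默认保存在 ~/.ssh/ 下)
ssh-keygen -t ed25519

# 查看公钥内容,开通账号时可提供给集群管理员
cat ~/.ssh/id_ed25519.pub

# 账号开通后,在终端中用SSH登录集群(用户名与地址仅为示例)
ssh username@cluster.example.com
```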

          +

          使用集群前,请大家熟悉集群的基本知识和操作。如果要使用GPU等资源,还需学习如何使用集群上的GPU

          +

          如果以上有任何难以理解的内容请立即汇报给**课题组的集群管理员**

          +

          在iMac上和在集群上使用Python

          +

Python是一种非常方便的编程语言,可以帮助我们处理计算数据。但是纯Python的安装和相应Python库的管理十分麻烦,而名为Anaconda的软件可以帮助我们解决这个问题。

          +

          在iMac上,安装Anaconda,直接去搜索引擎搜索Anaconda然后去官网下载对应的安装包即可。

          +

          在集群上,我们已经提前为大家安装好了Anaconda,使用和设置方法参见集群上的Anaconda
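装好Anaconda之后,常见做法是为不同项目建立各自的虚拟环境(以下环境名与软件包仅为示例):

```bash
# 创建并激活一个新的虚拟环境(环境名 myenv 仅为示例)
conda create -n myenv python=3.10
conda activate myenv

# 安装常用的数据处理与绘图库
conda install numpy matplotlib
```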

          +

          必学项目

          +

          量子化学(Levine)(前14章)

          +

          (视频)量子化学与密度泛函理论

          +

          (视频)密度泛函近似,基组与赝势

          +

          (视频)Linux入门

          +

          (视频)如何阅读文献

          +

          (视频)如何进行展示

          +

          (视频)Python和Numpy

          +

          选学[具体项目相关]

          +

          机器学习

          +

          (视频)Deep Learning Lecture by Frank Noe *需要科学上网

          +

          (书籍)Pattern Recognition and Machine Learning

          +

          (书籍)Deep Learning(花书)

          +

          (视频)Machine Learning for Physics and the Physics of Learning 2019 *需要科学上网

          +

          (视频)机器学习: 理论与DeePMD-kit

          +

          (视频)深度势能生成器: DP-GEN

          +

          DeePMD-kit 使用入门

          +

          DP-GEN使用入门

          +

          工作流

          +

          (视频)自动化计算与工作流: AiiDA

          +

          生成模型

          +

          (视频)Diffusion and Score-Based Generative Models

          +

          (视频)Dr. Yang Song — Advancements in Diffusion Models for Generative AI

          +

          (博客)Generative Modeling by Estimating Gradients of the Data Distribution

          +

          (博客)A Pedagogical Introduction to Score Models

          +

          (视频)通用分子结构模型Graphormer简介 - 郑书新博士

          +

          (视频)Beyond AlphaFold2: 从结构预测到分布预测 | 郑书新博士 | 微软研究院 | Distributional Graphormer (DiG)

          +

          (视频)Materials Project Seminars – Tian Xie "MatterGen: a generative model for inorganic materials design"

          +

          统计力学

          +

          (博客)Introduction to Statistical Mechanics

          +

          (博客)David Tong at DAMTP, Cambridge: Lectures on Theoretical Physics

          +

          (博客)Lectures on Statistical Physics

          +

          (博客)Lectures on Quantum Mechanics

          +

          (博客)Lectures on Solid State Physics

          + + + + + + + + + + + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/new_comers/workflow/index.html b/wiki/new_comers/workflow/index.html new file mode 100644 index 00000000..59db16d1 --- /dev/null +++ b/wiki/new_comers/workflow/index.html @@ -0,0 +1,2719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 自动化计算与工作流: AiiDA - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          自动化计算与工作流: AiiDA

          + + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/question_under_carpet/chemical_computing/index.html b/wiki/question_under_carpet/chemical_computing/index.html new file mode 100644 index 00000000..6a1d8cd7 --- /dev/null +++ b/wiki/question_under_carpet/chemical_computing/index.html @@ -0,0 +1,2784 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 计算化学踩坑合集 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          计算化学踩坑合集

          +

有时候,我们会沿用别人测试过的设置进行计算,而不一定会从头进行系统测试。但是,作为计算软件的使用者,我们需要意识到某些可能会出错的地方(或许是很棘手的问题),而不是对这些问题视而不见(sweep the problems under the carpet)。在此文章中记录大家在项目中碰到的奇奇怪怪的坑,以供参考。

          +
          +

          有新的内容可以通过 PR 或者评论区提出。可引用置顶issue #131

          +
          +

          Cu pseudopotential

          +

          涉及 Cu 二价离子的计算可能要采用 19 电子的赝势 (semi-core potential)。

          +
          +

          We found that only the computation of the orbital energy of the empty d-level of aqueous Cu2+ requires the use of a semi-core potential with explicit 3s and 3p electrons. +Ref: J. Am. Chem. Soc. 2004, 126, 12, 3928–3938 [link]

          +
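下面是一个示意性的输入片段(以 CP2K 为例;基组与赝势名称请以所用版本的 BASIS_MOLOPT / GTH_POTENTIALS 数据文件为准,此处写法仅为假设):

```bash
# 示意:在 CP2K 输入的 &SUBSYS 部分为 Cu 指定 19 电子 (semi-core) 赝势
# 基组/赝势名称以实际数据文件为准,此处仅为示例
cat >> cu_kind.inc << 'EOF'
    &KIND Cu
      BASIS_SET DZVP-MOLOPT-SR-GTH
      POTENTIAL GTH-PBE-q19
    &END KIND
EOF
```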
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/skills/QS4writing/index.html b/wiki/skills/QS4writing/index.html new file mode 100644 index 00000000..d68fafaa --- /dev/null +++ b/wiki/skills/QS4writing/index.html @@ -0,0 +1,3218 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Quick Start for Paper Writing(科研写作急速上手) - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          Quick Start for Writing

          +
          +

          小提示

          +
          +

          中文版可以在英文版之后找到,但是还是鼓励大家先读读英文版~

          +

          English version

          +

I write this blog to share some simple tips about academic writing, hoping it can, more or less, help the "poor guys" struggling with writing.

          +

Notice: I am not a master of writing but only a TOTAL FRESHMAN. All the text below is based on what I have learnt and my own understanding, and may be incomplete (I hope there are at least no mistakes). Nevertheless, I believe that is exactly why I can make it more friendly and achievable for tyros. If you have any questions, please feel free to come and talk with me ;-)

          +

          Practice! Practice... Practice?

          +

I guess some (or even most) of you would say 'duh' when you hear "Practice! Practice! Practice!" in some books. Sounds 100 percent correct but useless, right? Overall I agree, if you don't have a concrete and reasonable plan. Aimless practice sometimes not only has no effect but, even worse, depresses you. Hence, I strongly suggest you start with writing YOUR paper, a specific example. For those without a project, an experiment report can be an alternative. Then, craft your work step by step!

          +

          Step ONE: polish up your outline

          +

The first and most important step. Seems irrelevant to writing skills, hmm? Yes, but checking the outline with your colleagues and supervisors can save you a lot of time. Just imagine the time needed to rewrite a whole section! Generally speaking, the big framework of the project (and then the paper) has already been set. However, we need to go one step further and check the structure between paragraphs and even sentences. Actually, I believe this is the nightmare for many students.

          +

          For example, here we try to introduce the modelling methods in interface electrochemistry (mainly about EDL modelling), following solution electrochemistry introduced in the last section. Hence, we write down the outline below and discuss it with our partners.

          +
          the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)
          +==>
          +EDL is hard to be probed (reason)
          +==>
          +we can get some info with in situ techniques and ab initio simulations
          +==>
+One of the key characteristics of EDL is its capacitance
+==>
+EDL capacitance can be measured by experiment (CV/impedance) and be a benchmark for modelling
+==>
+replace the solute by the electrode (from solution electrochemistry to interface electrochemistry)
+==>
+use similar simulation methods and focus on their performance on EDL modelling
          +
          +

          In this step, you don't need to consider the elegance of your language. Simple but accurate texts can make your life easier.

          +

          Step TWO: ABT structure

          +

          “How long would you need to tell a story?” Randy Olson asked this question in his TEDMED talk. (YouTube link here. Sorry I cannot find another source for the guys in China...) In this talk and his book Houston, We Have a Narrative: Why Science Needs Story, Olson introduced a quite simple method to construct a narrative, the ABT structure:

          +
          +

          (...) AND (...), BUT (...), THEREFORE (...)

          +
          +

          Let's try to fill this structure with the outline in the last step!

          +
          %% start the ABT structure
          +% EDL is important (... AND ...)
          +the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)
          +% BUT it is hard to be probed
          +However, EDL is hard to be probed, not only because xxx but xxx
          +% THEREFORE, we need some tools
          +To address this difficulty, both in situ experimental techniques and modelling are required.
          +%% END the ABT structure
          +
          +

          If you don't know how to construct your idea, write down all the points you can think about and try to adapt them to one or more ABT structure(s).

          +

          If you think the linking somewhere is not smooth enough, rewrite it with an ABT structure.

          +

          Ahhh! Not bad!

          +

          Step THREE: repeat your words

          +

With the two steps mentioned above, I believe you have worked out a comprehensible outline. Then, we need to strengthen the links between sentences and make the logic more explicit, by repeating words from the last sentence. Hence, your text becomes easier to follow! Here is an example:

          +
          Electric double layers (EDL) at the electrode/electrolyte interfaces are where electrochemical reactions occur, and thus are of paramount importance in electrochemistry.
          +% Electric double layers (EDL) <==> EDL
          +However, microscopic understanding of the EDL is still lacking due to its complexity and difficulty to probe.
          +% microscopic understanding <==> valuable insight
          +Thanks to the development of computational methods, modelling has shown great potential in studying the interface of the electrode and the electrolyte in the past few years, and provided valuable insight into EDL structures and dielectric properties.
          +
          +

Maybe the repetition between the second and the third sentences is slightly implicit, but the idea is there. Nevertheless, I would not recommend that a tyro do so, since rephrasing might confuse the readers. If you are not sure, just repeat the words and keep your text clear!

          +
          +

          Albert Einstein: When you are out to describe the truth, leave elegance to the tailor.

          +
          +

          Here we come to another example (cited from DOI: 10.1126/SCIADV.ABB1219). I like this compact structure very much.

          +
          An electric double layer (EDL) formed at an electrified interface can afford a potential change of a few volts within a very thin layer of 3 to 5 Å, amounting to an extremely large electric field of similar strength to that in a particle accelerator.
          +% an extremely large electric field <==> a strong electric field
          +Naturally, one would wonder how solvent molecules such as water or any other reactive species inside the EDL would behave in response to such a strong electric field.
          +% how ... behave <==> this question
          +Answering this question is not only of fundamental interest but also of technological importance in a broad range of research areas in science and technology, to name a few, energy storage in supercapacitors, electrocatalysis of relevance to energy and environmental applications, self-assembly of colloidal particles, ion transport across biological membranes, and mineralization processes in earth science.
          +% fundamental interest, technological importance <==> its significance
          +Despite its significance, molecular-level understanding of EDL is largely missing, owing to its complexity and difficulty to probe.
          +% molecular-level understanding <==> microscopic structures
+Because of the advent of advanced experimental (e.g., synchrotron-based techniques and Raman spectroscopy) and computational methods [e.g., ab initio molecular dynamics (AIMD)], it is not until recently that the microscopic structures of EDL have started to be unveiled.
          +
          +

          Yeah! Finally! I don't want to talk too much to distract you (but I still strongly recommend you to read the book mentioned above for fun!). I think the three tips above are sufficient to work out a readable draft for your big bosses. Don't be afraid of writing! Just have a try!

          +
          +

I am not a natural at writing. On the contrary, I had really struggled with English writing and thought I was a total dunderhead, even though I had a pretty nice and patient supervisor who helped me a lotttttttttt with my first paper. Things turned around one day (shortly after I finished the quasi-final version of my first paper) when I was asked to give my colleague a hand with a review. When I started to read the review, I magically knew how to put all I had been taught into practice. Just like a spark in my mind. Maybe you only know what should be improved when you have to deal with an "unreadable draft"? (Just kidding! Don't kill me, Xiaohui!)

          +
          +

Sincere thanks to Dr. Katharina Doblhoff-Dier at Leiden University

          +

          中文版

          +

          笔者写这篇短文的目的是为了分享几个笔者觉得很实用的写作小技巧,希望可以帮到正在(或将要)挣扎在科研写作中的同学们。

          +

          :笔者并不是科研写作的专家,只是一个刚刚敲完自己第一篇论文的菜鸟。以下的内容都是基于笔者所学的和笔者的理解,可能比较片面(希望没有错误)。尽管如此,笔者希望用一个初学者的视角去讲述,让这篇短文的内容对初学者来说是友好的和可实现的 ;-)

          +

          练习!练习......练习?

          +

          笔者相信相当一部分人在一些书里(或者其他地方)听到“练习!练习!练习!”这句话的时候会说一声:“就这?就这?”听起来是个完全正确的废话。总体来说笔者同意你们的观点,如果你们在练习的时候没有一个具体和合理的计划。毫无目的的练习有时候会事倍功半,甚至因打击你的自信而起到反效果。因此,笔者会推荐大家从 自己的文章(一个具体的例子)开始。对于那些还没有文章的同学,不妨试试实验报告之类的?然后,开始一步步打磨你的文章吧!

          +

          第一步:确定框架

          +

          这是最重要的一步,虽然看起来和写作没太大关系。一个好的框架可以大大节省后续写作的时间——想想重写一整个段落!通常来说,整个项目(文章)的大框架应该是在项目进行之前就和导师敲定好的,这个不会有大问题。问题在哪呢?下一个尺度:段落间和句子间的连接。就笔者个人经验而言,这步是很多学生(和导师)的噩梦...

          +

          那我们在写框架的时候应该写到什么程度呢?来看看一个例子。这里,我们希望介绍电化学界面的一些模拟方法(着重在双电层模拟)。并且在上一节里,我们已经介绍过了溶液相的一些模拟方法。根据这些内容,我们可以写个大致如下的框架,然后和我们的合作者或者导师进行下一步讨论。

          +
          the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)
          +==>
          +EDL is hard to be probed (reason)
          +==>
          +we can get some info with in situ techniques and ab initio simulations
          +==>
+One of the key characteristics of EDL is its capacitance
+==>
+EDL capacitance can be measured by experiment (CV/impedance) and be a benchmark for modelling
+==>
+replace the solute by the electrode (from solution electrochemistry to interface electrochemistry)
+==>
+use similar simulation methods and focus on their performance on EDL modelling
          +
          +

          在这一步中,你不需要考虑语言的优美。简单而精准的文字在接下来的修改中更方便。

          +

          第二步:ABT 结构

          +

          “你需要花多长时间去讲述一个故事?” Randy Olson 在他的TEDMED 演讲中问了这个问题。(这是个油管链接,B 站没找着...)在这个演讲以及他的书Houston, We Have a Narrative: Why Science Needs Story中,Olson 介绍了一种非常简单的叙事方法,ABT 结构:

          +
          +

          (...) AND (...), BUT (...), THEREFORE (...)

          +
          +

          让我们试着把上一节里的框架用 ABT 结构改造一下!

          +
          %% start the ABT structure
          +% EDL is important (... AND ...)
          +the electric double layer (EDL) xxx (importance of EDL/why we want to investigate it)
          +% BUT it is hard to be probed
          +However, EDL is hard to be probed, not only because xxx but xxx
          +% THEREFORE, we need some tools
          +To address this difficulty, both in situ experimental techniques and modelling are required.
          +%% END the ABT structure
          +
          +

          如果你不知道怎么下笔,那就先把所有想到的点写下来并把它们往 ABT 结构里套。

          +

          如果你认为某处的过渡不够自然,也可以考虑用 ABT 结构重写一下 。

          +

          第三步:重复你的词汇

          +

          经过上面的两个步骤,我相信你已经获得了一个可理解的大纲。 现在,我们来进行最后一步:尽量使每个句子中都出现上一个句子中的单词。这个方法可以加强句子之间的连接,使逻辑更加清晰,从而让你的文字可以更容易被阅读! 看看这个例子:

          +
          Electric double layers (EDL) at the electrode/electrolyte interfaces are where electrochemical reactions occur, and thus are of paramount importance in electrochemistry.
          +% Electric double layers (EDL) <==> EDL
          +However, microscopic understanding of the EDL is still lacking due to its complexity and difficulty to probe.
          +% microscopic understanding <==> valuable insight
          +Thanks to the development of computational methods, modelling has shown great potential in studying the interface of the electrode and the electrolyte in the past few years, and provided valuable insight into EDL structures and dielectric properties.
          +
          +

好吧,看上去第二个句子和第三个句子之间的重复有点隐晦?但大概就是那个意思。尽管如此,同义替换对于新手来说需要特别谨慎,以防出现表述偏差。如果你不是特别确定,那就简单地重复!先让你的文章变得清晰!

          +
          +

          Albert Einstein: When you are out to describe the truth, leave elegance to the tailor.

          +
          +

          这里是另一个例子 (引自 DOI: 10.1126/SCIADV.ABB1219)。笔者个人很喜欢这个简洁紧凑的例子!

          +
          An electric double layer (EDL) formed at an electrified interface can afford a potential change of a few volts within a very thin layer of 3 to 5 Å, amounting to an extremely large electric field of similar strength to that in a particle accelerator.
          +% an extremely large electric field <==> a strong electric field
          +Naturally, one would wonder how solvent molecules such as water or any other reactive species inside the EDL would behave in response to such a strong electric field.
          +% how ... behave <==> this question
          +Answering this question is not only of fundamental interest but also of technological importance in a broad range of research areas in science and technology, to name a few, energy storage in supercapacitors, electrocatalysis of relevance to energy and environmental applications, self-assembly of colloidal particles, ion transport across biological membranes, and mineralization processes in earth science.
          +% fundamental interest, technological importance <==> its significance
          +Despite its significance, molecular-level understanding of EDL is largely missing, owing to its complexity and difficulty to probe.
          +% molecular-level understanding <==> microscopic structures
+Because of the advent of advanced experimental (e.g., synchrotron-based techniques and Raman spectroscopy) and computational methods [e.g., ab initio molecular dynamics (AIMD)], it is not until recently that the microscopic structures of EDL have started to be unveiled.
          +
          +

          文章到这里就结束了!笔者不想写太多点以至于让你们有点抓狂(但是笔者还是非常推荐你们去读读上面提到的书!很有趣!)总的来说,笔者认为上面提及的三点已经足以写出一个清晰的初稿给你们的老板了。

          +

          最后,向我第一个项目的日常导师,荷兰莱顿大学的 Dr. Katharina Doblhoff-Dier 表示诚挚的感谢。

          +

          Useful websites for writing

          +

          vocabulary

          +

          https://www.vocabulary.com

          +

          https://www.oxfordlearnersdictionaries.com

          +

          synonym

          +

          https://www.wordhippo.com

          +

          https://www.thesaurus.com

          +

          collocation

          +

          https://www.linggle.com

          +

          https://netspeak.org

          +

          sentence

          +

          https://www.phrasebank.manchester.ac.uk

          +

          rephrase

          +

          https://quillbot.com

          +

          translation

          +

          http://www.onedict.com/index.php

          +

          https://www.deepl.com/translator

          +

          年度汇报标准

          +

研究生(博士和硕士)前两年每年应写一次汇报。汇报使用 **LaTeX** 撰写,模板选用 revtex 的 AIP 模板,以 **英文** 撰写。
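下面给出一个最小的模板骨架作为参考(通过 shell 生成文件仅为示意;revtex4-2 的类选项与文件名均为假设,实际请以 Overleaf 上选用的模板为准):

```bash
# 生成一个最小的 AIP (revtex) 报告骨架(文件名与类选项仅为示例)
cat > report.tex << 'EOF'
\documentclass[aip,reprint]{revtex4-2}
\begin{document}
\title{Annual Report}
\author{Your Name}
\affiliation{Cheng Group, Xiamen University}
\begin{abstract}
One-paragraph summary of this year's work.
\end{abstract}
\maketitle
\section{Introduction}
...
\bibliography{report}
\end{document}
EOF
```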

          +

          硕士生第一年报告的词数应在 4500 左右,第二年报告应在 6000 左右。

          +

          博士生第一年报告的词数应在 6000 左右,第二年报告应在 7500 左右。

          +

          使用 Overleaf 写作

          +

          Overleaf是一个在线的 LaTeX 编辑器,可以直接在浏览器中编辑 LaTeX 文档。使用 Overleaf 可以方便地进行合作写作,同时也可以方便地进行版本控制。现阶段,课题组的科研论文基本都是使用 Overleaf 进行写作。基本操作流程为:在需要写文章的时候联系管理员,请管理员将文章相关人员的邮箱添加到一个空白项目中,然后用个人 Overleaf 账号进行后续编辑。项目相关文件课题组会统一进行归档管理。

          +

          在需要写文章的时候请将以下信息发给管理员:

          +
            +
          • 所需模版(常用的如 ACS 和 AIP,如果有另外需求也可以告知管理员)
          • +
          • 项目名称(按照:作者名-序号-文章名 进行命名,比如:jxzhu-1-pt_oh_100
          • +
          • 需要添加的成员邮箱(除管理员外上限 5 人每项目) + 管理员添加相关人员邮箱后,请所有成员查看邮箱/登录 Overleaf 账号确认邀请。
          • +
          +

          版本管理

          +

          Overleaf 可以在修改的时候实现版本记录,也可以添加评论,具体的使用方法可以参考此教程

          +
            +
          1. 右上角History,可以查看历史版本,并自行标记版本。
          2. +
3. 右上角Menu-Sync,可以进行手动备份。但是现阶段 GitHub 账号绑定仅限于会员(早期已绑定用户同步功能不受影响),故推荐使用 git+本地进行备份(也可在本地自行选择其他的托管平台)。git 相关教程参见此教程。最简备份示例见下。
          4. +
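下面是一个 git+本地备份的最简示意(目录名与远端地址均为假设,可替换为自己选择的托管平台):

```bash
# 假设已将 Overleaf 项目源码下载/同步到本地 my-paper 目录
cd my-paper
git init
git add .
git commit -m "backup from Overleaf"

# 远端地址仅为示例;分支名请以本地实际分支为准
git remote add origin git@example.com:user/my-paper.git
git push -u origin master
```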
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/skills/research_skill/index.html b/wiki/skills/research_skill/index.html new file mode 100644 index 00000000..a378048d --- /dev/null +++ b/wiki/skills/research_skill/index.html @@ -0,0 +1,2840 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 研究技能 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          研究技能入门

          +

          如何阅读文献

          +

          阅读文献入门

          +

          为什么要写作

          + + +

          如何写作

          + + +

          Whitesides教授的大作

          +

          Whitesides, G. M. Whitesides’ Group: Writing a Paper. Adv. Mater. 2004, 16 (15 SPEC. ISS.), 1375–1377.

          +

          如何用英语演讲

          +

          English for Presentations at International Conferences

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_development/lammps/installation/index.html b/wiki/software_development/lammps/installation/index.html new file mode 100644 index 00000000..8321569d --- /dev/null +++ b/wiki/software_development/lammps/installation/index.html @@ -0,0 +1,2846 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 在集群安装LAMMPS - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          在集群安装LAMMPS

          +

          Zeus 集群

          +
          # Load the necessary modules
          +module load cmake/3.20
          +module load intel/17.5.239 mpi/intel/2017.5.239 gcc/7.4.0
          +
          +# find the ver in https://download.lammps.org/tars/index.html
          +wget -c https://download.lammps.org/tars/lammps-23Jun2022.tar.gz
          +tar -zxvf lammps-23Jun2022.tar.gz
          +cd lammps-23Jun2022
          +mkdir -p build
          +cd build
          +cmake ../cmake -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \
          +-DCMAKE_Fortran_COMPILER=gfortran \
          +-D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=mpi \
          +-D CMAKE_INSTALL_PREFIX=/data/jxzhu/apps/lammps/install/23Jun2022 \
          +-D CMAKE_INSTALL_LIBDIR=lib \
          +-D CMAKE_INSTALL_FULL_LIBDIR=/data/jxzhu/apps/lammps/install/23Jun2022/lib \
          +-C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake \
          +-D BUILD_SHARED_LIBS=yes
          +make -j 32
          +make install
          +
          +

          检查是否安装完成

          +
          ./lmp_mpi -h
          +
          +

          对于个人用户,可以将可执行文件所在路径(如/data/jxzhu/apps/lammps/lammps-23Jun2022/build)写入某个虚拟环境的环境变量,以实现版本控制。
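例如,可以借助 conda 虚拟环境的激活脚本来实现(以下路径与做法仅为示意):

```bash
# 在某个 conda 环境的激活脚本中追加 LAMMPS 可执行文件路径(路径仅为示例)
mkdir -p $CONDA_PREFIX/etc/conda/activate.d
cat > $CONDA_PREFIX/etc/conda/activate.d/lammps.sh << 'EOF'
export PATH=/data/jxzhu/apps/lammps/lammps-23Jun2022/build:$PATH
EOF

# 重新激活该环境后即可直接调用 lmp_mpi
```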

          +

          IKKEM 集群

          +
          module load intel/2021.1
          +module load dev/cmake/3.26.3
          +module load gcc/9.3
          +
+# find the ver in https://download.lammps.org/tars/index.html
          +wget -c https://download.lammps.org/tars/lammps-23Jun2022.tar.gz
          +tar -zxvf lammps-23Jun2022.tar.gz
          +cd lammps-23Jun2022
          +mkdir -p build
          +cd build
          +cmake ../cmake -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \
          +      -DCMAKE_Fortran_COMPILER=gfortran \
          +      -D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=intel_cpu_intelmpi \
          +      -D CMAKE_INSTALL_PREFIX=/public/home/jxzhu/apps/lammps/install/lammps-23Jun2022 \
          +      -D CMAKE_INSTALL_LIBDIR=lib \
          +      -D CMAKE_INSTALL_FULL_LIBDIR=/public/home/jxzhu/apps/lammps/install/lammps-23Jun2022/lib \
          +      -C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake \
          +      -D BUILD_SHARED_LIBS=yes 
          +make -j 32
          +make install
          +
          +

          检查是否安装完成

          +
          ./lmp_intel_cpu_intelmpi -h
          +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_development/lammps/plugin/index.html b/wiki/software_development/lammps/plugin/index.html new file mode 100644 index 00000000..6c28c0e9 --- /dev/null +++ b/wiki/software_development/lammps/plugin/index.html @@ -0,0 +1,2769 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 基于插件模式开发LAMMPS - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          基于插件模式开发LAMMPS

          + +

          一般来说,对代码进行功能添加/修改需要直接在源代码中进行,这样可能对原有代码产生影响。为了解决这个问题,LAMMPS引入了插件模式,使得用户可以在不改动源代码的情况下对LAMMPS进行功能扩展。接下来,我们通过官方的例子对插件的运行方式进行大致的了解:

          +
          ```bash
          +cd lammps-23Jun2022/examples/plugins
          +```
          +
          +

          make编译:

          +
          ```bash
          +make 
          +```
          +
          +

          或者cmake

          +
          ```bash
          +mkdir -p build
          +cd build
          +cmake ../
          +make
          +```
          +
          +

          编译后可以得到多个动态库文件.so。可以通过两种方式调用插件:

          +
            +
          1. 在lammps的input中,通过plugin load命令加载插件,即可使用插件中的功能。
            +
            plugin load morse2plugin.so
            +
          2. +
3. 将动态库所在路径加入LAMMPS_PLUGIN_PATH,程序会自动加载搜索到的所有插件(示例见下)。
          4. +
          +
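第二种方式的示意如下(插件目录与输入文件名仅为示例,可执行文件名以实际编译结果为准):

```bash
# 将编译得到的插件目录加入搜索路径,LAMMPS 启动时会自动加载其中的插件
export LAMMPS_PLUGIN_PATH=/data/jxzhu/apps/lammps/lammps-23Jun2022/examples/plugins/build
lmp_mpi -in input.lammps
```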

          注意:如果移动examples/plugins中例子所在路径,需要修改编译设置。如果采用make编译,需要修改Makefile中的CXXFLAGS

          +
          ```bash
          +CXXFLAGS=-I$(LAMMPS_SOURCE_DIR) -Wall -Wextra -O3 -fPIC -I$(LAMMPS_SOURCE_DIR)/OPENMP -fopenmp
          +```
          +
          +

          并设置LAMMPS_SOURCE_DIR为lammps源代码所在路径。

          +
          ```bash
          +export LAMMPS_SOURCE_DIR=/data/jxzhu/software/lammps/lammps-23Jun2022/src
          +make
          +```
          +
          +

          如果采用cmake编译,需要将plugins/CMakeLists.txt中22行注释掉(get_filename_component(LAMMPS_SOURCE_DIR ${PROJECT_SOURCE_DIR}/../../src ABSOLUTE)),并在执行cmake时指定lammps源代码所在目录

          +
          ```bash
          +mkdir -p build
          +cd build
          +rm *
          +cmake -DLAMMPS_SOURCE_DIR=/data/jxzhu/apps/lammps/lammps-23Jun2022/src ..
          +make
          +```
          +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_development/lammps/quick_start/index.html b/wiki/software_development/lammps/quick_start/index.html new file mode 100644 index 00000000..f9a3b70f --- /dev/null +++ b/wiki/software_development/lammps/quick_start/index.html @@ -0,0 +1,2809 @@ + + + + + + + + + + + + + + + + + + + + + + + + + LAMMPS开发准备 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          LAMMPS开发准备

          +

          为什么要学习LAMMPS开发?

          +

          作为一个开源的分子动力学模拟软件,LAMMPS在计算化学中有非常广泛的应用。现有的LAMMPS发行版本提供了大量的功能,大多数时候可以满足用户的需求。但是,有时候我们仍需要实现一些新的功能,或者对现有功能进行修改。此时,就需要我们对LAMMPS开发有大致了解。本教程面向已掌握LAMMPS的基本功能的用户,希望通过本教程的学习,读者可以掌握LAMMPS的基本开发方法,为自己的研究工作提供更多的可能性。考虑到现在已经有一些关于LAMMPS开发的教程(贴于下方),本教程将基于chenglab组内情况进行介绍。

          +

          阅读资料

          +
            +
          1. 官方开发指南
            +非常全面的开发指南,包括了LAMMPS的代码结构、并行算法等,但是篇幅较长。建议优先阅读代码架构单步中调用的功能
          2. +
          3. Extending and Modifying LAMMPS Writing Your Own Source Code: A pragmatic guide to extending LAMMPS as per custom simulation requirements
            +详细介绍了如何在LAMMPS中添加新的功能,可以根据需求找到对应的案例进行学习。
          4. +
          +

          如果你没有任何代码经验,建议先根据基础完成以下的内容学习:

          +
            +
          1. LAMMPS基础
          2. +
          3. Git基础
          4. +
          5. C++基础(请根据自己的代码基础选择合适的教程,比如C++ Primer Plus)
          6. +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_installation/cp2k-7.1/index.html b/wiki/software_installation/cp2k-7.1/index.html new file mode 100644 index 00000000..80ef4976 --- /dev/null +++ b/wiki/software_installation/cp2k-7.1/index.html @@ -0,0 +1,2933 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K 7.1 安装教程 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          CP2K 7.1 安装教程

          +

          这里以 7.1 版本为例介绍如何安装编译 CP2K,其他版本可参照修改。

          +

          环境准备

          +

          可参考官方支持编译环境

          +
            +
          • 使用 GCC 5.5.0 以上
          • +
          • Intel MPI 环境
          • +
          +

          一切就绪后,加载上述环境:

          +
          module load intel/17.5.239 mpi/intel/2017.5.239
          +module load gcc/5.5.0
          +
          +

          安装流程

          +

          首先,在 Release 页面 下载 CP2K 安装包,以 7.1 为例:

          +
          wget -c https://github.com/cp2k/cp2k/releases/download/v7.1.0/cp2k-7.1.tar.bz2
          +
          +

          拷贝 cp2k-7.1.tar.bz2 到安装路径下并解压。由于需要预编译所需的库等,这里为了防止后续使用时产生额外路径依赖,推荐直接在安装路径下编译。 +以/share/apps/cp2k为例:

          +
          cp cp2k-7.1.tar.bz2 /share/apps/cp2k
          +cd /share/apps/cp2k/
          +tar -jxf cp2k-7.1.tar.bz2
          +
          +

          更改目录名为 7.1,为后续添加 module 文件作准备(本步骤可选,也可保留默认名称,后续环境配置时需要相应修改):

          +
          mv cp2k-7.1 7.1
          +
          +

          进入到 toolchain 目录下,并修改install_mpich.sh, 将其中的check_command mpic++ "mpich"改为check_command mpicxx "mpich"

          +
          cd 7.1/tools/toolchain
          +sed -i 's/check_command mpic++/check_command mpicxx/g' scripts/install_mpich.sh
          +
          +

          (可选) 若需安装 ELPA 包,需要将静态库替换为动态库,否则会报错undefined reference to ...

          +
          sed -i 's/a libmkl_core.a libmkl_sequential.a/so libmkl_sequential.so libmkl_core.so/g' scripts/install_mkl.sh
          +sed -i 's/libmkl_gf_lp64.a/libmkl_gf_lp64.so/g' scripts/install_mkl.sh
          +sed -i 's/libmkl_core.a/libmkl_sequential.so/g' scripts/install_mkl.sh
          +sed -i 's/libmkl_scalapack_lp64.a/libmkl_scalapack_lp64.so/g' scripts/install_mkl.sh
          +sed -i 's/libmkl_blacs_intelmpi_lp64.a/libmkl_blacs_intelmpi_lp64.so/g' scripts/install_mkl.sh
          +sed -i 's/libmkl_blacs_openmpi_lp64.a/libmkl_blacs_openmpi_lp64.so/g' scripts/install_mkl.sh
          +sed -i 's/libmkl_core.a/libmkl_sequential.so/g' scripts/install_mkl.sh
          +
          +

          ref 1 +ref 2

          +

          (可选) 为加速安装、防止超时报错,在中国大陆可将 Github 统一替换为镜像。但后续从 cp2k 官方网站下载的包也可能出现超时报错,可能需要借助其他平台下载相应的软件包并放到build目录下。

          +
          sed -i 's/github.com/hub.fastgit.org/g' scripts/install_*.sh
          +
          +

          随后运行 toolchain 脚本安装依赖软件:

          +
          ./install_cp2k_toolchain.sh --gpu-ver=no   --enable-cuda=no  --with-mpich=system --with-sirius=no --with-openmpi=no  --with-spfft=no --with-hdf5=no
          +
          +

          过程中请注意输出信息和报错等,并相应地予以解决。如果一切顺利,会提示需要拷贝 arch 文件,并 source 所需的环境,按照提示操作即可。注意由于步骤不同这里的命令可能不同,仅供参考:

          +
          cp install/arch/local* /share/apps/cp2k/7.1/arch/
          +source /share/apps/cp2k/7.1/tools/toolchain/install/setup
          +
          +

          之后进行编译安装:

          +
          cd /share/apps/cp2k/7.1/
          +make -j 8 ARCH=local VERSION="popt psmp"
          +
          +

          如果一切顺利,可以得到编译好的二进制可执行文件,创建bin目录,并拷贝exe目录里的文件到bin

          +
          mkdir bin
          +cp ./exe/local/* ./bin
          +
          +
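可以简单检查可执行文件是否能正常运行(以 popt 版本为例;若 --version 选项不可用,也可改用 --help 查看):

```bash
# 简单检查编译好的二进制(psmp 版本同理)
/share/apps/cp2k/7.1/bin/cp2k.popt --version
```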

          最后删除bintools之外的所有文件,并删除tools/toolchain里的buildinstall目录。

          +

          Module 文件生成

          +

          若集群使用 module 管理环境变量,请在 modulefile 目录下(取决于集群的设置)新建目录cp2k并创建文件.module

          +
          #%Module
          +
          +# Help message
          +proc ModulesHelp { } {
          +    set nameversion [module-info name]
          +    regsub "/.*" $nameversion "" name
          +    regsub ".*/" $nameversion "" version
          +    puts stderr "\tLoads the $version $name environment"
          +}
          +
          +# Set variables
          +set nameversion [module-info name]
          +regsub "/.*" $nameversion "" name
          +regsub ".*/" $nameversion "" version
          +module-whatis    "$name $version"
          +
          +# set environment variables
          +set basedir /share/apps/$name/$version
          +
          +module load intel/17.5.239 mpi/intel/2017.5.239
          +module load gcc/5.5.0
          +
          +prepend-path    PATH            ${basedir}/bin
          +
          +

          然后创建符号链接,提供相应版本号的环境:

          +
          ln -s .module 7.1
          +
          +

          Q&A

          +
            +
          1. 如果所有标称为https://www.cp2k.org的压缩包均无法下载,且单独wget该压缩包时提示Issued certificate has expired,可以尝试更新证书服务,CentOS 7 命令如下:
          2. +
          +
          yum install ca-certificates
          +
          +
            +
          1. +

            以上欺骗手段仅适用于 Intel MPI <= 2018 的版本,对高版本 MPI 推荐直接安装更高版本的 CP2K,Toolchain 可提供完整支持。

            +
          2. +
          3. +

            如果make过程中频繁报错,还可能是系统没有正确配置地区设置,请使用如下命令加载环境变量:

            +
          4. +
          +
          export LANG=en_US.UTF-8
          +export LC_ALL=en_US.UTF-8
          +export LC_CTYPE="en_US.UTF-8"
          +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/index.html b/wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/index.html new file mode 100644 index 00000000..ffaa2d22 --- /dev/null +++ b/wiki/software_installation/deepmd-kit/deepmd-kit_installation_191/index.html @@ -0,0 +1,3182 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeepMD-kit快速安装 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          DeepMD-kit快速安装

          +

          为减少后续安装的困难,请优先参考最佳实践。本文介绍的方法成型时,DP尚未实现对Lammps的解耦,但仍然可用。

          +
          +

          本部分主体写于2021年,截至目前(2022.08)仍适用,并且随版本升级仍在更新。

          +

          教程中使用的尚且是CUDA 10.1,但对CUDA 11.x也适用。

          +
          +

          背景:以 Zeus 集群为例,在服务器通过源代码编译安装DeepMD-kit和包含完整接口的LAMMPS。虽然官方已经提供了通过 Conda 一键安装的方法,但由于此法所安装的各个组件均为预编译版本,因而无法做更多拓展和改动,且通过 Conda 安装的 Protobuf 存在版本冲突,无法进一步编译其他接口。这里介绍一种方法,通过 Conda 安装通常不需要较大改动的TensorFlow C++ Interface,其余部分仍手动编译。

          +

          初始环境说明

          +

          以下过程以 Zeus 集群为例,操作系统及版本为CentOS 7,管理节点联网,采用module作为环境管理。

          +

          以下是预先配置好的环境,对于其他集群,可以此要求准备环境,其中 Intel MPI 可以用 MPICH 代替,其余组件请自行安装。注意CUDA 10.1对Nvidia驱动版本有要求,需要预先检查好(可用nvidia-smi快速查看)。

          +
            +
          • 通过yum安装
          • +
          • Git >= 1.8.2
          • +
          • 通过module加载
          • +
          • CUDA 10.1
          • +
          • Miniconda 3
          • +
          • GCC >= 7.4.0
          • +
          • Intel MPI 2017 (暂未对其他版本进行测试)
          • +
          +
          +

          版本号仅供参考,实际安装可能会不一样,参考执行即可。

          +
          +

          创建新的环境

          +

          首先准备必要的依赖。

          +

          检查可用的模块,并加载必要的模块:

          +
          module avail
          +module add cuda/10.1
          +module add gcc/7.4.0
          +
          +

          注意这里导入的是GCC 7.4.0版本,如果采用低于4.9.4的版本(不导入GCC)则dp_ipi不会被编译。

          +

          然后创建虚拟环境,步骤请参考Anaconda 使用指南

          +

          假设创建的虚拟环境名称是 deepmd,则请将步骤最后的 <your env name> 替换为 deepmd。若采用该步骤的设置,则虚拟环境将被创建在/data/user/conda/env/deepmd下(假设用户名为user)。

          +

          注意请务必为创建的虚拟环境安装所需的Python环境。通常不指定Python版本号的情况下(例如文中的步骤conda create -n <your env name> python)会安装conda推荐的最新版本,如需要替代请对应指定,如conda create -n deepmd python=3.8

          +

          由于Zeus的GPU节点不能联网,故需要将所需的驱动程序库libcuda.solibcuda.so.1的名称手动链接到某个具有权限的路径/some/local/path并分别加入环境变量。

          +
          ln -s /share/cuda/10.0/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
          +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/share/cuda/10.0/lib64/stubs:/some/local/path
          +
          +
          +

          提示

          +

若在Zeus 集群上安装,管理员已事先把libcuda.so.1 链接在/share/cuda/10.0/lib64/stubs/下,故无需额外创建软链接,同理/some/local/path也无需加入环境变量,但仍需要驱动程序库的符号链接libcuda.so。注意这一步骤执行后,实际运行时需要将 stubs 路径从环境变量中移除。

          +
          +

          安装Tensorflow的C++ 接口

          +

          以下安装,假设软件包下载路径均为/some/workspace, 以TensorFlow 2.3.0版本、DeePMD-kit 1.3.3 版本为例进行说明,其他版本的步骤请参照修改。

          +

          首先创建并进入虚拟环境,这里假设命名为deepmd

          +
          conda create -n deepmd python=3.8
          +conda activate deepmd
          +
          +

          搜索仓库,查找可用的TensorFlow的C++ 接口版本。

          +
          conda search libtensorflow_cc -c https://conda.deepmodeling.com
          +
          +

          结果如下:

          +
          Loading channels: done
          +# Name                       Version           Build  Channel
          +libtensorflow_cc              1.14.0  cpu_h9a2eada_0
          +libtensorflow_cc              1.14.0  gpu_he292aa2_0
          +libtensorflow_cc               2.0.0  cpu_h9a2eada_0
          +libtensorflow_cc               2.0.0  gpu_he292aa2_0
          +libtensorflow_cc               2.1.0  cpu_cudaNone_0
          +libtensorflow_cc               2.1.0  gpu_cuda10.0_0
          +libtensorflow_cc               2.1.0  gpu_cuda10.1_0
          +libtensorflow_cc               2.1.0   gpu_cuda9.2_0
          +libtensorflow_cc               2.3.0  cpu_cudaNone_0
          +libtensorflow_cc               2.3.0  gpu_cuda10.1_0
          +libtensorflow_cc               2.4.1  gpu_cuda11.0_0
          +libtensorflow_cc               2.4.1  gpu_cuda11.1_0
          +libtensorflow_cc               2.5.0  cpu_cudaNone_0
          +libtensorflow_cc               2.5.0  gpu_cuda10.1_0
          +libtensorflow_cc               2.5.0  gpu_cuda11.3_0
          +libtensorflow_cc               2.7.0  cpu_h6ddf1b9_0
          +libtensorflow_cc               2.7.0 cuda101h50fd26c_0
          +libtensorflow_cc               2.7.0 cuda113h3372e5c_0
          +libtensorflow_cc               2.7.0 cuda113hbf71e95_1
          +libtensorflow_cc               2.9.0  cpu_h681ccd4_0
          +libtensorflow_cc               2.9.0 cuda102h929c028_0
          +libtensorflow_cc               2.9.0 cuda116h4bf587c_0
          +
          +

          这里所希望安装的版本是2.3.0的GPU版本,CUDA版本为10.1,因此输入以下命令安装:

          +
          conda install libtensorflow_cc=2.3.0=gpu_cuda10.1_0 -c https://conda.deepmodeling.org
          +
          +

          若所安装的环境没有实际的GPU驱动(比如集群的登录节点)或需要用到Conda安装CudaToolkit,可能需要参照此处说明强制指定GPU环境。比如:

          +
          CONDA_OVERRIDE_CUDA="11.3" conda install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com
          +
          +

          请注意CONDA_OVERRIDE_CUDA的值需要与GPU支持以及希望用到的CUDA版本相匹配。

          +
          +

          提示

          +

          注意A100仅支持TF 2.4.0以上、CUDA11.2以上,安装时请对应选择。

          +
          +
          +

          提示

          +

          个别版本在后续编译时可能会提示需要libiomp5.so,请根据实际情况确定是否需要提前载入Intel环境(见下文Lammps编译部分)或者conda install intel-openmp

          +
          +
          +

          提示

          +

          conda命令可能速度较慢,也可以考虑切换为mamba,后者可大幅加速Conda的性能,且完全兼容。只需参照前述链接安装后将conda替换为mamba即可

          +
          +

          若成功安装,则定义环境变量:

          +
          export tensorflow_root=/data/user/conda/env/deepmd
          +
          +

          即虚拟环境创建的路径。

          +

          安装DeePMD-kit的Python接口

          +

          以防万一可以升级下pip的版本:

          +
          pip install --upgrade pip
          +
          +

          接下来安装Tensorflow的Python接口

          +
          pip install tensorflow==2.3.0
          +
          +

          若提示已安装,请使用--upgrade选项进行覆盖安装。若提示权限不足,请使用--user选项在当前账号下安装。

          +

          然后下载DeePMD-kit的源代码(注意把v1.3.3替换为需要安装的版本,如v2.0.3等)

          +
          cd /some/workspace
          +git clone --recursive https://github.com/deepmodeling/deepmd-kit.git deepmd-kit -b v1.3.3
          +
          +

          在运行git clone时记得要--recursive,这样才可以将全部文件正确下载下来,否则在编译过程中会报错。

          +
          +

          提示

          +

          如果不慎漏了--recursive, 可以采取以下的补救方法: +

          git submodule update --init --recursive
          +

          +
          +

          若集群上 Cmake 3没有安装,可以用pip进行安装:

          +
          pip install cmake
          +
          +

          修改环境变量以使得cmake正确指定编译器:

          +
          export CC=`which gcc`
          +export CXX=`which g++`
          +export FC=`which gfortran`
          +
          +

          若要启用CUDA编译,请导入环境变量:

          +
          export DP_VARIANT=cuda
          +
          +

          随后通过pip安装DeePMD-kit:

          +
          cd deepmd-kit
          +pip install .
          +
          +

          安装DeePMD-kit的C++ 接口

          +

          延续上面的步骤,下面开始编译DeePMD-kit C++接口:

          +
          deepmd_source_dir=`pwd`
          +cd $deepmd_source_dir/source
          +mkdir build 
          +cd build
          +
          +

          假设DeePMD-kit C++ 接口安装在/some/workspace/deepmd_root下,定义安装路径deepmd_root

          +
          export deepmd_root=/some/workspace/deepmd_root
          +
          +

          在build目录下运行:

          +
          cmake -DLAMMPS_VERSION_NUMBER=<value> -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
          +
          +

          请根据自己即将安装的Lammps版本指定-DLAMMPS_VERSION_NUMBER的值,目前最新版本的DeePMD-kit默认为20210929,如需安装Lammps 29Oct2020,请设定为20201029

          +

          若通过yum同时安装了Cmake 2和Cmake 3,请将以上的cmake切换为cmake3

          +

          最后编译并安装:

          +
          make
          +make install
          +
          +

          若无报错,通过以下命令执行检查是否有正确输出:

          +
          $ ls $deepmd_root/bin
          +dp_ipi
          +$ ls $deepmd_root/lib
          +libdeepmd_ipi.so  libdeepmd_op.so  libdeepmd.so
          +
          +

          安装LAMMPS的DeePMD-kit模块

          +

          接下来安装

          +
          cd $deepmd_source_dir/source/build
          +make lammps
          +
          +

          此时在$deepmd_source_dir/source/build下会出现USER-DEEPMD的LAMMPS拓展包。

          +

          下载LAMMPS安装包,并把接口代码复制到src目录下:

          +
          cd /some/workspace
          +# Download Lammps latest release
          +wget -c https://lammps.sandia.gov/tars/lammps-stable.tar.gz
          +tar xf lammps-stable.tar.gz
          +cd lammps-*/src/
          +cp -r $deepmd_source_dir/source/build/USER-DEEPMD .
          +
          +

          Make命令安装

          +

          选择需要编译的包(若需要安装其他包,请参考Lammps官方文档):

          +
          make yes-user-deepmd
          +make yes-kspace
          +
          +

          如果没有make yes-kspace 会因缺少pppm.h报错。

          +

          这里也可以通过以下命令批量安装其他包:

          +
          make yes-all                        # install all packages
          +make no-lib                         # uninstall packages that require extra libraries
          +make no-ext                         # uninstall packages that require external libraries
          +
          +

          注意如Plumed、SMD、COLVARS等等需要提前配置或预先编译的插件如需安装请参考Lammps官方文档,同时诸如 Intel、GPU等加速包如果不需要编译可能需要额外手动取消安装。

          +
          +

          目前官方文档改动较大,且未提供历史版本,因而仅适用于官方最新Release版本(目前仅适用于Lammps 29Sep2021以后的版本,但可能随着后续更新适用面进一步缩窄。),使用旧版请注意甄别。

          +
          +

          加载MPI环境,并采用MPI方式编译Lammps可执行文件:

          +
          module load intel/17.5.239 mpi/intel/2017.5.239
          +make mpi -j4
          +
          +
          +

          注意

          +

          此处使用的GCC版本应与之前编译Tensorflow C++接口和DeePMD-kit C++接口一致,否则可能会报错:@GLIBCXX_3.4.XX。如果在前面的安装中已经加载了GCC 7.4.0,请在这里也保持相应环境的加载。

          +
          +

          经过以上过程,Lammps可执行文件lmp_mpi已经编译完成,用户可以执行该程序调用训练的势函数进行MD模拟。
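一个最简的调用示意如下(模型与输入文件名均为假设;pair_style/pair_coeff 的具体写法随 DeePMD-kit 版本略有差异,请以对应版本文档为准):

```bash
# 输入文件 input.lammps 中调用 DP 势的关键两行(模型文件 graph.pb 仅为示例):
#   pair_style deepmd graph.pb
#   pair_coeff * *

# 以 4 个 MPI 进程运行
mpirun -np 4 ./lmp_mpi -in input.lammps
```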

          +

          Cmake安装

          +

          也可以直接使用Cmake进行编译,更加干净、快捷。

          +

          如需要安装Plumed,请首先利用Conda安装GSL环境:

          +
          conda install gsl
          +
          +

          然后请编辑lammps-stable/cmake/CMakeLists.txt,找到set(STANDARD_PACKAGES这一行,并在末尾括号内增加一项:USER-DEEPMD

          +
          set(STANDARD_PACKAGES
          +  ...  
          +  USER-DEEPMD)
          +
          +

          然后在lammps-stable目录下,新建build目录:

          +
          cd lammps-stable
          +mkdir build
          +cd build
          +
          +

          进行配置:

          +
          cmake -C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake \
          +-D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=mpi \
          +-D WITH_JPEG=no -D WITH_PNG=no -D WITH_FFMPEG=no \
          +-D PKG_PLUMED=yes -D PKG_COLVARS=yes -D PKG_USER-DEEPMD=ON \
          +-D CMAKE_INSTALL_PREFIX=/data/user/conda/env/deepmd \
          +-D CMAKE_CXX_FLAGS="-std=c++14 -DHIGH_PREC -DLAMMPS_VERSION_NUMBER=20220623 -I${deepmd_root}/include -I${tensorflow_root}/include -L${deepmd_root}/lib -L${tensorflow_root}/lib -Wl,--no-as-needed -ldeepmd_cc -ltensorflow_cc -ltensorflow_framework -Wl,-rpath=${deepmd_root}/lib -Wl,-rpath=${tensorflow_root}/lib" \
          +../cmake
          +
          +

          注意CMAKE_INSTALL_PREFIX指示的是安装路径,请根据实际情况修改。

          +
          +

          注意

          +

          这里额外关闭了图形输出模块(JPEG、PNG、FFMPEG),因为Conda自带的图形库会与系统有冲突,暂时没有解决,且使用make默认也不会安装。

          +
          +
          +

          注意

          +

          由于未知原因,有时候CMake会找不到Conda安装的GSL。但若提前编译好Plumed并采用Runtime方式载入,可不需要GSL:-D PLUMED_MODE=runtime

          +
          +

          然后进行编译:

          +
          make -j 16
          +make install
          +
          +

          经过以上过程,Lammps可执行文件lmp_mpi已经编译完成,用户可以执行该程序调用训练的势函数进行MD模拟。

          +

          DP-CP2K 安装指引

          +

          首先clone对应的安装包:

          +
          git clone https://github.com/Cloudac7/cp2k.git -b deepmd_latest --recursive --depth=1
          +
          +

          然后运行相应的Toolchain脚本:

          +
          cd tools/toolchain/
          +./install_cp2k_toolchain.sh --enable-cuda=no --with-deepmd=$deepmd_root --with-tfcc=$tensorflow_root --deepmd-mode=cuda --mpi-mode=no --with-libint=no --with-libxc=no --with-libxsmm=no
          +
          +

          根据脚本运行结尾的提示复制arch文件并source所需的环境变量。最后回到主目录进行编译:

          +
          make -j 4 ARCH=local VERSION="ssmp sdbg"
          +
          +

          编译正确完成后,可执行文件生成在exe/下,即cp2k.sopt

          +
          +

          注意目前DP-CP2K暂未支持MPI,因而请单独编译此Serial版本。且CP2K由于IO问题,性能相比Lammps低50%以上,如非刚需还是建议使用Lammps进行MD模拟,后者可提供更多特性和加速的支持。

          +

          同时目前开发者遇到一些困难,故提交的PR尚未更新且由于沉默过久已被官方关闭。如读者有在CP2K实现共享状态的开发经验,请联系作者,谢谢。

          +

          There is currently some difficulty in implementing a shared state in CP2K runs to reduce the I/O at each MD step. The developer has not yet found a proper solution, which is why the PR has gone silent. If you can offer any relevant experience, please contact me. Thanks!

          +

          DeepMD-kit安装:旧版

          +
          +

          本部分写于2020年,适用于DeePMD-kit 1.x 和 TensorFlow 1.14。对目前较新的版本可能不适用,请移步安装最佳实践快速安装教程

          +
          +

          背景:以 Zeus 集群为例,在服务器安装DeepMD-kit和包含完整接口的LAMMPS。

          +

          参考:

          +

          DeepMD-kit

          +

          TensorFlow

          +

          初始环境说明

          +

          以下过程以 Zeus 集群为例,操作系统及版本为CentOS 7,采用module作为环境管理。

          +
            +
          • 通过yum安装:
          • +
          • Cmake 3.7
          • +
          • GCC 4.8.5
          • +
          • Git 1.8.2
          • +
          • 通过module加载
          • +
          • CUDA 10.0
          • +
          • Miniconda3 (Python 3.7)
          • +
          • GCC 4.9.4
          • +
          • Intel MPI 2017
          • +
          +

          创建新的环境

          +

          首先准备必要的依赖。

          +

          检查可用的模块,并加载必要的模块:

          +
          module avail
          +module add cuda/10.0
          +module add gcc/4.9.4
          +
          +

          注意这里导入的是gcc 4.9.4版本,如果采用更低的版本(不导入gcc)则dp_ipi不会被编译。

          +

          然后创建虚拟环境,步骤请参考Anaconda 使用指南

          +

          假设创建的虚拟环境名称是 deepmd,则请将步骤最后的 <your env name> 替换为 deepmd。若采用该步骤的设置,则虚拟环境将被创建在/data/user/conda/env/deepmd下(假设用户名为user)。

          +
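
          A minimal sketch of that step (the environment name deepmd follows the assumption above; Python 3.7 matches the Miniconda3 module listed earlier and is an assumption):

          +
          conda create -n deepmd python=3.7
          +conda activate deepmd
          +
          +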

          由于GPU节点不能联网,故我们需要将所需的驱动程序库libcuda.solibcuda.so.1手动链接到某个路径/some/local/path并加入环境变量。

          +
          ln -s /share/cuda/10.0/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
          +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/share/cuda/10.0/lib64/stubs:/some/local/path
          +
          +
          +

          提示

          +

          若在 Zeus 集群上安装,管理员已事先把libcuda.so.1 链接在/share/cuda/10.0/lib64/stubs/下,故无需额外创建软链接,同理/some/local/path也无需加入环境变量。

          +
          +

          安装Tensorflow的C++ 接口

          +

          以下安装,假设软件包下载路径均为/some/workspace, 以TensorFlow 1.14.0版本、DeePMD-kit 1.2.0 版本为例进行说明,其他版本的步骤请参照修改。

          +

          下载对应的bazel安装包

          +
          cd /some/workspace
          +wget https://github.com/bazelbuild/bazel/releases/download/0.24.0/bazel-0.24.0-installer-linux-x86_64.sh
          +chmod +x bazel-0.24.0-installer-linux-x86_64.sh
          +./bazel-0.24.0-installer-linux-x86_64.sh --user
          +export PATH="$HOME/bin:$PATH"
          +
          +
          +

          注意

          +

          注意bazel的兼容性问题,合理的bazel版本设置请参阅Tensorflow官方文档中的说明

          +
          +

          下载TensorFlow源代码

          +
          cd /some/workspace 
          +git clone https://github.com/tensorflow/tensorflow tensorflow -b v1.14.0 --depth=1
          +cd tensorflow
          +
          +

          编译TensorFlow C++ Interface

          +

          tensorflow文件夹下运行configure,设置编译参数。

          +
          ./configure
          +Please specify the location of python. [Default is xxx]:
          +
          +Found possible Python library paths:
          +  /xxx/xxx/xxx
          +Please input the desired Python library path to use.  Default is [xxx]
          +
          +Do you wish to build TensorFlow with XLA JIT support? [Y/n]:
          +XLA JIT support will be enabled for TensorFlow.
          +
          +Do you wish to build TensorFlow with OpenCL SYCL support? [y/N]:
          +No OpenCL SYCL support will be enabled for TensorFlow.
          +
          +Do you wish to build TensorFlow with ROCm support? [y/N]:
          +No ROCm support will be enabled for TensorFlow.
          +
          +Do you wish to build TensorFlow with CUDA support? [y/N]: y
          +CUDA support will be enabled for TensorFlow.
          +
          +Do you wish to build TensorFlow with TensorRT support? [y/N]:
          +No TensorRT support will be enabled for TensorFlow.
          +
          +Found CUDA 10.0 in:
          +    /share/cuda/10.0/lib64
          +    /share/cuda/10.0/include
          +Found cuDNN 7 in:
          +    /share/cuda/10.0/lib64
          +    /share/cuda/10.0/include
          +
          +Please specify a list of comma-separated CUDA compute capabilities you want to build with.
          +You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
          +Please note that each additional compute capability significantly increases your build time and binary size, and that TensorFlow only supports compute capabilities >= 3.5 [Default is: 3.5,7.0]:
          +
          +Do you want to use clang as CUDA compiler? [y/N]:
          +nvcc will be used as CUDA compiler.
          +
          +Please specify which gcc should be used by nvcc as the host compiler. [Default is /share/apps/gcc/4.9.4/bin/gcc]:
          +
          +Do you wish to build TensorFlow with MPI support? [y/N]:
          +No MPI support will be enabled for TensorFlow.
          +
          +Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native -Wno-sign-compare]:
          +
          +Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]:
          +Not configuring the WORKSPACE for Android builds.
          +
          +Preconfigured Bazel build configs. You can use any of the below by adding "--config=<>" to your build command. See .bazelrc for more details.
          +    --config=mkl             # Build with MKL support.
          +    --config=monolithic      # Config for mostly static monolithic build.
          +    --config=gdr             # Build with GDR support.
          +    --config=verbs           # Build with libverbs support.
          +    --config=ngraph          # Build with Intel nGraph support.
          +    --config=numa            # Build with NUMA support.
          +    --config=dynamic_kernels    # (Experimental) Build kernels into separate shared objects.
          +    --config=v2              # Build TensorFlow 2.x instead of 1.x.
          +Preconfigured Bazel build configs to DISABLE default on features:
          +    --config=noaws           # Disable AWS S3 filesystem support.
          +    --config=nogcp           # Disable GCP support.
          +    --config=nohdfs          # Disable HDFS support.
          +    --config=noignite        # Disable Apache Ignite support.
          +    --config=nokafka         # Disable Apache Kafka support.
          +    --config=nonccl          # Disable NVIDIA NCCL support.
          +Configuration finished
          +
          +
          +

          注意

          +

          若采用前文导入的GCC 4.9.4版本,请根据which gcc的输出判断GCC的安装路径。但一般情况下安装程序可以直接检测到正确路径。

          +
          +

          随后进行编译,由于时间较长,可以考虑使用screen或者tmux将进程放置在后台。

          +
          bazel build -c opt --verbose_failures //tensorflow:libtensorflow_cc.so
          +
          +
          +

          说明

          +

          安装高版本Tensorflow(如2.1.0)时,若提示没有git -c的命令,请升级git到最新版。用户可能需要在本地进行编译并加入环境变量。

          +
          +
          +

          提示

          +

          一般情况下,bazel默认在~/.cache/bazel下进行编译。由于编译所需硬盘空间较大,如有需要,请在运行bazel前采用环境变量指定编译用临时文件夹,以/data/user/.bazel为例:

          export TEST_TMPDIR=/data/user/.bazel

          +
          +

          整合运行库与头文件

          +

          假设Tensorflow C++ 接口安装在/some/workspace/tensorflow_root下,则定义环境变量:

          +
          export tensorflow_root=/some/workspace/tensorflow_root
          +
          +

          创建上述文件夹并从编译结果中抽取运行库和头文件。

          +
          mkdir -p $tensorflow_root
          +
          +mkdir $tensorflow_root/lib
          +cp -d bazel-bin/tensorflow/libtensorflow_cc.so* $tensorflow_root/lib/
          +cp -d bazel-bin/tensorflow/libtensorflow_framework.so* $tensorflow_root/lib/
          +cp -d $tensorflow_root/lib/libtensorflow_framework.so.1 $tensorflow_root/lib/libtensorflow_framework.so
          +
          +mkdir -p $tensorflow_root/include/tensorflow
          +cp -r bazel-genfiles/* $tensorflow_root/include/
          +cp -r tensorflow/cc $tensorflow_root/include/tensorflow
          +cp -r tensorflow/core $tensorflow_root/include/tensorflow
          +cp -r third_party $tensorflow_root/include
          +cp -r bazel-tensorflow/external/eigen_archive/Eigen/ $tensorflow_root/include
          +cp -r bazel-tensorflow/external/eigen_archive/unsupported/ $tensorflow_root/include
          +rsync -avzh --include '*/' --include '*.h' --include '*.inc' --exclude '*' bazel-tensorflow/external/protobuf_archive/src/ $tensorflow_root/include/
          +rsync -avzh --include '*/' --include '*.h' --include '*.inc' --exclude '*' bazel-tensorflow/external/com_google_absl/absl/ $tensorflow_root/include/absl
          +
          +

          清理目标目录下赘余的源代码文件,保留编译好的接口。

          +
          cd $tensorflow_root/include
          +find . -name "*.cc" -type f -delete
          +
          +

          安装DeePMD-kit的Python接口

          +

          首先安装Tensorflow的Python接口

          +
          pip install tensorflow-gpu==1.14.0
          +
          +

          若提示已安装,请使用--upgrade选项进行覆盖安装。若提示权限不足,请使用--user选项在当前账号下安装。

          +
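
          For example, sketches of the two variants mentioned above:

          +
          pip install --upgrade tensorflow-gpu==1.14.0   # overwrite an existing installation
          +pip install --user tensorflow-gpu==1.14.0      # install under the current account if permissions are insufficient
          +
          +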

          然后下载DeePMD-kit的源代码。

          +
          cd /some/workspace
          +git clone --recursive https://github.com/deepmodeling/deepmd-kit.git deepmd-kit
          +
          +

          在运行git clone时记得要--recursive,这样才可以将全部文件正确下载下来,否则在编译过程中会报错。

          +
          +

          提示

          +
          +

          如果不慎漏了--recursive, 可以采取以下的补救方法:

          +
          git submodule update --init --recursive
          +
          +

          " %}

          +

          随后通过pip安装DeePMD-kit:

          +
          cd deepmd-kit
          +pip install .
          +
          +

          安装DeePMD-kit的C++ 接口

          +

          延续上面的步骤,下面开始编译DeePMD-kit C++接口:

          +
          deepmd_source_dir=`pwd`
          +cd $deepmd_source_dir/source
          +mkdir build 
          +cd build
          +
          +

          假设DeePMD-kit C++ 接口安装在/some/workspace/deepmd_root下,定义安装路径deepmd_root

          +
          export deepmd_root=/some/workspace/deepmd_root
          +
          +

          修改环境变量以使得cmake正确指定编译器:

          +
          export CC=`which gcc`
          +export CXX=`which g++`
          +
          +

          在build目录下运行:

          +
          cmake -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
          +
          +

          若通过yum同时安装了Cmake 2和Cmake 3,请将以上的cmake切换为cmake3

          +
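
          That is, the configuration step above becomes:

          +
          cmake3 -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
          +
          +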

          最后编译并安装:

          +
          make
          +make install
          +
          +

          若无报错,通过以下命令执行检查是否有正确输出:

          +
          $ ls $deepmd_root/bin
          +dp_ipi
          +$ ls $deepmd_root/lib
          +libdeepmd_ipi.so  libdeepmd_op.so  libdeepmd.so
          +
          +

          因为GCC版本差别,可能没有$deepmd_root/bin/dp_ipi

          +

          安装LAMMPS的DeePMD-kit模块

          +

          Next, build the DeePMD-kit module for LAMMPS:

          +
          cd $deepmd_source_dir/source/build
          +make lammps
          +
          +

          此时在$deepmd_source_dir/source/build下会出现USER-DEEPMD的LAMMPS拓展包。

          +

          下载LAMMPS安装包,按照常规方法编译LAMMPS:

          +
          cd /some/workspace
          +# Download Lammps latest release
          +wget -c https://lammps.sandia.gov/tars/lammps-stable.tar.gz
          +tar xf lammps-stable.tar.gz
          +cd lammps-*/src/
          +cp -r $deepmd_source_dir/source/build/USER-DEEPMD .
          +
          +

          选择需要编译的包(若需要安装其他包,请参考Lammps官方文档):

          +
          make yes-user-deepmd
          +make yes-kspace
          +
          +

          如果没有make yes-kspace 会因缺少pppm.h报错。

          +

          加载MPI环境,并采用MPI方式编译Lammps可执行文件:

          +
          module load intel/17u5 mpi/intel/17u5
          +make mpi -j4
          +
          +
          +

          注意

          +

          此处使用的GCC版本应与之前编译Tensorflow C++接口和DeePMD-kit C++接口一致,否则可能会报错:@GLIBCXX_3.4.XX。如果在前面的安装中已经加载了GCC 4.9.4,请在这里也保持相应环境的加载。

          +
          +

          经过以上过程,Lammps可执行文件lmp_mpi已经编译完成,用户可以执行该程序调用训练的势函数进行MD模拟。


          DeePMD-kit安装实战:嘉庚超算

          +

          嘉庚超算中心没有统一安装DeepMD-kit软件,用户使用前需要自行编译。本文参考最佳实践,基于嘉庚超算预装的模块进行。此处以DeepMD-kit v2.2.0版本为例。

          +

          初次安装

          +
            +
          1. +

            创建虚拟环境(此处以deepmd为例)

            +
            module load anaconda/2020.3
            +conda create -n deepmd python=3.9
            +
            +
          2. +
          3. +

            (可选)虚拟环境激活/退出的配置,也可将activate.sh中代码每次手动设置

            +
            # replace your own username here!
            +mkdir -p $CONDA_PREFIX/etc/conda/activate.d
            +touch $CONDA_PREFIX/etc/conda/activate.d/activate.sh
            +mkdir -p $CONDA_PREFIX/etc/conda/deactivate.d
            +touch $CONDA_PREFIX/etc/conda/deactivate.d/deactivate.sh
            +conda env config vars set LD_LIBRARY_PATH=$tensorflow_root/lib:$deepmd_root/lib:$CONDA_PREFIX/lib:$LD_LIBRARY_PATH
            +
            +
              +
            • $CONDA_PREFIX/etc/conda/activate.d/activate.sh
            • +
            +
            module load intel/2018.3
            +module load gcc/9.2
            +module load cmake/3.21
            +module load cuda/11.3
            +module load lammps/2022.6.23
            +
            +export CC=`which gcc`
            +export CXX=`which g++`
            +export FC=`which gfortran`
            +
            +# replace CONDA_PREFIX and deepmd_source_dir!!!
            +export deepmd_source_dir=/public/home/username/apps/deepmd-2.2.0
            +export tensorflow_root=$deepmd_source_dir/_skbuild/tensorflow_root
            +export deepmd_root=$deepmd_source_dir/_skbuild/deepmd_root
            +export LAMMPS_PLUGIN_PATH=$deepmd_root/lib/deepmd_lmp
            +
            +
              +
            • $CONDA_PREFIX/etc/conda/deactivate.d/deactivate.sh
            • +
            +
            module unload intel/2018.3
            +module unload gcc/9.2
            +module unload cmake/3.21
            +module unload cuda/11.3
            +module unload lammps/2022.6.23
            +
            +unset deepmd_source_dir
            +unset tensorflow_root
            +unset deepmd_root
            +unset LAMMPS_PLUGIN_PATH
            +
            +

            设置好后,重启虚拟环境。此后每次激活虚拟环境时,会自动加载相应的模块。

          3. 训练代码安装

            +

            pip install tensorflow==2.7 --upgrade
            +pip install scikit-build ninja
            +pip install protobuf==3.20
            +cd $deepmd_source_dir
            +export DP_VARIANT=cuda
            +pip install .
            +
          4. (可选)第三方接口安装

            +
            mkdir -p $tensorflow_root/lib 
            +cd $tensorflow_root
            +ln -s $CONDA_PREFIX/lib/python3.9/site-packages/tensorflow/include .
            +cd lib
            +ln -s $CONDA_PREFIX/lib/python3.9/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so libtensorflow_cc.so
            +ln -s $CONDA_PREFIX/lib/python3.9/site-packages/tensorflow/libtensorflow_framework.so.2 .
            +ln -s libtensorflow_framework.so.2 libtensorflow_framework.so
            +
            +mkdir -p $deepmd_source_dir/source/build
            +mkdir -p $deepmd_root
            +cd $deepmd_source_dir/source/build
            +cmake -DLAMMPS_SOURCE_ROOT=/public/software/lammps/lammps-2022.6.23-intel -DUSE_TF_PYTHON_LIBS=TRUE -DUSE_CUDA_TOOLKIT=TRUE -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root ..
            +make -j20
            +make install
            +
            +
          4. +
          +

          代码更新

          +
            +
          1. +

            Python代码

            +
            cd $deepmd_source_dir
            +export DP_VARIANT=cuda
            +pip install .
            +
            +
          2. +
          3. +

            C++代码

            +
            cd $deepmd_source_dir/source/build
            +make -j20
            +make install
            +
            +
          4. +

          DeepMD-kit安装最佳实践

          +

          背景:以 Zeus 集群为例,在服务器通过源代码编译安装DeepMD-kit和包含完整接口的LAMMPS。虽然官方已经提供了通过 Conda 一键安装的方法,但由于此法所安装的各个组件均为预编译版本,因而针对课题实际情况无法做更多拓展和改动,且通过 Conda 安装的 Protobuf 存在版本冲突,无法进一步编译其他接口。这里介绍一种方法,通过 Conda 安装通常不需要改动的TensorFlow C++ Interface,其余部分仍手动编译。由于目前新版Lammps已经提供Plugin支持,DeePMD亦支持通过Plugin调用,故可令组件之间相互解耦、减少后续安装的工序。

          +

          初始环境说明

          +

          以下过程以 Zeus 集群为例,操作系统及版本为CentOS 7,管理节点联网,采用module作为环境管理。

          +

          以下是预先配置好的环境,对于其他集群,可以此要求准备环境,其中 Intel MPI 可以用 MPICH 代替,其余组件请自行安装。注意CUDA 11.3对Nvidia驱动版本有要求,需要预先检查好(可用nvidia-smi快速查看)。

          +
            +
          • 通过yum安装
          • +
          • Git >= 1.8.2
          • +
          • 通过module加载
          • +
          • CUDA 11.3
          • +
          • Miniconda 3
          • +
          • GCC >= 7.4.0
          • +
          • Intel MPI 2017 (暂未对其他版本进行测试)
          • +
          +
          +

          版本号仅供参考,实际安装因人而异,参考执行即可。

          +
          +

          DeePMD-kit 常用组件关系

          +
          flowchart TB
          +  tfcpp(TensorFlow C++ Interface) -.-> tfpy(TensorFlow Python Interface)
          +  tfpy --> dppy(DeePMD Python Interface)
          +  dpcpp(DeePMD C++ Interface) -.-> dppy
          +  tfcpp --> dpcpp
          +  dpcpp --> lmp(DeePMD Lammps API)
          +  tfcpp --> lmp
          +

          如图所示展示了DeePMD-kit各个常用组件之间的联系,需要声明的是,图示并非对代码的严谨解析,仅仅是对组织结构的直观表现。

          +

          势函数训练过程通常依赖于DeePMD Python Interface,这一部分在用 Pip 安装时即依赖于TensorFlow的Python Interface,因此在图中用实线箭头表示。而用Pip安装的TensorFlow Wheel已经预先编译了底层所需的Tensorflow C++ Interface,这一隐含的依赖用虚线箭头表示。类似地,DeePMD-kit在Pip安装时也会调用CMake来编译一部分所需的C++库,因而也存在类似的关系。

          +

          当用训练好的势函数来进行MD模拟时,则需要运行Lammps等分子动力学软件调用DeePMD-kit接口。以Lammps为例,现有的两种方式分别是:

          - 在Lammps安装时即额外编译DeePMD API(即USER-DEEPMD)
          - 编译DeePMD Plugin,由支持Plugin的Lammps版本调用

          +

          这两种方式在编译时均需要调用DeePMD-kit和TensorFlow的C++ Interface,故在图中也用实线表示。而TensorFlow C++ Interface实际上可由源代码结合必要的底层依赖(如GCC、CUDA等)独立编译,DeePMD-kit C++ Interface只需在TensorFlow C++ Interface基础上进行编译(图中实线箭头)。

          +

          因而在实际处理安装关系时,我们也可以采用相对独立的编译方式来最大化解耦组件。下文的思路将按以下步骤展开:

          +
            +
          1. 建立独立的Conda环境,用 Pip 安装 TensorFlow 和 DeePMD-kit,提供势函数训练功能;
          2. 结合必要的组件、环境等编译Lammps,提供经典分子动力学模拟功能;
          3. 编译 DeePMD C++ Interface,在此基础上编译 DeePMD-kit Lammps Plugin供Lammps调用,提供 DeePMD 模拟功能;
          4. 编译 DeePMD CP2K API 和对应的CP2K版本(No free lunch.)
          +

          安装DeePMD-kit Python Interface

          +

          创建新的环境

          +

          首先准备必要的依赖。

          +

          检查可用的模块,并加载必要的模块:

          +
          module avail
          +module add cuda/11.3
          +module add gcc/7.4.0
          +
          +

          注意这里导入的是GCC 7.4.0版本,如果采用低于4.9.4的版本(不导入GCC)则dp_ipi不会被编译。

          +

          然后创建虚拟环境,步骤请参考Anaconda 使用指南

          +

          假设创建的虚拟环境名称是 deepmd,则请将步骤最后的 <your env name> 替换为 deepmd。若采用该步骤的设置,则虚拟环境将被创建在/data/user/conda/env/deepmd下(假设用户名为user)。

          +
          conda create -n deepmd python=3.9
          +conda activate deepmd
          +
          +

          注意请务必为创建的虚拟环境安装所需的Python环境。通常不指定Python版本号的情况下(例如文中的步骤conda create -n <your env name> python)会安装Conda推荐的最新版本,如需要替代请对应指定,如conda create -n deepmd python=3.10

          +

          对于无法联网的节点,在编译时需要将所需的驱动程序库的符号库libcuda.solibcuda.so.1的名称手动链接到某个具有权限的路径/some/local/path并分别加入环境变量,以通过编译流程:

          +
          ln -s /data/share/apps/cuda/11.3/lib64/stubs/libcuda.so /some/local/path/libcuda.so.1
          +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/share/apps/cuda/11.3/lib64/stubs:/some/local/path
          +
          +
          +

          提示

          +

          若在Zeus 集群上安装,管理员已事先把libcuda.so.1 链接在/data/share/apps/cuda/11.3/lib64/stubs/下,故无需额外创建软链接,同理/some/local/path也无需加入环境变量,但仍需要驱动程序库的符号链接libcuda.so。注意这一步骤执行后,实际运行时需要从环境变量中移除

          +
          +

          安装DeePMD-kit的Python接口

          +

          以防万一可以升级下pip的版本:

          +
          pip install --upgrade pip
          +
          +

          接下来安装Tensorflow的Python接口

          +
          pip install tensorflow
          +
          +

          若提示已安装,请使用--upgrade选项进行覆盖安装。若提示权限不足,请使用--user选项在当前账号下安装。

          +

          然后下载DeePMD-kit的源代码(注意把v2.1.5替换为需要安装的版本,如v2.0.3等)

          +
          cd /some/workspace
          +git clone --recursive https://github.com/deepmodeling/deepmd-kit.git deepmd-kit -b v2.1.5
          +
          +

          在运行git clone时记得要--recursive,这样才可以将全部文件正确下载下来,否则在编译过程中会报错。

          +
          +

          提示

          +

          如果不慎漏了--recursive, 可以采取以下的补救方法: +

          git submodule update --init --recursive
          +

          +
          +

          若集群上 CMake 3没有安装,可以用pip进行安装:

          +
          pip install cmake
          +
          +

          修改环境变量以使得cmake正确指定编译器:

          +
          export CC=`which gcc`
          +export CXX=`which g++`
          +export FC=`which gfortran`
          +
          +

          若要启用CUDA编译,请导入环境变量:

          +
          export DP_VARIANT=cuda
          +
          +

          随后通过pip安装DeePMD-kit:

          +
          cd deepmd-kit
          +pip install .
          +
          +

          安装Lammps

          +

          注意这一部分可以从DeePMD安装中解耦出来,因而兼顾对Lammps的不同需求,而不必为DeePMD专门编译一个Lammps可执行文件。

          +

          环境准备

          +

          首先加载所需的环境,包括CMake、Intel MPI等。若不需要编译Lammps原生的GPU加速,可不需要加载CUDA环境。注意需要把Intel MPI提供的头文件(mpi.h等)所在路径加入C_INCLUDE_PATH中。

          +

          仍以Zeus为例,如下所示。注意这里使用的是全局的CMake,如果与上一部分采用同一个环境,可不需重复加载。

          +
          module load cmake/3.20
          +module load intel/17.5.239 mpi/intel/2017.5.239 gcc/7.4.0
          +# if not included
          +export C_INCLUDE_PATH=<intel_installation_dir>/impi/2017.4.239/include64:$C_INCLUDE_PATH
          +
          +

          若需要编译对应的Lammps组件(如Plumed、NetCDF等),请对应加载所需的环境:

          +
          module load netcdf/4.9.0_intel17
          +module load plumed
          +
          +

          如需编译Lammps原生的GPU加速,可加载CUDA环境,注意这会使得编译得到的Lammps无法在不包括GPU的节点上运行。

          +
          # gpu acceleration support
          +module load cuda/11.3
          +
          +
          +

          Warning

          +

          若编译Lammps原生的GPU加速,请注意原生默认采用半精度。Lammps在开启GPU加速时速度可有较大提升,但精度问题已知可能导致DeePMD势函数模拟误差上升(体现为Model Deviation相比不开启GPU加速显著上升),请针对体系做测试确认误差是否符合预期。DeePMD接口官方未提供Lammps的GPU加速支持,且默认编译的是双精度版本,请务必注意。

          +
          +

          配置编译

          +

          创建文件夹

          +
          cd <lammps_source_code>
          +mkdir build
          +cd build
          +
          +

          进行编译

          +
          cmake  -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ \
          +-DCMAKE_Fortran_COMPILER=gfortran \
          +-D BUILD_MPI=yes -D BUILD_OMP=yes -D LAMMPS_MACHINE=mpi \
          +-D BUILD_SHARED_LIBS=yes \
          +-D CMAKE_INSTALL_PREFIX=<lammps_installation_dir> \
          +-D CMAKE_INSTALL_LIBDIR=lib \
          +-D CMAKE_INSTALL_FULL_LIBDIR=<lammps_installation_dir>/lib \
          +-C ../cmake/presets/most.cmake -C ../cmake/presets/nolib.cmake ../cmake
          +
          +

          CMAKE_INSTALL_PREFIX 可以根据安装实际路径修改,但这一方法得到的是共享库( *.so ),所以包括Lammps源代码在内都不要移动。

          +

          若开启对应插件,请注意在 ../cmake 前插入对应选项,如:

          +
          -D PKG_PLUMED=yes -D PLUMED_MODE=shared \
          +-D PKG_H5MD=yes -D PKG_NETCDF=yes \
          +-D NETCDF_INCLUDE_DIR=<netcdf_installation_dir>/include 
          +
          +

          若希望开启GPU加速,请增加选项:

          +
          -D PKG_GPU=on -D GPU_API=cuda
          +
          +

          开始编译

          +

          运行

          +
          make
          +make install
          +
          +

          编译DeePMD-kit Lammps Plugin

          +

          方法一:静态编译

          +

          安装Tensorflow的C++ 接口

          +

          以下安装,假设软件包下载路径均为 /some/workspace, 以 TensorFlow 2.7.0版本、DeePMD-kit 2.1.5 版本为例进行说明,其他版本的步骤请参照修改。注意为保证模型兼容性,版本号最好与 Python Interface对应。

          +

          本步骤需要使用 Conda,因此在前文基础上进行。

          +

          搜索仓库,查找可用的 TensorFlow 的 C++ 接口版本。

          +
          conda search libtensorflow_cc -c https://conda.deepmodeling.com
          +
          +

          结果如下:

          +
          Loading channels: done
          +# Name                       Version           Build  Channel
          +libtensorflow_cc              1.14.0  cpu_h9a2eada_0
          +libtensorflow_cc              1.14.0  gpu_he292aa2_0
          +libtensorflow_cc               2.0.0  cpu_h9a2eada_0
          +libtensorflow_cc               2.0.0  gpu_he292aa2_0
          +libtensorflow_cc               2.1.0  cpu_cudaNone_0
          +libtensorflow_cc               2.1.0  gpu_cuda10.0_0
          +libtensorflow_cc               2.1.0  gpu_cuda10.1_0
          +libtensorflow_cc               2.1.0   gpu_cuda9.2_0
          +libtensorflow_cc               2.3.0  cpu_cudaNone_0
          +libtensorflow_cc               2.3.0  gpu_cuda10.1_0
          +libtensorflow_cc               2.4.1  gpu_cuda11.0_0
          +libtensorflow_cc               2.4.1  gpu_cuda11.1_0
          +libtensorflow_cc               2.5.0  cpu_cudaNone_0
          +libtensorflow_cc               2.5.0  gpu_cuda10.1_0
          +libtensorflow_cc               2.5.0  gpu_cuda11.3_0
          +libtensorflow_cc               2.7.0  cpu_h6ddf1b9_0
          +libtensorflow_cc               2.7.0 cuda101h50fd26c_0
          +libtensorflow_cc               2.7.0 cuda113h3372e5c_0
          +libtensorflow_cc               2.7.0 cuda113hbf71e95_1
          +libtensorflow_cc               2.9.0  cpu_h681ccd4_0
          +libtensorflow_cc               2.9.0 cuda102h929c028_0
          +libtensorflow_cc               2.9.0 cuda116h4bf587c_0
          +
          +

          这里所希望安装的版本是2.7.0的GPU版本,CUDA版本为11.3,因此输入以下命令安装:

          +
          conda install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com
          +
          +

          若所安装的环境没有实际的GPU驱动(比如集群的登录节点)或需要用到Conda安装CudaToolkit,可能需要参照此处说明强制指定GPU环境。比如:

          +
          CONDA_OVERRIDE_CUDA="11.3" conda install libtensorflow_cc=2.7.0=cuda113hbf71e95_1 -c https://conda.deepmodeling.com
          +
          +

          请注意 CONDA_OVERRIDE_CUDA 的值需要与GPU支持以及希望用到的CUDA版本相匹配。

          +
          +

          提示

          +

          注意A100仅支持TF 2.4.0以上、CUDA11.2以上,安装时请对应选择。

          +
          +
          +

          提示

          +

          个别版本在后续编译时可能会提示需要libiomp5.so,请根据实际情况确定是否需要载入Intel环境或者conda install intel-openmp

          +
          +
          +

          提示

          +

          conda命令可能速度较慢,也可以考虑切换为mamba,后者可大幅加速Conda的性能,且完全兼容。只需参照前述链接安装后将conda替换为mamba即可

          +
          +

          若成功安装,则定义环境变量:

          +
          export tensorflow_root=/data/user/conda/env/deepmd
          +
          +

          即虚拟环境创建的路径。后文将使用 $tensorflow_root 来指定该路径。

          +

          安装DeePMD-kit的C++ 接口

          +

          下面开始编译DeePMD-kit C++接口:

          +
          deepmd_source_dir=`pwd`
          +cd $deepmd_source_dir/source
          +mkdir build 
          +cd build
          +
          +

          假设DeePMD-kit C++ 接口安装在 /some/workspace/deepmd_root 下,定义安装路径 deepmd_root

          +
          export deepmd_root=/some/workspace/deepmd_root
          +
          +

          在build目录下运行:

          +
          cmake -DLAMMPS_SOURCE_ROOT=<lammps_source_code> \
          +-DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root \
          +-DUSE_CUDA_TOOLKIT=TRUE ..
          +
          +

          注意这里的 <lammps_source_code> 对应前文中Lammps的源码路径。

          +

          最后编译并安装:

          +
          make
          +make install
          +
          +

          若无报错,通过以下命令执行检查是否有正确输出:

          +
          $ ls $deepmd_root/lib
          +deepmd_lmp/           libdeepmd_cc_low.so   libdeepmd_gromacs.so  libdeepmd_ipi.so      libdeepmd_lmp.so      libdeepmd_op.so
          +deepmd_lmp_low/       libdeepmd_cc.so       libdeepmd_ipi_low.so  libdeepmd_lmp_low.so  libdeepmd_op_cuda.so  libdeepmd.so
          +
          +

          注意应当包含deepmd_lmp/libdeepmd_lmp.so,后两者即为Lammps插件的位置。

          +

          方法二:采用TensorFlow Python 版本的库

          +

          从 DeePMD-kit v2.2 起,cmake 支持设置 -DUSE_TF_PYTHON_LIBS=TRUE的方式,从而免去了安装 libtensorflow_cc 的麻烦。

          +
          cmake -DLAMMPS_SOURCE_ROOT=<lammps_source_code> \
          +-DUSE_TF_PYTHON_LIBS=TRUE -DUSE_CUDA_TOOLKIT=TRUE \
          +-DCMAKE_INSTALL_PREFIX=$deepmd_root ..
          +
          +
          +

          Tip

          +

          请注意,这种方法采用Python Wheel提供的 libtensorflow_framework.so.2_pywrap_tensorflow_internal.so (作为 libtensorflow_cc.so的替代)进行编译。 +后者依赖 Python 库 libpython3.*.so.*(因版本不同而异),请注意基于上述库的编译应保证后者路径也在 LD_LIBRARY_PATH 中。

          +
          +

          为使得编译好的库文件可以更容易找到上述依赖,请执行以下操作,建立一个伪 tensorflow_root 目录,假设该路径位于 /some/workspace/tensorflow_root 下,同时假设 Conda 环境仍位于 /data/user/conda/env/deepmd 下:

          +
          export tensorflow_root=/some/workspace/tensorflow_root
          +mkdir -p $tensorflow_root/lib 
          +cd $tensorflow_root
          +ln -s /data/user/conda/env/deepmd/lib/python3.10/site-packages/tensorflow/include .
          +cd lib
          +ln -s /data/user/conda/env/deepmd/lib/python3.10/site-packages/tensorflow/python/_pywrap_tensorflow_internal.so libtensorflow_cc.so
          +ln -s /data/user/conda/env/deepmd/lib/python3.10/site-packages/tensorflow/libtensorflow_framework.so.2 .
          +ln -s libtensorflow_framework.so.2 libtensorflow_framework.so
          +
          +

          于是,我们便构建了一个伪 tensorflow_root 目录。注意后文的 $tensorflow_root 此时应指向该路径。

          +

          调用方法

          +

          使用前请加载好环境变量。注意若未定义 $deepmd_root$tensorflow_root,请补全为完整路径。这里的 /data/user/conda/env/deepmd 仍是 Conda 环境的路径,请相应替换。

          +
          export LD_LIBRARY_PATH=$tensorflow_root/lib:$deepmd_root/lib:/data/user/conda/env/deepmd/lib:$LD_LIBRARY_PATH
          +export LAMMPS_PLUGIN_PATH=$deepmd_root/lib/deepmd_lmp
          +
          +

          Lammps便会自动寻找插件并加载,从而可以实现DeePMD的支持。

          +
          pair_style      deepmd ../graph.pb
          +pair_coeff      * *
          +
          +

          若无法自动找到,也可以手动在 输入文件 中加载,写在 pair_style 上一行即可,注意 $deepmd_root$tensorflow_root 须替换为完整路径

          +
          plugin load     $deepmd_root/lib/libdeepmd_lmp.so
          +pair_style      deepmd ../graph.pb
          +pair_coeff      * *
          +
          +

          运行命令仍然是 lmp_mpi -i <input_file>

          +

          DP-CP2K 安装指引

          +

          首先clone对应的安装包:

          +
          git clone https://github.com/cp2k/cp2k.git --recursive --depth=1
          +
          +

          然后运行相应的Toolchain脚本:

          +
          module unload mpi/intel/2017.5.239 # (1)!
          +module load mpi/openmpi/4.1.6-gcc # (2)!
          +cd tools/toolchain/
          +./install_cp2k_toolchain.sh --with-gcc=system --mpi-mode=openmpi --with-deepmd=$deepmd_root
          +
          +
            +
          1. 新版CP2K会自动检测 Intel MPI 且无视强制使用其他环境如 OpenMPI 的设定,旧版 Intel MPI不被兼容
          2. 由于 --with-openmpi=install 在 Zeus 上无法正确安装,这里预先安装好了 OpenMPI。
          +

          如不需要 MPI 和 DFT 相关功能,可以如下设置以减少步骤(注意后续编译移除掉 psmp pdbg 选项):

          +
          cd tools/toolchain/
          +module unload mpi/intel/2017.5.239 # (1)!
          +./install_cp2k_toolchain.sh --with-deepmd=$deepmd_root --mpi-mode=no --with-libint=no --with-libxc=no --with-libxsmm=no
          +
          +
            +
          1. 新版CP2K会自动检测 Intel MPI 且无视强制使用其他环境如 OpenMPI 的设定,旧版 Intel MPI不被兼容
          2. +
          +

          根据脚本运行结尾的提示复制arch文件并source所需的环境变量。

          +
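
          The exact commands are printed at the end of the toolchain run; as a sketch (run from tools/toolchain/, actual file names and paths may differ) they typically look like:

          +
          cp install/arch/local.* ../../arch/
          +source install/setup
          +
          +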

          这里的目的是让编译时可以正确链接 libpython3.*.so.*,因而 /data/user/conda/env/deepmd/ 仍旧是 Conda 环境路径。

          +

          最后回到主目录进行编译:

          +
          make -j 4 ARCH=local VERSION="psmp pdbg ssmp sdbg" # (1)!
          +
          +
            +
          1. 如不需要 MPI ,请移除掉 psmp pdbg
          2. +
          +

          编译正确完成后,可执行文件生成在 exe/ 下,即 cp2k.ssmpcp2k.psmp

          +

          关于 DP-CP2K 的使用,请参考 CP2K: DeePMD插件


          DeePMD-kit安装实战:PC篇

          +

          背景

          +

          需要对DeePMD-kit的源码进行一些修改,针对新的物理量构建模型。对代码的调试需要GPU,但是不需要很好的性能,所以在PC端进行可以节省在集群上的排队时间。

          +

          安装系统:Ubuntu 20.04

          +

          DeePMD-kit代码结构

          +

          在记录安装过程之前先简单描述一下DeePMD-kit的代码结构。

          +

          DeePMD-kit在训练部分的代码是在.py文件中调用 TensorFlow 实现的(TF自带OP/自定义OP)。但是TF的底层是用 C++ 构建的,所以在使用 DeePMD-kit 时需要安装 TF/python 接口。

          +

          进入到修改过代码的文件夹,执行:

          +
          pip install .
          +
          +

          此时会基于已修改的代码生成新的可执行文件。

          +

          如果想基于DeePMD-kit生成的模型和lammps/CP2K等软件的对接,需要另外安装C++接口。这部分可以参考之前的教程(编译/修改代码后重新编译)。

          +

          conda安装

          +

          如果不需要对源码进行修改,可以利用官方教程 easy installation 中的 conda 安装

          +
          #(base)
          +conda create -n deepmd deepmd-kit=*=*gpu libdeepmd=*=*gpu lammps-dp cudatoolkit=11.3 horovod -c https://conda.deepmodeling.org
          +
          +

          此命令新建了一个名为deepmd的虚拟环境,并将deepmd-kit安装在这个环境中。 +Conda 安装会一并安装 CUDA Toolkit,因此只要保证电脑的驱动支持即可。可通过以下指令查看驱动版本及其支持的cuda版本:

          +
          nvidia-smi
          +
          +
          +

          目前通过conda默认安装的是10.1版本的CUDA Toolkit,由于CUDA向下兼容,故版本高于10.1即可。如果驱动支持的CUDA版本过低,可以在Ubuntu的Software&Updates/Additional Drivers里选择新版的驱动进行升级。

          +
          +

          利用 Conda 便捷安装时,DeePMD-kit的C++底层文件全部都已经编译成可执行文件.so,在本地只能查看到可执行文件.so.py文件,无法对底层进行修改。所以如果需要对源码进行修改,需要手动安装编译。

          +

          Conda安装包括了预编译的 TF/C++ 接口,可通过定义环境变量省去以前教程中提到的编译的步骤。(见下文)

          +

          手动编译

          +

          上一节的 Conda 安装是在deepmd虚拟环境下安装的,手动安装我们新建一个环境dp-tf

          +
          conda info -e
          +# if you have been in `deepmd`, deactivate first
          +conda deactivate
          +# create a new environment
          +conda create -n dp-tf
          +# if you want to specify the version of python in dp-tf
          +#conda create -n dp-tf python=3.9
          +
          +
          +

          tip

          +

          建议在新建环境dp-tf 时设置python版本和deepmd保持一致,否则后续安装tensorflow时可能因为python版本不兼容报错No matching distribution found for tensorflow。

          +
          +

          下载源码&设置环境变量

          +

          下载源码(注意一定要有--recursive,具体见 DeepMD-kit安装:旧版 一节)

          +
          #(tf-dp)
          +git clone --recursive https://github.com/deepmodeling/DeePMD-kit.git DeePMD-kit
          +
          +

          设置环境变量

          +
          #(tf-dp)
          +cd DeePMD-kit
          +# set $deepmd_source_dir as the directory of the deepmd source code
          +deepmd_source_dir=$(pwd)
          +# set $tensorflow_root as the directory of the TF/C++ interface
          +# the dir of the environment with conda DP
          +tensorflow_root=/dir/for/env/with/condaDP
          +
          +
          +

          可以用conda env list指令查看环境deepmd的地址(/dir/for/env/with/condaDP)

          +
          +

          如果担心安装过程中需要退出,可以临时加到~/.bashrc文件中并source ~/.bashrc

          +
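
          For example, a sketch of making the two variables persistent (the values expand to the paths set above):

          +
          echo "export deepmd_source_dir=$deepmd_source_dir" >> ~/.bashrc
          +echo "export tensorflow_root=$tensorflow_root" >> ~/.bashrc
          +source ~/.bashrc
          +
          +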

          TF/Python 接口

          +

          首先可以更新一下pip,并安装新版TensorFlow:

          +
          #(tf-dp)
          +pip install --upgrade pip
          +pip install --upgrade tensorflow==2.5.0
          +
          +
          +

          tip

          +

          The convenient Conda installation already ships the TF/C++ interface, so the separate TF/C++ build described later can be skipped; for that to work, the TensorFlow installed here should match the one installed by Conda. (For the exact version, check the installed tensorflow-base package inside the Conda-installed DeePMD-kit environment, deepmd.)

          +
          +

          例如: +

          # assume you have been in dp-tf env
          +#(tf-dp)
          +conda deactivate
          +#(base)
          +conda activate deepmd
          +#(deepmd)
          +conda list
          +>>> tensorflow-base           2.5.0           gpu_py39h7c1560b_0    https://conda.deepmodeling.org
          +#(deepmd)
          +conda deactivate
          +#(base)
          +conda activate dp-tf
          +#(tf-dp)
          +pip install --upgrade tensorflow==2.5.0
          +

          +

          DeePMD-kit/Python 接口

          +
          #(tf-dp)
          +cd $deepmd_source_dir
          +export DP_VARIANT=cuda
          +pip install .
          +
          +

          这一步的pip installdeepmd_source_dir下的文件进行编译。

          +
          +

          warning

          +

          环境变量DP_VARIANT的默认值是cpu,要记得根据需要进行修改!

          +
          +
          +

          info

          +

          如果对源码进行了修改,需要重新编译。

          +
          +

          这一步中报错可能的应对措施:

          +
            +
          • 网络问题1
          • +
          +

          修改镜像源(具体可参考使用帮助

          +
          pip install pip -U
          +pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
          +
          +
            +
          • 网络问题2(...timed out...
          • +
          +

          多试几次...

          +
            +
          • 升级setuptools
          • +
          +
          pip install --upgrade setuptools --no-build-isolation
          +
          +
            +
          • 缺各种包
          • +
          +

          如果直接pip install会发现所有都是已安装的,需要pip uninstallpip install

          +

          conda list检查发现应该是没有安装到这个环境里。

          +

          如果有报错而无法直接卸载:

          +
          It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.
          +
          +

          可以考虑强制覆盖安装:

          +
          pip install some_package --ignore-installed
          +
          +
            +
          • GCC版本问题
          • +
          +
              138 | #error -- unsupported GNU version! gcc versions later than 8 are not supported!
          +
          +

          Ubuntu 20.04默认的GCC版本是9.3.0(gcc --version查看),需要卸载再重装低版本(比如7.5)

          +
          sudo apt remove gcc
          +sudo apt-get install gcc-7 g++-7 -y
          +sudo ln -s /usr/bin/gcc-7 /usr/bin/gcc
          +sudo ln -s /usr/bin/g++-7 /usr/bin/g++
          +sudo ln -s /usr/bin/gcc-7 /usr/bin/cc
          +sudo ln -s /usr/bin/g++-7 /usr/bin/c++
          +gcc --version
          +
          +

          DeePMD-kit/C++ 接口

          +

          官方教程(可能需要apt-get安装cmake,如果没有足够权限也可以通过pip安装)。

          +

          和其他计算软件(如lammps)的接口

          +

          官方教程这里


          GCC 安装教程

          +

          这里以 5.5.0 版本为例,其他版本可以参考,只需将版本号替换即可。

          +

          首先下载 gcc 安装包,国内直接访问 gnu 官网较慢,可以通过 tuna 等镜像安装

          +
          wget https://mirrors.tuna.tsinghua.edu.cn/gnu/gcc/gcc-5.5.0/gcc-5.5.0.tar.gz
          +
          +

          解压并下载编译所需环境:

          +
          tar -zxvf gcc-5.5.0.tar.gz
          +cd gcc-5.5.0
          +./contrib/download_prerequisites
          +cd ..
          +
          +

          创建编译目录,并在其中进行编译:

          +
          mkdir objdir
          +cd objdir
          +../gcc-5.5.0/configure --prefix=/share/apps/gcc/5.5.0 --enable-languages=c,c++,fortran,go --disable-multilib
          +make
          +make install
          +
          +

          编写 modulefile ,修改环境变量:

          +
          #%Module1.0#####################################################################
          +##
          +## GCC modulefile
          +##
          +proc ModulesHelp { } {
          +        global version
          +
          +        puts stderr "\tSets up environment for GCC v$version"
          +}
          +
          +module-whatis   "sets up environment for GCC v5.5.0"
          +
          +# for Tcl script use only
          +set     version 5.5.0
          +set     root    /share/apps/gcc/$version
          +
          +prepend-path    INFOPATH        $root/share/info
          +prepend-path    LD_LIBRARY_PATH $root/lib64:$root/lib:$root/libexec
          +prepend-path    INCLUDE         $root/include
          +prepend-path    MANPATH         $root/share/man
          +prepend-path    PATH            $root/bin
          +
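
          After copying this modulefile to a directory on the cluster's MODULEPATH (for example /share/apps/modulefiles/gcc/5.5.0 — the exact location is an assumption), the new compiler can be loaded and checked with:

          +
          module load gcc/5.5.0
          +gcc --version
          +
          +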

          虚拟环境下源码安装 C/C++程序:以 valgrind 为例

          +

          源码安装一般由 3 个步骤组成:配置(configure)、编译(make)、安装(make install)。默认情况下进入源码所在文件夹下顺序执行./configure && make && make install 会将文件安装在/usr/local下。但是,这种做法有两个不足:

          +
            +
          • 某些软件(版本)仅应用于特定工作任务中,不同任务中的软件(版本)可能会有冲突
          • +
          • 集群上普通用户没有权限修改/usr/local进行安装
          • +
          +

          是否可以采用类似将 python 包安装到特定虚拟环境下的做法,把 C/C++程序通过源码安装到特定虚拟环境中呢?答案是:可以!接下来,以 Valgrind 为例说明如何将 C/C++软件包安装到特定虚拟环境下。

          +
          +
          +

          虚拟环境地址(根据自己情况修改):/new_data/jxzhu/envs/test_env

          +
          +
            +
          1. 下载源码并解压
          2. +
          +
          # download source code from official website
          +wget -c https://sourceware.org/pub/valgrind/valgrind-3.19.0.tar.bz2
          +# decompress
          +tar -jxvf valgrind-3.19.0.tar.bz2
          +
          +
            +
          1. 进入文件夹并执行安装前序工作(此处根据需安装软件的指引进行)
          2. +
          +
          # enter the source code folder
          +cd valgrind-3.19.0
          +# NOTE: This is not a general procedure
          +# Please check the installation guide for your package
          +./autogen.sh
          +
          +
            +
          1. 通过--prefix将安装地址指定为虚拟环境所在地址
          2. +
          +
          # configure with given installation path
          +./configure --prefix=/new_data/jxzhu/envs/test_env/
          +
          +
            +
          1. 编译及安装
          2. +
          +
          # make in parallel
          +make -j20
          +# install software
          +make install
          +
          +

          快速测试

          +
          (base) [jxzhu@login01:] /data/jxzhu/software/valgrind-3.19.0 $ which valgrind
          +/usr/bin/which: no valgrind in (...)
          +(base) [jxzhu@login01:] /data/jxzhu/software/valgrind-3.19.0 $ conda activate /new_data/jxzhu/envs/test_env/
          +(test_env) [jxzhu@login01:] /data/jxzhu/software/valgrind-3.19.0 $ which valgrind
          +/new_data/jxzhu/envs/test_env/bin/valgrind
          +

          Installation Guide for Codes and Libraries

          +

          First of all! Load the environments!

          +

          Before you install anything, especially when you need to compile code, make sure you know which compiler and which compiler version you have. On a personal computer you can usually invoke the compiler directly, for instance gcc, gfortran, ifort or mpic++. On a remote cluster (High Performance Cluster), compilers are managed by module and cannot be used unless they are loaded in advance. Therefore, check which compilers are available through module and load the required ones with a command such as module load gcc/4.9.4.

          +

          General Protocol for Installation:

          +
            +
          1. Compile the code
          2. Quickly test the code on the server node
          3. Write module files for the code (we recommend managing codes with module)
          4. Test the code on the client node
          5. Write an example LSF file in /share/base/scripts
          +

          Where to Install?

          +

          Install under the /share/ directory, which is synchronized to all nodes via NFS.

          +
            +
          1. Libraries: /share/apps/lib/<library name>/<version>
          2. Codes, Packages, Softwares: /share/apps/<packages name>/<version>
          +

          Standard in Writing Module file

          +
            +
          1. module name: <package name>/<version>, like cp2k/6.1
          2. +
          +

          Standard in Writing lsf file

          +
            +
          1. Export necessary environment variables
          2. Load prerequisite modules (a minimal sketch follows this list)
          +
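
          A minimal LSF script sketch following the two points above (queue name, core count, modules and the final command are placeholders, not site defaults); submit it with bsub < job.lsf:

          +
          #!/bin/bash
          +#BSUB -q normal                # queue name (placeholder)
          +#BSUB -n 24                    # number of cores (placeholder)
          +#BSUB -o %J.out -e %J.err      # stdout / stderr files
          +
          +# load prerequisite modules and export necessary environment variables
          +module load gcc/4.9.4
          +export OMP_NUM_THREADS=1
          +
          +# replace with the actual command of the code being run
          +mpirun -np 24 some_program
          +
          +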

          Anaconda Installation Guide

          +

          Short Introduction

          +

          The open-source Anaconda Distribution is the easiest way to perform Python/R data science and machine learning on Linux, Windows, and macOS. Choose the variant that suits your usage. If you would like to use Anaconda on a cluster, ask the cluster administrator whether it has already been installed, which avoids wasting the cluster's storage.

          +
          +

          Tip

          +

          A minimal installation of Conda is available on cluster51 (installed by Yunpei Liu). Load it with the module command.

          +
          +

          Installation Guide

          +
            +
          • Go to this website and choose the right version for you. Personally, I recommend the command-line installer for Linux and macOS systems, and the graphical installer for Windows.
          • +
          • Follow the instruction in this page
          • +
          +

          QUIP Installation Guide

          +

          Short Introduction

          +

          The QUIP package is a collection of software tools to carry out molecular dynamics simulations. It implements a variety of interatomic potentials and tight binding quantum mechanics, and is also able to call external packages, and serve as plugins to other software such as LAMMPS, CP2K and also the python framework ASE. Various hybrid combinations are also supported in the style of QM/MM, with a particular focus on materials systems such as metals and semiconductors.

          +
          +

          Tip

          +

          The tested compiler version: and for your information.

          +
          +

          Use QUIP and quippy in cluster 51

          +

          If you need to use QUIP/GAP on cluster 51, please use the commands:

          +
          module load gcc/6.3.0 mpi/openmpi/3.0.0
          +module load QUIP/GAP
          +
          +

          If you want to use quippy:

          +
          module load miniconda/3
          +source activate /share/apps/QUIP/quippy-py3/
          +
          +

          Install Guide

          +
            +
          • Git clone from repository
          • +
          +
          git clone --recursive https://github.com/libAtoms/QUIP.git
          +
          +
            +
          • Go to the package root and export variable
          • +
          +
          export QUIP_ARCH=linux_x86_64_gfortran
          +
          +
            +
          • Make configuration
          • +
          +
          make config
          +#if everything fine
          +make
          +
          +

          Packages and Extra Interfaces of QUIP

          +

          Add GAP Packages

          +
            +
          • Download the GAP file from here; you will obtain a tar archive named GAP.tar. Unpack it:
          • +
          +
          tar -xvf GAP.tar
          +
          +
            +
          • You will obtain a directory named GAP/; copy it into <QUIP root>/src:
          • +
          +
          cp -r GAP <QUIP root>/src/
          +
          +
            +
          • Re-run the configuration and answer y when asked whether to install GAP:
          • +
          +
          #recompile this code again
          +make
          +
          +

          Build QUIPPY, A QUIP Python Interface

          +
            +
          • Export another environmental variable
          • +
          +
          #install for your self
          +export QUIPPY_INSTALL_OPTS=--user
          +#choose the location for installation of quippy
          +export QUIPPY_INSTALL_OPTS=--prefix=<directory>
          +
          +
            +
          • Go to <QUIP root>/src/f90wrap, and install f90wrap by:
          • +
          +
          pip install .
          +
          +
            +
          • Back to <QUIP root>
          • +
          +
          make install-quippy
          +
          +
            +
          • Test whether installed successfully.
          • +
          +
          make test
          +
          +

          Trouble Shooting

          +
          ImportError: dynamic module does not define module export function
          +
          Example:
          +Traceback (most recent call last):
          +  File "<stdin>", line 1, in <module>
          +  File "/share/apps/QUIP/quippy-py3/lib/python3.8/site-packages/quippy-https_github.com_libAtoms_QUIP.git_ec1ed34_dirty-py3.8-linux-x86_64.egg/quippy/__init__.py", line 2, in <module>
          +    import _quippy
          +ImportError: dynamic module does not define module export function (PyInit__quippy)
          +
          +

          Solution: add /build/${QUIP_ARCH} into your Python PATH

          +
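
          For example (a sketch; replace <QUIP root> with the actual checkout path and keep QUIP_ARCH consistent with the value used at build time):

          +
          export PYTHONPATH=<QUIP root>/build/${QUIP_ARCH}:$PYTHONPATH
          +
          +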

          VASP

          +

          Short Introduction

          +

          (TODO)

          +

          Install Guide

          +
            +
          1. +

            Get the VASP source code and pseudopotentials.

            +
          2. +
          3. +

            Load environment +

            module load intel
            +

            +
          4. +
          5. +

            Choose makefile.include according to the platform and make +

            cd vasp.5.4.4
            +make std
            +make gam
            +

            +
          6. +
          7. +

            If everything is right, you will find vasp_std in vasp.5.4.4/build/std and you can run it with mpirun -np 24 vasp_std.

            +
          8. +
          +

          Plugins

          +

          Wannier90

          +
            +
          1. +

Download Wannier90 from http://www.wannier.org/download/. Notice: currently VASP only supports Wannier90-1.2

            +
          2. +
          3. +

Modify the Wannier90 compile file make.sys.intel. Here we use MKL. +

            #LIBDIR = /opt/intel/mkl721/lib/32
            +#LIBS = -L$(LIBDIR) -lmkl_lapack -lmkl_ia32 -lguide -lpthread
            +LIBDIR = $(MKLROOT)/lib/intel64
            +LIBS = -L$(LIBDIR) -mkl -lpthread
            +

            +
          4. +
          5. +

            Compile and test +

            cp ./config/make.inc.ifort make.inc
            +make 
+make lib # compile to get the library: libwannier.a 
+make tests # test whether the compilation succeeded
            +

            +
          6. +
          7. +

Copy the libwannier.a library file to the VASP library path and modify the VASP makefile.include.

            +
          8. +
          +
          #Precompiler options
          +CPP_OPTIONS= -DHOST=\"LinuxIFC\"\
          +             -DMPI -DMPI_BLOCK=8000 \
          +             -Duse_collective \
          +             -DscaLAPACK \
          +             -DCACHE_SIZE=4000 \
          +             -Davoidalloc \
          +             -Duse_bse_te \
          +             -Dtbdyn \
          +             -Duse_shmem \
          +             -DVASP2WANNIER90   ## modify this line for Wannier90
          +
          +LLIBS += ../../libwannier.a  ## change here to the location of libwannier.a
          +
          +

          Compilation optimization

          +

If you use an Intel Xeon Silver/Gold/Platinum CPU, the following compilation parameter gives about a 2× speedup! (Already tested on the 205 server) +

          OFLAG      = -O3 -xCORE-AVX512
          +

          +

          TODO in the future

          +
            +
          1. Install vasp_gpu version
          2. +
3. Benchmark different libraries (FFTW/MKL)
4. +
5. Other plugins: VASP-neb, vasp-beef
          6. +
          7. vasp6
          8. +
          +

          LAMMPS Installation Guide

          +

          Short Introduction

          +

          LAMMPS is a classical molecular dynamics code with a focus on materials modeling. It's an acronym for Large-scale Atomic/Molecular Massively Parallel Simulator.

          +
          +

          Tip

          +

          I have installed one in cluster51, in directory /share/apps/lammps-7Aug19/. The compiler version: and for your information.

          +
          +

          Install Guide

          +
            +
          • Git clone or download package from website
          • +
          +
          # command for git
          +git clone -b stable https://github.com/lammps/lammps.git mylammps
          +
          +
            +
• We assume the package path is <lammps-root>; go to the src directory and build:
          • +
          +
          cd <lammps-root>/src
          +#choose one of the following or both
          +# build a serial LAMMPS executable
          +make serial 
          +# build a parallel LAMMPS executable with MPI
          +make mpi        
          +
          +
            +
          • You will see the executable binary in src/lmp_serial or src/lmp_mpi
          • +
          +

          Packages and Extra Interfaces of LAMMPS

          +
          +

          Tip

          +

Contact the cluster administrator if you need any packages that are not yet installed

          +
          +

          General for Installing Package

          +
            +
• To install a LAMMPS package, just type make yes-<package name>, for example make yes-user-intel (a quick status check is sketched below)
          • +
          +
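To see which packages are currently enabled or disabled, you can query the package status from <LAMMPS root>/src (a quick check, not a required step):

+
make ps    # shorthand for "make package-status"
+
+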

          Building USER-ATC Package

          +
            +
• Before you install this package with make yes-user-atc, you should build lib-atc, the library required by the ATC package
          • +
• Go to the directory <LAMMPS root>/lib/atc and follow the instructions in the README. Remember to load the gcc and OpenMPI modules
          • +
          +
          cd <LAMMPS root>/lib/atc
          +
          +
            +
• lib-atc requires the LAPACK and BLAS libraries. Check whether they are installed with the commands:
          • +
          +
          #check for lapack library
          +ldconfig -p | grep lapack
          +#check for blas library
          +ldconfig -p | grep blas
          +
          +
            +
• If LAPACK and BLAS are installed, change the value of the EXTRAMAKE variable to Makefile.lammps.installed in the file Makefile.mpi.
          • +
          +
          EXTRAMAKE= Makefile.lammps.installed
          +
          +
            +
• Build the library with the following command
          • +
          +
          make -f Makefile.mpi
          +
          +
            +
          • Make sure you have libatc.a and Makefile.lammps in your current directory
          • +
• Go back to the directory <LAMMPS root>/src/ and type make mpi to compile the MPI version of LAMMPS
          • +
          +

          Building Inteface with n2p2

          +
            +
• Make sure you have the shared library libnnpif-shared in your <path to n2p2>/lib/
          • +
• Export the following environment variable (optional)
          • +
          +
          #export this if you use shared library, skip if you are using static library
          +export LD_LIBRARY_PATH=<path to n2p2>/lib:${LD_LIBRARY_PATH}
          +
          +
            +
          • Go to LAMMPS root
          • +
          +
          cd <LAMMPS root>/
          +ln -s <path to n2p2> lib/nnp
          +cp -r <path to n2p2>/src/interface/LAMMPS/src/USER-NNP <LAMMPS root>/src
          +cd <LAMMPS root>/src
          +make yes-user-nnp
          +make mpi
          +
          +

          Building with Plumed

          +
            +
• Before you install, make sure Plumed has been installed
          • +
• Go to the directory <LAMMPS root>/src/
          • +
          +
          make lib-plumed args="-p <path to plumed directory>"
          +make yes-user-plumed
          +make mpi
          +
          +

          DeePMD Installation Guide

          +

          Short Introduction

          +

          DeePMD-kit is a package written in Python/C++, designed to minimize the effort required to build deep learning based model of interatomic potential energy and force field and to perform molecular dynamics (MD). This brings new hopes to addressing the accuracy-versus-efficiency dilemma in molecular simulations. Applications of DeePMD-kit span from finite molecules to extended systems and from metallic systems to chemically bonded systems. Ref. Paper

          +

          Install Guide

          +
            +
• Here we describe the easiest way to install the DeePMD code.
          • +
• Make sure you have a GPU installed in your computer. Usually you can check this through the GPU driver (e.g. with nvidia-smi)
          • +
• Install Anaconda3 from its website. After you have installed Anaconda3, the conda command becomes available.
          • +
• Install the CPU or GPU version of DeePMD. Installing this way installs LAMMPS as well.
          • +
          +
          #install of cpu version
          +conda install deepmd-kit=*=*cpu lammps-dp=*=*cpu -c deepmodeling
          +#install of gpu version
          +conda install deepmd-kit=*=*gpu lammps-dp=*=*gpu -c deepmodeling
          +
          +
            +
• That's all for the installation. Check the installed packages with the command:
          • +
          +
          conda list | grep deep
          +
          +
            +
• You will find four packages related to the DeePMD code. You can now directly use the commands dp and lmp (a quick sanity check is sketched after this list).
          • +
• To test the DeePMD code, download it from GitHub:
          • +
          +
          git clone https://github.com/deepmodeling/deepmd-kit.git
          +
          +
            +
          • Go to the directory examples/water/train/
          • +
          • Test training by
          • +
          +
          dp train water_se_a.json
          +
          +
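As a quick sanity check of the conda installation described above, both entry points should print their help messages (a sketch; the command names follow the conda packages installed earlier):

+
dp -h
+lmp -h
+
+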

          Install Guide of DeePMD

          +

Quick Installation

          +

          n2p2 Installation Guide

          +

          Short Introduction

          +

n2p2 is a machine learning code for training machine learning potentials. The original method paper is J. Behler and M. Parrinello, Phys. Rev. Lett. 98, 146401 (2007)

          +

          Install Guide

          +
            +
          • Before Installation, make sure you have installed the Eigen Library and the GSL Library.
          • +
• Make sure you have the gcc compiler suite (including gfortran); compiling with the Intel compiler has not worked for me. Also make sure you have OpenMPI (i.e. the mpic++ command).
          • +
          • Download the n2p2 code from github: https://github.com/CompPhysVienna/n2p2. For example, using the following command.
          • +
          +
          git clone https://github.com/CompPhysVienna/n2p2.git
          +
          +
            +
• You will see a directory named n2p2; now go into its src directory:
          • +
          +
          cd n2p2/src
          +
          +
            +
          • Modify the configure file makefile.gnu
          • +
          +
          #modify this file, I just pick out the part you need to modify
          +# Enter here paths to GSL or EIGEN if they are not in your standard include
          +# path. DO NOT completely remove the entry, leave at least "./".
          +PROJECT_GSL=<path to gsllib>/gsl/include/ # substitute <path> with real path
          +PROJECT_EIGEN=<path to eigen>/eigen-eigen-323c052e1731 # substitute <path> with real path
          +
          + ###############################################################################
          + # COMPILERS AND FLAGS
          + ###############################################################################
+PROJECT_CFLAGS=-O3 -march=native -std=c++11 -fopenmp -L<path to gsllib>/gsl/lib
          +PROJECT_LDFLAGS_BLAS=-lblas -lgslcblas
          +
          +
            +
• Save and close this file, then use one of the following commands to compile the code:
          • +
          +
#choose one of the following commands
          +make MODE=shared # compile a binary with shared library
          +make MODE=static # compile a binary with static library, I use this one
          +
          +
            +
• After a successful compilation, you will have all the executable binaries in the n2p2/bin/ directory
          • +
• Add n2p2/bin/ to your PATH environment variable so that the tools can be used directly (see the sketch after this list). The most important binary is nnp-train, which is used for training.
          • +
• Add the n2p2 library directory to your LD_LIBRARY_PATH in .bashrc
          • +
          +
          export LD_LIBRARY_PATH=<Path to n2p2>/lib/:$LD_LIBRARY_PATH
          +
          +
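Similarly, a sketch of the PATH setting mentioned above (add it to .bashrc and substitute the real n2p2 path):

+
export PATH=<Path to n2p2>/bin:$PATH
+
+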

          Plumed Installation Guide

          +

          Short Introduction

          +

          PLUMED is an open-source, community-developed library that provides a wide range of different methods, which include:

          +
            +
          • enhanced-sampling algorithms
          • +
          • free-energy methods
          • +
          • tools to analyze the vast amounts of data produced by molecular dynamics (MD) simulations.
          • +
          +

          These techniques can be used in combination with a large toolbox of collective variables that describe complex processes in physics, chemistry, material science, and biology.

          +
          +

          Tip

          +

          I have installed one in cluster51. Use module load plumed/2.6.0 to use this library. The compiler version: for your information

          +
          +

          Install Guide

          +
            +
          • Download package from here.
          • +
          • Basic Configure
          • +
          +
./configure --prefix=<path you want to install> LDFLAGS=-L'/share/apps/lib/fftw/3.3.8/lib' CPPFLAGS=-I'/share/apps/lib/fftw/3.3.8/include'
          +
          +
            +
• Compile and install (a post-install environment sketch follows this list)
          • +
          +
          make -j 32
          +make install
          +
          +
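After make install finishes, a minimal environment setup looks like the following sketch (paths follow the --prefix chosen above); if everything worked, plumed info --version should report the installed version:

+
export PATH=<path you want to install>/bin:$PATH
+export LD_LIBRARY_PATH=<path you want to install>/lib:$LD_LIBRARY_PATH
+plumed info --version
+
+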

          Eigen Library Installation Guide

          +

          Short Introduction

          +

          Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms.

          +

          Install Guide

          +
            +
• Download the package from the wiki: http://eigen.tuxfamily.org/index.php?title=Main_Page#Overview. Here we choose the Eigen 3.3.7 release.
          • +
          +
          wget http://bitbucket.org/eigen/eigen/get/3.3.7.tar.bz2
          +
          +
            +
          • Unpack this tar file by
          • +
          +
tar -jxvf 3.3.7.tar.bz2
          +
          +
            +
• You will now have an eigen-eigen-* directory on your computer
• +
• These are all the steps needed to install the Eigen library; it is header-only, so nothing has to be compiled (see the sketch below)
          • +
          +
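Because Eigen is header-only, using it only means adding the unpacked directory to the compiler include path, for example (a sketch; your_program.cpp is a placeholder and the directory name depends on the release you downloaded):

+
g++ -I <path to eigen>/eigen-eigen-323c052e1731 your_program.cpp -o your_program
+
+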

          GSL Library Installation Guide

          +

          Short Introduction

          +

          The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. It is a free open source library under the GNU General Public License.

          +

          This guide is from: website tutorial

          +
          +

          Tip

          +

          I have installed one in cluster51, in directory /share/apps/lib/gsl-2.6. The compiler version: for your information

          +
          +

          Install Guide

  +
• Download the latest release of GSL with wget:
• +
+
          wget ftp://ftp.gnu.org/gnu/gsl/gsl-latest.tar.gz
          +
          +
            +
          • Place the file in whatever directory you want to install and unpack the file with the following command:
          • +
          +
          tar -zxvf gsl-latest.tar.gz
          +
          +
            +
• This will create a directory called gsl-*.* under the directory where you unpacked it. Change to this directory.
          • +
          +
          cd gsl-*.*
          +
          +
            +
          • The next step is to configure the installation and tell the system where to install the files. Create a directory to install your gsl package, say <Path to libgsl>/gsl with the following command
          • +
          +
          mkdir <Path to libgsl>/gsl
          +
          +
            +
          • Now configure the installation and tell it to use your new directory. This step may take a few minutes.
          • +
          +
          ./configure --prefix=<Path to libgsl>/gsl
          +
          +
            +
          • If there are no errors, compile the library. This step will take several minutes.
          • +
          +
          make
          +
          +
            +
          • Now it is necessary to check and test the library before actually installing it. This step will take some time.
          • +
          +
          make check
          +
          +
            +
          • If there are no errors, go ahead and install the library with:
          • +
          +
          make install
          +
          +
            +
          • Now we can write a test program to see if the library works. Create the following program and name it example.c
          • +
          +
          #include <stdio.h>
          +#include <gsl/gsl_sf_bessel.h>
          +
          +int
          +main (void)
          +{
          +    double x = 15.0;
          +    double y = gsl_sf_bessel_J0 (x);
+    printf ("J0(%g) = %.18e\n", x, y);
          +    return 0;
          +}
          +
          +
            +
• Compile and link the program with the following commands (but use the correct path for your username); an alternative using gsl-config is sketched at the end of this list:
          • +
          +
          gcc -Wall -I<Path to libgsl>/gsl/include -c example.c
          +gcc -L<Path to libgsl>/gsl/lib example.o -lgsl -lgslcblas -lm
          +
          +
            +
          • Now run your program!
          • +
          +
          ./a.out
          +
          +
            +
• If the library is successfully installed, the program will print a number on your screen.
          • +
• Add the library path to LD_LIBRARY_PATH in .bashrc
          • +
          +
export LD_LIBRARY_PATH=<Path to libgsl>/gsl/lib:$LD_LIBRARY_PATH
          +
          +
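As an alternative to writing out the include and library paths by hand, GSL installs a gsl-config helper that reports the flags to use; a quick check (assuming the prefix chosen above):

+
<Path to libgsl>/gsl/bin/gsl-config --cflags --libs
+
+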

          Libxc Library Installation Guide

          +
            +
• Download the latest stable version of libxc from the official website (the remaining build steps are sketched below):
          • +
          +
          wget http://www.tddft.org/programs/libxc/down.php?file=4.3.4/libxc-4.3.4.tar.gz
          +
          +
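The remaining steps follow the usual configure/make sequence; a sketch, assuming the downloaded tarball is (re)named libxc-4.3.4.tar.gz and <path to libxc> is your install prefix:

+
tar -zxvf libxc-4.3.4.tar.gz
+cd libxc-4.3.4
+./configure --prefix=<path to libxc>
+make
+make check
+make install
+
+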

          FFTW Library Installation Guide

          +

          Short Introduction

          +

          FFTW is a C subroutine library for computing the discrete Fourier transform (DFT) in one or more dimensions, of arbitrary input size, and of both real and complex data (as well as of even/odd data, i.e. the discrete cosine/sine transforms or DCT/DST).

          +
          +

          Tip

          +

          I have installed one in cluster51, in directory /share/apps/lib/fftw/3.3.8. Use module load fftw/3.3.8 to use this library. The compiler version: for your information

          +
          +

          Install Guide

          +
            +
          • Download the release version from official website using wget
          • +
          +
          wget http://www.fftw.org/fftw-3.3.8.tar.gz
          +
          +
            +
• Unzip the package
          • +
          +
          tar -xvf fftw-3.3.8.tar.gz
          +
          +
            +
• Go to the directory fftw-3.3.8 and run configure:
          • +
          +
./configure --prefix=<path where you want to install>    \
          +            --enable-shared  \
          +            --enable-threads \
          +            --enable-sse2    \
          +            --enable-avx     
          +
          +
            +
• Once configure has finished, build, check, and install:
          • +
          +
          make
+#run the checks to verify the build
          +make check
          +#install to the final directory which you have set in --prefix
          +make install
          +
          +

          CP2K Installation Guide

          +
            +
• Download the release version from the official website using wget, for example:
          • +
          +
          wget https://github.com/cp2k/cp2k/releases/download/v6.1.0/cp2k-6.1.tar.bz2
          +
          +
            +
          • Unzip the cp2k package
          • +
          +
          tar -xvf cp2k-6.1.tar.bz2
          +
          +
            +
• Go into the directory cp2k-6.1/tools/toolchain/
          • +
• Stop here! You should check your compiler version; if you are on a high performance cluster, please load the modules for the compiler and MPI/OpenMPI
          • +
• Note: the gcc version must satisfy gcc <= 7.4.0
          • +
          • Execute the following script to see the help message
          • +
          +
          ./install_cp2k_toolchain.sh -h
          +
          +
            +
• Choose which packages you want the toolchain to install before building cp2k.
          • +
          +

Some packages are essential for cp2k; please check this on the official website

          +
            +
• At minimum, --with-openblas=install is required if you want the compilation to succeed (an example invocation is sketched below).
          • +
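For reference, a toolchain invocation might look like the following sketch; only --with-openblas=install is required as noted above, the other flags are illustrative, and ./install_cp2k_toolchain.sh -h lists the options supported by your version:

+
./install_cp2k_toolchain.sh --with-openblas=install --with-fftw=install --with-libxc=install
+
+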
          + + + + + + + + + + + + + + + +


          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/DP-GEN/index.html b/wiki/software_usage/DP-GEN/index.html new file mode 100644 index 00000000..de72634f --- /dev/null +++ b/wiki/software_usage/DP-GEN/index.html @@ -0,0 +1,3647 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DP-GEN使用入门 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          DP-GEN使用入门

          +

          简介

          +

Deep Potential Generator (DP-GEN) 是一个将神经网络势能(machine learning potential)和主动学习(active learning)结合起来的工作流。该包主要由张林峰(普林斯顿大学),王涵(北京应用物理与计算数学研究所)开发。如有问题,可以向他们询问。

          +
          +

          提示

          +

          考虑到 DP-GEN 在集群运行可能存在一定的性能问题,推荐尝试 ai2-kit 运行势函数训练的 Close Loop Learning (CLL) 任务。

          +
          +

          以下为参考信息:

          + +
          +

          Warning

          +

          此页面仅限提供贡献者对于该软件的理解,如有任何问题请联系贡献者。建议在阅读此篇前先对DeePMD-kit有一定了解。
          +指路:DeePMD-kit

          +
          +

          DP-GEN的工作流是由以下三步组成的循环:

          +
            +
          • 训练:DeePMD-kit同时训练 多条(一般是4条)参数初始化不同的势函数(GPU)。
          • +
          • 采样和筛选:基于训练得到的势函数和指定的初始结构利用LAMMPS进行classical MD,扩展构型空间。然后对MD中得到的构型依照特定指标(对某个构型用不同的势函数预测所得的原子力的标准差)进行筛选(GPU)。
          • +
          • 标记:将筛选所得的构型进行DFTMD单点能计算,得到力和能量,加入训练集进行新一轮的训练(51或52)。
          • +
          +

          输入文件

          +

          为了使dpgen运行起来,我们需要准备如下的文件:

          +
            +
          • param.json
          • +
          +

          三步计算中所用的参数,具体指神经网络训练的参数,lammps中MD的参数和DFTMD计算单点能的参数。

          +
            +
          • machine.json
          • +
          +

          制定上述三个步骤分别在哪个服务器计算。

          +
          +

          Tip

          +

在 Zeus 集群上配置 machine.json,请参阅GPU使用说明

          +
          +
            +
          • 初始训练集数据
          • +
          +

          放在提交dpgen所在的服务器上,用于训练势函数,参照DeePMD-kit中方法生成。

          +
            +
          • MD采样的初始结构
          • +
          +

          放在提交dpgen所在的服务器上,必须使用vasp5.x的POSCAR,把.xyz文件转化为POSCAR的脚本可见文末

          +

          输出文件

          +

          在提交dpgen的文件夹下会出现以下输出文件,用于指示任务运行的状况:

          +
            +
          • dpgen.log
          • +
          +

          包括了运行轮数,单个任务提交的情况,采样准确度等详细的信息。

          +
            +
          • record.dpgen
          • +
          +

          由多行 x y 组成,记录任务进程。其中x为运行的轮数(iteration),从0开始;y取0-8,其中0-2指代训练,3-5指代采样和筛选,6-8指代标记。

          +

          dpgen通过读取这个文件来决定从哪里重启计算,所以我们可以通过手动修改这个文件来决定重启的点。例如,在第x轮中我们发现采样的准确度过低,需要增加初始结构的数量重新跑MD,我们就可以把record.dpgen文件在x 2之后的内容删除,重新提交dpgen任务。

          +
            +
          • nohup.out
          • +
          +

          这个并不是必要输出,但是建议使用nohup命令把dpgen挂在后台运行。这个文件中输出的信息和dpgen.log的基本一致。

          +

          例子

          +

          接下来,把铂水界面势函数训练所用的param.json分解成几个部分进行解释,在实际使用中需要把几段放在一起。

          +
          +

          comment

          +

          文件中的注释用_comment标注。

          +
          +

基本参数设置: param.json

          +
          param.json
          { 
          +    "type_map": [        
          +        "O", 
          +        "H",
          +        "Pt"
          +    ], 
          +    "mass_map": [ 
          +        15.999,
          +        1.0079,
          +        195.08
          +    ], 
          +    "_comment": " atoms in your systems ",
          +    "init_data_prefix": "/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh", 
          +    "init_data_sys": [
          +        "init/system-000","init/system-001"
          +    ], 
          +    "_comment": " path of training set ",
          +    "init_batch_size": [
          +        1,1
          +    ], 
          +    "sys_configs": [
          +        ["/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh/init/configs/POSCAR_0[0-9]"],
          +        ["/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh/init/configs/POSCAR_1[0-9]"]
          +    ], 
          +    "_comment": " path of initial structure for sampling ",
          +    "sys_batch_size": [
          +        1,1
          +    ], 
          +
          +    ......
          +}
          +
          +
            +
          • 势函数训练(DPMD)
          • +
          +
          param.json
            {
          +      ......
          +      "numb_models": 4, 
          +      "_comment": " number of NNP for model deviation ",
          +      "train_param": "input.json", 
          +      "_comment": " name of automatically generated input file for DPMD ",
          +      "default_training_param": {
          +          "model": {
          +          "descriptor": {
          +          "type": "se_a",
          +    "_comment": "could be bigger than the number of atoms of the very element",
          +          "sel": [68, 136, 64], 
          +          "rcut_smth": 0.50, 
          +          "rcut": 5.00, 
          +          "neuron": [25, 50, 100], 
          +          "resnet_dt": false, 
          +          "axis_neuron": 16,
          +          "seed": 1
          +          },
          +          "fitting_net": {
          +          "n_neuron": [240, 240, 240], 
          +          "resnet_dt": true, 
          +          "seed": 1
          +          }},
          +          "learning_rate": {
          +          "type": "exp",
          +          "start_lr": 0.005, 
          +          "decay_steps": 2000,
          +          "_comment": "last 20000 or 400000", 
          +          "decay_rate": 0.95
          +          },
          +          "loss": {
          +          "start_pref_e": 0.02, 
          +          "limit_pref_e": 1, 
          +          "start_pref_f": 1000, 
          +          "limit_pref_f": 1, 
          +          "start_pref_v": 0, 
          +          "limit_pref_v": 0
          +          },
          +          "training": {
          +          "systems": [ ], 
          +          "set_prefix": "set", 
          +          "stop_batch": 400000, 
          +          "batch_size": 1, 
          +          "seed": 1,
          +          "disp_file": "lcurve.out", 
          +          "disp_freq": 100, 
          +          "numb_test": 4, 
          +          "save_freq": 1000, 
          +          "save_ckpt": "model.ckpt", 
          +          "load_ckpt": "model.ckpt", 
          +          "disp_training": true, 
          +          "time_training": true, 
          +          "profiling": false, 
          +          "profiling_file": "timeline.json"
          +          }},
          +      "_comment": "modify according your systems!", 
          +      ......
          +  }
          +
          +
            +
          • 采样和筛选(Lammps)
          • +
          +
          param.json
          {  
          +    "model_devi_dt":            0.0005,
          +    "_comment": "model_devi_dt: Timesteps for MD. Consistent with DFTMD!",
          +    "model_devi_skip":          0,
          +    "_comment": "model_devi_skip: the first x frames of the recorded frames",
          +    "model_devi_f_trust_lo":    0.075,
          +    "model_devi_f_trust_hi":    0.10,
          +    "_comment": "modify according to the error distribution of system",
          +    "model_devi_e_trust_lo":    1e10,
          +    "model_devi_e_trust_hi":    1e10,
          +    "model_devi_clean_traj":    false,
          +    "model_devi_jobs": [
          +    {"temps": [300,400],"sys_idx": [0,1],"trj_freq": 10,"nsteps":  2000,"ensemble": "nvt","_idx": 0},
          +    {"temps": [300,400],"sys_idx": [0,1],"trj_freq": 10,"nsteps":  2000,"ensemble": "nvt","_idx": 1}
          +    ],
          +    "_comment": "sys_idx should correspond to sys_configs in the beginning",
          +    "_comment": "add the _idx step by step",
          +    "_comment": "modify nsteps and sys_idx based on model deviation accuracy",
          +    ......
          +}
          +
          +
            +
          • 标记(计算单点能,此处以CP2K为例,VASP的设置可在官方文档中查看)
          • +
          +
          param.json
          {
          +    ......
          +    "fp_style":     "cp2k",
          +    "shuffle_poscar":   false,
          +    "fp_task_max":  200,
          +    "_comment":         "the maximum number of stcs to calc.",
          +    "fp_task_min":  5,
          +    "fp_pp_path":   ".",
          +    "fp_pp_files":  [],
          +    "_comment":"the maximum number of stcs to calc.",
          +     "_comment": "fp_params: modify according your systems!",
          +    "fp_params": {
          +        "FORCE_EVAL":{
          +            "DFT":{
          +                "BASIS_SET_FILE_NAME": "/data/kmr/BASIC_SET/BASIS_MOLOPT",
          +                "POTENTIAL_FILE_NAME": "/data/kmr/BASIC_SET/GTH_POTENTIALS",
          +                "MGRID":{
          +                    "CUTOFF": 400
          +                },
          +                "QS":{
          +                    "EPS_DEFAULT": 1.0E-13
          +                },
          +                "SCF":{
          +                    "SCF_GUESS": "ATOMIC",
          +                    "EPS_SCF": 1.0E-6,
          +                    "MAX_SCF": 500,
          +                    "ADDED_MOS": 500,
          +                    "CHOLESKY": "INVERSE",
+                    "SMEAR":{"_": "ON",
          +                        "METHOD": "FERMI_DIRAC",
          +                        "ELECTRONIC_TEMPERATURE": 300
          +                    },
          +                    "DIAGONALIZATION":{
          +                        "ALGORITHM": "STANDARD"
          +                    },
          +                    "MIXING":{
          +                               "METHOD": "BROYDEN_MIXING",
          +                               "ALPHA":   0.3,
          +                               "BETA":    1.5,
          +                               "NBROYDEN":  14
          +                    }
          +                },
          +                "XC":{
          +                        "XC_FUNCTIONAL":{"_": "PBE"},
          +                        "XC_GRID":{
          +                                "XC_SMOOTH_RHO": "NN50",
          +                                "XC_DERIV": "NN50_SMOOTH"
          +                        },
          +                        "vdW_POTENTIAL":{
          +                                "DISPERSION_FUNCTIONAL": "PAIR_POTENTIAL",
          +                                "PAIR_POTENTIAL":{
          +                                        "TYPE": "DFTD3",
          +                                        "PARAMETER_FILE_NAME": "/data/kmr/BASIC_SET/dftd3.dat",
          +                                        "REFERENCE_FUNCTIONAL": "PBE"
          +                                }
          +                        }
          +                }
          +           },
          +            "SUBSYS":{
          +                        "KIND":{
          +                                "_": ["O", "H","Pt"],
          +                                "POTENTIAL": ["GTH-PBE-q6", "GTH-PBE-q1","GTH-PBE-q10"],
          +                                "BASIS_SET": ["DZVP-MOLOPT-SR-GTH", "DZVP-MOLOPT-SR-GTH","DZVP-A5-Q10-323-MOL-T1-DERIVED_SET-1"]
          +                        }
          +            }
          +        }
          +    }
          +}
          +
          +
          +

          计算设置

          +

          CP2K的input中部分参数有默认设置写入,具体可参照cp2k.py。

          +
          +

          指路:cp2k.py

          +
          +

          计算设置

          +

          金属体系OT section需要手动关闭,具体见上方的设置。

          +
          +

          任务提交设置: machine.json

          +
          +

          从 DP-GEN 0.10.0 版本开始,官方引入了对 DPDispatcher 的支持,并计划将 machine.json 迁移到 DPDispatcher 上。 +DPDispatcher 相比原本 DP-GEN 自带的 Dispatcher,在接口和语法上有较大变化,需要额外指定 api_version 大于或等于 1.0。

          +
          +

          关于 DPDispatcher 项目的说明,请参阅这里

          +

          DPDispatcher 相比旧版,基于配置字典而非文件Flag来管理所提交的任务,稳定性更优,且对作业管理系统的支持更加灵活多样,内置接口可支持多任务并行提交。 +但新版在操作习惯上有较大改变,需要适应和调整。

          +

          以 LSF 为例,对 machine.json 的写法举例如下,请留意以下的注意事项。

          +
          +

          注意

          +

train 部分和 model_devi 部分使用了对新版 LSF 提供支持的写法,即同时指定 gpu_usage 和 gpu_new_syntax 为 True,从而可在提交脚本中使用新版 LSF 的语法。

          para_deg表示在同一张卡上同时运行的任务数,通常可不写出,此时默认值为1。这里给出的例子表示在同一张卡上同时运行两个Lammps任务。

          fp 部分使用的是针对CPU计算使用的语法。

          +
          +
          +

          注意

          +

          注意在fp部分,mpiexec.hydra需要明确写出以确保任务是并行执行的,可参考以下例子中的写法:mpiexec.hydra -genvall vasp_gam。若你不知道这部分该如何书写,请参考集群上的提交脚本说明(/data/share/base/scripts)。

          +
          +

          若在191上向191上提交任务,可以考虑使用LocalContext,可以减少文件压缩传输的额外IO开销。

          +
          machine.json
          {
          +  "api_version": "1.0",
          +  "train": [
          +    {
          +      "command": "dp",
          +      "machine": {
          +        "batch_type": "Slurm",
          +        "context_type": "LocalContext",
          +        "local_root": "./",
+        "remote_root": "/data/tom/dprun/train"
          +      },
          +      "resources": {
          +        "number_node": 1,
          +        "cpu_per_node": 1,
          +        "gpu_per_node": 1,
          +        "queue_name": "gpu3",
          +        "group_size": 1,
          +        "module_list": [
          +          "deepmd/2.0"
          +        ]
          +      }
          +    }
          +  ],
          +  "model_devi":[
          +    {
          +      "command": "lmp_mpi",
          +      "machine":{
          +        "batch_type": "Slurm",
          +        "context_type": "SSHContext",
          +        "local_root": "./",
          +        "remote_root": "/data/jerry/dprun/md",
          +        "remote_profile": {
          +          "hostname": "198.76.54.32",
          +          "username": "jerry",
          +          "port": 6666
          +        }
          +      },
          +      "resources": {
          +        "number_node": 1,
          +        "cpu_per_node": 1,
          +        "gpu_per_node": 1,
          +        "queue_name": "gpu2",
          +        "group_size": 5,
          +        "kwargs": {
          +          "custom_gpu_line": [
          +            "#SBATCH --gres=gpu:1g.10gb:1"
          +          ]
          +        },
          +        "strategy": {"if_cuda_multi_devices": false},
          +        "para_deg": 2,
          +        "module_list": [
          +          "deepmd/2.1"
          +        ],
          +        "source_list": []
          +      }
          +    }
          +  ],
          +  "fp":[
          +    {
          +      "command": "mpiexec.hydra -genvall cp2k.popt input.inp",
          +      "machine":{
          +        "batch_type": "Slurm",
          +        "context_type": "SSHContext",
          +        "local_root": "./",
          +        "remote_root": "/data/jerry/dprun/fp",
          +        "remote_profile": {
          +          "hostname": "198.76.54.32",
          +          "username": "jerry",
          +          "port": 6666
          +        }
          +      },
          +      "resources": {
          +        "number_node": 2,
          +        "cpu_per_node": 32,
          +        "gpu_per_node": 0,
          +        "queue_name": "c53-medium",
          +        "group_size": 10,
          +        "module_list": [
          +          "intel/17.5.239",
          +          "mpi/intel/2017.5.239",
+          "gcc/5.5.0",
          +          "cp2k/7.1"
          +        ]
          +      }
          +    }
          +  ]
          +}
          +
          +

          相关参数含义,详情请参阅官方文档 +machine 和 +resources 部分的说明。

          +

          以下是部分参数含义:

          + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
参数 | 描述
machine | 指定远程服务器的配置信息。
batch_type | 提交作业系统的类型,可指定 LSF, Slurm, Shell 等。
context_type | 连接到远程服务器的方式,常用可选参数SSHContext, LocalContext, LazyLocalContext等。详见官方文档说明。
SSHContext | 通过SSH连接到远程主机,通常情况下从一个服务器提交到另一个时可使用。
LocalContext | 若需要在当前服务器上提交任务,可选择此选项,则不必通过SSH连接。此时 remote_profile 部分可不写。
remote_root | 任务在目标主机上提交的绝对路径。
remote_profile | 远程主机设置,若context_type为LocalContext, LazyLocalContext可不写。
hostname | 远程主机IP。
username | 远程主机用户名。
password | 远程主机密码。若通过密钥登陆可不写。
port | SSH连接的端口,默认为22。
key_filename | SSH密钥存放的路径。默认放在~/.ssh下,此时可不写。
passphrase | 密钥安全口令,通常在创建密钥时设置。若为空可不写。
resource | 作业提交相关配置信息。
number_node | 作业使用的节点数。
cpu_per_node | 每个节点上使用CPU核数。
gpu_per_node | 每个节点上使用GPU卡数。
kwargs | 可选参数,依据各作业系统支持的配置而定。详见官方文档。
custom_gpu_line | 自定义GPU提交命令,可根据语法自定义。根据作业管理系统不同,以 #BSUB (LSF) 或 #SBATCH (Slurm) 开头。文中的例子即在gpu2上使用MIG实例(1g.10gb)。
custom_flags | 其他需要使用的Flag,例如Walltime、作业名等设置。
queue_name | 任务提交的队列名。
group_size | 每个作业绑定的任务个数。
if_cuda_multi_devices | 是否允许任务运行在多卡上,默认为 True。在Zeus上建议写成 False。
para_deg | 同一卡上同时运行的任务数。默认为1。
module_list | 需要load的module。可不写。
module_unload_list | 需要unload的module。可不写。
source_list | 需要source的脚本路径。可不写。
envs | 需要引入的环境变量。可不写。
          +
          +

          登录设置

          +

          如果服务器是密码登录,在username之后加上关键词password并写上密码。输入的内容要用引号括起!

          +
          +

          准备好所有的输入文件后,就可以用以下指令提交dpgen任务啦!

          +

          dpgen run param.json machine.json

          +
          +
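Since a DP-GEN run lasts many iterations, it is usually kept running in the background with nohup (a sketch; the output then goes to the nohup.out file described above):

+
nohup dpgen run param.json machine.json &
+
+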

          提交任务

          +

          如果在191提交,需要在服务器上自行安装dpgen。具体做法见官方GitHub。 +一般来说运行如下命令即可:

          +
          pip install --user dpgen
          +
          +
          +
          +

          Slurm获取状态异常问题的解决

          +

          若遇到以下报错,很大可能是因为Slurm暂时无法获取任务状态。由于旧版本DPDispatcher对这类波动导致的报错没有充分考虑,会直接退出:

          +
          RuntimeError: status command squeue fails to execute.job_id:13544 
          +error message:squeue: error: Invalid user for SlurmUser slurm, ignored
          +squeue: fatal: Unable to process configuration file
          +
          +

          新版这一部分已经做了调整,但由于之前的版本空文件夹复制过程存在严重bug,请务必保证DPDispatcher版本在0.5.6以上。

          +
          pip install --upgrade --user dpdispatcher
          +
          +
          +
          +

          支持

          +

          目前DP-GEN 0.11以上版本已经移除了旧版 dispatcher 的支持,推荐迁移到 DPDispatcher 上。为防止兼容性问题,这里仍保留了旧版的输入,请注意甄别。

          +
          machine_old.json
          {
          +  "train": [
          +    {
          +      "machine": {
          +        "machine_type": "slurm",
          +        "hostname": "123.45.67.89",
          +        "port": 22,
          +        "username": "kmr",
          +        "work_path": "/home/kmr/pt-oh/train"
          +      },
          +      "resources": {
          +        "node_gpu": 1,
          +        "numb_node": 1,
          +        "task_per_node": 1,
          +        "partition": "large",
          +        "exclude_list": [],
          +        "source_list": [],
          +        "module_list": [
          +            "deepmd/2.1"
          +        ],
          +        "time_limit": "23:0:0"
          +      },
          +      "python_path": "/share/apps/deepmd/2.1/bin/python"
          +    }
          +  ],
          +  "model_devi": [
          +    {
          +      "machine": {
          +        "machine_type": "slurm",
          +        "hostname": "123.45.67.89",
          +        "port": 22,
          +        "username": "kmr",
          +        "work_path": "/home/kmr/pt-oh/dpmd"
          +      },
          +      "resources": {
          +        "node_gpu": 1,
          +        "numb_node": 1,
          +        "task_per_node": 1,
          +        "partition": "large",
          +        "exclude_list": [],
          +        "source_list": [],
          +        "module_list": [
          +            "deepmd/2.1"
          +        ],
          +        "time_limit": "23:0:0"
          +      },
          +      "command": "lmp_mpi",
          +      "group_size": 80
          +    }
          +  ],
          +  "fp": [
          +    {
          +      "machine": {
          +        "machine_type": "slurm",
          +        "hostname": "123.45.67.90",
          +        "port": 6666,
          +        "username": "kmr",
          +        "work_path": "/data/kmr/edl/pzc/hydroxide/ml_potential/pt-oh/labelling"
          +      },
          +      "resources": {
          +        "cvasp": false,
          +        "task_per_node": 28,
          +        "numb_node": 1,
          +        "node_cpu": 28,
          +        "exclude_list": [],
          +        "with_mpi": true,
          +        "source_list": [
          +        ],
          +        "module_list": [
          +            "intel/17.5.239",
          +            "mpi/intel/17.5.239",
          +            "cp2k/6.1"
          +        ],
          +        "time_limit": "12:00:00",
          +        "partition": "medium",
          +        "_comment": "that's Bel"
          +      },
          +      "command": "cp2k.popt input.inp",
          +      "group_size": 50 
          +    }
          +  ]
          +}
          +
          +
          +

          训练集收集

          +

          DP-GEN代码迭代生成的训练集是分散储存的。可以用DP-GEN自带的collect函数进行数据收集。

          +

          首先可以使用dpgen collect -h 查看使用说明

          +

          常用用法是

          +
          dpgen collect JOB_DIR OUTPUT_DIR -p param.json
          +
          +

          JOB_DIR就是DP-GEN的输出目录,包含有iter.0000*一系列的目录。OUTPUT_DIR就是收集的数据准备放到哪。param.json就是运行DP-GEN跑的param文件。

          +

          例如:

          +
          dpgen collect ./ ./collect -p param-ruo2.json
          +
          +

          以上命令会把当前文件夹的DP-GEN数据收集好放入collect目录里。

          +
          init.000  init.001  sys.000  sys.001
          +
          +

          init.*是初始训练集,sys.*是后来DP-GEN生成的训练集,按照param的sys分类。

          +

          Bonus!

          +

          常见报错问题(欢迎补充&修正)

          +
            +
          • ... expecting value ...
          • +
          +

          可能是数组或者字典末尾多写了逗号

          +
            +
          • ERROR: lost atoms ...
          • +
          +

          可能是Lammps算model_devi的时候因为势函数太差导致有原子重合而报错。可以手动在对应的单条轨迹的input.lammps中加入

          +
            thermo_modify   lost ignore flush yes
          +
          +

          然后在上一级文件夹下面手动提交任务

          +

            bsub<*.sub
          +
+
• AssertionError

          +

          某个单点能计算中断后重新开始,导致cp2k的output中有重叠。可以在02.fp文件夹下用以下脚本进行检查: +

          import dpdata
          +import glob
          +l = glob.glob("task.002*")
          +l.sort()
          +stc = dpdata.LabeledSystem(l[0]+'/output',fmt='cp2k/output')
          +for i in l[1:]:
          +    print(i)
          +    stc += dpdata.LabeledSystem(i+'/output',fmt='cp2k/output')
          +

          +

          其中task.002.*代表遍历002system中的被标记的结构。如果不同系统的原子数相同,也可以直接用task.00*一次性检查所有的结构。

          +
            +
          • 如果你发现进行 model deviation 从一开始就非常大,并且测试集的结构被打乱,有可能是在 param 文件中设置了"shuffle_poscar": true。该选项会随机打乱测试集原始 POSCAR 中的行,并用打乱后的结构进行 model deviation 测试。该选项主要用于打乱合金体系的结构,然而对于界面或者共价键连接的体系(如半导体),随机打乱原子的将会使界面结构或者半导体结构变成混乱的一锅粥,没有任何化学含义,因此我们不用进行shuffle(也不可以)。请在 param 文件中设置: +
            ...
            +"shuffle_poscar": false
            +...
            +
          • +
          +

          script from xyz to POSCAR

          +

          from ase.io import iread, write
          +import ase.build
          +
          +for j in range(2):
          +    i=0
          +    for atoms in iread('./traj_'+str(j)+'.xyz', format='xyz'):
          +        atoms.set_cell([11.246, 11.246, 35.94,90,90,90])
          +        i=i+1
          +        if i%20==0:
          +            atoms=ase.build.sort(atoms)
          +            ase.io.write('POSCAR_'+str(j)+'_'+str(int(i/20)-1), atoms, format='vasp',vasp5=True)
          +
          +或者调用ase.io.vasp里的write:

          +
def write_vasp(filename, atoms, label=None, direct=False, sort=None,
+               symbol_count=None, long_format=True, vasp5=False,
+               ignore_constraints=False):
          +
          + + + + + + + + + + + + + + + +


          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/DeePMD-kit/index.html b/wiki/software_usage/DeePMD-kit/index.html new file mode 100644 index 00000000..ddf16298 --- /dev/null +++ b/wiki/software_usage/DeePMD-kit/index.html @@ -0,0 +1,3720 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeePMD-kit 使用入门 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          DeePMD-kit 2.x 使用入门

          +

          简介

          +

          DeePMD-kit是一个训练神经网络势能(Machine Learning Potential)的代码包。该包主要由张林峰(普林斯顿大学),王涵(北京应用物理与计算数学研究所)开发。黄剑兴和庄永斌曾经短时间参与开发。如有问题,可以向他们询问。

          +
          +

          Danger

          +

          我们已经舍弃了1.x版本的教程。

          +
          +

          以下为参考信息:

          + +
          +

          Warning

          +

          此页面仅限提供贡献者对于该软件的理解,如有任何问题请联系贡献者

          +
          +

          第一次尝试

          +

          运行第一次机器学习

          +

          如果你正在使用 Zeus 集群,请使用 slurm 脚本来提交 DeePMD-kit 任务。

          +

          请从 Github 下载 DeePMD-kit 的代码,我们将会使用里面的水模型做为例子。

          +
          git clone https://github.com/deepmodeling/deepmd-kit.git
          +
          +

          首先进入含有水模型的例子的目录

          +
          cd <deepmd repositoy>/examples/water/se_e2_a/
          +
          +

          你会看到input.json文件,这是DeePMD-kit使用的输入文件。现在复制/data/share/base/script/deepmd.lsf到当前文件夹,并且修改它。

          +
          cp /data/share/base/script/deepmd.lsf ./
          +vim deepmd.lsf
          +
          +
          +

          Warning

          +

          如果调用的是1.0的版本,需要在learning_rate下加入decay_rate关键词,一般设为0.95.

          +
          +

          你现在仅需要修改 slurm 脚本中的输入文件名称即可。把脚本中的input.json替换成water_se_a.json

          +
          #!/bin/bash
          +
          +#BSUB -q gpu
          +#BSUB -W 24:00
          +#BSUB -J train
          +#BSUB -o %J.stdout
          +#BSUB -e %J.stderr
          +#BSUB -n 8
          +#BSUB -R "span[ptile=8]"
          +# ============================================
          +# modify the number of cores to use
          +# according to the number of GPU you select
          +# for example, 8 cores for one GPU card
          +# while there are 32 cores in total
          +# ============================================
          +
          +# add modulefiles
          +module add deepmd/2.2.7
          +
          +# automatic select the gpu
          +source /data/share/base/script/find_gpu.sh
          +
          +dp train input.json -l train.log
          +
          +

          使用如下命令提交任务:

          +
          #submit your job
          +bsub < deepmd.lsf
          +#check your job by
          +bjobs 
          +
          +

          当任务执行中,当前目录会生成以下文件:

          +
            +
          • train.log: 训练的记录文件
          • +
          • lcurve.out: 机器学习的学习曲线
          • +
          • model.ckpt.data-00000-of-00001, model.ckpt.index, checkpoint, model.ckpt.meta: 以上三个为训练存档点
          • +
          +

          非常好!已经成功开始第一次机器学习训练了!

          +

          浏览输出文件

          +

          使用 less 命令来浏览输出文件

          +
          less train.log
          +
          +

          你将会看到如下内容

          +
          # DEEPMD: initialize model from scratch
          +# DEEPMD: start training at lr 1.00e-03 (== 1.00e-03), final lr will be 3.51e-08
          +2019-12-07 00:03:49.659876: I tensorflow/stream_executor/platform/default/dso_loader.cc:42] Successfully opened dynamic library libcublas.so.10.0
          +# DEEPMD: batch     100 training time 5.95 s, testing time 0.18 s
          +# DEEPMD: batch     200 training time 4.58 s, testing time 0.20 s
          +# DEEPMD: batch     300 training time 4.56 s, testing time 0.14 s
          +# DEEPMD: batch     400 training time 4.49 s, testing time 0.13 s
          +# DEEPMD: batch     500 training time 4.60 s, testing time 0.14 s
          +# DEEPMD: batch     600 training time 4.61 s, testing time 0.15 s
          +# DEEPMD: batch     700 training time 4.43 s, testing time 0.18 s
          +# DEEPMD: batch     800 training time 4.59 s, testing time 0.13 s
          +# DEEPMD: batch     900 training time 4.41 s, testing time 0.17 s
          +# DEEPMD: batch    1000 training time 4.66 s, testing time 0.11 s
          +# DEEPMD: saved checkpoint model.ckpt
          +# DEEPMD: batch    1100 training time 4.45 s, testing time 0.15 s
          +# DEEPMD: batch    1200 training time 4.37 s, testing time 0.14 s
          +
          +

          batch后面的数字表明程序已经放入了多少数据进行训练。这个数字的显示间隔,即100,是在输入文件的"disp_freq": 100 设置的。

          +

          现在来看看你的学习曲线 lcurve.out

          +
          less lcurve.out
          +
          +

          你将会看到:

          +
          #  step      rmse_val    rmse_trn    rmse_e_val  rmse_e_trn    rmse_f_val  rmse_f_trn         lr
          +      0      1.69e+01    1.58e+01      1.52e+00    5.69e-01      5.35e-01    5.00e-01    1.0e-03
          +   1000      4.74e+00    4.68e+00      3.88e-02    4.02e-01      1.50e-01    1.48e-01    1.0e-03
          +   2000      5.06e+00    3.93e+00      1.86e-01    1.54e-01      1.60e-01    1.24e-01    1.0e-03
          +   3000      4.73e+00    4.34e+00      9.08e-02    3.90e-01      1.49e-01    1.37e-01    1.0e-03
          +   4000      4.65e+00    6.09e+00      2.24e-01    1.92e-01      1.47e-01    1.93e-01    1.0e-03
          +   5000      3.84e+00    3.25e+00      5.26e-02    2.40e-02      1.25e-01    1.06e-01    9.4e-04
          +   6000      4.17e+00    2.78e+00      6.35e-02    3.89e-02      1.36e-01    9.03e-02    9.4e-04
          +   7000      3.24e+00    3.00e+00      5.55e-02    8.58e-03      1.05e-01    9.76e-02    9.4e-04
          +   8000      2.97e+00    2.83e+00      2.97e-02    2.46e-02      9.68e-02    9.22e-02    9.4e-04
          +   9000      1.01e+01    6.92e+00      1.36e-01    1.89e-01      3.28e-01    2.25e-01    9.4e-04
          +  10000      3.73e+00    3.39e+00      4.38e-02    3.23e-02      1.25e-01    1.14e-01    8.9e-04
          +  11000      3.51e+00    2.76e+00      1.31e-01    3.47e-01      1.17e-01    8.98e-02    8.9e-04
          +  12000      2.59e+00    2.89e+00      1.35e-01    1.18e-01      8.57e-02    9.65e-02    8.9e-04
          +  13000      5.65e+00    4.68e+00      3.08e-01    3.28e-01      1.88e-01    1.55e-01    8.9e-04
          +
          +

这些数字展示了当前机器学习模型对于数据预测的误差有多大。rmse_e_trn 意味着在训练集上使用机器学习模型预测能量的误差会有多大,rmse_e_val 则是在验证集(即测试数据)上预测能量的误差。rmse_f_val 和 rmse_f_trn 含义相同,只不过对应的是力的预测。你可以使用 Matplotlib Python 包对学习曲线进行作图。

          +

          使用进阶

          +

          准备训练数据

          +

          前半部分仅仅是让你运行DeePMD-kit进行训练。为了训练一个针对你的体系的模型,你需要自己来准备数据。这些数据都是第一性原理计算得到的数据。这些数据可以是单点能计算得到的数据,或者是分子动力学模拟得到的数据。作为数据集需要的数据有:

          +
            +
          • 体系的结构文件:coord.npy
          • +
          • 体系的结构文件对应的元素标记:type.raw
          • +
          • 体系的结构文件对应的能量:energy.npy
          • +
          • 体系的结构文件对应的力:force.npy
          • +
          • 体系的结构文件对应的晶胞大小,如果是非周期性体系,请在训练文件里准备一个超大周期边界条件:box.npy
          • +
          +

          代码块里的文件名为DeePMD-kit使用的命名。npy后缀为Python的numpy代码包生成的文件,请在此之前学习numpy。如果你使用cp2k得到数据,你会有 *pos-1.xyz*frc-1.xyz 文件。你可以使用帮助的脚本转化成DeePMD-kit的数据集格式。

          +

          现在我们来看看DeePMD-kit的训练数据格式。之前我们训练的水模型的数据集储存在 <deepmd repository>/examples/water/data/data_0. 让我们来看看数据集的目录结构:

          +
          # directory structre for training data
          +.
          +├── data_0
          +│   ├── set.000
          +│      ├── box.npy
          +│      ├── coord.npy
          +│      ├── energy.npy
          +│      └── force.npy
          +│   ├── type.raw
          +│   └── type_map.raw
          +├── data_1
          +│   ├── set.000
          +│      ├── box.npy
          +│      ├── coord.npy
          +│      ├── energy.npy
          +│      └── force.npy
          +│   ├── set.001
          +│      ├── box.npy
          +│      ├── coord.npy
          +│      ├── energy.npy
          +│      └── force.npy
          +│   ├── type.raw
          +│   └── type_map.raw
          +├── data_2
          +│   ├── set.000
          +│      ├── box.npy
          +│      ├── coord.npy
          +│      ├── energy.npy
          +│      └── force.npy
          +│   ├── type.raw
          +│   └── type_map.raw
          +└── data_3
          +    ├── set.000
          +       ├── box.npy
          +       ├── coord.npy
          +       ├── energy.npy
          +       └── force.npy
          +    ├── type.raw
          +    └── type_map.raw
          +
          +

          显然,我们会看到type.raw文件和一堆以set开头的目录。type.raw文件记录了体系的元素信息。如果你打开你会发现它仅仅记录了一堆数字。这些数字对应着你在water_se_a.json"type_map":["O","H"]的信息。此时0代表O,1代表H。对应着["O","H"]中的位置,其中第一位为0。

          +
          0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
          +
          +

          box.npy, coord.npy, energy.npyforce.npy 储存的信息在上文已经说过。唯一需要注意的是这些文件都储存着一个超大的矩阵。如果我们有Y个结构,每个结构有X个原子。box.npy, coord.npy, energy.npyforce.npy 对应的矩阵形状分别是 (Y, 9), (Y, X*3), (Y, 1), (Y, X*3)。

          +

          设置你的输入文件

          +

          输入文件是json文件。你可以使用之前我们的json文件进行细微改动就投入到自己体系的训练中。这些需要修改的关键词如下:

          +
            +
• "type": "se_a": 设置描述符(descriptor)类型。一般使用se_a
          • +
          • "sel": [46, 92]: 设置每个原子的截断半径内所拥有的最大原子数。注意这里的两个数字46,92分别对应的是O原子和H原子。与你在type_map里设置的元素类型是相对应的。
          • +
          +

          "descriptor" :{
          +         "type":     "se_a",
          +         "sel":      [46, 92],
          +         "rcut_smth":    0.50,
          +         "rcut":     6.00,
          +         "neuron":       [25, 50, 100],
          +         "resnet_dt":    false,
          +         "axis_neuron":  16,
          +         "seed":     1,
          +         "_comment":     " that's all"
          +     },
          +
+
在"training"的"training_data"下:
- "systems": ["../data/data_0/", "../data/data_1/", "../data/data_2/"]: 设置包含训练数据的目录。
- "batch_size": "auto", 这个会根据体系原子数进行分配,不过我们自己通常设置为1,因为体系原子数有400-800个左右。

          +

              "training_data": {
          +        "systems":      ["../data/data_0/", "../data/data_1/", "../data/data_2/"],
          +        "batch_size":   "auto",
          +        "_comment":     "that's all"
          +    }
          +
+
在"training"的"validation_data"下:
- "systems": ["../data/data_3"]: 设置包含测试数据的目录。
- "batch_size": 1, 这个会根据体系原子数进行分配,不过我们自己通常设置为1,因为体系原子数有400-800个左右。
- "numb_btch": 3, 每次迭代中,测试的结构数量为batch_size乘以numb_btch。
- 更多参数说明,请参考官方文档:https://deepmd.readthedocs.io/en/latest/train-input.html

          +
          +

          Warning

          +

记住在集群上训练,请使用 Slurm 脚本提交作业。

          +
          +

          开始你的训练

          +

          使用如下命令开始:

          +
          dp train input.json
          +
          +
          +

          Warning

          +

          记住在集群上训练,请使用 Slurm 脚本。

          +
          +

          重启你的训练

          +

          使用以下命令重启:

          +
          dp train input.json --restart model.ckpt
          +
          +
          +

          Warning

          +

          记住在集群上训练,请使用 Slurm 脚本。

          +
          +

          使用生成的势能函数进行分子动力学(MD)模拟

          +

当我们完成训练之后,我们需要根据检查点文件(model.ckpt*)冻结(Freeze)出一个模型来。

          +

          利用如下命令,可以冻结模型:

          +
          dp freeze
          +
          +

          你将会得到一个*.pb文件。利用此文件可以使用LAMMPS, ASE, CP2K 等软件进行分子动力学模拟。
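若希望自定义冻结出的模型文件名,可以使用 -o 选项(文件名仅为示意,完整选项可用 dp freeze -h 查看):

```bash
dp freeze -o graph.pb
```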

          +

          利用压缩模型进行产出(Production)

          +

利用机器学习势能(*.pb文件)进行MD模拟虽然已经非常迅速,但仍有提升空间。首先我们需要用2.0以上版本的DeePMD-kit训练势能函数,并得到*.pb文件。若势能函数是用1.2/1.3版本的DeePMD-kit训练得到的也不用担心,可以利用以下命令对旧版本的势能函数进行转换。例如想要从1.2版本转换:

          +
          dp convert-from 1.2 -i old_frozen_model.pb -o new_frozen_model.pb
          +
          +
          +

          关于兼容性的说明

          +

          关于目前势函数的兼容性,请参考官方文档。 +目前DeePMD-kit支持从 v0.12, v1.0, v1.1, v1.2, v1.3 版本到新版本的转换。

          +
          +

建议将原训练文件夹备份后,在其副本中利用如下命令进行压缩(文件夹下应该含有对应的input.json文件和checkpoint文件):

          +
          module load deepmd/2.0-cuda11.3
          +dp compress -i normal-model.pb -o compressed-model.pb -l compress.log
          +
          +
          +

          适用范围

          +

          注意模型压缩仅适用于部分模型,如 se_e2_a, se_e3, se_e2_r 和上述模型的 Hybrid 模型。

          +

若使用其他模型,如 se_atten 模型 (DPA-1),模型压缩尚未被支持,可能会报错。

          +

另外请注意,压缩模型是通过使用 5 次多项式拟合 Embedding-net 来换取性能提升,这一改动 几乎 不会对预测精度产生影响,但实际上部分牺牲了精度。因而使用时请务必注意观察默认参数是否适用于当前体系,如是否出现误差漂移,并针对性地修改参数,如拟合时采用的步数 --step。
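例如,若需要调整拟合时采用的步数,可以在压缩命令中加上 -s/--step 选项(下面的取值仅作示意,请结合自己的体系测试误差):

```bash
dp compress -i normal-model.pb -o compressed-model.pb -s 0.01 -l compress.log
```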

          +
          +

          压缩模型与原始模型对比

          +

          测试2080Ti, 显存11G

| 体系 | 原子数 | 提速前 (ns/day) | 提速后 (ns/day) | 提升倍率 |
| --- | --- | --- | --- | --- |
| LiGePS | 5000 | 0.806 | 3.569 | 4.42 |
| SnO2/water interface | 6021 | 0.059 | 0.355 | 6.01 |
| SnO2/water interface | 5352 | 0.067 | 0.382 | 5.70 |
| SnO2/water interface | 2676 | 0.132 | 0.738 | 5.59 |
| SnO2/water interface | 1338 | 0.261 | 1.367 | 5.23 |
| SnO2/water interface | 669 | 0.501 | 2.236 | 4.46 |
| LiGePS | 400 | 7.461 | 23.992 | 3.21 |
| Cu | 1313 | 51.268 | 65.944 | 1.28 |
          +

SnO2/water interface: 原始模型最多可模拟 6021 个原子 → 压缩模型最多可模拟 54189 个原子

          +

Troubleshooting

          +

          warning: loc idx out of lower bound

          +

          Solution: https://github.com/deepmodeling/deepmd-kit/issues/21

          +

          ValueError: NodeDef missing attr 'T' from ...

          +

          当一个模型使用 deepmd/1.2 训练,但是用更高版本的 deepmd-kit (> v1.3) 进行 lammps 任务的时候经常会报这个错,例子:

          + +

          但是,现在发现这个报错在压缩 v1.3 版本模型的时候也会出现。使用下列命令:

          +
          dp compress ${input} --checkpoint-folder ${ckpt} 1.3-model.pb -o compressed-model.pb -l compress.log
          +
          +

其中 ${input} 和 ${ckpt} 分别是对应模型的输入脚本所在路径和检查点目录。在这个例子里,我们仅把需要压缩的模型复制到了工作文件夹下,输入脚本所在路径和检查点目录由人工指定。至于为什么这样会报错 'ValueError',目前还没有找到原因。

          +

          因此,我们建议 备份之前的训练文件夹,在训练文件夹的一个 copy 下进行压缩任务

          +

          Extra Support

          +

Script for conversion from CP2K xyz to numpy set

          +
          from ase.io import read
          +import numpy as np
          +import os, sys
          +import glob
          +import shutil
          +
          +
          +#############################
          +# USER INPUT PARAMETER HERE #
          +#############################
          +
+# input data path here, string, this directory should contain
+#   ./data/*frc-1.xyz ./data/*pos-1.xyz
+data_path = "./data"
+
+# input the number of atoms in the system
+atom_num = 189
+
+# input cell parameter here
+cell = [[10.0,0,0],[0,10.0,0],[0,0,10.0]]
          +
          +# conversion unit here, modify if you need
          +au2eV = 2.72113838565563E+01
          +au2A = 5.29177208590000E-01
          +
          +
          +####################
          +# START OF PROGRAM #
          +####################
          +
          +def xyz2npy(pos, atom_num, output, unit_convertion=1.0):
          +    total = np.empty((0,atom_num*3), float)
          +    for single_pos in pos:
          +        tmp=single_pos.get_positions()
          +        tmp=np.reshape(tmp,(1,atom_num*3))
          +        total = np.concatenate((total,tmp), axis=0)
          +    total = total * unit_convertion
          +    np.save(output, total)
          +
          +def energy2npy(pos, output, unit_convertion=1.0):
          +     total = np.empty((0), float)
          +     for single_pos in pos:
          +         tmp=single_pos.info.pop('E')
          +         tmp=np.array(tmp,dtype="float")
          +         tmp=np.reshape(tmp,1)
          +         total = np.concatenate((total,tmp), axis=0)
          +     total = total * unit_convertion
          +     np.save(output,total)
          +
          +def cell2npy(pos, output, cell, unit_convertion=1.0):
          +    total = np.empty((0,9),float)
          +    frame_num = len(pos)
          +    cell = np.array(cell, dtype="float")
          +    cell = np.reshape(cell, (1,9))
          +    for frame in range(frame_num):
          +        total = np.concatenate((total,cell),axis=0)
          +    total = total * unit_convertion
          +    np.save(output,total)
          +
+def type_raw(single_pos, output):
+    element = single_pos.get_chemical_symbols()
+    element = np.array(element)
+    # 注意:np.unique 按元素符号的字母顺序编号,请核对该顺序与训练输入中 type_map 的顺序是否一致
+    tmp, indice = np.unique(element, return_inverse=True)
+    np.savetxt(output, indice, fmt='%s',newline=' ')
          +
          +
          +# read the pos and frc
          +data_path = os.path.abspath(data_path)
          +pos_path = os.path.join(data_path, "*pos-1.xyz")
          +frc_path = os.path.join(data_path, "*frc-1.xyz")
          +#print(data_path)
          +pos_path = glob.glob(pos_path)[0]
          +frc_path = glob.glob(frc_path)[0]
          +#print(pos_path)
          +#print(frc_path)
          +pos = read(pos_path, index = ":" )
          +frc = read(frc_path, index = ":" )
          +
          +# numpy path
          +set_path = os.path.join(data_path, "set.000")
          +if os.path.isdir(set_path):
          +    print("detect directory exists\n now remove it")
          +    shutil.rmtree(set_path)
          +    os.mkdir(set_path)
          +else:
          +    print("detect directory doesn't exist\n now create it")
          +    os.mkdir(set_path)
          +type_path = os.path.join(data_path, "type.raw")
          +coord_path = os.path.join(set_path, "coord.npy")
          +force_path = os.path.join(set_path, "force.npy")
          +box_path = os.path.join(set_path, "box.npy")
          +energy_path = os.path.join(set_path, "energy.npy")
          +
          +
+# transformation
          +xyz2npy(pos, atom_num, coord_path)
          +xyz2npy(frc, atom_num, force_path, au2eV/au2A)
          +energy2npy(pos, energy_path, au2eV)
          +cell2npy(pos, box_path, cell)
          +type_raw(pos[0], type_path)
          +
          +

          升级到DeePMD-kit 2.0

          +

          目前 DeePMD-kit 2.0 正式版已经发布,相比旧版已有众多提升,且压缩模型为正式版特性。目前我们集群上已安装 DeePMD-kit 2.0.3。

          +

          输入文件

          +

          DeePMD-kit 2.0 相比 1.x 在输入文件上做了一定改动,以下给出一个 DeePMD-kit 2.0 输入文件的例子:

          +
          {
          +    "_comment": " model parameters",
          +    "model": {
          +        "type_map": [
          +            "O",
          +            "H"
          +        ],
          +        "descriptor": {
          +            "type": "se_e2_a",
          +            "sel": [
          +                46,
          +                92
          +            ],
          +            "rcut_smth": 0.50,
          +            "rcut": 6.00,
          +            "neuron": [
          +                25,
          +                50,
          +                100
          +            ],
          +            "resnet_dt": false,
          +            "axis_neuron": 16,
          +            "seed": 1,
          +            "_comment": " that's all"
          +        },
          +        "fitting_net": {
          +            "neuron": [
          +                240,
          +                240,
          +                240
          +            ],
          +            "resnet_dt": true,
          +            "seed": 1,
          +            "_comment": " that's all"
          +        },
          +        "_comment": " that's all"
          +    },
          +    "learning_rate": {
          +        "type": "exp",
          +        "decay_steps": 5000,
          +        "start_lr": 0.001,
          +        "stop_lr": 3.51e-8,
          +        "_comment": "that's all"
          +    },
          +    "loss": {
          +        "type": "ener",
          +        "start_pref_e": 0.02,
          +        "limit_pref_e": 1,
          +        "start_pref_f": 1000,
          +        "limit_pref_f": 1,
          +        "start_pref_v": 0,
          +        "limit_pref_v": 0,
          +        "_comment": " that's all"
          +    },
          +    "training": {
          +        "training_data": {
          +            "systems": [
          +                "../data/data_0/",
          +                "../data/data_1/",
          +                "../data/data_2/"
          +            ],
          +            "batch_size": "auto",
          +            "_comment": "that's all"
          +        },
          +        "validation_data": {
          +            "systems": [
          +                "../data/data_3"
          +            ],
          +            "batch_size": 1,
          +            "numb_btch": 3,
          +            "_comment": "that's all"
          +        },
          +        "numb_steps": 1000000,
          +        "seed": 10,
          +        "disp_file": "lcurve.out",
          +        "disp_freq": 100,
          +        "save_freq": 1000,
          +        "_comment": "that's all"
          +    },
          +    "_comment": "that's all"
          +}
          +
          +

          DeePMD-kit 2.0 提供了对验证集(Validation Set)的支持,因而用户可指定某一数据集作为验证集,并输出模型在该数据集上的误差。 +相比旧版而言,新版输入文件参数的具体含义变化不大,除了对数据集的定义外,大部分参数含义保持一致。

          +

          以下列出一些需要注意的事项:

          +
            +
          1. 训练数据集不再直接写在 training 下,而是写在 training 的子键 training_data 下,格式如下所示: +
            "training_data": {
            +         "systems": [
            +             "../data/data_0/",
            +             "../data/data_1/",
            +             "../data/data_2/"
            +         ],
            +         "batch_size": "auto"
            +     }
            +
            + 默认情况下,每一训练步骤中,DeePMD-kit随机从数据集中挑选结构加入本轮训练,这一步骤加入数据的多少取决于 batch_size 的大小,此时,各 system 中数据被使用的概率是均等的。 + 若希望控制各 system 数据的权重,可使用 auto_prob 来控制,其参数选项如下所示
              +
            • prob_uniform: 各 system 数据权重均等。
            • +
            • prob_sys_size: 各 system 数据的权重取决于其各自的大小。
            • +
            • prob_sys_size: 写法示例如下:sidx_0:eidx_0:w_0; sidx_1:eidx_1:w_1;...。 该参数中,sidx_ieidx_i 表示第 i 组数据的起止点,规则同 Python 语法中的切片,w_i 则表示该组数据的权重。在同一组中,各 system 数据的权重取决于各自的大小。 + batch_size 的值可手动设定,根据经验一般根据“乘以原子数≤32”的规则设定。新版则支持自动设定,若设定为"auto"则表示按照此规则自动设置,若设定为"auto:N"则根据“乘以原子数≤N”的规则设定。
            • +
            +
2. save_ckpt, load_ckpt, decay_rate 等为过时参数,若由 1.x 迁移,请删除这些参数,否则会导致报错。
3. n_neuron 更名为 neuron,stop_batch 更名为 numb_steps,请注意更改。对应地,decay rate 由 start_lr 和 stop_lr 决定。
4. lcurve.out 中删除了测试数据的 RMSE 值,因此旧版作图脚本需要对应修改,减少列数(能量在第3列,力在第4列)。若指定了验证集,则会输出模型在验证集上的 RMSE。
          +

          更多详细说明,请参见官方文档

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/ECINT Tutorial/user/index.html b/wiki/software_usage/ECINT Tutorial/user/index.html new file mode 100644 index 00000000..3d4d7d76 --- /dev/null +++ b/wiki/software_usage/ECINT Tutorial/user/index.html @@ -0,0 +1,3176 @@ + + + + + + + + + + + + + + + + + + + + + + + ECINT 的使用 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          ECINT 的使用

          +

          安装与配置

          +

          在使用 ECINT 前,需安装并配置好 aiida-core 与 aiida 插件,不过也可以在 hydrogen 上体验已配置好的环境

          +

          如何进入 hydrogen

          +
            +
1. 联系集群管理员,将你的公钥放到 hydrogen 上
2. 在海洋楼网络环境下,通过以下命令可进入 hydrogen
          +
          ssh -p 8099 chenglab@10.24.3.144
          +
          +
          +

建议在使用工作流时,先在 ~/users 下建立一个以自己名字命名的工作目录;users/public.data 对应 51/52 集群的 /public.data

          +
          +

          输入文件

          +

在想要运行工作流的工作路径下准备一个 .json 输入文件,示例如下 (要用双引号 ",而不是单引号 '):

          +
          {
          +  "workflow": "NebWorkChain",
          +  "webhook": "https://oapi.dingtalk.com/robot/send?access_token=xxxxxx",
          +  "resdir": "results",
          +  "structure": ["ethane_1_opt.xyz", "ethane_s1.xyz", "ethane_ts.xyz", "ethane_s2.xyz"],
          +  "cell": [12, 12, 12],
          +  "metadata": {
          +    "kind_section": {
          +      "BASIS_SET": "TZV2P-GTH",
          +      "POTENTIAL": "GTH-PBE"
          +    }
          +  }
          +}
          +
          +

或者也可以用 .yaml 输入文件,示例如下 (- 与 ethane_1_opt.xyz 之间不要漏了空格):

          +
          workflow: NebWorkChain
          +webhook: https://oapi.dingtalk.com/robot/send?access_token=xxxxxx
          +resdir: results
          +structure:
          +  - ethane_1_opt.xyz
          +  - ethane_s1.xyz
          +  - ethane_ts.xyz
          +  - ethane_s2.xyz
          +cell: [12, 12, 12]
          +metadata:
          +  kind_section:
          +    BASIS_SET: TZV2P-GTH
          +    POTENTIAL: GTH-PBE
          +
          +
          +

          更多输入的例子在 https://github.com/chenggroup/ecint/tree/develop/example

          +
          +

          各关键词解释

          +
            +
          • +

            workflow (必填): workflow 的名字,具体可选的请见可选用的 workflow 部分

            +
          • +
          • +

            webhook (选填): 钉钉机器人 webhook,当工作流完成时想要即时收到钉钉提醒时可设置,否则可不用

            +
          • +
          • +

            resdir (选填, default: 当前所在路径): 结果文件的储存路径

            +
          • +
          • +

            structure/structures_folder (必填其中之一): 仅输入一个结构时,structure 为结构文件的路径 (非列表),对于 neb 这种需要多个输入结构的,structure 为结构文件路径的列表。如果批量进行计算,则把批量的结构所在文件夹加入 structures_folder (暂不支持 neb)

            +
          • +
          • +

            cell (选填): 设置了 cell 后会改变那些结构中不包含 cell 信息的 cell。如果用的是 .xyz 格式,一般需要设置 cell (因为 .xyz 一般不包含 cell 的信息),.cif or POSCAR(.vasp) 则不需要设置。cell 的格式与 ase 中的 cell 格式保持一致,如 [12, 12, 12] 或 [[12, 0, 0], [0, 12, 0], [0, 0, 12]] 或 [12, 12, 12, 90, 90, 90]

            +
          • +
          • +

            metadata (选填):

            +
          • +
          +
          +

          以下参数可不填,对于不同的 workflow 均有不同的默认值

          +
          +
            +
          • +

            config: 可以为 dict, .json, .yaml,表示 cp2k 输入参数的基本设置,以 dict 的形式来表示 cp2k 输入,一些细致的设置,如计算精度,可在此处修改,也可通过 cp2k 输入文件进行转化。无特殊需求可不更改。config 的示例如下:

            +
          • +
          • +

            kind_section: 配置 BASIS_SET 和 POTENTIAL 的基本信息,可以有四种输入形式

            +
            +

            若设置了 kind_section 的话,需同时设置 BASIS_SETPOTENTIAL。如果按元素来指定了 BASIS_SETPOTENTIAL 的话,需要指定所有元素的设置。设置比较复杂的话推荐以文件的方式 (下面的第四种方法) 来引用 kind_section

            +
            +
              +
第一种:所有元素使用同一组设置

```python
# .json
"kind_section": {"BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}

# or .yaml
kind_section:
  BASIS_SET: TZV2P-GTH
  POTENTIAL: GTH-PBE
```

第二种:按元素分别设置(字典形式)

```python
# .json
"kind_section": {"H": {"BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}, "O": {"BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}, ...}

# or .yaml
kind_section:
  H:
    BASIS_SET: TZV2P-GTH
    POTENTIAL: GTH-PBE
  O:
    BASIS_SET: TZV2P-GTH
    POTENTIAL: GTH-PBE
  ...
```

第三种:按元素分别设置(列表形式,"_" 表示元素名)

```python
# .json
"kind_section": [{"_": "H", "BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}, {"_": "O", "BASIS_SET": "TZV2P-GTH", "POTENTIAL": "GTH-PBE"}, ...]

# or .yaml
kind_section:
  - _: H
    BASIS_SET: TZV2P-GTH
    POTENTIAL: GTH-PBE
  - _: O
    BASIS_SET: TZV2P-GTH
    POTENTIAL: GTH-PBE
  ...
```

第四种:以文件的方式引用(<<YOUR_KIND_SECTION_FILE>> 为 .json 或 .yaml 文件)

```python
# <<YOUR_KIND_SECTION_FILE>> example
kind_section:
  H:
    BASIS_SET: TZV2P-GTH
    POTENTIAL: GTH-PBE
  O:
    BASIS_SET: TZV2P-GTH
    POTENTIAL: GTH-PBE
  ...

# .json
"kind_section": "<<YOUR_KIND_SECTION_FILE>>" # YOUR_KIND_SECTION_FILE can be .json or .yaml

# or .yaml
kind_section: <<YOUR_KIND_SECTION_FILE>> # .json or .yaml
```

            +
          • +
          • +

            machine: 选择配置好的服务器 (目前仅支持 cp2k@aiida_test) 以及配置资源的使用情况

            +
            // example
            +{
            +    "code@computer": "cp2k@aiida_test",
            +    "nnode": 2,
            +    "queue": "medium"
            +}
            +
            +
              +
            • code@computer: 配置好的 aiida 服务器 (目前仅支持 cp2k@aiida_test)
            • +
            • nnode/nprocs/n (选填其中之一): 使用服务器节点数/使用服务器核数/使用服务器核数
            • +
            • walltime/max_wallclock_seconds/w (选填其中之一): 强制终止计算时间,单位 s
            • +
            • queue/queue_name/q (选填其中之一): 服务器队列名
            • +
            • ptile: 每节点至少需使用的核数,默认值为每节点的核数
            • +
            +
          • +
          • +

            ...: some parameters for special workflow

            +
          • +
          • +

            subdata (选填):

            +
          • +
          +
          +

          用于修改多步工作流中,每步工作流的 config, kind_section, machine, 其设置会覆盖掉 metada 中的相关设置。

          +

          e.g. NebWorkChain 由三部分组成: geoopt, neb, frequency. 若输入如下:

          +
          workflow: NebWorkChain
          +webhook: https://oapi.dingtalk.com/robot/send?access_token=xxx  # your own webhook
          +resdir: results_yaml
          +structure:
          +  - ethane_1_opt.xyz
          +  - ethane_s1.xyz
          +  - ethane_ts.xyz
          +  - ethane_s2.xyz
          +cell:
          +  - [12, 0, 0]
          +  - [0, 12, 0]
          +  - [0, 0, 12]
          +metadata:
          +  kind_section:
          +    BASIS_SET: DZVP-MOLOPT-SR-GTH
          +    POTENTIAL: GTH-PBE
          +subdata:
          +  geoopt:
          +    kind_section:
          +      BASIS_SET: TZV2P-MOLOPT-GTH
          +      POTENTIAL: GTH-PBE
          +
          +

          geoopt 部分的 kind_section 会被更新为 {"BASIS_SET": "TZV2P-MOLOPT-GTH", "POTENTIAL": "GTH-PBE"} ,而 nebfrequency 部分的 kind_section 则与 metadata 中的保持一致。

          +
          +
            +
          • <>:
              +
            • config: 见 metadata
            • +
            • kind_section: 见 metadata
            • +
            • machine: 见 metadata
            • +
            +
          • +
          • <>:
              +
            • config
            • +
            • kind_section
            • +
            • machine
            • +
            +
          • +
          • ...
          • +
          +

          可选用的 workflow

          +

          输出的基本信息在 results.dat 中,以下 workflow 中仅说明除了 results.dat 外的输出文件

          +

          EnergySingleWorkChain

          +
          +

          Just single point energy

          +
          +
            +
          • 输入默认值:
          • +
          • config: energy.json
          • +
          • kind_section: {"BASIS_SET": "DZVP-MOLOPT-SR-GTH", "POTENTIAL": "GTH-PBE"}
          • +
          • machine: {"code@computer": "cp2k@aiida_test", "nnode": 1, "walltime": 12 * 60 * 60, "queue": "medium"}
          • +
          • 其他输出:
          • +
          • 包含能量信息的结构: coords.xyz
          • +
          +

          GeooptSingleWorkChain

          +
          +

Just geometry optimization

          +
          +
            +
          • 输入默认值:
          • +
          • config: geoopt.json
          • +
          • kind_section: {"BASIS_SET": "DZVP-MOLOPT-SR-GTH", "POTENTIAL": "GTH-PBE"}
          • +
          • machine: {"code@computer": "cp2k@aiida_test", "nnode": 1, "walltime": 12 * 60 * 60, "queue": "medium"}
          • +
          • 其他输出:
          • +
          • 结构优化完后的结构: structure_geoopt.xyz
          • +
          +

          NebSingleWorkChain

          +
          +

          Just CI-NEB

          +
          +
            +
          • 输入默认值:
          • +
          • config: neb.json
          • +
          • kind_section: {"BASIS_SET": "DZVP-MOLOPT-SR-GTH", "POTENTIAL": "GTH-PBE"}
          • +
          • machine: {"code@computer": "cp2k@aiida_test", "nnode": number_of_replica, "queue": "large"}
          • +
          • 其他输出:
          • +
          • 包含始终态及中间态的 trajectory: images_traj.xyz
          • +
          • 势能曲线: potential_energy_curve.png
          • +
          • 过渡态结构: transition_state.xyz
          • +
          +

          FrequencySingleWorkChain

          +
          +

Just vibrational analysis

          +
          +
            +
          • 输入默认值:
          • +
          • config: frequency.json
          • +
          • kind_section: {"BASIS_SET": "DZVP-MOLOPT-SR-GTH", "POTENTIAL": "GTH-PBE"}
          • +
          • machine: {"code@computer": "cp2k@aiida_test", "nnode": 4, "queue": "large"}
          • +
          • 其他输出:
          • +
          • 振动频率的值: frequency.txt
          • +
          +

          NebWorkChain

          +
          +

Geoopt for initial and final state → NEB → Vibrational analysis

          +
          +
            +
          • 输入默认值:
          • +
          • geoopt: {default value in GeooptSingleWorkChain}
          • +
          • neb: {default value in NebSingleWorkChain}
          • +
          • frequency: {default value in FrequencySingleWorkChain}
          • +
          • 其他输出:
          • +
          • all outputs of GeooptSingleWorkChain, NebSingleWorkChain and FrequencySingleWorkChain
          • +
          +

          CP2K input 转 config

          +

使用工具 inp2config 可将 cp2k 输入文件转成 config 所需的形式,<<CP2K_INP>> 为 cp2k 输入文件路径,<<CONFIG>> 为输出的 config 文件路径,后缀为 .json/.yaml:

          +
          inp2config <<CP2K_INP>> <<CONFIG>>
          +# e.g.
          +inp2config input.inp config.yaml
          +
          +

          要根据 cp2k 输入文件一并生成 kind_section 的输入设置, <<KIND_SECTION>> 为输出的 kind_section 路径,后缀为 .json/.yaml:

          +
inp2config <<CP2K_INP>> <<CONFIG>> -k <<KIND_SECTION>>
          +# e.g.
          +inp2config input.inp config.yaml -k kind_section.yaml
          +
          +

          提交任务

          +

运行以下命令即可提交工作流,<<YOUR_INPUT_FILE>> 为 .json 或 .yaml 输入文件的路径,缺省值为当前路径下的 ecint.json

          +
          ecrun <<YOUR_INPUT_FILE>>
          +
          +

          推送

          +

          计算完成的截图如下:

          +

          image-20200804224518088

          +

          计算出错的截图如下:

          +

          image-20200805150759298

          +

          常见错误

          +

          读取结构文件错误

          +
            File "xxx/lib/python3.7/site-packages/ase/io/formats.py", line 599, in read
          +    io = ioformats[format]
          +KeyError: 'coord'
          +
          +

          错误原因: 无法识别扩展名

          +

          解决方案: 注意扩展名,使用正确的扩展名,如 .xyz, .cif, POSCAR 可用 POSCAR.vasp

          +

          读取 xyz 错误

          +
          ase.io.extxyz.XYZError: ase.io.extxyz: Expected xyz header but got: invalid literal for int() with base 10: ...
          +
          +

          错误原因: xyz 文件格式错误,xyz 文件第一行是所有原子个数,第二行是注释行(可空着),第三行开始才是坐标

          +

          解决方案: 如果第一行开始就是坐标的话,需要在前面加上原子个数 (如 180) 的行以及一个空行
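一个合法的 xyz 文件开头大致如下(原子个数与坐标数值仅为示意,此处只展示前几行):

```
180
comment line (可以留空)
O    0.000    0.000    0.000
H    0.757    0.586    0.000
H   -0.757    0.586    0.000
```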

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/MDAnalysis/index.html b/wiki/software_usage/MDAnalysis/index.html new file mode 100644 index 00000000..a47ef66d --- /dev/null +++ b/wiki/software_usage/MDAnalysis/index.html @@ -0,0 +1,3160 @@ + + + + + + + + + + + + + + + + + + + + + + + + + MDAnalysis 软件包的使用 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + + + + + +
          +
          + + + + + + + +

          MDAnalysis 软件包的使用

          +

          我是否需要使用MDAnalysis

          +

          MDAnalysis是一个处理分子动力学模拟轨迹的python软件包。它最为突出的是优点是全面的轨迹io方法,可以处理常见分子动力学模拟的输出轨迹格式。同时,MDAnalysis和的io理念使其更加适合作为大轨迹文件逐帧进行统计分析的工具。该软件内置了很多分子动力学模拟分析方法,所以你可以用它轻松地实现一些例行分析。比如,径向分布函数(RDF), 水密度(number density)和氢键分析等。除过内置方法,用户也可以用MDAnalysis自定义分析方法。

          +

[内置分析](https://docs.mdanalysis.org/stable/documentation_pages/analysis_modules.html)

          +

[如何DIY你自己的分析](https://userguide.mdanalysis.org/stable/examples/analysis/custom_trajectory_analysis.html)

          +

          如果你需要作如下的分析,MDAnalysis就非常适合你:

          +
            +
          • +

            MD统计分析:需要对MD轨迹中每一个单帧进行相同操作,并且需要循环整条轨迹的统计。例如,你需要统计A原子和B原子间的距离

            +
          • +
          • +

            周期性体系的距离计算:高效快速的距离计算库函数,提供[a, b, c, alpha, beta, gamma] cell parameter就可以考虑PBC下的距离。

            +
          • +
          +

          IO 理念

          +

          1. 初始化

          +

          MDAnalysis将轨迹文件,topology信息等抽象为一个Universe class. 例如一条xyz轨迹可以如下初始化,

          +
          from MDAnalysis import Universe
          +xyzfile = "./tio2-water.xyz"
          +u = Universe(xyzfile)
          +u.dimensions = np.array([10, 10, 10, 90, 90, 90])    # assign cell parameter
          +
          +

这样初始化一个u实例其实并不会读取整个文件。在此阶段,用户可以使用u选择部分原子,得到一个atomgroup对象。例如,使用

          +
          ag      = u.atoms        # select all atoms
          +xyz     = ag.positions   # get the coordinates for these atoms
          +element = ag.elements    # the element labels for theses atoms
          +
          +

可以将所有原子选取成一个atomgroup对象。其实MDAnalysis支持一些更fancy的选择语法,类似于VMD的语法,详见MDAnalysis选择语法。但是,根据笔者的经验,这种选择语法对我们研究的体系来说不好用,使用ASE进行这些选择会更加方便。

          +

          2. 轨迹的读取

          +

          在初始化一个Universe后,你可以通过如下方法手动激活轨迹的读取:

          +
          print(u.trajectory)                 # reading the trajectory
          +n_frames = u.trajectory.n_frames    # get the number of frames of your traj
          +u.trajectory.ts.dt = 0.0005         # set dt to 0.0005 ps, otherwise you will get a warning 
          +
          +

          否则,在运行分析之前,MDAnalysis不会自动读取文件。

          +

实际上,就算在上面的读取过程中,MDAnalysis也不会把轨迹读入内存,而是记录每一帧开头在文件中的位置。以我们比较熟悉的xyz文件为例,

          +
          100                                 <- 帧开头
          +TIMESTEP: 0
          +*.*****    *.*****    *.*****
          +*.*****    *.*****    *.*****
          +              ·
          +              ·
          +              ·
          +              ·
          +*.*****    *.*****    *.*****
          +100                                 <- 帧开头
          +TIMESTEP: 2
          +*.*****    *.*****    *.***** 。    
          +
          +

          MDAnalysis会遍历整个文件流,将轨迹开头在文件流中的位置保存在u.trajectory._offsets中。

          +
              |+----------------+----------------+----------------+--···············--+----------------|
          +    |*             ️   *            ️    *            ️    *              ️     *                |
          +    |*             ️   *            ️    *            ️    *              ️     *                |
          +    |*             ️   *            ️    *            ️    *              ️     *                |
          +    |*             ️   *            ️    *            ️    *              ️     *                |
          +    |v                v                v                v                   v                |
          +    ------------------------------------------------------------------------------------------
          +    |0                1                2                3                   N                |
          +array(
          +    [<_offsets(0)>,   <_offsets(1)>,   <_offsets(2)>,   <_offsets(3)>, ..., <_offsets(N)>    ]
          +)  ---> u.trajectory._offsets
          +
          +

有了这些帧开头在文件中的位置,MDAnalysis就可以随机读取任意一帧轨迹的数据。例如,如果你需要读取第70帧的坐标,你就可以

          +
          >>> print(u.trajectory)
          +>>> ag = u.atoms
          +>>> print(u.trajectory.ts)
          +< Timestep 0 with unit cell dimensions None >
          +>>> for ii in range(69):
          +...     u.trajectory.next()
          +>>> print(u.trajectory.ts) 
          +< Timestep 69 with unit cell dimensions None >
+>>> xyz70 = ag.positions
          +>>> u.trajectory.rewind()                       
          +< Timestep 0 with unit cell dimensions None >
          +
          +

可以看到,u.trajectory其实是一个迭代器,你可以通过u.trajectory.next()方法得到下一帧的trajectory。同时,这一帧的坐标也会更新至atomgroup.positions。实际上,在使用MDAnalysis进行分析时你不需要执行这些底层的 next 和 rewind 方法,这些繁琐的步骤已经包装好了。

          +

          实际上可以直接通过索引的方式对第70帧的结果进行提取: +

          >>> print(u.trajectory[70])
          +< Timestep 70 with unit cell dimensions None >
          +

          +

          同时,正因为u.trajectory是一个迭代器,对于其父类ProtoReader,其中定义了__iter__方法来返回一个迭代器对象,同时定义了__next__方法来回应对应的迭代过程。而在__next__方法返回的即为next()函数中的内容。因此如果想对一条轨迹进行切片分析而不是逐帧分析,我们就可以使用切片后的轨迹来进行迭代:

          +
          >>> for ts in u.trajectory[10:10000:20]:    # 从第10帧到10000帧每20步取一帧
          +...     print(ts.frame)
          +10 30 50 70 90 110 130 150 ...
          +
          +

          综上所述,MDAnalysis的轨迹读取方式有如下优点:

          +
            +
          • +

            因为实际读取的是offsets,也就是帧开头的位置,仅仅读了N个整数。不像隔壁ASE,会实例化N个Atoms(包括了整条轨迹的坐标),于是会非常占用内存。MDAnalysis的io方法内存占用小,loop也更快。

            +
          • +
          • +

读取offsets后你可以将Universe对象保存下来(见下文),之后就不需要再遍历整个轨迹文件。这样,假如你又有了新的分析需求,就可以省下遍历文件的时间。

            +
          • +
          +

          保存一个Universe实例

          +

          假如说你现在有一条轨迹文件traj.xyz,你可以通过如下方法将其保存下来,节省二次分析时读取帧开头的时间。

          +
          import pickle
          +from MDAnalysis import Universe
          +
          +>>> xyzfile = "/path/to/traj.xyz"     # !!! Use absolute path. It's more robust.     
          +>>> outuni  = "./traj.uni"
          +>>> u = Universe(xyzfile)
          +>>> print(u.trajectory)               # This will take some time
          +<XYZReader /path/to/traj.xyz with 100 frames of 3240 atoms>
          +>>> with open(outuni, 'wb') as f:
          +...    pickle.dump(u, f)
          +
          +

建议初始化Universe时使用绝对路径,这样你可以将traj.uni复制到任意路径下再对轨迹进行分析。在二次分析时,你可以直接这样读取一个Universe:

          +
          >>> with open(outuni, 'rb') as f:
          +...     v = pickle.load(f) 
          +>>> print(v.trajectory)
          +<XYZReader /path/to/traj.xyz with 100 frames of 3240 atoms>
          +
          +

          笔者的经验是,在我们的<fat>节点上,遍历一个 6G 大小的xyz轨迹文件的帧开头需要 3 min。

          +

          距离计算库函数

          +

MDAnalysis有优秀的底层距离计算函数库MDAnalysis.lib.distances,它由开发者用C语言编写底层方法、再用python包装而成,详见lib.distances API。它长于计算周期性边界条件(PBC)下的原子间距离,并且文档翔实,而且与MDAnalysis的Universe、Analysis等类相互独立:你只需要提供原子坐标、盒子大小和cutoff大小,就可以得到距离、角度等数据。

          +

下面是笔者用该函数库里capped_distance方法包装的一个配位数计算器。

          +
import numpy as np
from MDAnalysis.lib.distances import capped_distance

def count_cn(atoms1, atoms2, cutoff_hi, cutoff_lo=None, cell=None):
+    """count the coordination number (CN) for atoms1 (center atoms), where atoms2 are coordination atoms. This function will calculate CN within range cutoff_lo < d < cutoff_hi, where d is the distance between atoms1 and atoms2. Minimum image convention is applied if cell is not None
          +
          +    Args:
          +        atoms1 (numpy.ndarray): Array with shape (N, 3), where N is the number of center atoms. 'atoms1' are the position of center atoms. 
          +        atoms2 (numpy.ndarray): Array with shape (M, 3), where M is the number of coordination atoms. 'atoms2' are the positions of coordination atoms.
          +        cutoff_hi (float): Max cutoff for calculating coordination number. 
+        cutoff_lo (float or None, optional): Min cutoff for calculating coordination number. This function will calculate CN within range cutoff_lo < d < cutoff_hi, where d is the distance between atoms1 and atoms2. Defaults to None.
          +        cell (numpy.ndarray, optional): Array with shape (6,), Array([a, b, c, alpha, beta, gamma]). Simulation cell parameters. If it's not None, the CN calculation will use minimum image convention. Defaults to None.
          +
          +    Returns:
          +        results: Array with shape (N,), CN of each atoms atoms1
          +    """
          +    pairs, _ = capped_distance(reference=atoms1,
          +                               configuration=atoms2,
          +                               max_cutoff=cutoff_hi,
          +                               min_cutoff=cutoff_lo,
          +                               box=cell)
          +    _minlength = atoms1.shape[0]
          +    results = np.bincount(pairs[:, 0], minlength=_minlength)
          +    return results
          +
          +

          其实隔壁ASE.geometry下也有类似的底层方法,但是笔者认为使用体验确实不如MDAnalysis.lib.distances(计算速度慢,文档少)。

          +

下面对两组原子的距离矩阵计算进行benchmark,每组100个原子,结果是一个100x100的numpy.array,可以发现MDAnalysis.lib.distances会快约15倍。所以当你有上万次这样的计算时,使用ASE的函数库会影响你的效率。

          +
          >>> import numpy as np
          +>>> from ase.geometry import get_distances
          +>>> from MDAnalysis.lib.distances import distance_array
          +                       ·
          +                       ·
          +                       ·
          +>>> print(xyz1.shape, xyz2.shape)
          +(100, 3) (100, 3)
          +>>> print(cell)
          +[[50.5123      0.          0.        ]
          + [ 5.05820546 13.34921731  0.        ]
          + [ 0.          0.         47.8433    ]]
          +>>> print(cellpar)
          +[50.5123 14.2754 47.8433 90.     90.     69.2476]
          +
          +In[1]: %%timeit
          +...    dmatrix_mda = distance_array(xyz1, xyz2, box=cellpar)
          +1.03 ms ± 5.11 µs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)
          +
          +In[2]: %%timeit
          +...    vec, dmatrix_ase = get_distances(xyz1, xyz2, cell=cell, pbc=True)
          +16.6 ms ± 133 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
          +
          +

          注意:如果你在处理非正交的模拟盒子

          +

          我们注意到,在上述距离计算的例子里,我们需要通过cell parameter,[a, b, c, alpha, beta, gamma],给MDAnalysis.lib.distances提供模拟盒子的信息。而实际上,计算距离的时候cell parameter会先通过内部方法转化成3x3的盒子矩阵。如果你的盒子并不是正交的,应该先检查你提供的cell parameter能否正确得到3x3的矩阵,再使用这个函数库,否则你可能会得到错误的结果。这里是他们使用的python转换脚本
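例如,可以先用 MDAnalysis 自带的转换函数检查一下 cell parameter 能否还原出正确的 3x3 盒子矩阵(下面的 cellpar 取自上文 benchmark 中的数值,仅作示意):

```python
import numpy as np
from MDAnalysis.lib.mdamath import triclinic_vectors

# [a, b, c, alpha, beta, gamma] -> 3x3 盒子矩阵
cellpar = np.array([50.5123, 14.2754, 47.8433, 90.0, 90.0, 69.2476], dtype=np.float32)
print(triclinic_vectors(cellpar))   # 与预期的盒子矩阵比对后再用于距离计算
```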

          +

复杂的轨迹分析——简要介绍面向对象编程的方法

          +

在实际的催化自由能计算当中,分析过程中实际涉及的变量往往不止一个简单的分子动力学轨迹文件。如在增强采样的轨迹当中,就会涉及包含偏置势的COLVAR文件,其中的 bias 是获得不同结构在实际相空间中权重的重要参数。

          +

同时,面向过程的编程方法在遇见大量重复过程的时候,往往会面临变量繁多、过程复杂的问题。比如对同样的化学体系在不同的温度下进行分子动力学采样,使用程序对结果进行分析的时候,如果是面向过程编程,要么使用大量的变量来对数据进行提取,要么增加一个变量中数据的维度,因此在遇到复杂的数据处理时很容易理不清过程,找不到问题所在。因此就需要引入面向对象编程的思路,在这种模式下我们解决问题的方法从过程导向变成了对象导向。这个对象可以是任何东西,比如一个文件、一个路径等等,而我们想获得这个对象的信息来解决我们想知道的问题,就需要定义一些方法来处理。

          +

下面我们结合一个例子来大致说明面向对象编程的简要思路:

          +

          问题描述

          +

在一条增强采样的轨迹当中,我们想要提取结构中一种原子(A)周围另一种原子(B)的配位数分布,以及不同配位数下的结构分布特征,并结合增强采样的权重对结果进行更为严谨的分析。首先我们需要引入轨迹文件中的分子结构信息,再计算给定环境的配位数,提取配位环境的原子坐标,并在后续计算局部结构的特征信息(如键角、二面角分布等)。在这些结构当中,我们需要同时提取COLVAR文件中对应的bias信息,用于后续过程中的加权。

          +

在使用MDAnalysis库对这样的文件结构进行分析的时候,得益于强大的工具支持,我们处理复杂问题也就更为迅速。参考本文开始时给出的网页中的相关段落,我们首先引入相应的坐标信息文件以及COLVAR文件来初始化类(在问题的处理过程当中,这一步相当于引入原始信息;当然也可以在对应的方法当中再引入,但如果有各方法都需要用到的统一信息,在__init__方法中引入是最为直观和方便的): +

from MDAnalysis import Universe
from MDAnalysis.analysis.base import AnalysisBase
from MDAnalysis.lib.distances import capped_distance, distance_array, calc_angles
import numpy as np

class A_center_analysis(AnalysisBase):
          +    def __init__(self, lmpfile, COLVAR_file, verbose=True):
          +        u = Universe(lmpfile, topology_format="LAMMPSDUMP")
          +        u.select_atoms("type 1").masses = 114
          +        u.select_atoms("type 2").masses = 514
          +        u.trajectory.ts.dt = 0.0005
          +        self.cell = u.dimensions
          +        self.bias = np.loadtxt(COLVAR_file, skiprows=1, usecols=-1)
          +
          +        assert u.trajectory.n_frames == len(self.bias) 
          +
          +        self.atomgroup = u.select_atoms("all")
          +        super(A_center_analysis, self).__init__(self.atomgroup.universe.trajectory, verbose=verbose)
          +
          +其中super()函数用于调用父类AnalysisBase当中的初始化函数,以便于后续变量的易用性。

          +

          此后我们定义准备好的结果变量,在原例当中只使用了一个self.result变量来包含所有的结果,在后续的调用当中不是很明朗,因此在此处可以多定义一些。 +

              def _prepare(self):
          +        self.cn_2 = np.array([])        
          +        self.angle_2 = np.array([])
          +        self.bias_2 = np.array([])
          +
          +此后,我们可以定义我们对应的分析方法,并将关心的结果放到合适的类属性当中(这一步就是将原始信息进行处理,获得对象的一系列属性的过程) +
              def _append(self, cn, data):
          +        assert data.shape == (cn+1, 3)
          +        if cn == 2:
          +            self.cn_2 = np.append(self.cn_2, data)
          +            self.cn_2 = self.cn_2.reshape(-1,3,3)
          +
          +            BAB_angle = calc_angles(data[1],data[0],data[2], box=self.cell)
          +            self.angle_2 = np.append(self.angle_2, BAB_angle)
          +            self.bias_2 = np.append(self.bias_2, self.bias[self.atomgroup.ts.frame])    # 此处对结构对应的bias信息进行提取
          +        else:
          +            pass
          +
          +    def _single_frame(self):
          +        A_coord = self.atomgroup.select_atoms("type 1").ts[:]
          +        B_coord = self.atomgroup.select_atoms("type 2").ts[:]
          +
+        pairs, _ = capped_distance(reference=A_coord, configuration=B_coord,
          +                                   max_cutoff=2.6,min_cutoff=None,
          +                                   box=self.cell)
          +        _minlength = A_coord.shape[0]
          +        cn_results = np.bincount(pairs[:, 0], minlength=_minlength)
          +
          +        for A_cn in range(2,5):
          +            A_centers = A_coord[cn_results == A_cn]
+            for A_center in A_centers:
          +                A_B_map = distance_array(A_center, B_coord, box=self.cell)[0]
          +                coordinated_B_coord = B_coord[A_B_map.argsort() < A_cn]
          +                self._append(A_cn, np.vstack((A_center, coordinated_B_coord)))
          +
          +其中,_single_frame()方法是后续在跑的过程当中循环迭代的主要程序,其中的self.atomgroup可以看作单一一帧的数据,后面的处理方法也是针对于这一帧的。在父类当中的run()方法会自动帮忙进行迭代,只需要规定好迭代的范围以及步长即可。

          +

          在此处多定义一个_append()方法的目的是为了将添加到结果变量中的程序和主要的分析程序分开,以便于后续的功能拓展(如对配位数等于3的坐标信息进行二次处理就可以直接在_append()函数当中添加功能而不用再动提取信息的相关程序)。

          +

          在后续的调用过程中,我们只需要初始化相关分析类并run就可以了 +

          >>> lmpfile = "300.lammpstrj"
          +>>> a_300 = A_center_analysis(lmpfile, "COLVAR")
          +>>> a_300.run(start=200000, stop=-1, step=100)
          +>>> print(a_300.bias_2.shape, a_300.angle_2.shape, a_300.cn_2.shape)
          +(20472,) (20472,) (20472, 3, 3)
          +
          +从输出的数据尺寸相同可以看出比较符合我们的预期,能够做到结构和对应的偏置势一一对应,在后续的分布处理过程当中,我们可以对其进行直方图概率密度估计(当然更严谨的方法是使用高斯核概率密度估计的方法): +
          >>> plt.hist(a_300.angle_2*180/np.pi, weights=np.exp(beta*a_300.bias_2), bins=100, density=True)
          +
          +同样的,可以将程序再向上封装一层,即相同体系的不同温度下的结果作为一个类的对象来进行分析,这样输出的结果更为清晰。这样的封装可能就需要统一文件命名方式,以及格式化的路径命名方式等等,在实际的工作当中带来的效率提升往往很可观。
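下面给出一个非常简化的封装示意(其中的温度列表与文件命名格式 {T}.lammpstrj、COLVAR_{T} 均为假设,仅用于说明"向上封装一层"的思路):

```python
class MultiTemperatureAnalysis:
    """对同一体系不同温度的轨迹批量运行 A_center_analysis(示意)"""

    def __init__(self, temperatures, traj_fmt="{T}.lammpstrj", colvar_fmt="COLVAR_{T}"):
        self.temperatures = temperatures
        self.traj_fmt = traj_fmt        # 轨迹文件的统一命名格式(假设)
        self.colvar_fmt = colvar_fmt    # COLVAR 文件的统一命名格式(假设)
        self.results = {}

    def run_all(self, **run_kwargs):
        for T in self.temperatures:
            job = A_center_analysis(self.traj_fmt.format(T=T),
                                    self.colvar_fmt.format(T=T))
            job.run(**run_kwargs)
            self.results[T] = job       # 按温度索引保存各自的分析结果
        return self.results

# 用法示意
# multi = MultiTemperatureAnalysis([300, 400, 500])
# multi.run_all(start=200000, stop=-1, step=100)
```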

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/Tips_for_LaTeX/index.html b/wiki/software_usage/Tips_for_LaTeX/index.html new file mode 100644 index 00000000..f300f8a6 --- /dev/null +++ b/wiki/software_usage/Tips_for_LaTeX/index.html @@ -0,0 +1,2795 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Tips for paper writing with LaTeX - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          Tips for paper writing with LaTeX

          +

          cross referece

          +

What should we do if we want to cite the figures or tables in supplementary material? Use the xr package!

          +

          Firstly, put the following into the preamble of the SI:

          +
          %Number supplementary material with an S
          +\renewcommand{\thepage}{S\arabic{page}}
          +\renewcommand{\thesection}{S\arabic{section}} 
          +\renewcommand{\thetable}{S\arabic{table}} 
          +\renewcommand{\thefigure}{S\arabic{figure}}
          +\renewcommand{\theequation}{S\arabic{equation}}
          +
          +

          Then, you can refer to the Figures with Figure Sxxx in your SI file. To cite them in your main text, you can use \ref, by adding the following to the main file:

          +
          %%Crossreferencing to the SI
          +\usepackage{xr}
          +\externaldocument[SI-]{<path to folder in which you have the SI>}
          +
          +

          Now you can reference figures in the SI as

          +
          \ref{SI-<label you gave the figure in the SI>}
          +
          +

          Be cautious: You need to recompile both the paper and the SI after doing so.

          +

          For overleaf users, please refer to here.

          +

          Thanks for the suggestion from Dr. Katharina Doblhoff-Dier in Leiden University.

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/cp2k/cp2k-constrainedmd/index.html b/wiki/software_usage/cp2k/cp2k-constrainedmd/index.html new file mode 100644 index 00000000..b8a57635 --- /dev/null +++ b/wiki/software_usage/cp2k/cp2k-constrainedmd/index.html @@ -0,0 +1,2925 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:ConstrainedMD - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          + +
          +
          + + + +
          +
          + + + + + + + +

          CP2K: Constrained MD

          +

          学习目标

          +
            +
          • CP2K Constrained MD 设置
          • +
          • Potential of Mean Force 方法计算反应自由能
          • +
          +

          学习资料

          + +

          CP2K Constrained MD 设置

          +

          CP2K 提供了将施加 Constraint 过程中的拉格朗日乘子输出的能力,其统计平均即该反应坐标下的Potential of Mean Force (PMF)。 +PMF对反应坐标积分即反应自由能。MLMD 可实现高精度长时间尺度模拟,因而适用于计算化学反应体系的自由能。 +这里我们可结合 DeePMD 势进行 Constrained MD 模拟。
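得到各个反应坐标取值处拉格朗日乘子的统计平均(即平均力)后,可将其沿反应坐标数值积分得到自由能曲线。下面是一个极简的示意片段(数值与符号约定均为假设,请根据实际体系与所用定义核对):

```python
import numpy as np

cv = np.array([1.6, 1.8, 2.0, 2.2, 2.4])                      # 各窗口的反应坐标取值(示意)
mean_force = np.array([0.012, 0.030, 0.018, -0.008, -0.021])  # 各窗口拉格朗日乘子的统计平均(示意)

# 沿反应坐标逐点积分,得到相对自由能曲线(积分符号取决于所用的约定)
dA = np.array([np.trapz(mean_force[:i + 1], cv[:i + 1]) for i in range(len(cv))])
print(dA - dA[0])
```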

          +

          首先定义 Collective Variable (CV),这里我们选择两原子间距离进行控制:

          +
          &FORCE_EVAL
          +   ...
          +   &SUBSYS
          +      ...
          +      &COLVAR
          +         &DISTANCE
          +            ATOMS 225 226
          +         &END DISTANCE
          +      &END COLVAR
          +      ...
          +   &END SUBSYS
          +   ...
          +&END FORCE_EVAL
          +
          +

          其中 225226 即为所需控制键长的原子序号。注意 CP2K 中原子序号从 1 开始。

          +

          然后定义所需控制的键长:

          +
          &MOTION
          +   &CONSTRAINT
          +      &COLLECTIVE
          +         COLVAR 1
          +         INTERMOLECULAR .TRUE.
          +         TARGET 3.4015070391941524 # (1)!
          +      &END COLLECTIVE
          +      &LAGRANGE_MULTIPLIERS ON
          +         COMMON_ITERATION_LEVELS 10000000 # (2)!
          +      &END LAGRANGE_MULTIPLIERS
          +   &END CONSTRAINT
          +   ...
          +&MOTION
          +
          +
            +
1. 设置两原子距离的目标值,注意这里的单位是 a.u.
2. 缺省值为 1;为防止输出过长的日志文件,请设置为一个大于总步数的值
          +

          注意这里 TARGET 的单位是 a.u.,请把常用的单位(如 Å )转换为原子单位。
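例如,上文 TARGET 中的 3.4015070391941524 a.u. 即对应 1.80 Å,可用下方附录中的换算因子自行验证:

```python
d_angstrom = 1.80
d_bohr = d_angstrom * 1.88972613288564   # [Angstrom] -> [Bohr] = [a.u.]
print(d_bohr)                            # ≈ 3.40150704,与上文 TARGET 一致
```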

          +

          附录:物理常数和单位换算

          +
          *** Fundamental physical constants (SI units) ***
          +
          + *** Literature: B. J. Mohr and B. N. Taylor,
          + ***             CODATA recommended values of the fundamental physical
          + ***             constants: 2006, Web Version 5.1
          + ***             http://physics.nist.gov/constants
          +
          + Speed of light in vacuum [m/s]                             2.99792458000000E+08
          + Magnetic constant or permeability of vacuum [N/A**2]       1.25663706143592E-06
          + Electric constant or permittivity of vacuum [F/m]          8.85418781762039E-12
          + Planck constant (h) [J*s]                                  6.62606896000000E-34
          + Planck constant (h-bar) [J*s]                              1.05457162825177E-34
          + Elementary charge [C]                                      1.60217648700000E-19
          + Electron mass [kg]                                         9.10938215000000E-31
          + Electron g factor [ ]                                     -2.00231930436220E+00
          + Proton mass [kg]                                           1.67262163700000E-27
          + Fine-structure constant                                    7.29735253760000E-03
          + Rydberg constant [1/m]                                     1.09737315685270E+07
          + Avogadro constant [1/mol]                                  6.02214179000000E+23
          + Boltzmann constant [J/K]                                   1.38065040000000E-23
          + Atomic mass unit [kg]                                      1.66053878200000E-27
          + Bohr radius [m]                                            5.29177208590000E-11
          +
          + *** Conversion factors ***
          +
          + [u] -> [a.u.]                                              1.82288848426455E+03
          + [Angstrom] -> [Bohr] = [a.u.]                              1.88972613288564E+00
          + [a.u.] = [Bohr] -> [Angstrom]                              5.29177208590000E-01
          + [a.u.] -> [s]                                              2.41888432650478E-17
          + [a.u.] -> [fs]                                             2.41888432650478E-02
          + [a.u.] -> [J]                                              4.35974393937059E-18
          + [a.u.] -> [N]                                              8.23872205491840E-08
          + [a.u.] -> [K]                                              3.15774647902944E+05
          + [a.u.] -> [kJ/mol]                                         2.62549961709828E+03
          + [a.u.] -> [kcal/mol]                                       6.27509468713739E+02
          + [a.u.] -> [Pa]                                             2.94210107994716E+13
          + [a.u.] -> [bar]                                            2.94210107994716E+08
          + [a.u.] -> [atm]                                            2.90362800883016E+08
          + [a.u.] -> [eV]                                             2.72113838565563E+01
          + [a.u.] -> [Hz]                                             6.57968392072181E+15
          + [a.u.] -> [1/cm] (wave numbers)                            2.19474631370540E+05
          + [a.u./Bohr**2] -> [1/cm]                                   5.14048714338585E+03
          +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/cp2k/cp2k-deepmd/index.html b/wiki/software_usage/cp2k/cp2k-deepmd/index.html new file mode 100644 index 00000000..aafcb782 --- /dev/null +++ b/wiki/software_usage/cp2k/cp2k-deepmd/index.html @@ -0,0 +1,2942 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:DeePMD-kit插件 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          CP2K: DeePMD-kit插件

          +

          学习目标

          +
            +
          • 用 CP2K 调用 DeePMD-kit 以进行 MLMD 模拟
          • +
          • Constrained MD 的参数设置
          • +
          +

          学习资料

          +

          CP2K官方手册:

          + +

          适用版本

          +

          以下教程适用于最新版本加入 DeePMD 支持的 CP2K。 +Zeus集群上的 cp2k/2024.2-devdeepmd/2.2.7 (未编译MPI和DFT支持) 可以运行以下教程。

          +

          注意 cp2k/2024.2-dev 的作业脚本写法如下:

          +
          module load gcc/9.3.0
          +module load intel/17.5.239
          +module load cuda/11.8
          +module load mpi/openmpi/4.1.6-gcc
          +module load cp2k/2024.2-dev
          +
          +

          CP2K MD Section 的输入文件

          +

          请先了解CP2K的输入文件语法,指路:

          + +

          由于 MLMD 通常会需要纳秒甚至更长时间尺度的模拟,若未进行适当配置,可能会产生过长的输出文件,因此我们在 GLOBAL 下做以下调整:

          +
          &GLOBAL
          +   PROJECT pmf # (1)!
          +   RUN_TYPE MD
          +   PRINT_LEVEL SILENT # (2)!
          +   WALLTIME 95:00:00 # (3)!
          +&END GLOBAL
          +
          +
            +
1. 根据自己的项目名修改,决定输出文件的名称
2. 如果跑DeePMD, 请务必设置为 SILENT, 防止输出文件过大
3. 推荐稍短于作业的 Walltime 以免截断轨迹
          +

          然后我们配置如下的力场参数:

          +
          &FORCE_EVAL
          +   METHOD FIST
          +   &MM
          +      &FORCEFIELD
          +         &NONBONDED
          +            &DEEPMD
          +               ATOMS C O Pt
          +               ATOMS_DEEPMD_TYPE 0 1 2 # (1)!
          +               POT_FILE_NAME ../graph.000.pb
          +            &END DEEPMD
          +         &END NONBONDED
          +         IGNORE_MISSING_CRITICAL_PARAMS .TRUE. # (2)!
          +      &END FORCEFIELD
          +      &POISSON
          +         &EWALD
          +            EWALD_TYPE none
          +         &END EWALD
          +      &END POISSON
          +   &END MM
          +   ...
          +&END FORCE_EVAL
          +
          +
            +
1. 与元素列表对应,元素在 type_map 中的索引顺序
2. ⚠ 请保留这一行以忽略未定义参数
          +

          通常 MLMD 轨迹文件不需要每步都输出,因而通过以下方式设置输出间隔:

          +
          &MOTION
          +   ...
          +   &MD
          +      ...
          +      &PRINT
          +         &ENERGY
          +            &EACH
          +               MD 100 # (1)!
          +            &END EACH
          +         &END ENERGY
          +      &END PRINT
          +   &END MD
          +   &PRINT
          +      &CELL
          +         &EACH
          +            MD 100 # (2)!
          +         &END EACH
          +      &END CELL
          +      &FORCES
          +         &EACH
          +            MD 100 # (3)!
          +         &END EACH
          +      &END FORCES
          +      &RESTART_HISTORY
          +         &EACH
          +            MD 200000 # (4)!
          +         &END EACH
          +      &END RESTART_HISTORY
          +      &TRAJECTORY
          +         &EACH
          +            MD 100 # (5)!
          +         &END EACH
          +      &END TRAJECTORY
          +   &END PRINT
          +&END MOTION
          +
          +
            +
1. 此处修改ener的输出频率,通常与结构轨迹保持一致
2. 此处修改晶胞参数的输出频率,注意如果晶胞参数不变可不写这一部分
3. 此处修改力轨迹的输出频率,通常与结构轨迹保持一致
4. 此处修改restart文件的输出频率,可根据 Walltime 和总步数进行估计
5. 此处修改结构轨迹的输出频率
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/cp2k/cp2k-dft+u/index.html b/wiki/software_usage/cp2k/cp2k-dft+u/index.html new file mode 100644 index 00000000..3272da51 --- /dev/null +++ b/wiki/software_usage/cp2k/cp2k-dft+u/index.html @@ -0,0 +1,2903 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CP2K:DFT+U - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          CP2K: DFT+U

          +

          学习目标

          +
            +
          • +

            学习资料

            +
          • +
          • +

            DFT+U基本原理

            +
          • +
          • +

            CP2K DFT+U设置

            +
          • +
          • +

            DFT+U 查看电子占据态

            +
          • +
          +

          学习资料

          +

          Dudarev, S. L., Manh, D. N., & Sutton, A. P. (1997). Effect of Mott-Hubbard correlations on the electronic structure and structural stability of uranium dioxide. Philosophical Magazine B: Physics of Condensed Matter; Statistical Mechanics, Electronic, Optical and Magnetic Properties, 75(5), 613–628..

          +

          Dudarev, S. L., Botton, G. A., Savrasov, S. Y., Humphreys, C. J., & Sutton, A. P. (1998). Electron-energy-loss spectra and the structural stability of nickel oxide: An LSDA+U study. Physical Review B, 57(3), 1505–1509. .

          +

          Himmetoglu, B.; Floris, A.; de Gironcoli, S.; Cococcioni, M. Hubbard-Corrected DFT Energy Functionals: The LDA+U Description of Correlated Systems. International Journal of Quantum Chemistry 2013, 114 (1), 14–49..

          +

          DFT+U基本原理

          +

DFT对于电子的描述是偏向离域化的,因此DFT可以较好地描述金属态固体。但对于过渡金属系列的氧化物,例如Fe2O3、CoO、Co3O4、NiO等,过渡金属离子中仍然含有d电子;在固体中,这些d电子较为局域,且局域在过渡金属离子周围。此时单单使用DFT并不能很好地描述这些局域化的电子,我们可以通过加大d电子之间的静电排斥(U)来达到目的。

          +

          CP2K DFT+U设置

          +

          CP2K_INPUT / FORCE_EVAL / DFT

          +

          PLUS_U_METHOD MULLIKEN
          +
Of the available options, MULLIKEN_CHARGES is not recommended; the LOWDIN method appears to be more accurate but could not compute FORCES before CP2K v8.2, where forces became available (see the discussion at https://groups.google.com/g/cp2k/c/BuIOSWDqJTc/m/fSL89NZaAgAJ).

          +

          CP2K_INPUT / FORCE_EVAL / SUBSYS / KIND / DFT_PLUS_U

          +

          对想要+U的元素的对应KIND设置

          +
          &DFT_PLUS_U
          +    # 轨道角动量 0 s轨道 1 p轨道 2 d轨道 3 f轨道
          +    L 2 
          +    # 有效U值,记得写[eV],不然默认为原子单位
          +    U_MINUS_J [eV]  3 
          +&END DFT_PLUS_U
          +
          +

          DFT+U 查看电子占据态

          +

If we want to know how the orbitals of the atoms treated with +U (for example their d orbitals) are occupied, the following settings print the occupations to the output file.

          +

          CP2K_INPUT / FORCE_EVAL / DFT / PRINT / PLUS_U下,

          +
          &PLUS_U MEDIUM
          +    ADD_LAST NUMERIC
          +&END PLUS_U
          +
          +

          你会在output中得到如下输出

          +

            DFT+U occupations of spin 1 for the atoms of atomic kind 3: Fe1
          +
          +    Atom   Shell       d-2     d-1      d0     d+1     d+2   Trace
          +      37       1     1.068   1.088   1.047   1.093   1.069   5.365
          +      37       2     0.008   0.008   0.011   0.007   0.009   0.043
          +           Total     1.076   1.096   1.058   1.100   1.077   5.408
          +
          +      38       1     1.064   1.102   1.047   1.089   1.086   5.388
          +      38       2     0.009   0.007   0.011   0.009   0.008   0.044
          +           Total     1.073   1.109   1.058   1.097   1.094   5.432
          +
          +如果想看不加U的原子的占据情况,那可以给对应原子加一个非常小的U值,比如1e-20。

diff --git a/wiki/software_usage/cp2k/cp2k-e-f/index.html b/wiki/software_usage/cp2k/cp2k-e-f/index.html
[new page: CP2K:能量与力的计算 - XMU Chenggroup Wiki]

          CP2K能量与力的计算

学习目标

• 认识CP2K的基础输入文件
• 认识CP2K输入文件的主要部分
• 运行计算

          计算文件下载

          +

          本教程改编自CP2K原教程,以半导体和OT为例子,更符合组内需求。

          +

          认识CP2K的基础输入文件

          +

          原则上来说CP2K的输入文件只需要三个,一个是输入参数的设置文件input.inp,一个是赝势的参数文件GTH_POTENTIALS,一个是基组的参数文件BASIS_SET

          +

          在集群上,管理员已经把GTH_POTENTIALSBASIS_SET放置在特定文件夹,并且使用特殊的链接方法可以让CP2K程序自动寻找到。因此在后文中涉及到赝势和基组的部分可以直接填写对应的文件名称。

          +

          认识CP2K输入文件的主要部分

          +

          现在让我们打开input.inp

          +

          CP2K的输入文件主要包含两个SECTION.

          +
            +
          • "GLOBAL": 一些让CP2K跑起来的通用选项,比如任务名称,任务类型。
          • +
          • "FORCE_EVAL": 包含了所有跟求解原子的力有关的参数设置,也包括了原子的坐标信息
          • +
          +

          现在我们先看GLOBAL

          +
           &GLOBAL
          +   PROJECT Universality
          +   RUN_TYPE ENERGY_FORCE
+   PRINT_LEVEL LOW
          + &END GLOBAL
          +
          +

          当要计算体系的力和能量时,我们必须在RUN_TYPE中对计算的类型进行指定。比如RUN_TYPE ENERGY_FORCE就是对当前的体系进行力和能量的计算。其他类型的计算可以在CP2K手册里找到。

          +

          PROJECT定义了这个计算的项目名称,通常被用来命名一些输出文件。

          +

          PRINT_LEVEL定义了CP2K output文件里输出信息量的大小。

          +

          现在我们接着看FORCE_EVAL

          +
          METHOD Quickstep
          +
          +

          METHOD Quickstep表明选择了使用密度泛函理论(Density Functional Theory)中的GPW方法进行计算原子受力。

          +
             &SUBSYS
          +     &CELL
          +       ABC [angstrom]    4.593 4.593 2.959
          +     &END CELL
          +     &COORD
          + @include rutile.xyz
          +     &END COORD
          +     &KIND O
          +       BASIS_SET DZVP-MOLOPT-SR-GTH
          +       POTENTIAL GTH-PBE-q6
          +     &END KIND
          +     &KIND Ti
          +       BASIS_SET DZVP-MOLOPT-SR-GTH
          +       POTENTIAL GTH-PBE-q12
          +     &END KIND
          +   &END SUBSYS
          +
          +

          Subsection SUBSYS定义了模拟的晶胞大小(ABC晶胞长度角度等)和原子坐标的初始结构. 有关于@include的用法,请参考这里

          +

          Subsection KIND 定义了计算中出现的元素。对于每一种元素必须要有一个对应的KIND Section. 然后在KIND里面定义它的基组(BASIS_SET)和赝势(POTENTIAL)。

          +

          BASIS_SET和POTENTIAL的名称一定要对应到基组文件里和赝势文件里存在的条目。

          +
           O GTH-PBE-q6 GTH-PBE
          +     2    4
          +      0.24455430    2   -16.66721480     2.48731132
          +     2
          +      0.22095592    1    18.33745811
          +      0.21133247    0
          +
          +

          Subsection CELL 定义了模拟中的晶胞大小。 此例子中,ABC指的是晶胞的边长。如不额外指定角度,默认为90, 90, 90度。[angstrom]是指定长度单位。

          +

          Subsection COORD定义初始的原子坐标。 原子位置的默认格式为

          +
          <ATOM_KIND> X Y Z
          +
          +

          X Y Z 为笛卡尔坐标,单位为Angstrom。如果添加SCALED .TRUE.,便是分数坐标。

          +

          Subsection DFT 控制了所有跟DFT计算有关的细节。该Subsection只有当你把method选择为quickstep时才会起作用。

          +
          BASIS_SET_FILE_NAME  BASIS_SET
          +POTENTIAL_FILE_NAME  GTH_POTENTIALS
          +
          +

          BASIS_SET_FILE_NAMEPOTENTIAL_FILE_NAME定义了基组和赝势的文件路径。由于管理员已经在集群上设置好了路径,用户直接填写这两个文件名即可。

          +
          &QS
          +  EPS_DEFAULT 1.0E-13
          +&END QS
          +
          +

          SubsectionQS包含了一些通用的控制参数。EPS_DEFAULT设置了所有quickstep会用到的默认容忍度。

          +
               &MGRID
          +       CUTOFF 400
          +       REL_CUTOFF 60
          +     &END MGRID
          +
          +

Subsection MGRID defines how the integration grids in Quickstep are used. Quickstep represents the Gaussian functions on a hierarchy of grids: narrow, sharp Gaussians are mapped onto finer grids than wide, smooth ones. In this example the finest grid is set by CUTOFF 400 (Ry) together with REL_CUTOFF 60 (Ry); for how to converge CUTOFF and REL_CUTOFF, see the official CP2K tutorial on this topic.

          +

          Subsection XC

          +
               &XC
          +       &XC_FUNCTIONAL PBE
          +       &END XC_FUNCTIONAL
          +     &END XC
          +
          +

This defines the exchange-correlation functional we want to use; in this example we choose the PBE functional. The functional must be consistent with the choice of basis sets and pseudopotentials.

          +
               &SCF
          +       SCF_GUESS ATOMIC
          +       EPS_SCF 3.0E-7
          +       MAX_SCF 50
          +       &OUTER_SCF
          +         EPS_SCF 3.0E-7
          +         MAX_SCF 10
          +       &END OUTER_SCF
          +       &OT
          +         MINIMIZER DIIS
          +         PRECONDITIONER FULL_SINGLE_INVERSE
          +       &END OT
          +     &END SCF
          +
          +

          SCF_GUESS设置了应该如何生成初始的尝试电子密度。在这个例子中,初始密度是由原子电荷密度重叠生成的。一个好的电子密度可以帮助CP2K快速得到收敛结果。EPS_SCF设置了电子密度差异的容忍度(收敛精度要求)。这个会覆盖EPS_DEFAULT设置的值。MAX_SCF指最多会迭代多少次。

          +

Subsection OUTER_SCF is not discussed in detail here; in general its EPS_SCF should be set to the same value as the EPS_SCF above (the SCF section above acts as the inner SCF). With MAX_SCF 10 in OUTER_SCF, the maximum total number of SCF iterations is inner × outer, i.e. 50 × 10 = 500.

          +

          Subsection OT是利用Orbital Transformation的方法来优化波函数。

          +
          &PRINT
          +  &FORCES ON
          +  &END FORCES
          +&END PRINT
          +
          +

          这个subsection可以在output里打印出体系的原子受力。

          +

          运行计算

          +

          正常运行CP2K的方法为

          +
          mpirun -n 32 cp2k.popt input.inp > output & 
          +
          +

          在集群上,我们使用lsf脚本文件提交,这行命令已经写在了脚本文件里,请直接提交。

          +

          输出结果

          +

          在任务结束后,你会得到如下文件

          +
            +
• output
• Universality-RESTART.wfn
• Universality-RESTART.wfn.bak-1
• Universality-RESTART.wfn.bak-2
• Universality-RESTART.wfn.bak-3
          +

The file output contains the main output of the calculation. Universality-RESTART.wfn is the wavefunction obtained at the end of the run, and Universality-RESTART.wfn.bak-<n> stores the wavefunction from n wavefunction updates earlier; in this example Universality-RESTART.wfn.bak-1 is the wavefunction of the last SCF step.

          +
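For quick post-processing it is often enough to grep the converged total energy out of the output file. A minimal sketch, assuming the output file is simply called `output`:

```python
# Extract every "ENERGY| Total FORCE_EVAL" line from a CP2K output and keep
# the last (final) value, which is given in Hartree.
with open("output") as f:
    energies = [float(line.split()[-1])
                for line in f if "ENERGY| Total FORCE_EVAL" in line]

print(f"final total energy: {energies[-1]:.10f} Ha")
```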

When you want to restart a calculation from an existing wavefunction, change the guess to SCF_GUESS RESTART.

          +

          他会自动从<PROJECT_NAME>-RESTART.wfn文件开始重启计算。

          +

          我们现在详细看一下output文件里的部分

          +
           SCF WAVEFUNCTION OPTIMIZATION
          +
          +  ----------------------------------- OT ---------------------------------------
          +  Minimizer      : DIIS                : direct inversion
          +                                         in the iterative subspace
          +                                         using   7 DIIS vectors
          +                                         safer DIIS on
          +  Preconditioner : FULL_SINGLE_INVERSE : inversion of
          +                                         H + eS - 2*(Sc)(c^T*H*c+const)(Sc)^T
          +  Precond_solver : DEFAULT
          +  stepsize       :    0.08000000                  energy_gap     :    0.08000000
          +  eps_taylor     :   0.10000E-15                  max_taylor     :             4
          +  ----------------------------------- OT ---------------------------------------
          +
          +  Step     Update method      Time    Convergence         Total energy    Change
          +  ------------------------------------------------------------------------------
          +     1 OT DIIS     0.80E-01    0.5     0.15753643      -176.9839582002 -1.77E+02
          +     2 OT DIIS     0.80E-01    0.8     0.09878604      -178.9306891883 -1.95E+00
          +     3 OT DIIS     0.80E-01    0.8     0.04863529      -179.6564913758 -7.26E-01
          +     4 OT DIIS     0.80E-01    0.8     0.03582212      -179.9871432342 -3.31E-01
          +     5 OT DIIS     0.80E-01    0.8     0.02520552      -180.2247770848 -2.38E-01
          +     6 OT DIIS     0.80E-01    0.8     0.01876959      -180.4037691134 -1.79E-01
          +     7 OT DIIS     0.80E-01    0.8     0.01356216      -180.5257615047 -1.22E-01
          +     8 OT DIIS     0.80E-01    0.8     0.01016476      -180.5867232155 -6.10E-02
          +     9 OT DIIS     0.80E-01    0.8     0.00712662      -180.6348174041 -4.81E-02
          +    10 OT DIIS     0.80E-01    0.8     0.00528671      -180.6543176954 -1.95E-02
          +    11 OT DIIS     0.80E-01    0.8     0.00401555      -180.6682811925 -1.40E-02
          +    12 OT DIIS     0.80E-01    0.8     0.00331228      -180.6769383021 -8.66E-03
          +    13 OT DIIS     0.80E-01    0.8     0.00273633      -180.6824801501 -5.54E-03
          +    14 OT DIIS     0.80E-01    0.8     0.00227705      -180.6858569326 -3.38E-03
          +    15 OT DIIS     0.80E-01    0.8     0.00189452      -180.6891762522 -3.32E-03
          +    16 OT DIIS     0.80E-01    0.8     0.00163117      -180.6913433711 -2.17E-03
          +    17 OT DIIS     0.80E-01    0.8     0.00137647      -180.6931734207 -1.83E-03
          +    18 OT DIIS     0.80E-01    0.8     0.00119961      -180.6942368984 -1.06E-03
          +    19 OT DIIS     0.80E-01    0.9     0.00100873      -180.6952066209 -9.70E-04
          +    20 OT DIIS     0.80E-01    0.8     0.00084472      -180.6960712607 -8.65E-04
          +    21 OT DIIS     0.80E-01    0.9     0.00073811      -180.6966143834 -5.43E-04
          +    22 OT DIIS     0.80E-01    0.8     0.00062100      -180.6969845494 -3.70E-04
          +    23 OT DIIS     0.80E-01    0.8     0.00052079      -180.6972986282 -3.14E-04
          +    24 OT DIIS     0.80E-01    0.8     0.00044814      -180.6975096788 -2.11E-04
          +    25 OT DIIS     0.80E-01    0.8     0.00038815      -180.6976499085 -1.40E-04
          +    26 OT DIIS     0.80E-01    0.8     0.00034010      -180.6977592686 -1.09E-04
          +    27 OT DIIS     0.80E-01    0.8     0.00029429      -180.6978276824 -6.84E-05
          +    28 OT DIIS     0.80E-01    0.8     0.00025218      -180.6979007896 -7.31E-05
          +    29 OT DIIS     0.80E-01    0.8     0.00022927      -180.6979456455 -4.49E-05
          +    30 OT DIIS     0.80E-01    0.8     0.00020201      -180.6979830729 -3.74E-05
          +    31 OT DIIS     0.80E-01    0.8     0.00017896      -180.6980145219 -3.14E-05
          +    32 OT DIIS     0.80E-01    0.8     0.00016066      -180.6980416001 -2.71E-05
          +    33 OT DIIS     0.80E-01    0.8     0.00014606      -180.6980603801 -1.88E-05
          +    34 OT DIIS     0.80E-01    0.8     0.00012970      -180.6980811127 -2.07E-05
          +    35 OT DIIS     0.80E-01    0.8     0.00011431      -180.6980956614 -1.45E-05
          +    36 OT DIIS     0.80E-01    0.8     0.00009560      -180.6981114298 -1.58E-05
          +    37 OT DIIS     0.80E-01    0.8     0.00008482      -180.6981210277 -9.60E-06
          +    38 OT DIIS     0.80E-01    0.8     0.00007281      -180.6981278770 -6.85E-06
          +    39 OT DIIS     0.80E-01    0.8     0.00006188      -180.6981329264 -5.05E-06
          +    40 OT DIIS     0.80E-01    0.8     0.00005294      -180.6981368983 -3.97E-06
          +    41 OT DIIS     0.80E-01    0.8     0.00004688      -180.6981391197 -2.22E-06
          +    42 OT DIIS     0.80E-01    0.8     0.00004055      -180.6981410282 -1.91E-06
          +    43 OT DIIS     0.80E-01    0.8     0.00003559      -180.6981421977 -1.17E-06
          +    44 OT DIIS     0.80E-01    0.8     0.00003040      -180.6981432648 -1.07E-06
          +    45 OT DIIS     0.80E-01    0.8     0.00002734      -180.6981439881 -7.23E-07
          +    46 OT DIIS     0.80E-01    0.8     0.00002451      -180.6981445033 -5.15E-07
          +    47 OT DIIS     0.80E-01    0.8     0.00002178      -180.6981449169 -4.14E-07
          +    48 OT DIIS     0.80E-01    0.8     0.00001953      -180.6981452985 -3.82E-07
          +    49 OT DIIS     0.80E-01    0.8     0.00001795      -180.6981455598 -2.61E-07
          +    50 OT DIIS     0.80E-01    0.8     0.00001622      -180.6981458123 -2.52E-07
          +
          +  Leaving inner SCF loop after reaching    50 steps.
          +
          +
          +  Electronic density on regular grids:        -47.9999999967        0.0000000033
          +  Core density on regular grids:               48.0000000000       -0.0000000000
          +  Total charge density on r-space grids:        0.0000000033
          +  Total charge density g-space grids:           0.0000000033
          +
          +  Overlap energy of the core charge distribution:               0.00000000000007
          +  Self energy of the core charge distribution:               -379.90298629198736
          +  Core Hamiltonian energy:                                    102.12467948924306
          +  Hartree energy:                                             125.99881317904760
          +  Exchange-correlation energy:                                -28.91865218857406
          +
          +  Total energy:                                              -180.69814581227070
          +
          +  outer SCF iter =    1 RMS gradient =   0.16E-04 energy =       -180.6981458123
          +
          +

The output above shows a calculation with the OT DIIS method. So far 50 SCF iterations have been performed, and the convergence threshold has not yet been reached. The last line, outer SCF iter = 1, means that one outer SCF loop contains one complete inner SCF loop.

          +
           ATOMIC FORCES in [a.u.]
          +
          + # Atom   Kind   Element          X              Y              Z
          +      1      1      Ti          0.00000026    -0.00000079     0.00000063
          +      2      1      Ti          0.00000026    -0.00000027     0.00000004
          +      3      2      O          -0.07002277     0.07002168    -0.00000018
          +      4      2      O           0.07002184    -0.07002056     0.00000006
          +      5      2      O           0.07002270     0.07002086    -0.00000083
          +      6      2      O          -0.07002229    -0.07002093     0.00000028
          + SUM OF ATOMIC FORCES           0.00000000    -0.00000000     0.00000000     0.00000000
          +
          +

          以上显示了原子受力的情况,我们发现有些原子的受力不接近于0,说明这个系统还没处在最佳的结构位置。

diff --git a/wiki/software_usage/cp2k/cp2k-geoopt/index.html b/wiki/software_usage/cp2k/cp2k-geoopt/index.html
[new page: CP2K:结构和晶胞优化 - XMU Chenggroup Wiki]

          CP2K: 结构和晶胞优化

学习目标

• 学习资料
• 基本原理
• CP2K 结构优化设置
• CP2K 结构优化问题

          学习资料

          +

          Slides

          +

          基本原理

          +

          建设中, 参考官网

          +

          CP2K 结构优化设置

          +

          结构优化

          +
          &GLOBAL
          +RUN_TYPE GEO_OPT
          +&END GLOBAL
          +
          +

          晶胞优化

          +
          &GLOBAL
          +RUN_TYPE CELL_OPT
          +&END GLOBAL
          +
          +

          同时,在MOTION下设置OPTIMIZER和一些CONSTRAIN

          +
          &MOTION
          +  &CELL_OPT
          +    OPTIMIZER LBFGS 
          +    KEEP_ANGLES
          +    TYPE DIRECT_CELL_OPT
          +  &END CELL_OPT
          +&END MOTION
          +
          +

          LBFGS是对大体系常用的,BFGS针对小体系,更为Robust的是CG。

          +

          KEEP_ANGLES是指保持晶胞的角度不变。

          +

          TYPE默认是DIRECT_CELL_OPT,即同时优化晶胞和里面的位置,是最快的优化方法。

          +

          CP2K 结构优化问题

          +

Cell optimization requires the stress tensor. The ANALYTICAL way of computing it is usually sufficient and is also the fastest, but some functionals do not implement the corresponding analytical stress tensor, in which case the NUMERICAL method can be used; SCAN is one example. Since CP2K v8.2 a meta-GGA (including SCAN) stress tensor has been added, but only the kinetic-energy-density part is implemented, and optimizations with it can fail for reasons that are not yet clear.

diff --git a/wiki/software_usage/cp2k/cp2k-hf/index.html b/wiki/software_usage/cp2k/cp2k-hf/index.html
[new page: CP2K:杂化泛函 - XMU Chenggroup Wiki]

          CP2K: 杂化泛函

学习目标

• 学习资料
• 杂化泛函基本原理
• 杂化泛函辅助基组
• CP2K杂化泛函设置
• 参数的测试和收敛
• 一些元素推荐的ADMM

          学习资料

          +

          Slides: UCL DFT with Hybrid Functionals

          +

          Slides: Hybrid Functional and ADMM

          +

          官方练习

          +

          杂化泛函基本原理

          +

          建设中

          +

          杂化泛函辅助基组

          +

          建设中

          +

          CP2K杂化泛函设置

          +
              # BASIS Purification
          +    BASIS_SET_FILE_NAME BASIS_ADMM_MOLOPT
          +    BASIS_SET_FILE_NAME BASIS_ADMM
          +    &AUXILIARY_DENSITY_MATRIX_METHOD
          +      METHOD BASIS_PROJECTION
          +      ADMM_PURIFICATION_METHOD MO_DIAG
          +    &END AUXILIARY_DENSITY_MATRIX_METHOD
          +    # KIND 设置例子
          +    &KIND O
          +      BASIS_SET DZVP-MOLOPT-SR-GTH
          +      POTENTIAL GTH-PBE-q6
          +      BASIS_SET AUX_FIT cFIT3
          +    &END KIND
          +
          +
          # HSE06泛函部分
          +      &XC_FUNCTIONAL
          +        &PBE
          +          SCALE_X 0.0
          +          SCALE_C 1.0
          +        &END PBE
          +        &XWPBE
          +          SCALE_X -0.25
          +          SCALE_X0 1.0
          +          OMEGA 0.11
          +        &END XWPBE
          +      &END XC_FUNCTIONAL
          +      &HF
          +        &SCREENING
          +          EPS_SCHWARZ 1.0E-6
          +          SCREEN_ON_INITIAL_P FALSE
          +        &END SCREENING
          +        &INTERACTION_POTENTIAL
          +          POTENTIAL_TYPE SHORTRANGE
          +          OMEGA 0.11
          +          T_C_G_DATA t_c_g.dat
          +        &END INTERACTION_POTENTIAL
          +        &MEMORY
          +          MAX_MEMORY 10000
          +          EPS_STORAGE_SCALING 0.1
          +        &END MEMORY
          +        # this depends on user
          +        &PERIODIC
          +          NUMBER_OF_SHELLS 0
          +        &END PERIODIC
          +        FRACTION 0.25
          +      &END HF
          +
          +

          参数的测试和收敛

          +

          RESTART波函数

          +

          务必使用相同原子结构的PBE泛函优化后的波函数进行重启,可以省下大量机时,除非你很有钱。

          +

Before testing the convergence of these parameters, be sure to set the number of SCF steps to 1: only the convergence of the computed value with respect to the parameter matters here, not the SCF convergence itself.

          +
          &SCF
          +      EPS_SCF 3.0E-7
          +      MAX_SCF 1
          +&END SCF
          +
          +

          EPS_PGF_ORB的收敛

          +

          在初次计算中,用户会遇到如下Warning

          +
           *** WARNING in hfx_energy_potential.F:605 :: The Kohn Sham matrix is not  ***
          + *** 100% occupied. This may result in incorrect Hartree-Fock results. Try ***
          + *** to decrease EPS_PGF_ORB and EPS_FILTER_MATRIX in the QS section. For  ***
          + *** more information see FAQ: https://www.cp2k.org/faq:hfx_eps_warning    ***
          +
          +

This happens because CP2K screens out, based on certain threshold values, the four-center two-electron integrals that do not need to be computed, which greatly reduces the cost of building the Hartree-Fock exchange matrix. If too many integrals are screened out, the HF result becomes inaccurate, which is what this warning is about.

          +

          控制这个筛选标准的有EPS_PGF_ORB这个参数。越小的话筛选的积分越少,H-F结果也就越真实。通常情况下这个Warning是不会消失的,即使用户调到一个非常小的量级,例如1.0E-20。

          +

We can pick a suitable value by comparing how the energy converges with respect to EPS_PGF_ORB.

EPS_PGF_ORB    能量 (a.u.)                与上一个的误差
1.0E-13        -8402.872803898026177
1.0E-15        -8402.872803587537419      -3.1E-07
1.0E-17        -8402.872803510470476      -7.7E-08

          一般的SCF收敛限在3.0E-7,能量基本也在这个量级以下,因此能量收敛需要达到1.0E-7以下最好。所以我们选择1.0E-15作为EPS_PGF_ORB的值。

          +

          ADMM基组的收敛

          +

Converging the ADMM auxiliary basis is analogous to converging EPS_PGF_ORB. For a given element CP2K provides several auxiliary basis sets, e.g. cFIT10, cFIT11, cFIT12, ..., and the test simply increases the ADMM basis step by step. The energy error must be normalized per atom; keeping it at the level of 1 meV/atom is usually good enough.

          +

          以SrTiO3体系为例

ADMM_BASIS For Ti    能量 (a.u.)                与上一个的误差 (meV/atom)    原子数
cFIT10               -9062.291421862293646                                 368
cFIT11               -9062.255359275355659      -2.6                       368
cFIT12               -9062.260056088771307       0.3                       368
cFIT13               -9062.210205928951837      -3.6                       368

这个时候选择 cFIT10 或者 cFIT11 即可

          +

          一些元素推荐的ADMM

          +

          笔者亲测,通常与体系关系不大。

元素    ADMM基组
O       cFIT3
H       cFIT3
Ti      cFIT11
Cd      cFIT10
Sn      cFIT9
Pb      cFIT9
Sr      cFIT9
Pt      cFIT10
Mg      cpFIT3
Ba      cFIT9
Na      cFIT3
Ta      cFIT10

          其他Warning处理

          +

其他的 Warning 在官方文档中有提及:杂化泛函计算 Warning。

          +

Cutoff Radii Warning

*** WARNING in hfx_types.F:1287 :: Periodic Hartree Fock calculation      ***
*** requested with use of a truncated or shortrange potential. The cutoff ***
*** radius is larger than half the minimal cell dimension. This may lead  ***
*** to unphysical total energies. Reduce the cutoff radius in order to    ***
*** avoid possible problems.                                              ***

          +

This warning appears because, under periodic boundary conditions, CP2K evaluates only the short-range part of the HF exchange, while the long-range part is supplied by DFT exchange; a length for the short-range part, i.e. the cutoff radius, is therefore required. There are three ways to deal with this warning.

          +
            +
• 如果使用HSE06,请忽视,因为这个cutoff由omega确定。
• 减少CUTOFF_RADIUS,如果你用的是PBE0-TC
• 用更大周期边界盒子
          +

          参考

diff --git a/wiki/software_usage/cp2k/cp2k-neb/index.html b/wiki/software_usage/cp2k/cp2k-neb/index.html
[new page: CP2K:NEB - XMU Chenggroup Wiki]

          CP2K: Nudged Elastic Band

学习目标

• 学习资料
• NEB 基本原理
• CP2K NEB设置

          学习资料

• Henkelman, G. & Jónsson, H. Improved tangent estimate in the nudged elastic band method for finding minimum energy paths and saddle points. J. Chem. Phys. 113, 9978–9985 (2000).
• Henkelman, G., Uberuaga, B. P. & Jónsson, H. A climbing image nudged elastic band method for finding saddle points and minimum energy paths. J. Chem. Phys. 113, 9901–9904 (2000).

          NEB 基本原理

          +

          当确定反应物和产物结构后可以找到从反应物到产物的能量最小路径(Minimum Energy Path, MEP). 处于能量最小路径上的任意一个结构中,作用在原子上并垂直于MEP的力分量都为0. NEB是一种寻找MEP的方法。首先NEB在反应物结构和产物结构之间建立一套结构(称为image或者replica)。 这些相邻的image之间用弹簧力连接(spring force),形成一条类橡皮筋(Elastic Band)的构造。其中每个image受到垂直于MEP的真正的力同时受到平行于MEP的弹簧力,通过最小化这个Band的力,即可得到MEP。

          +

          CP2K NEB设置

          +

          首先把RUN_TYPE设置为BAND +

          &GLOBAL
          +    RUN_TYPE BAND
          +&END GLOBAL
          +

          +

          其次是MOTION部分 +

          &MOTION
          +    &BAND
+        # 提交任务时 总cpu数目为NPROC_REP*NUMBER_OF_REPLICA
+        NPROC_REP 24 #一个image要用多少cpu来算
          +        NUMBER_OF_REPLICA 8 #创造多少image, 这里是包含初始结构和最终结构的数目。 
          +        BAND_TYPE CI-NEB #使用Climbing Image NEB方法,具体内容参照文献SEC. IV
+        K_SPRING 0.05 # 弹簧振子的强度,理论上弹簧振子强度不会影响优化的结果
          +        &CONVERGENCE_CONTROL # 跟结构优化类似
          +            MAX_FORCE 0.0030
          +            RMS_FORCE 0.0050
          +            MAX_DR 0.002
          +            RMS_DR 0.005
          +        &END CONVERGENCE_CONTROL
          +        ROTATE_FRAMES F
          +        ALIGN_FRAMES F
          +        &CI_NEB 
          +            NSTEPS_IT  2 # 在变成CI之前,需要跑正常NEB, 这里设置跑正常NEB的回合数目
          +        &END CI_NEB
          +        &OPTIMIZE_BAND
          +            OPT_TYPE DIIS
          +            &DIIS
          +                NO_LS T
          +                MAX_STEPS 1000
          +                N_DIIS 3
          +            &END DIIS
          +        &END OPTIMIZE_BAND
          +        &REPLICA #初始结构的坐标
          +            &COORD
          +            @include init.xyz # 第一种方法,只包含坐标xyz,不需要元素
          +            &END COORD
          +        &END REPLICA
          +        &REPLICA # 最终结构的坐标
          +            &COORD
          +            @include fin.xyz # 只包含坐标xyz,不需要元素,
          +            &END COORD
          +        &END REPLICA
          +        &REPLICA # 最终结构的坐标
          +            COORD_FILE_NAME ./tr7.xyz # 第二种方法,这个是正常的xyz文件
          +        &END REPLICA
          +        &PROGRAM_RUN_INFO # 看REPLICA间的距离
          +            INITIAL_CONFIGURATION_INFO
          +        &END
          +    &END BAND
          +&END MOTION
          +
Note that if you define only two REPLICA sections, i.e. fewer than NUMBER_OF_REPLICA, CP2K will generate the remaining replica structures itself; if the number of REPLICA sections equals NUMBER_OF_REPLICA, CP2K will not generate any replica structures automatically.

          +

          重新启动NEB

          +

          在cp2k input文件里加入EXT_RESTART section。并且将xxx-1.restart改成你的真实的restart文件。 +

          &EXT_RESTART
          +  RESTART_BAND
          +  RESTART_FILE_NAME   xxx-1.restart
          +&END
          +
          +同时,我们可以利用之前的波函数RESTART,只需要在FORCE_EVAL/DFT/SCF下设置 +
          SCF_GUESS RESTART
          +
          +即可。 +假设你的PROJECT NAME 是 water,见GLOBAL/PROJECT,同时你的NUMBER_OF_REPLICA为8, 那么你将会生成如下文件 +
          water-BAND01-RESTART.wfn
          +water-BAND02-RESTART.wfn
          +water-BAND03-RESTART.wfn
          +water-BAND04-RESTART.wfn
          +water-BAND05-RESTART.wfn
          +water-BAND06-RESTART.wfn
          +water-BAND07-RESTART.wfn
          +water-BAND08-RESTART.wfn
          +
          +其中BAND后面的数字代表REPLICA的序数。在重新启动时,则会自动读取这些波函数。如果波函数是通过其他方法生成或者提前准备好的,也可以通过更改波函数的名称使其符合上述规则来启动NEB。
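If the wavefunctions were prepared elsewhere, the renaming can be scripted. A minimal sketch, assuming the pre-computed files are called `wfn/replica-1.wfn` ... `wfn/replica-8.wfn` (these names are an assumption) and the PROJECT is `water`:

```python
import shutil

# Copy pre-computed wavefunctions to the <PROJECT>-BAND<nn>-RESTART.wfn names
# that CP2K looks for when restarting a BAND run with SCF_GUESS RESTART.
project = "water"        # must match GLOBAL / PROJECT
n_replica = 8            # must match NUMBER_OF_REPLICA

for i in range(1, n_replica + 1):
    shutil.copy(f"wfn/replica-{i}.wfn", f"{project}-BAND{i:02d}-RESTART.wfn")
```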

diff --git a/wiki/software_usage/cp2k/cp2k-reftraj/index.html b/wiki/software_usage/cp2k/cp2k-reftraj/index.html
[new page: CP2K:REFTRAJ根据已有MD轨迹计算 - XMU Chenggroup Wiki]

          根据已有轨迹运行CP2K分子动力学计算

学习目标

• CP2K 分子动力学计算的输入文件
• 如何根据已有的CP2K轨迹进行计算

          学习资料

          +

          CP2K官方手册:Section MD

          +

          CP2K官方练习:AIMD of bulk liquid water

          +

          CP2K MD Section 的输入文件

          +

          请先了解CP2K的输入文件语法,指路:CP2K:能量与力的计算

          +

A CP2K input file is built from different SECTIONs, and each SECTION may in turn contain SUBSECTIONs and KEYWORDs; all of these appear as upper-case English words at their respective levels. The syntax of an input file is:

          +
          &SECTION
+  &SUBSECTION
          +  ...
          +  &END SUBSECTION
          +  KEYWORD1 <value>
          +  KEYWORD2 <value>
          +  ...
          +&END SECTION
          +
          +

          而如果希望用CP2K进行MD计算,需要根据体系的需要,配置CP2K:能量与力的计算中介绍的 GLOBALFORCE_EVAL 这两部分,并且将 SECTION GLOBAL 下的关键字 RUN_TYPE 改为MD

          +
          &GLOBAL
          +  ...
          +  RUN_TYPE MD <---- 运行MD任务请将 RUN_TYPE 改为 MD
          +&END GLOBAL
          +
          +

          此外,还需要在配置文件 input.inp 中写入 :

          +
            +
          • MOTION: 包含如何演变原子核(例如MD),控制输出什么数据
          • +
          +
          +

          SECTION in input.inp. This section defines a set of tool connected with the motion of the nuclei.

          +
          +
            +
          • MD: 包含了一些分子动力学模拟的基本参数,如选择什么系综(ensemble)、温度、步长和总步数等。
          • +
          +
          +

          SUBSECTION in MOTION. This section defines the whole set of parameters needed perform an MD run.

          +
          +

          一个简单的 MOTION 部分的例子

          +
          &MOTION 
          +  &MD
          +    ENSEMBLE NVE
          +    STEPS 10
          +    TIMESTEP 0.5
          +    TEMPERATURE 300.0
          +  &END MD
          +  &PRINT
          +    &CELL
          +      &EACH
          +        MD 1
          +      &END EACH
          +    &END CELL
          +    &FORCES
          +      &EACH
          +        MD 1
          +      &END EACH
          +    &END FORCES
          +    &TRAJECTORY
          +      &EACH
          +        MD 1
          +      &END EACH
          +    &END TRAJECTORY
          +    &VELOCITIES
          +      &EACH
          +        MD 1
          +      &END EACH
          +    &END VELOCITIES
          +  &END PRINT
          +&END MOTION
          +
          +

          以上例子非常直接,一行一行读下来字面意思就是MD的参数设置。值得注意的是在 PRINT 部分中的 &EACH MD 1 &END EACH 控制的是MD打印输出的频率,指的是每一步MD模拟对应一个输出,设置成3就是每三步输出一次。EACH中MD输出频率缺省值是1

          +
          +

          Warning

          +

          为了方便分析,CELL 的输出频率应该和 TRAJECTORY 的保持一致

          +
          +

          根据已有轨迹进行MD计算

          +

          有的时候,我们需要对已有的一条MD轨迹进行计算:

          +
            +
• 对机器学习势函数生成的MD轨迹进行精确计算
• 更改FORCE_EVAL 部分的参数,提升已有轨迹能量和力的计算的精度
• ……
          +

          我们可以在CP2K输入文件的 MD SECTION 下加入REFTRAJ SECTION来实现对已有轨迹的计算。

          +

          以TiO2为例子,需要在提交任务的目录下准备:

          +
          tree
          +.
          +├── cp2k.lsf                    <---- cp2k 任务提交脚本(/data/share/base/scripts/cp2k.lsf) 
          +├── input.inp               <---- cp2k 输入文件
          +├── reftraj.xyz       <---- 已有的轨迹
          +└── rutile.xyz          <---- 可以是轨迹中的一帧结构
          +
          +0 directories, 4 files
          +
          +

          其中 rutile.xyz 对应的是输入文件input.inpSUBSYS 中指定盒子中的原子坐标文件,可以直接选用已有轨迹中的某一帧数据。

          +

          针对这一任务,在 MOTION 部分写入

          +
          &MOTION
          +  &MD
          +    &REFTRAJ
          +      TRAJ_FILE_NAME reftraj.xyz
          +      EVAL_ENERGY_FORCES .TRUE.
          +      EVAL_FORCES .TRUE.
          +      FIRST_SNAPSHOT 1
          +      LAST_SNAPSHOT 50
          +      STRIDE 1
          +    &END REFTRAJ
          +    ...
          +  &END MD
          +  &PRINT
          +    ...
          +  &END PRINT
          +
          +

          其中 TRAJ_FILE_NAME 关键字指定了当前文件夹下的 reftraj.xyz 做为需要计算的轨迹。

          +

          值得注意的是,CP2K输入文件中给关键字赋逻辑值时用 .TRUE..FALSE.,而 EVAL_ENERGY_FORCES 和 EVAL_FORCES 的缺省值是 .FALSE.,因此如果要计算能量和力必须要明确指定这两个关键字。

          +

          FIRST_SNAPSHOT , LAST_SNAPSHOTSTRIDE这一组关键词指定了如何对 reftraj.xyz 的结构进行计算。指的是从已有轨迹的第 FIRST_SNAPSHOT 帧到第 LAST_SNAPSHOT 帧结构,每 STRIDE 帧结构计算一次。而对于本例子,reftraj.xyz中共有50帧结构,因此以上配置文件表明从已有轨迹的第 1 帧到第 50 帧结构,每 1 帧结构计算一次,所以这样设置会计算已有轨迹中的每一个结构的能量和力。

diff --git a/wiki/software_usage/cp2k/cp2k-scan/index.html b/wiki/software_usage/cp2k/cp2k-scan/index.html
[new page: CP2K:SCAN泛函 - XMU Chenggroup Wiki]

          CP2K: SCAN泛函

学习目标

• 学习资料
• SCAN基本原理
• CP2K SCAN泛函设置
• CP2K SCAN泛函的问题

          学习资料

          +

          Sun, J., Remsing, R. C., Zhang, Y., Sun, Z., Ruzsinszky, A., Peng, H., … Perdew, J. P. (2016). Accurate first-principles structures and energies of diversely bonded systems from an efficient density functional. Nature Chemistry, 8(9), 831–836. https://doi.org/10.1038/nchem.2535

          +

          Sun, J., Remsing, R. C., Zhang, Y., Sun, Z., Ruzsinszky, A., Peng, H., … Perdew, J. P. (2015). SCAN: An Efficient Density Functional Yielding Accurate Structures and Energies of Diversely-Bonded Materials, 1–19. Retrieved from http://arxiv.org/abs/1511.01089

          +

          SCAN基本原理

          +

SCAN belongs to the meta-GGA class of functionals, which go beyond the GGA by additionally using the kinetic-energy density of the orbitals. In recent years SCAN has been used more and more in simulations of water, and it also gives fairly accurate band structures for bulk semiconductors.

          +

          CP2K SCAN泛函设置

          +

The SCAN functional is not implemented natively in CP2K; it is taken from the libxc library. Only CP2K 4.1 and later, built against a libxc version that contains SCAN, can use it.

          +
          &XC_FUNCTIONAL
          +     &LIBXC
          +        FUNCTIONAL MGGA_X_SCAN
          +     &END LIBXC
          +     &LIBXC
          +        FUNCTIONAL MGGA_C_SCAN
          +     &END LIBXC
          +&END XC_FUNCTIONAL
          +
          +

          SCAN泛函有一套自己对应的赝势,放在Hutter的github库中。

          +

          具体可以参考以下谷歌论坛链接

          +

          https://github.com/juerghutter/GTH/blob/master/SCAN/POTENTIAL

          +

          主集群上我已经放置了一份SCAN赝势。名称为GTH-SCAN-POTENTIAL

          +

          cp2k 输入文件设置为如下即可:

          +
          POTENTIAL_FILE_NAME GTH-SCAN-POTENTIAL
          +
          +

          CP2K SCAN泛函的问题

          +

SCAN seems to be very hard to converge for systems containing a large vacuum region. So far the author has tried a hematite slab model and an SrTiO3 model, and neither converged properly; see the Google group for other opinions. If you have any suggestions, please contact the author.

diff --git a/wiki/software_usage/cp2k/cp2k-slab/index.html b/wiki/software_usage/cp2k/cp2k-slab/index.html
[new page: CP2K:Slab计算 - XMU Chenggroup Wiki]

          CP2K: Slab计算

学习目标

• 什么是Slab模型
• CP2K 偶极矫正
• 其他去除周期性的方式

          什么是Slab模型

          +

          Slab模型是在三维周期性边界条件下计算固体表面的一种方法。通常选择z方向为表面朝向,即模型的z方向中有一半是真空(无原子)另一半为固体模型。如下图为一个典型的Slab模型:

          +

[图:一个典型的 Slab 模型示意图]

          +

          CP2K 偶极矫正

          +

          Slab模型虽然是代表表面,但是实际上在z方向是固体-真空-固体-真空-...的交替。如果我们建立的Slab模型在z方向是非对称的,模型就会产生一个沿z方向的偶极。偶极会产生静电势,静电势接着会影响模型的镜像(周期性边界条件)。最后算出来的模型的总能量和力与真实情况是不相符的。因此我们需要方法去矫正这种虚假的静电影响。

          +

          一种常用的方法就是偶极矫正,在真空部分加入一个超窄的但是方向相反的偶极。这样一来,固体模型产生的偶极和真空中的偶极就会相互抵消。模型和其镜像之间的静电势影响就会抵消。

          +

          具体的设置如下:

          +

          在FORCE_EVAL/QS/DFT下开启

          +
          SURFACE_DIPOLE_CORRECTION .TRUE.
          +
          +

          其他去除周期性的方式

          +

          表面偶极矫正仅有z方向可以去除,若要去除其他三个方向的周期,可以采用另外的设置

          +

          在FORCE_EVAL/SUBSYS/CELL下

          +
          PERIODIC NONE
          +
          +

          在FORCE_EVAL/DFT/POISSON下

          +
          PERIODIC NONE
+POISSON_SOLVER MT  # 其他求解器也可以,笔者仅试过MT
          +
diff --git a/wiki/software_usage/cp2k/cp2k-tools/index.html b/wiki/software_usage/cp2k/cp2k-tools/index.html
[new page: CP2K:tools - XMU Chenggroup Wiki]

          CP2K

          +

          cp2k有许多方便的工具。可以帮我们脚本化工作流程,节约时间。

          +

          PYCP2K: 脚本化输入文件生成工具

          +

PYCP2K is written in Python and lets you generate CP2K input files from Python scripts, so input preparation can be integrated into a Python workflow; see the PYCP2K documentation for details on how to use it.

          +

Note that the pre-built package only works for CP2K versions up to v5.1. If you use a newer version such as v7.1, you can generate the corresponding package yourself; see the Manual installation part of the PYCP2K README on GitHub. On our cluster, to generate the required xml file, first run module load cp2k/7.1 and then cp2k.popt --xml; for the remaining steps simply follow the Manual installation instructions.

diff --git a/wiki/software_usage/cp2k/cp2k-zpe/index.html b/wiki/software_usage/cp2k/cp2k-zpe/index.html
[new page: CP2K:ZPE - XMU Chenggroup Wiki]

          CP2K: ZPE(Zero-point energy)

学习目标

• ZPE基本原理
• CP2K Frequence设置
• CP2K Frequency计算结果检查
• ZPE求解
• 注意事项

          ZPE基本原理

          +

          零点能(ZPE)是量子力学系统可能具有的最低可能能量,此时系统所处的态称为基态;所有量子力学系统都有零点能。与经典力学不同,量子系统在Heisenberg不确定性原理所描述的最低能量状态下不断波动。

          +

          我们在计算吉布斯自由能(\(G=E_{DFT}+ZPE-TS^\circ\))时会涉及到零点振动能,零点振动能的计算公式为:

          +

\(ZPE=\sum_{i=1}^{3N}\frac{\hbar\omega_i}{2}\)

          +

          因此我们需借助CP2K计算得到振动频率\(\omega\)

          +

          Boyer, T. H. Quantum Energy and Long-Range Forces. Ann. Phys 1970, 56, 474–503.

          +

          Girod, M.; Grammaticos, B. The Zero-Point Energy Correction and Its Effect on Nuclear Dynamics. Nucl. Physics, Sect. A 1979, 330 (1), 40–52. https://doi.org/10.1016/0375-9474(79)90535-9.

          +

          CP2K Frequence设置

          +
1. 设置CP2K INPUT / GLOBAL / RUN_TYPE

          RUN_TYPE  VIBRATIONAL_ANALYSIS
          +
2. 在CP2K INPUT / VIBRATIONAL_ANALYSIS 下设置

          +
          &VIBRATIONAL_ANALYSIS
          +  NPROC_REP 192  # 总核数=节点数*核数(通常与提交作业cp2k.lsf文件中的核数一致)
          +  DX 0.02
          +  FULLY_PERIODIC
          +  &PRINT
          +    &MOLDEN_VIB
          +    &END
          +    &CARTESIAN_EIGS
          +    &END
          +    &PROGRAM_RUN_INFO
          +      &EACH
          +        REPLICA_EVAL 1
          +      &END
          +    &END
          +  &END PRINT
          +&END VIBRATIONAL_ANALYSIS
          +
          +
3. 在CP2K INPUT / MOTION 下通过CONSTRAINT固定计算中无关的原子
          +
          &MOTION
          +  &CONSTRAINT
          +    &FIXED_ATOMS
          +      LIST 1..320 # 计算时需要固定的无关原子对应的序号
          +    &END
          +  &END
          +&END MOTION
          +
          +

          CP2K Frequency计算结果检查

          +

          正常计算结束会输出project-VIBRATIONS-1.mol文件,里面[FREQ]模块即为计算得到的frequence(unit:\(cm^{-1}\))

          +
          [FREQ]
          +      204.783042
          +      296.784083
          +      379.892297
          +      414.559665
          +      913.554709
          +     3650.225071
          +
          +

When searching for a transition state, e.g. in a CP2K NEB calculation, the frequencies of the transition state contain an imaginary mode, which shows up as a negative value:

          +
          [FREQ]
          +     -150.004617
          +       76.011787
          +       90.652110
          +      105.659737
          +      114.363774
          +      118.342870
          +      125.738357
          +      ……
          +
          +

          ZPE求解

          +

\(ZPE=\sum_{i=1}^{3N}\frac{\hbar\omega_i}{2}\)

          +

The frequencies printed by CP2K are actually wavenumbers \(\tilde{\nu}=1/\lambda\) in units of \(cm^{-1}\); the corresponding angular frequency is \(\omega_i = 2\pi c\,\tilde{\nu}_i\) (equivalently, \(ZPE=\sum_i\frac{1}{2}hc\,\tilde{\nu}_i\)), which is then inserted into the sum above.

          +

          N对应计算的原子个数。

          +
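A minimal sketch of this bookkeeping: read the [FREQ] block of the .mol file and sum \(\frac{1}{2}hc\tilde{\nu}_i\) over all real modes. The file name follows the PROJECT name used above and is an assumption.

```python
# Sum the zero-point energy from the [FREQ] block (wavenumbers in cm^-1):
# ZPE = sum_i (1/2) * h * c * nu_i, evaluated here in eV.
h = 4.135667696e-15     # Planck constant, eV*s
c = 2.99792458e10       # speed of light, cm/s

freqs, in_freq_block = [], False
with open("project-VIBRATIONS-1.mol") as f:      # adapt to your PROJECT name
    for line in f:
        token = line.strip()
        if token.startswith("["):
            in_freq_block = (token == "[FREQ]")
        elif in_freq_block and token:
            freqs.append(float(token))

zpe = sum(0.5 * h * c * nu for nu in freqs if nu > 0)   # skip imaginary (negative) modes
print(f"ZPE = {zpe:.4f} eV from {len(freqs)} modes")
```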

          注意事项

          +

          (1) 由于PBC条件的限制,CP2K的Frequence计算结果中不包含平动频率,是否包含转动频率取决于体系的状态(CONSTRAINT),通常振动频率远大于转动频率。

          +

          (2) 计算真空中一个分子的Frequence时,要去除盒子所有方向的周期性,通常可以用\(20Å\times20Å\times20Å\)的盒子进行测试。

          +

(3) When computing the frequencies of a stable structure with CP2K, several spurious imaginary frequencies often appear as well. This is a known issue of CP2K calculations with GTH pseudopotentials; see (https://groups.google.com/forum/?fromgroups#!topic/cp2k/DVCV0epl7Wo) for details.

          +

          解决方案有四种:

          +

          a. 使用NLCC赝势(http://arxiv.org/abs/1212.6011)。不过NLCC赝势很不完整,只有B-Cl的元素有,且只提供了PBE泛函的赝势。

          +

          b. 增大CUTOFF,使用600 Ry以上的CUTOFF。

          +

c. 在XC_GRID部分使用平滑参数SMOOTHING,不推荐使用。

          +

          d. 在XC_GRID部分使用USE_FINER_GRID。加上这个参数后,XC部分的格点的精度提高为4*CUTOFF。

diff --git a/wiki/software_usage/cp2k/cp2k/index.html b/wiki/software_usage/cp2k/cp2k/index.html
[new page: CP2K入门 - XMU Chenggroup Wiki]

          CP2K 入门

学习目标

• 设置CP2K环境变量
• 书写CP2K的输入文件
• 检查CP2K输入文件
• 单点能计算
• 结构优化
• 分子动力学

          CP2K的特色

          +

          CP2K同时使用了平面波基组和高斯基组,因此可以在傅立叶空间里描述长程作用力和实空间里描述局域的波函数。使用CP2K进行分子动力学(MD)运算效率很高。CP2K使用了单k点的计算方式,又称为gamma approximation,因此在早期CP2K版本中没有K点的设置。近年仅在单点能中加入了k点的计算。

          +

          设置CP2K环境变量

          +

          哪里获取Basis和PseudoPotential文件

          +

          Github

          +

          省略路径

          +

          CP2K需要用到赝势和基组文件。假设这些文件都存在于目录/somewhere/basis/下。可以通过设置环境变量CP2K_DATA_DIR来让CP2K自己找到文件。

          +

          打开自己的 ~/.bashrc文件. 添加以下命令

          +
          export CP2K_DATA_DIR=/somewhere/basis/
          +
          +

          之后在使用赝势和基组时可以直接写文件名字而不需要指出路径。

          +

          书写CP2K输入文件

          +

          CP2K输入文件的书写在CP2K官网中有许多例子,请大家自己上网学习。

          +

          除了简单的SECTION, VALUE的书写形式以外,CP2K还提供了一些简单的变量设置条件判断等设定方式,具体参考CP2K输入参考手册

          +

          什么是好的输入文件习惯?

          +

CP2K input files contain a large number of settings; usually we either write one from top to bottom once or take someone else's input and modify it, and in both cases typos and wrong settings creep in easily. If the job is then rejected by the cluster, the queueing time is wasted. The author therefore has a few suggestions:

          +
            +
1. 使用cp2k.popt -c input.inp 检查输入文件的语法
2. 使用注释(#)来提醒输入文件的设置
3. 使用变量和条件判断来简单的开关CP2K的功能
          +
          #a good example of input file
          +#set variable and condition to open/close section in CP2K
          +#if variable is 0 in condition, it is false, otherwise it is true
          +@SET HSE06 0
          +
          +########## This part is HSE06 ##########
          +@IF ${HSE06}
          +            &XC_FUNCTIONAL
          +                &PBE
          +                    SCALE_X 0.0
          +                    SCALE_C 1.0
          +                &END PBE
          +                &XWPBE
          +                    SCALE_X -0.25
          +                    SCALE_X0 1.0
          +                    OMEGA 0.11
          +                &END XWPBE
          +            &END XC_FUNCTIONAL
          +            &HF
          +                &SCREENING
          +                    EPS_SCHWARZ 1.0E-6
          +                    SCREEN_ON_INITIAL_P FALSE
          +                &END SCREENING
          +                &INTERACTION_POTENTIAL
          +                    POTENTIAL_TYPE SHORTRANGE
          +                    OMEGA 0.11
          +                    T_C_G_DATA t_c_g.dat
          +                &END INTERACTION_POTENTIAL
          +                &MEMORY
          +                    MAX_MEMORY 10000
          +                    EPS_STORAGE_SCALING 0.1
          +                &END MEMORY
          +                &PERIODIC
          +                     NUMBER_OF_SHELLS 0
          +                &END PERIODIC
          +                FRACTION 0.25
          +            &END HF
          +@ENDIF
          +
          +
          +

          Warning

          +

          注释要单独占一行,代码和注释混合会导致input读入错误

          +
          +

          检查CP2K输入文件

          +

          在服务器上,需要通过module load cp2k/版本号 来启动CP2K软件。Load后,可以使用cp2k.popt命令,这是CP2K软件的主要程序。

          +

          CP2K的计算运行是

          +
          cp2k.popt input.inp > output
          +
          +

          当然在服务器上需要通过提交脚本来执行命令。

          +

          由于CP2K输入文件有时较为庞大,经常会有误写或者语法错误的情况发生,为了避免提交之后被退回来,可以先使用命令检查:

          +
          cp2k.popt -c input.inp
          +
          +
          +

          Warning

          +

          cp2k.popt -c 仅检查是否有语法错误,实际运行的错误不会检查出来

          +
          +

          单点能计算

          +

          参见官网的例子: CP2K能量和力的计算

          +

          参见官网的例子: CP2K中CUTOFF和REL_CUTOFF的测试

          +

          结构优化

          +

          建设中

          +

          分子动力学

          +

          建设中

          +

          CP2K的一些常用工具

          +

          CP2K Vim input 插件

diff --git a/wiki/software_usage/default_version/index.html b/wiki/software_usage/default_version/index.html
[new page: 软件默认版本推荐 - XMU Chenggroup Wiki]

          软件默认版本推荐

          +

          目前集群上很多软件都编译了多个版本,但由于软硬件平台、版本、环境的更新,需要对常用软件的一些版本梳理如下,并给出建议使用版本。

          +

          Zeus 集群采用 module 对软件环境进行管理,通常使用前需要加载环境,例如 module load vasp/5.4.4即可加载 VASP 5.4.4 版本运行所需环境。因此下文对软件推荐版本的说明,将会列出Zeus上使用的<module name>,具体使用时请自行补全为module load <module name>

          +

          注意如果在 ~/.bashrc~/.bash_profile 中加载了环境,如果与下述版本用到的环境存在冲突,可在提交脚本中加入module purge行进行卸载,以免产生冲突。

          +

          注意: CentOS 7 默认使用的 GCC 版本为4.9.4,Python 版本为2.7,Python 3 版本为 3.6,故以下涉及到上述环境若未加载,则表示使用默认环境。

软件名        推荐版本               命令                        需要调用环境              备注
VASP          vasp/5.4.4             常规计算:vasp_std          intel/17.5.239            CPU并行计算
                                     Gamma点 :vasp_gam          mpi/intel/2017.5.239
CP2K          cp2k/7.1               启用OpenMP:cp2k_psmp       gcc/5.5.0                 CPU并行计算
                                     未启用:cp2k_popt           intel/17.5.239
                                                                 mpi/intel/2017.5.239
DeePMD-kit    deepmd/2.0-cuda11.3    训练:dp                    cuda/11.3                 GPU加速势函数训练,
                                     跑MD:lmp_mpi               gcc/7.4.0                 采用的Lammps版本是20201029
                                                                 intel/17.5.239
                                                                 mpi/intel/2017.5.239
diff --git a/wiki/software_usage/experience_of_dpmd_and_dpgen/index.html b/wiki/software_usage/experience_of_dpmd_and_dpgen/index.html
[new page: DPMD和DPGEN使用经验 - XMU Chenggroup Wiki]

          DPMD和DPGEN使用经验

          +

          DPMD train.json参数设置和理解:

          +

          dp-kit 安装

          +
            +
• 如果本地有GPU,推荐使用 deepmd-kit 离线全量安装包(总大小约1G),用 shell 执行安装,便于在本地开发测试。
          +

          DeepPotential

          +
1. 形象化理解sel_a:一个原子出现的概率越高,对应的sel_a越大;sel_a对应以任意原子为中心,能找到的该种原子的最大数目
2. neuron network和resnet大小一般不修改;同时训练多个势函数需要修改随机种子seed
3. 用于实际使用的势函数需要well-train,需要“长训练”,常用设置为:

"learning_rate" 中 "decay_steps": 20000
"stop_batch": 400000    # 使用200000步也大致没有问题

          DPGEN 使用

          +
            +
          1. 提交训练后需要跟踪train的情况。有时候由于提交后无法配置GPU资源(被其他程序占用或其他原因),导致训练输出为“nan”,需要重新提交并确保获取GPU资源。
          2. +
          3. V100卡上短训练一般在4~8小时。长训练是短训练10倍时间。理论上dpmd方法训练时间随元素类型数目线性增长。(MN,M原子数,N类型数)。
          4. +
          5. 用于训练的数据要正确的设置type.raw。尤其注意初始数据的处理,保证元素顺序,编号正确。
          6. +
7. 注意测试 k-points:dpgen 在 VASP 的 INCAR 中使用 kspacing 和 kgamma 来决定 k 点。一般要求能量收敛到 1 meV/atom、力分量收敛到 5 meV/A 以下。
          8. +
9. dpgen 的 exploration 步骤通过 MD 采样,探索步数一般随着迭代增加到 10000~20000 即可。通常增加随机的 MD 起点数目比增加探索步数的采样效率更高。这是最关键的步骤:设计 exploration 策略时,需要使探索的体系和采样空间与实际应用时的情形相类似。
          10. +
          11. 通过修改machine.json对应配置让dpgen报错停下,用于数据分析和检测。例如设置错误的端口/IP使任务在某步停下。
          12. +
          13. 如果训练了较旧版本的势函数,可以用更新版本从checkpoint开始,再增加2000步后freeze。(版本差异不能过大)
          14. +
          15. 神经网络拟合能力是很强的,不consistent的数据(不同k点)也能拟合出非常小的能量/力误差。所以,要注意使用测试体系检查势函数质量,测试体系取决于所研究的问题。也要注意输入的DFT数据做好充分的计算参数测试。
          16. +
          17. 提交任务后lcurve.out出现NaN;原因可能是内存或gpu没有正确分配。需要重启。
          18. +
          19. dp restart/freeze 要保持在相同的路径下,如果改变了文件夹位置/名称,可以修改checkpoint指明model路径。
          20. +
          21. MD同时使用四个模型评估不影响速度(在显存不占满的情况下)。
          22. +
          23. 使用多个模型MD,在旧版本中是用平均值,新版本>1.0是用第一个势函数值。
          24. +
25. 注意可视化每轮的训练结果,包括学习曲线(训练误差随 batch 的下降趋势)、model_deviation 的分布、单点能的收敛情况和结构的正确性,并对每轮的结果进行分析(示例脚本见本节列表之后)。
          26. +
          +
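下面给出一个每轮迭代后做可视化检查的最小示例草稿(假设已安装 numpy 和 matplotlib;lcurve.out 与 model_devi.out 的列含义随 DeePMD-kit / DP-GEN 版本不同而变化,下面使用的列号仅为假设,请以文件表头的注释行为准):
# plot_iteration.py:快速查看学习曲线和 model deviation 分布
import numpy as np
import matplotlib.pyplot as plt

# 学习曲线:lcurve.out 的 "#" 表头行会被 loadtxt 自动跳过
lcurve = np.loadtxt("lcurve.out")
plt.figure()
plt.loglog(lcurve[:, 0], lcurve[:, 1])      # 第 0 列为步数,第 1 列为某项误差(以表头为准)
plt.xlabel("batch")
plt.ylabel("error")
plt.savefig("lcurve.png")

# model deviation 分布:假设第 5 列(索引 4)为 max_devi_f
devi = np.loadtxt("model_devi.out")
plt.figure()
plt.hist(devi[:, 4], bins=100)
plt.xlabel("max_devi_f (eV/A)")
plt.ylabel("count")
plt.savefig("model_devi_hist.png")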

          DFT单点能计算经验

          +
            +
          • 一般对体系影响最大的是k点,需要测试不同的k点,k点数目和计算成本是对应的
          • +
• vasp 擅长小体系、多 k 点的并行;大体系、少 k 点会显著较慢。可以使用 kspacing 控制,参照下方脚本估算不同 kspacing 对应的 K 点数目:
          • +
          +
          from pymatgen import Structure
          +from math import pi
          +import numpy as np
          +import pandas as pd
          +stc = Structure.from_file('POSCAR')
          +a,b,c = stc.lattice.abc
          +# CASTEP 和 VASP 计算KSPACING不同,差一个常数2pi
          +kspacing_range = np.linspace(0.1, 0.6, 21)
          +kpoint_a = np.ceil( 2*pi/kspacing_range/a).astype('int')
          +kpoint_b = np.ceil( 2*pi/kspacing_range/b).astype('int')
          +kpoint_c = np.ceil( 2*pi/kspacing_range/c).astype('int')
          +
+df = pd.DataFrame({'kspacing': kspacing_range, 'a': kpoint_a, 'b': kpoint_b, 'c': kpoint_c})
          +print(df) # 查看不同kspacing 对应的K点数目
          +
          +
            +
• 主要的 INCAR 计算参数如下(对比脚本之后另给出一个写 INCAR 的示例):
              +
            • ENCUT(一般取600/650保证quality,对计算速度影响不明显);
            • +
• ISMEAR=0(ISMEAR=-5 的四面体方法(Blöchl 修正)要求 k 点不少于 4 个,有时不能用;测试表明二者能量/力误差在 1e-3 以下,且 ISMEAR=0 计算成本更低)
            • +
• 自旋(spin)会对体系有非常大的影响,一种 brute force 的做法是直接给一个好的磁矩初猜(可用代码辅助生成)。
            • +
            • LASPH可以考虑加入,提高精度,极少量成本。
            • +
            • LWAVE,LCHARG关掉,减少计算时间和储存空间浪费。
            • +
            +
          • +
• 测试计算的思路应当是:先选一组最严格(最贵)的参数,再继续提高精度确认已经收敛,之后以此为参照逐项降低各参数,在保证精度可靠的基础上减少计算成本。
          • +
          +
          from ase.io import read
          +at = read('OUTCAR')
+ref = read('ref/OUTCAR') # 参照计算(最严格参数)的 OUTCAR
          +dE = ref.get_potential_energy() - at.get_potential_energy() # 一般dE 小于10meV
          +dEperAtom = dE/len(ref) # 要求小于1meV/atom
          +dF = ref.get_forces() - at.get_forces()
+print(dF.max(), dF.min()) # 要求在5meV/A以下,尽可能在1meV/A 以下
          +
          +
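下面给出一个按上述思路写 INCAR 的示例草稿(假设已安装 pymatgen;其中 SIGMA、KSPACING 等具体数值仅为示意,并非推荐值,请结合自己的体系测试确定):
# write_incar.py:用 pymatgen 生成单点能计算的 INCAR(参数值仅作示意)
from pymatgen.io.vasp.inputs import Incar

incar = Incar({
    "ENCUT": 600,        # 较高的截断能保证精度
    "ISMEAR": 0,
    "SIGMA": 0.05,       # 展宽数值为假设值
    "LASPH": True,       # 非球谐修正,成本很低
    "LWAVE": False,      # 不写 WAVECAR,节省储存
    "LCHARG": False,     # 不写 CHGCAR
    "KSPACING": 0.25,    # 与 KGAMMA 配合控制 K 点密度(数值需自行测试)
    "KGAMMA": True,
})
incar.write_file("INCAR")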
            +
1. LREAL = Auto:对于大体系推荐使用实空间投影(Auto 会自动设置);对于 GPU 版本则必须使用实空间投影。由于求积分方法的差异,实空间计算会引入约 1~2 meV/atom 的系统误差。
          2. +
3. VASP 输出的结构只要是电子步收敛的,都可以添加到训练集(可用 dpdata 转换格式,示例见本列表之后)。需要注意,若误添加了错误的结构(能量绝对值极大),会导致训练误差无法下降。
          4. +
5. 如果 VASP 计算只有单个 K 点(Gamma 点),使用 vasp_gam 相对 vasp_std 可以节省约 1/6 ~ 1/3 的时间。
          6. +
          +
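下面给出用 dpdata 将电子步收敛的 OUTCAR 转为 DeePMD 训练集格式的最小示例(假设已安装 dpdata;输出目录名与 set_size 仅为示意):
# outcar_to_deepmd.py:将 VASP 的 OUTCAR 转为 deepmd/npy 训练数据
import dpdata

system = dpdata.LabeledSystem("OUTCAR", fmt="vasp/outcar")   # 读入单点/从头算轨迹
print(system)                                                # 查看帧数、元素与原子数
system.to_deepmd_npy("deepmd_data", set_size=200)            # 写出训练集,set_size 控制每个 set 的帧数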

          文件空间管理

          +

随着模拟时间和模拟体系扩增,储存文件占用的空间非常巨大。在储存文件时候注意:
1. 保留必要的输入和输出文件:包括初始结构(data.lmp),计算设置(input.lammps),计算输出(log),轨迹(traj)
2. 建议用如下方案压缩:

          +
          zip -9r -y data.zip data/   # 使用最大压缩率;保留文件相对路径压缩
          +
          +

对于数值数组,也可以用 npz 压缩,相比 zip 直接压缩,压缩率可再提高 5% 左右。

          +
          import numpy as np
          +data = ...
          +data = data.astype('float32') # 保存为32位不损失坐标/力等需要的精度
+np.savez_compressed('data.npz', data=data)   # 注意:函数名为 savez_compressed
+data = np.load('data.npz')['data']  # 重新载入
          +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/i-pi/index.html b/wiki/software_usage/i-pi/index.html new file mode 100644 index 00000000..9a703e20 --- /dev/null +++ b/wiki/software_usage/i-pi/index.html @@ -0,0 +1,2661 @@ + + + + + + + + + + + + + + + + + + + + + + + + + I pi - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          I pi

          + + + + + + + + + + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/n2p2/index.html b/wiki/software_usage/n2p2/index.html new file mode 100644 index 00000000..c3d53fee --- /dev/null +++ b/wiki/software_usage/n2p2/index.html @@ -0,0 +1,2967 @@ + + + + + + + + + + + + + + + + + + + + + + + + + n2p2 Usage Guide - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          n2p2 Usage Guide

          +

          Short Introduction

          +

          This repository provides ready-to-use software for high-dimensional neural network potentials in computational physics and chemistry.

          +

          The following link is for your information:

          + +
          +

          Warning

          +

This page only reflects the author's experience and understanding. If you find any mistakes or unclear parts, please report an issue

          +
          +

          Basic Principle

          +

The n2p2 software is based on neural-network fitting. For details about neural networks (NN), please refer to [here].

          +

The additional work done by Behler and Parrinello was to build a link between the potential energy surface (PES) and the NN.

          +

First, they decompose the total energy into atomic energies (\(E^{atom}\)). \(E^{atom}\) is not the energy of a neutral atom in vacuum as seen in quantum chemistry textbooks; it is simply the decomposition of the total energy into the contribution of every atom, as expressed by the following equation:
$$
E_{tot}=\sum_i {E_i^{atom}}
$$
where i runs over the indices of the atoms in the system.

          +

          Usage in Cluster

          +

n2p2 has been installed on Cluster51. Use the command module load n2p2/2.0.0 to load n2p2. After that, you can use all the executable binaries of n2p2. An LSF script is provided at /share/base/script/n2p2.lsf. An explanation of the LSF script is given here

          +

          Training Procedure

          +

          Overview

          +

The core training tool in n2p2 is nnp-train. This command becomes available after loading the module n2p2/2.0.0. Entering the directory with the prepared files and typing nnp-train is all that is needed. To run it with MPI, just type mpirun nnp-train. The input files for nnp-train include:

          +
            +
          • input.nn: input setup for training
          • +
          • input.data: input training set for training procedure.
          • +
          • scaling.data: scaling data from data set (you will obtain this from nnp-scaling)
          • +
          +

          Example input file is in the github repository <n2p2 root>/examples/nnp-train

          +

          File: input.data

          +

          See input.data format here

          +

Python script for conversion from cp2k xyz to input.data

          +
           from ase.io import read, write
          + import os, sys
          +
          + # data_path: directory contains forces.xyz and coords.xyz
          + data_path = "./test_data"
          + data_path = os.path.abspath(data_path)
          +
          + #input cell parameter here, a 3x3 list
          + cell = [[10., 0., 0. ], [0., 10., 0.], [0., 0., 10.]]
          +
          + #read coords and forces
          + pos_path= os.path.join(data_path, "coords.xyz")
          + frc_path= os.path.join(data_path, "forces.xyz")
          + pos = read(pos_path, index = ":")
          + frc = read(frc_path, index = ":")
          +
          + out_path = os.path.join(data_path, "input.data")
          + fw = open(out_path, "w")
          + for frame_idx in range(len(pos)):
          +     fw.write("begin\n")
          +     for i in range(3):
          +         fw.write("lattice{:10.4f}{:10.4f}{:10.4f}\n".format(cell[i][0], cell[i][1], cell[i][2]))
+     for atom in zip(pos[frame_idx], frc[frame_idx]):   # use frame_idx here, not the inner loop variable i
          +         fw.write("atom{:12.5f}{:12.5f}{:12.5f}".format(atom[0].position[0], atom[0].position[1], atom[0].position[2]))
          +         fw.write("{:3}".format(atom[0].symbol))
          +         fw.write("{:10.4f}{:10.4f}".format(0.0, 0.0))
          +         fw.write("{:12.5f}{:12.5f}{:12.5f}\n".format(atom[1].position[0], atom[1].position[1], atom[1].position[2]))
          +     fw.write("energy{:20.4f}\n".format(pos[i].info['E']))
          +     fw.write("charge{:20.4f}\n".format(0.0))
          +     fw.write("end\n")
          +
          +

          nnp-scaling

          +

nnp-scaling should be executed before nnp-train in order to obtain the file scaling.data. There are only two files you need:

          +
            +
          • input.nn
          • +
          • input.data
          • +
          +

          Example input file is in the github repository <n2p2 root>/examples/nnp-scaling. A point is worth to notice. The random_seed keyword in file input.nn is followed by a number. This number serves as a initialization of psudo-random code. However as you can imply from the name, this random number is fake. It depends strongly on your initialization number (more exactly, you will get a same serial number if you start by a same random seed number). Therefore, if you would like a random starting for parameter in NN, set a different number for random seed.

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/software_usage/vmd/index.html b/wiki/software_usage/vmd/index.html new file mode 100644 index 00000000..c9c5b646 --- /dev/null +++ b/wiki/software_usage/vmd/index.html @@ -0,0 +1,2937 @@ + + + + + + + + + + + + + + + + + + + + + + + + + vmd 使用说明 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          VMD 使用说明

          +

          VMD介绍

          +

VMD 是分子可视化软件,主要用于查看分子动力学轨迹。

          +

          官网: http://www.ks.uiuc.edu/Research/vmd/

          +

          VMD安装

          +

          Linux 和 Windows

          +

          直接查看官网,其他无需特殊注意

          +

          MacOS Catalina版本以上

          +

          由于苹果不再支持32位的软件,因此需要64位版本的VMD。

          +

          已经编译好的软件从这里下载: https://www.ks.uiuc.edu/Research/vmd/mailing_list/vmd-l/31222.html

          +

          使用集群的VMD进行远程查看

          +

          现在51和52集群上均安装了VMD/1.9.3

          +

          使用方法是

          +
          module load vmd/1.9.3
          +
          +

          然后如同在本地端使用vmd一样使用即可。

          +

          集群打开vmd报错

          +

          如果遇到报错

          +
          XRequest.149: BadMatch (invalid parameter attributes) 0xa00105
          +XRequest.149: GLXBadContext 0xa00001
          +
          +

          首先在集群上查看

          +
          glxinfo
          +glxgears
          +
          +

          如果得到报错

          +
          name of display: localhost:24.0
          +libGL error: No matching fbConfigs or visuals found
          +libGL error: failed to load driver: swrast
          +X Error of failed request:  GLXBadContext
          +  Major opcode of failed request:  149 (GLX)
          +  Minor opcode of failed request:  6 (X_GLXIsDirect)
          +  Serial number of failed request:  23
          +  Current serial number in output stream:  22
          +
          +

          +
          libGL error: No matching fbConfigs or visuals found
          +libGL error: failed to load driver: swrast
          +X Error of failed request:  BadValue (integer parameter out of range for operation)
          +  Major opcode of failed request:  149 (GLX)
          +  Minor opcode of failed request:  3 (X_GLXCreateContext)
          +  Value in failed request:  0x0
          +  Serial number of failed request:  28
          +  Current serial number in output stream:  30
          +
          +

那么请在本地 Mac/iMac 的终端上退出 XQuartz,然后在本地终端里输入:

          +
          defaults write org.macosforge.xquartz.X11 enable_iglx -bool true 
          +
          +

          即可解决问题

          +

          Ref: https://www.ks.uiuc.edu/Research/vmd/mailing_list/vmd-l/28494.html

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/teamwork/archive_rules/index.html b/wiki/teamwork/archive_rules/index.html new file mode 100644 index 00000000..63399999 --- /dev/null +++ b/wiki/teamwork/archive_rules/index.html @@ -0,0 +1,2944 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 如何整理项目文件 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          如何归档/整理项目文件

          +

          数据整理的必要性

          +

为了能让接收项目的人以及组里其他人的数据能够相互参考,避免不必要的重复计算和浪费,我与云霈总结了一些简单的整理规则。

          +

          数据整理的规则

          +

          规则1:

          +

以项目名称命名大文件夹。例:SnO2(110) 面的机器学习

          +
          SnO2110-ML #项目文件名
          +
          +

          规则2:

          +

          数字 作为目录名前缀,以下 划线命名法 来给目录命名。

          +

          因为计算必定伴随着 目的,所以目录名以计算的 目的 来命名。

          +

数字 可以使目录按照自己的意志来排序,下划线命名法 可以有效提高目录名的可读性。例:

          +
          ./SnO2110-ML
          +├── 00.train_set #放训练集
          +├── 01.train_set_test #做训练集测试
          +├── 02.DP_Pots #放机器学习势能
          +├── 03.dissociation #计算解离度
          +├── 04.surface_tension #计算表面张力
          +
          +

注意:再往下一级的目录可不必按照以上方法命名,尽量使用 下划线命名法 即可。

          +

          规则3:

          +

对于 作图类的目录,要保留作图的 数据、原始脚本 和 作出来的图。例:

          +
          01.train_set_test
          +├── TrainSetEnergy.pdf #作出来的图
          +├── TrainSetForce.png #作出来的图
          +├── TrainingSetError.py #处理作图的脚本 可以直接运行!
          +├── e.out #作图的原始数据
          +└── f.out #作图的原始数据
          +
          +

对于 计算类的目录,要保留 必要的输出文件 和 输入文件。例:

          +
          02.DP_Pots #放机器学习势能
          +├── v1.0 #版本号
          +│   ├── graph.000.pb #势能函数,输出文件的一种
          +│   ├── graph.001.pb
          +│   ├── graph.002.pb
          +│   ├── graph.003.pb
          +│   ├── input.000.json #对应的输入文件
          +│   ├── input.001.json
          +│   ├── input.002.json
          +│   └── input.003.json
          +├── v1.2
          +│   ├── graph.000.pb
          +│   ├── graph.001.pb
          +│   ├── graph.002.pb
          +│   ├── graph.003.pb
          +│   ├── input.000.json
          +│   ├── input.001.json
          +│   ├── input.002.json
          +│   └── input.003.json
          +└── v1.3
          +    ├── README
          +    ├── graph.000.pb
          +    ├── graph.001.pb
          +    ├── graph.002.pb
          +    └── graph.003.pb
          +
          +

          规则4:

          +

          在文件夹里放入必要的说明文件,例如 README

          +
          └── v1.3
          +    ├── README #必要的说明文件,推荐使用markdown语言书写
          +    ├── graph.000.pb
          +    ├── graph.001.pb
          +    ├── graph.002.pb
          +    └── graph.003.pb
          +
          +
          # README
          + converted from v1.2 pot
          + compress input use that v1.2 training input
          +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/teamwork/git_usage/index.html b/wiki/teamwork/git_usage/index.html new file mode 100644 index 00000000..f04b3421 --- /dev/null +++ b/wiki/teamwork/git_usage/index.html @@ -0,0 +1,3190 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Git 基本使用教程 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          + +
          + + + +
          +
          + + + + + + + +

          Git 基本使用教程

          +
          +

          Git是目前世界上最先进的分布式版本控制系统(没有之一)—— 廖雪峰

          +
          +

          版本控制系统可以帮助用户快速识别、整理项目的修改等,避免出现诸如 "新建文本文件_by浩二-第19版_修改190810-v114.514 - 副本(9).txt" 等令人血压上升、呕吐不止的情况。

          +

          Git作为开源社区常用的版本控制系统,有着强大的功能,可以帮助用户管理以文本(如代码等)为主的项目。当然对二进制文件,例如docx、pptx等,Git的支持尚不够完善,加上服务器众所周知的原因,因而不建议把Github当成网盘使用。

          +

目前组内有关机器学习、自动化的工作逐渐增多,需要代码共享和协同的场合也逐渐增加。另一方面,基于 LaTeX 等标记语言的论文写作,实质上也是对文本文件的处理。但鉴于 Git 的入门和使用尚有一定门槛,需要先掌握一些基础命令,因而写下这篇文字,整理一些常用的 Git 操作。限于篇幅和水平,可能会有一些缺漏,还请指正。

          +

          本文将长期更新,不定期收录一些小故事小Trick。

          +

          项目创建

          +

          基于 Github 创建项目

          +

          首先注册 Github 账号,这里不作赘述。

          +
          +

          提示

          +

          若正在阅读本文的读者是在校师生,可通过 Github 官方渠道 申请成为校园专业用户(Campus Expert),从而可以免费使用(白嫖)专业版特性,并享受一系列优惠(包括Pycharm专业版等,详见官网介绍)。当然这不影响我们后文的操作,读者可以稍后申请。注意申请的IP需要位于校园网环境内,并且最好保证IP定位在校区范围内以免出现错误识别导致申请失败。例如厦门大学曾呈奎楼不位于Github认可的校区范围内,请最好到化学楼、卢嘉锡楼、图书馆等地申请。申请时可能需要提供学校邮箱、学生卡照片信息等,请按照相应提示操作。

          +
          +

          完成注册到达首页,便可以看到如图的按钮,点击"New"即可创建一个仓库(Repository)。

          +

          image-20211222110147676

          +

          随后便出现如下图的界面,可以选择设置该仓库的归属(Owner)、名称(Repository name)、说明(Description)、权限等。需要说明的是,公共仓库(Public)的内容任何人都能看到,但提交(Push)需要设置权限;而私有仓库(Private)的访问权限取决于归属者,若为个人仓库默认仅自己可见,若为组织(Organization)则仅该组织成员可见。

          +

          截屏2021-12-22 上午11.15.52

          +

          尽管直接点击“Create repository”我们便可以快速创建一个仓库,这里推荐根据情况选择是否要创建说明文档(README file)、忽略信息(.gitignore)以及开源协议(License)。关于开源协议的说明,请点击"Learn more",这里限于篇幅原因不过多描述。

          +

          需要说明的是.gitignore,如图所示,可以看到 Github 提供了多种模板供选择,例如需要创建的项目以Python代码为主,则可以选择Python。则仓库创建后,Git将不再追踪文件夹下可能存在的日志文件、预编译文件(如.pyc)、Jupyter Notebook缓存等,这对于保持工作区和修改信息的清晰有很大帮助。当然,这里的模板可能无法包含所有需求,故也可以先创建仓库再添加。

          +

          为了合作的快捷、防止在提交时把过多无用文件提交到Git仓库中,强烈推荐在项目创建之初就建立.gitignore文件。后文将更加详细地介绍这一文件的用法。

          +

          截屏2021-12-22 上午11.24.54

          +

          远程↔︎本地

          +

          在Github上创建项目后,下一个关心的议题自然是,如何把本地的代码上传到远程。

          +

          截屏2021-12-22 上午11.32.43

          +

          相信不少人已经对上图中的按钮 "Add file" 跃跃欲试了,点击即可看到两个选项,即创建文件和上传文件。前者可以提供一个文本框输入你想要建立的文字,后者则提供了一个区域可以通过浏览器拖动文件手动上传或者打开资源管理器选择要上传的文件。但当文件较多、较大时,这两种方法便显得不够便捷。因此这里我们从 Git 命令行出发,介绍更常用的提交方式。

          +

实际上 Github 仅仅是世界最大的 Git 远程项目管理平台,Git 本身则不依赖于 Github 存在,因此我们在本地即可追踪文件的修改,进行版本控制。Git 在本地的安装非常简单,用户可以参照廖雪峰老师的教程进行。在安装的最后,用户需要设置自己的信息,即用户名和邮箱。为了使远程的用户信息和本地保持一致,通常与 Github 的用户名和注册邮箱保持一致。

          +
          git config --global user.name "Your Name"
          +git config --global user.email "email@example.com"
          +
          +

          注意git config命令的--global参数,用了这个参数,表示你这台机器上所有的Git仓库都会使用这个配置,当然也可以对某个仓库指定不同的用户名和Email地址,即去掉--global

          +

          在远程创建仓库后,我们便可以把远程的仓库拉取(Pull)到本地。点击绿色的Code按钮,即可看到如图的对话框,点击文本框右侧的按钮复制链接。

          +

          截屏2021-12-22 上午11.46.27

          +

          若在本地某个目录下,输入如下命令:

          +
          git clone https://github.com/chenggroup/Test.git
          +
          +

          即可将远程仓库拉取到本地,并创建一个Test目录用于存放文件。

          +

先别急着输入上面的命令。由于安全性原因,Github 官方从 2021 年 8 月起关闭了通过 HTTPS 协议以账号密码验证直接上传提交的功能,因此要想方便地从本地向远程上传提交,推荐使用 SSH 协议。为此我们需要进行额外配置,请参考廖雪峰老师的教程操作

          +

          配置完成后,即可用SSH面板里的链接来克隆(Clone)远程仓库到本地:

          +

          截屏2021-12-22 上午11.52.29

          +
          git clone git@github.com:chenggroup/Test.git
          +
          +

          注意 git clone 后的链接要修改为你复制的链接。

          +

          随后 cd Test 进入本地仓库,便可以对本地仓库进行编辑。这里我们用Vim创建一个文件,为演示操作方便,文件名假设是first_commit.txt

          +
          vim first_commit.txt
          +
          +

          在文件中进行一些编辑,例如输入:

          +
          test
          +2021
          +first commit
          +
          +

          如果尚不熟悉 Vim 的操作,请参考Linux快速基础入门

          +

          保存并退出,输入git status,可以看到已经监测到尚未提交的更改:

          +
          $ git status
          +On branch master
          +Your branch is up to date with 'origin/master'.
          +
          +Untracked files:
          +  (use "git add <file>..." to include in what will be committed)
          +    first_commit.txt
          +
          +nothing added to commit but untracked files present (use "git add" to track)
          +
          +

          注意这里提到,我们正处于master分支上,并与远程的origin/master分支保持一致。输入

          +
          git add .
          +
          +

          即可将当前目录下修改的文件添加到暂存区,可供提交。因此输入:

          +
          git commit -m "some description"
          +
          +

          即可生成一个提交,包含了上述文件的修改。这里some description可以参照自己的编辑进行修改。

          +

          但上述步骤仅仅是提交到本地的Git仓库,要想和远程同步,则需要:

          +
          git push origin
          +
          +

          将本地的更改提交到远程对应的分支,即上述的origin/master,输出如下:

          +
          $ git push origin
          +Enumerating objects: 4, done.
          +Counting objects: 100% (4/4), done.
          +Delta compression using up to 4 threads
          +Compressing objects: 100% (2/2), done.
          +Writing objects: 100% (3/3), 309 bytes | 309.00 KiB/s, done.
          +Total 3 (delta 0), reused 0 (delta 0)
          +To github.com:chenggroup/Test.git
          +   26c6605..d964d89  master -> master
          +
          +

          回到远程页面就会发现,我们已经提交成功。

          +

          截屏2021-12-22 下午1.12.12

          +

          点击进入,内容和本地一致:

          +

          截屏2021-12-22 下午1.14.44

          +

          从而我们可以把本地仓库的修改同步到远程。在git commit之前,实际上任何修改都可以添加到暂存区中,但这里需要注意可以被Track的文件是否是自己想要的,而不要无脑git add .甚至git add *,以免追踪到一些“不速之客”。

          +

          项目维护

          +

          分支

          +

          如果项目本身内容较多,且由多个人维护,将所有提交都放到同一条时间线上,就会形成非常长的修改,不利于每个人追踪自己的修改。并且有时会希望在重构的同时,保持主线完整性。这一需求可由Git轻松解决。

          +

          Git支持创建分支(Branch),即可以从主线分支出一个独立的Branch,并在该Branch修改,通过后再合并(Merge)到主线上。这样,便可以在不干涉主线的情况对分支进行维护和修改。并且每个人都可以创建自己的独立分支,从而避免各自的修改之间出现冲突,导致混乱。

          +

          切换分支的命令如下:

          +
          git checkout -b devel
          +
          +

          若本地之前不存在devel分支,则可由当前分支出发创建一个。这样的实现方式就如同从当前地铁站换乘到另一条地铁线路,再继续乘坐。之后的所有修改便体现在devel分支上。

          +

          当修改的代码测试完善,我们便可以把支线代码合并到主线上,即在换乘线路的地铁站修建一个换乘站,与主线换乘,并保留之前的所有修改。命令如下:

          +
          git checkout master
          +git merge devel
          +
          +

          关于分支管理,更详细的介绍,可以参考廖雪峰的教程

          +

          拉取请求(Pull Request)

          +

          类似于分支的实现,对公开在Github上的远程项目,可以由当前项目出发,建立项目的复刻(Fork)。复刻出的项目可以看作是主项目的分支,并保留了初始项目的相应分支。

          +

          截屏2021-12-22 下午2.31.01

          +

          Fork的项目仍是远程项目,因而可以Clone到本地作进一步修改,并可以与本地同步从而更新远程的Fork项目,而原始项目保持不变(并且很可能也没权限改变)

          +

          此时,要想向原始项目提交自己的修改,则需要创建拉取请求(Pull request,简写为PR)。点击页面上的"Contribute",点击"Open pull request"即可创建PR。

          +

          截屏2021-12-22 下午2.35.58

          +

          随后,便可以指定从Fork项目的某个分支提交PR到原始项目的某个分支。例如图中是从自己的devel到原始的master分支。在下方的文本框中可以输入自己的修改及对应的描述,便于原始项目的维护者审核、处理、合并PR。

          +

          提交PR

          +

页面向下翻,可以看到自己的历史提交,以及修改的文件等。注意在创建PR前,请务必查看本次PR相比原始文件修改了哪些,以免一些不希望上传的内容混进来,给审核人员带来困扰,不利于抓住真正核心的修改。

          +

          提交PR以后,审核人员可能会提出一些建议,甚至是修改意见。若提交到对应的复刻分支,则相应的修改也会同步到PR中,因此不需要额外提交修改请求。

          +

          创建议题(Issue)

          +

          当发现代码可能存在BUG或者自己有一些疑问需要维护者回答时,抑或是有一些想要开发者实现的新功能,用户也可以在原始项目中创建议题(Issue),用尽可能简洁的语言描述自己遇到的问题,或自己的需求。一些流行的项目可能会提供Issue模板,请按照模板提示填写,提高解决问题的效率,方便开发者对应修复BUG或者开发特性。

          +

          DeePMD-kit 项目中的Issue

          +

如果你看到相关的Issue,而恰好你的修改可以为之提供帮助,也可以提交PR,并在PR的描述中用#<ID>链接到对应的Issue,便于提问者同步你的修改。

          +

          .gitignore 文件

          +

          开发者常常需要在项目文件夹下调试,而论文撰稿人常常需要编译 LaTex 项目产生 PDF 供预览。这些过程,都可能产生一些日志、缓存、输出等文件,一些甚至是二进制文件。在默认情况下,Git 会监测项目目录下的所有文件,如果git add .,则会全部加入到暂存区。若在git commit时仍未发现问题,这些文件就会一并被仓库追踪。当上传到远程仓库,有权限查看这些项目的人便会在Github或者其他地方看到这些文件,血压可能会无法抑制地急速上升……

          +

          为了避免这种情况,便需要有办法拒绝追踪这些文件。Git提供的解决方案便是创建一个.gitignore文件,记录这些希望被忽略的文件或目录。其格式如下所示,即把希望忽略或者排除的文件加入其中。

          +
          # 排除特定文件
          +text.txt
          +
          +# 排除tmp下的所有文件
          +tmp/*
          +
          +# 排除所有.开头的隐藏文件
          +.*
          +
          +# 排除所有.class文件
          +*.class
          +
          +# 不排除.gitignore和App.class
          +!.gitignore
          +!App.class
          +
          +

          可以想像,如果所有规则都手动编写,对于维护者可能会有困扰。因此,Github上亦有维护一个.gitignore文件的仓库(github/gitignore: A collection of useful .gitignore templates),用户只需要根据自己的需求从中选取相应的忽略信息,加入到本地的.gitignore即可。注意,该仓库的根目录下放置的是一些常用语言环境,而一些编辑器或IDE同样会产生缓存文件,这些模板见于global下。实际上,从Github创建的仓库便是从这个仓库中拉取.gitignore的模板。

          +

          但是,很多意识到自己需要.gitignore的用户往往是经历了血压的上升,想要亡羊补牢的。即已经把诸如日志文件一类的文件提交到远程仓库中,甚至在clone时才发现问题。一个比较快速的解决方案便是,在建立.gitignore后,直接运行:

          +
          git rm -r --cached .
          +git add .
          +
          +

          相当于从头开始,直接将不希望继续track的文件标记为删除,从而在提交上彻底忽略这些文件的存在,但同时不删除原始文件。但这些文件的记录仍存在于远程。

          +

          另一种思路则是利用git update-index --assume-unchanged <file>命令,忽略掉该文件的更改,但仍保留了文件本身。总之,这两种方法都无法从根本上解决已经提交到远程的文件,因此还是推荐在git init之初就写好.gitignore,或利用 Github 自带的模板。

          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/wiki/teamwork/tutorial_rules/index.html b/wiki/teamwork/tutorial_rules/index.html new file mode 100644 index 00000000..124c4574 --- /dev/null +++ b/wiki/teamwork/tutorial_rules/index.html @@ -0,0 +1,2785 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 如何组织培训 - XMU Chenggroup Wiki + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          + + + + 跳转至 + + +
          +
          + +
          + + + + + + +
          + + +
          + +
          + + + + + + +
          +
          + + + +
          +
          +
          + + + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          +
          + + + +
          +
          + + + + + + + +

          如何组织培训

          +
            +
          1. +

一定要在培训和展示前把幻灯片和培训材料发送给学员

            +
          2. +
          3. +

            培训材料请遵循以下格式

            +
          4. +
          5. +

            主题

            +
          6. +
          7. +

            目标和此次培训的收益

            +

            --- 例:1.理解工作流 2.学习如何自动化工作流 3.学习如何通过airflow可视化工作流

            +
          8. +
          9. +

            提前帮参与者/学员准备

            +

            --- 所需的背景知识和提供必要的引导 - 链接,书籍,必读文章等

            +

            --- 其他需要在培训前做好的准备

            +

            --- 例:1.安装PyCharm, Jupiter, python3.9等 2.安装和验证所需的包(airflow)

            +
          10. +
          11. +

            培训内容的时间安排

            +

            --- 例:

            +
              +
            1. 介绍工作流(10分钟)
            2. +
3. 介绍 AiiDA 和 airflow(20分钟)
            4. +
            5. 练习工作流和可视化工作流(50分钟)
            6. +
7. 答疑(10分钟)
            8. +
            +
          12. +
          13. +

确保足够早地发送幻灯片和培训材料,留下充足的时间给学员完成准备任务。

            +
          14. +
          +

          Training/Presentaion Guideline

          +
            +
          1. +

            Always send slides and agenda BEFORE presentation and training

            +
          2. +
          3. +

            Follow the agenda format as below:

            +

            a. Topic

            +

            b. Objective and benefit of training

            +

—— e.g. 1. Understand workflow 2. Learn how to automate workflow 3. Learn how to visualize workflow via the package 'airflow'

            +

            c. Participant's preparation

            +

            —— State the desired background knowledge and provide induction — links, books, must-read papers etc.

            +

            —— State the preparation that the participants need to complete before attending the training

            +

            —— e.g. 1. Install IDE PyCharm, Jupiter, python3.9 etc. 2. Install and validate required packages(airflow)

            +

            d. Items with time slot

            +

            —— e.g.

            +
          4. +
          5. +

            Introduce workflow (10 minutes)

            +
          6. +
          7. +

Introduce AiiDA and airflow (20 minutes)

            +
          8. +
          9. +

            Practice workflow and visualize via 'airflow' (50 minutes)

            +
          10. +
          11. +

            Q&A (10 minutes)

            +
          12. +
          13. +

            Make sure you send slides and agenda early and leave plenty of time for the participants to complete the preparation tasks.

            +
          14. +
          + + + + + + + + + + + + + + + +

          评论

          + + + + + + +
          +
          + + + +
          + + + +
          + + + +
          +
          +
          +
          + + + + + + + + + + + + + + + + + + \ No newline at end of file

在本地电脑显示服务器图像 (X11 Forwarding)