diff --git a/dev/backstage/create-gitea-token.sh b/dev/backstage/setup/create-gitea-token.sh similarity index 100% rename from dev/backstage/create-gitea-token.sh rename to dev/backstage/setup/create-gitea-token.sh diff --git a/dev/backstage/restart.sh b/dev/backstage/setup/restart.sh similarity index 100% rename from dev/backstage/restart.sh rename to dev/backstage/setup/restart.sh diff --git a/dev/camunda/camunda.yaml b/dev/camunda/camunda.yaml index 86dafe0..75c6315 100755 --- a/dev/camunda/camunda.yaml +++ b/dev/camunda/camunda.yaml @@ -43,7 +43,7 @@ spec: entryPoints: - websecure routes: - - match: Host(`camunda-prod.allarddcs.nl`) + - match: Host(`camunda-dev.allarddcs.nl`) kind: Rule services: - name: camunda diff --git a/dev/cockroachdb/certs/ca.crt b/dev/cockroachdb/certs/ca.crt deleted file mode 100644 index 7419c81..0000000 --- a/dev/cockroachdb/certs/ca.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDJTCCAg2gAwIBAgIQa/0mCEqslZ2d107ceEr9ATANBgkqhkiG9w0BAQsFADAr -MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y -NTAxMjUyMDIzNDRaFw0zNTAyMDMyMDIzNDRaMCsxEjAQBgNVBAoTCUNvY2tyb2Fj -aDEVMBMGA1UEAxMMQ29ja3JvYWNoIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAvBJOTewyeYeWUncc7wx27bRCaDH7YawGyaltYypUzo93li+8K5Uw -VSYfy3mxNp47IQXebDPCQITct5pGq/EBTrWGJ/MLf8ZcCfPvvzylsqsesFFfS5y0 -sYof+JzyowDOJflWsQnJLIK5kD32fvupvc0dKY8q/4WN/Ra1kiUm6ZcFYWVKJx2s -2ZVWcDP5xh+obCgP3F4cTsLjo1mkoRPMSLw5w9M5x3AiDgi6zwkcw9aUVq0lBciA -lI4cAHC4Awc1AP3OazYV/E+cC6dtzS+55KRGQIYOp/pkgBKsTAd2ahuZTh8ZWXyS -p30X0luRUO9wBksGEt5ixx5QdtOd0jQWLQIDAQABo0UwQzAOBgNVHQ8BAf8EBAMC -AuQwEgYDVR0TAQH/BAgwBgEB/wIBATAdBgNVHQ4EFgQU5Olr9c4vu7OLVJrlGOtF -rdh5+qQwDQYJKoZIhvcNAQELBQADggEBALTZARd4BA0ke5O4a9G+1Om1P4L16fk9 -R2uICKW1MEGg/1zDXZS/6dX+2xJrLLp3xhFcpFge78zi0MVyBfnrl0j+Uk+eSPar -iubS9S/qN7LkMKcZM8l2hZnPQ0bu6WbaKcH9Bu2KNcWdowsCLb7vgIEXkNPlxoKM -Q+lOZHorpLZgQph1Se7nnjhuXuqxzhxv5NlPVVy/ZiuoJ1FUn5nbS3vIvpGGiGsO -2bGltS2ADsfBNmCsRfgj1HutHERpUG+cvMsa9Wf9o3wuohUOzguPxxaL/Hpbxwp+ -hnL13ksKb/bs45VHtYRQuZaUPoqTWvLRMIdMMxaLNMzE6Xyzc8h/dbA= ------END CERTIFICATE----- diff --git a/dev/cockroachdb/certs/client.root.crt b/dev/cockroachdb/certs/client.root.crt deleted file mode 100644 index eee6ecc..0000000 --- a/dev/cockroachdb/certs/client.root.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDIDCCAgigAwIBAgIQJwncfRDbHgMyuJKxK0dKCDANBgkqhkiG9w0BAQsFADAr -MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y -NTAxMjUyMDIzNTdaFw0zMDAxMzAyMDIzNTdaMCMxEjAQBgNVBAoTCUNvY2tyb2Fj -aDENMAsGA1UEAxMEcm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -ALzsZkbDiGNFg+jC16+eLzL5GvygvInkFljBgxJcrajRueq3KKfWlg1WTw6SqoiU -+c1uBiK8wiz17zkyo6K1lOabIlRutyAPZNnx7F+iBvhbMw8uzrlvWZKNCTWAJi4M -tLNDesSqmcCdEl+7ycJkGEmXyyDjGz+UtI6Bq5ax/MN9lc8CoKKAc6KzqiiYf0MR -6A2f5wwm8th8kT89HIt541LyElUr0JjttYOhrR0O82gF11Uf6OTYCxiySaHXTXpW -yYXXs6YsFaqm+Y3UZfnIk3jkwMPTYuQ3HoVe66YPB87JbPfMmiO4+NBGgqpSq2d9 -n+l87zGJumwUaFQcq2s/1yUCAwEAAaNIMEYwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud -JQQMMAoGCCsGAQUFBwMCMB8GA1UdIwQYMBaAFOTpa/XOL7uzi1Sa5RjrRa3Yefqk -MA0GCSqGSIb3DQEBCwUAA4IBAQAyygcCWS9hC2/HI59i5IwirXxO6NXUJLQIrooz -z187fhAdfVGioAT6K1cU+NrrJgoFc9Znle4USjAgiCttfOu8ZXXySpm8kpwzlPCa -m7tg76cpOHB9Gw1vt4DQdgjTjBDiIMjQIa8BRdIgvjC0VodFMe950cBuYpTrX27W -KdFpsqWfD423uWPyVMxO/8k1E0epuHnLxqNEX55+yPM24PxiHVxsm6YSeViIAxj0 -NXNXYSAoHQKob+8NysWT4QhrezdF8Cj6zbvlIrpJdmRiwcvbvBp4bnj6wg5OYAPM -pNqjII1A52ryOn5jVEfZvBb6s18ZIm9d/xGPugVsbJhBJy6S ------END CERTIFICATE----- diff --git a/dev/cockroachdb/certs/client.root.key b/dev/cockroachdb/certs/client.root.key 
deleted file mode 100644 index 70371f7..0000000 --- a/dev/cockroachdb/certs/client.root.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAvOxmRsOIY0WD6MLXr54vMvka/KC8ieQWWMGDElytqNG56rco -p9aWDVZPDpKqiJT5zW4GIrzCLPXvOTKjorWU5psiVG63IA9k2fHsX6IG+FszDy7O -uW9Zko0JNYAmLgy0s0N6xKqZwJ0SX7vJwmQYSZfLIOMbP5S0joGrlrH8w32VzwKg -ooBzorOqKJh/QxHoDZ/nDCby2HyRPz0ci3njUvISVSvQmO21g6GtHQ7zaAXXVR/o -5NgLGLJJoddNelbJhdezpiwVqqb5jdRl+ciTeOTAw9Ni5DcehV7rpg8Hzsls98ya -I7j40EaCqlKrZ32f6XzvMYm6bBRoVByraz/XJQIDAQABAoIBAAVHOYhKmDnlzEyp -fOssKTdsXEOonfvgQnuSVH4j1ro7uc0D9v/Rb/nJaoYGtPsB5oTFySgZS/eDm35m -msnF9vYGaYwgV79ujqvEJY16cmVn7uJCtYXaxY7hn9s9zFNHCZlkjj6GYatO+B9y -mK10rHUJ56PwlGdPWUgN+WRJbr1rbXJ0XhaNlR7d39XxrxFFI4MOvw2DNOvAOG6g -foIpA4ZeLhcGYIjsZxqrOZqVh1br4w5rWEvGqONi6LCrvwtMuNLAWExASkLJKIzw -vQ9jHpxYNqak0PHpsrHtUx50WsMt0ea1u/ioMKPNXs/Lkj18eGYpVI+S1wxDgKV+ -m6K6uZUCgYEA9UKYCV1KiKAINTtwbTKHSa/vn/U6JKOLQUvPD2qpbVRdgS2R1mQS -soqeDW1d+Y4tRk/tnlmpolkuuNDxulr2CTm6wbgeU6TnF7pq7ClIZK3hv2VGTT3B -uXxx+cQ+zjqygAidopjLMUH/3aO7Ldw6gcuCLrjN1xEVJiD4IGTwxtsCgYEAxTJD -Fl9m5g3bCQPfpSfclI0weNPHIoVQ63IcqRHH+e0BR03YZWbq8lMl+t81q6G/rsIH -jD1Pl5RW9EhgguXOoMXeKVpT34M+gcJ0PdEI6+WZ3ZjJ0kwwPcypsA93aZmZx883 -iksC2ZfIKqpCwguDKyvb5EcLNzrDSnMAl7NZOf8CgYEAoVqKg76ohnIidEMCmBSi -BMyGrYm8Eta1iuPA+beGd7MFQTMluxJjaqrfiJ3nMYNkLdnvzjnW7EQYBOcR4TRu -oWslfsUOzqCymF3AclZGllX/KtgKBE8Y4FsK8PM3Dp53SNxiONKk+2ccWkiZoHY+ -1513rB1Q7qkCbO9LzqQZ8/kCgYEAgFAYPzKMrh1N7SvMFpc9fJvycmy7IsdExC9Y -XtrnGMUTE+afbDvvnQZlrDwZnDh/laNDbglnBObNPd7qjcIjFZIq4RWZhdLMlXqG -UML33ydjW0HT8TcKHOxTbfBibyA3ZEB9j0sH67ZL1Rc8oS8Ehs7fIkboEWP3NzZl -qFBXOtkCgYEAz9L2J9rpXQgwbPCOCjuPvm+zvAnGXdNgrUsVd8Tk1wczc5FyaBxw -DMgHo1BxELPETb0hNxEdQ0DdR83MXp0PZA1IG1XKcAH8CXloELwN3jpM+/6PHQRz -vdvkLPv3wM1Qdj4g6FlnPvlJHAlPytnDrUbSWxA6xMVYQJKw8na2Cm8= ------END RSA PRIVATE KEY----- diff --git a/dev/cockroachdb/certs/node.crt b/dev/cockroachdb/certs/node.crt deleted file mode 100644 index c387b0c..0000000 --- a/dev/cockroachdb/certs/node.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID+jCCAuKgAwIBAgIQI/uQsaTfs97kfvVSTD400zANBgkqhkiG9w0BAQsFADAr -MRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENvY2tyb2FjaCBDQTAeFw0y -NTAxMjUyMDI0MTBaFw0zMDAxMzAyMDI0MTBaMCMxEjAQBgNVBAoTCUNvY2tyb2Fj -aDENMAsGA1UEAxMEbm9kZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AJ8eplN7Xp2XZYJqlp+BvOh6sN0CqVo7tCbuXSt1ZpeC0EzRTU4u1j7cGhExzYSj -VUGootjPZIjB6OQu6JHzheubWUzYMXBC72PjKYbbwoE69b98GsIP9aJ3++0j5dln -TUP/SgiVf90w3ltb6MdlWX9VMpqsmCj3b1CqNfGT+Xc/pbSCN1oT7m5XUsaGkaux -BKp9QeI6Zii8q+qyt/U1+qFCE1AVMoJe/KRM3O3j+3G+90t/IKGnJj3wtSs8+BzC -FV2ZBPJcLsmL0are9yOVU+xhc8drLdefxZQiNL8nb3MgqQ/uVSfDhraMlna+mpxo -lLDm1Zm4AKlztwwxvIV+dT8CAwEAAaOCASAwggEcMA4GA1UdDwEB/wQEAwIFoDAd -BgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHwYDVR0jBBgwFoAU5Olr9c4v -u7OLVJrlGOtFrdh5+qQwgckGA1UdEQSBwTCBvoIJbG9jYWxob3N0ghJjb2Nrcm9h -Y2hkYi1wdWJsaWOCGmNvY2tyb2FjaGRiLXB1YmxpYy5kZWZhdWx0gixjb2Nrcm9h -Y2hkYi1wdWJsaWMuZGVmYXVsdC5zdmMuY2x1c3Rlci5sb2NhbIINKi5jb2Nrcm9h -Y2hkYoIVKi5jb2Nrcm9hY2hkYi5kZWZhdWx0gicqLmNvY2tyb2FjaGRiLmRlZmF1 -bHQuc3ZjLmNsdXN0ZXIubG9jYWyHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAIth -4wIOZDDcuNDtsy3dxB2q/6miFaO0p2/iUyMci3b1nwlLTliKzWGgOCwNGGR4UXOM -zVQ1bu8I2w4zY5xF047xQDQR+ek4HyOayxLlua1fVCVq4jxv23vgJA4Gv0IhUbay -TfjnDDFhijy9URzBoVAwXAx2hGu1PlFmZ1bHjre13s1mTohO3nMTA+GsMGkLk8FB -M5wWDP8UKC9zmUXPSFLEscLWzjJ015Y/tqZUMFWB4bFsGKAxdkBR2PTWbnDETfrJ -7HymCOLBFinbMs8m+NPz1j+B8MGlwi0Eu5SWxiyWkt5FtczBdMcgnuVhZBWqqxko -E13Q6CHbMt+P3Ky3FMQ= ------END CERTIFICATE----- diff --git a/dev/cockroachdb/certs/node.key 
b/dev/cockroachdb/certs/node.key deleted file mode 100644 index c220abd..0000000 --- a/dev/cockroachdb/certs/node.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAnx6mU3tenZdlgmqWn4G86Hqw3QKpWju0Ju5dK3Vml4LQTNFN -Ti7WPtwaETHNhKNVQaii2M9kiMHo5C7okfOF65tZTNgxcELvY+MphtvCgTr1v3wa -wg/1onf77SPl2WdNQ/9KCJV/3TDeW1vox2VZf1UymqyYKPdvUKo18ZP5dz+ltII3 -WhPubldSxoaRq7EEqn1B4jpmKLyr6rK39TX6oUITUBUygl78pEzc7eP7cb73S38g -oacmPfC1Kzz4HMIVXZkE8lwuyYvRqt73I5VT7GFzx2st15/FlCI0vydvcyCpD+5V -J8OGtoyWdr6anGiUsObVmbgAqXO3DDG8hX51PwIDAQABAoIBAFvoOi3yDl58Ohew -NTwAlfq6Ezo09Vi3L4FlIM+fShitaF9WbY6BIyK/wxa3a3v3U6FPJHCSqgEL79cM -+SyEOpAx9Myb+0Jahyds6GmKubgnNBbcOiBpU3n6T7tThsmiD1D9PefjYi2CsoyW -c8foVF9l+Iq6slDHSraO+gWFcQxc/9CizRsInGqHA64anN6XvBZoVBLlu2Fowg4G -EducEOiGCekYLiOUDcLBegv57STIA/lTQ8pqFk7HcFYgg4NQhMFoS1E79zdlkZfq -j7X/DHMbt8zvRZIlWp1PrDYMysYVQVCT0PbaSd8+x9bUbDKkoMkgSj/NHsQXYn4a -muEhj+ECgYEAx8NZxZ9JU4NL5rNN2crfs/QPwxCgKp+BI41px9hqLOWKqDfMB7fI -EjOlLJveZ07sFF2Yf2gMkzCwdrmHc2g0Rj0Csqzss6Si3ppvD6EIwREnmziiJplR -mq6dQzgd5u1p9YcbIZhjzKFvRWy9JR4Kl/0A+h0zN8QupvxelRBslZkCgYEAy+ow -J9cTUqEeBL69BQU2CUTnc/MKCKGeTPRWqtKfODd7uglTaUgQ0DxDBoJxnS4ORcCN -9isT/UNJov8ufoZ1U8Kk+nBX++K5QFb46/TEomxeW+oabBg1+oLEPyqmd0H2p5er -JDsgsURUAcgKEV6ac11rzl2rwwfhgo9WVTB2+JcCgYEAwEeu32QFBpe4tWUdqGd4 -kBR6H36fTKeffAMgMLaE7JY9stGSWFN0BuEjOh8GIlZ7MtcsdGZIxFz3XjASyukg -eAM915JPfFMaWj44bMjKTlwezW/j1Fd7jvJIeW1IiwE3HphfayTt2wgAvMh//3w9 -IjLrf9QfeqwhY6ZDvCPFAPECgYBHUHfW9xkC5OYisrJYdyIWy8pGetEfg6ZhM3K7 -+z1D4+OZhHlvcIywxuKJ/ETPu7OyIU2Esjwjbszp/GS+SzftOz2HeJLMvNYc8k3L -96ZtR4kYjB8BftYh7mnDzZ66Ro+EvT5VRXiBhmv604Lx4CwT/LAfVBMl+jOb/ZUr -5e81sQKBgEmLXN7NBs/3TXukSBwxvcixZWmgFVJIfrUhXN34p1T0BjaFKaTKREDZ -ulpnWImY9p/Q5ey1dpNlC3b9c/ZNseBXwOfmSP6TkaWpWBWNgwVOWMa6r6gPDVgZ -TlEn2zeJH+4YjrMZga0Aoeg7HcJondSV0s8jQqBhRNVZFSMjF+tA ------END RSA PRIVATE KEY----- diff --git a/dev/cockroachdb/my-safe-directory/ca.key b/dev/cockroachdb/my-safe-directory/ca.key deleted file mode 100644 index 2ef8b3d..0000000 --- a/dev/cockroachdb/my-safe-directory/ca.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAvBJOTewyeYeWUncc7wx27bRCaDH7YawGyaltYypUzo93li+8 -K5UwVSYfy3mxNp47IQXebDPCQITct5pGq/EBTrWGJ/MLf8ZcCfPvvzylsqsesFFf -S5y0sYof+JzyowDOJflWsQnJLIK5kD32fvupvc0dKY8q/4WN/Ra1kiUm6ZcFYWVK -Jx2s2ZVWcDP5xh+obCgP3F4cTsLjo1mkoRPMSLw5w9M5x3AiDgi6zwkcw9aUVq0l -BciAlI4cAHC4Awc1AP3OazYV/E+cC6dtzS+55KRGQIYOp/pkgBKsTAd2ahuZTh8Z -WXySp30X0luRUO9wBksGEt5ixx5QdtOd0jQWLQIDAQABAoIBAQCwnCQqap7vnxLb -t/1UwojAKeGehSlCjFAHefI+CFeBbhpnz8XNy5iKrXV4F3wCBU8TcLZxN524Bsxa -Iicxee23YyFrTIJE6BowQoGmPSaBBM6Z1qA9mhfZDRN+3KvBxJTR9jaho8Xl5ZCq -UnWyw1Of6Aj1qPtA3sL6oyO47OiAu3Ph2+jlXBTlpmNQlz3BjansHpV0l9IsYY0H -dhAieMY4piYzB6LIFQUBH8T7gxnToPvgulSWaKV1mG7Xw/lSoj1YpDXXWYWMfiDB -Xl55Pyrp44J8+cdATGFIgk+ln5aeDQNtVV3wLIHsSrZaZ6ojFFpBY3qj4LvYmRjS -0Sj79ErFAoGBAN/riyjNfgSRs2wqsMPcVwetKHmP7we5wA8WAWMj1glDfjhNfHo1 -J6gEYASc2ai44aK5P6XIGeAt1NmAAqaeJKKk1/fMUKbgCLLeG+Ds24Q9FTIigUpW -kMctLTHJ9mkr2xSNfBUrjwvsvnZKYox6tBcYPDsnpgj/lkEJ7S32S5MjAoGBANcD -/ElaTUHFOr/q6YALQUgw97xBSff1WLa5ESByUXrirpNyKchnU6hY1Ndo9snd4QZs -RZIsPEPBbR1hN2R/gTbUn2hVGPxLZ0wUs/IbsYPXAsunRD87g2gI0W++OR3sz5j4 -p/6NodgsRcOmAXG1pZwJAFAJLTqUkTF0yXg8dS5vAoGACK6MRbe59BlmGIKLOfzY -Dv8iu5veC7GjBbK3uQ1RpihMw4gVlHNtJzGMO4GNWuJYNUPzeM0KW8vLHee9spId -H4U+rmfolJ/JFo5QDGeCl1z67meyFZzHnkFdKDoJaMh/hQt7TSLUOAUk2VdG/OVh -CCgzZaPC50RpofntjUOoaHsCgYBORvoq7kAgCKCZy/jUD8TldkZKd+5o4h4472kn -ydaWCT6LGU3S0qMnL6fVADaQSUGp5/LwA0CxXhLOVl0nLjApeQDLp+dfukfR79uO 
-8bwPhlBTOgLjjlQJpOQybSs4FMWDKEtopcFdBMklMCNodTvkcXZ2rNCVeg7d1Wmf -Z0s16wKBgA8KPg/7fEdmXItkbcVd2tyngCOo1NNXyGmZ7SnrkoXilyiKzZwmeUZl -PN27ciS/VpKTb278tNdQudmlBs28/McKddz9SnAKvTP/WbUXAh3gpeDTX9KVD7++ -Z7wCBrQcb2z5WG2ojUwbYYZGjuouYJT2WGElDoOxRT4eCSbgj4kB ------END RSA PRIVATE KEY----- diff --git a/homeserver.db.snapshot b/homeserver.db.snapshot new file mode 100644 index 0000000..e69de29 diff --git a/lp/dnsutils/dnsutils.yaml b/lp/dnsutils/dnsutils.yaml new file mode 100755 index 0000000..7f423d2 --- /dev/null +++ b/lp/dnsutils/dnsutils.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: dnsutils +--- +apiVersion: v1 +kind: Pod +metadata: + name: dnsutils + namespace: dnsutils +spec: + containers: + - name: dnsutils + image: tutum/dnsutils:latest + command: + - sleep + - "infinity" + imagePullPolicy: IfNotPresent + restartPolicy: Always diff --git a/lp/hookshot/hookshot/.helmignore b/lp/hookshot/hookshot/.helmignore new file mode 100644 index 0000000..0cfe4ae --- /dev/null +++ b/lp/hookshot/hookshot/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +*.tgz diff --git a/lp/hookshot/hookshot/.yamllint b/lp/hookshot/hookshot/.yamllint new file mode 100644 index 0000000..9035535 --- /dev/null +++ b/lp/hookshot/hookshot/.yamllint @@ -0,0 +1,7 @@ +--- +extends: default +rules: + line-length: + level: warning + max: 120 + braces: disable diff --git a/lp/hookshot/hookshot/Chart.yaml b/lp/hookshot/hookshot/Chart.yaml new file mode 100644 index 0000000..43c7f4d --- /dev/null +++ b/lp/hookshot/hookshot/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +appVersion: 6.0.2 +description: Deploy a Matrix Hookshot instance to Kubernetes +name: hookshot +type: application +version: 0.1.16 diff --git a/lp/hookshot/hookshot/README.md b/lp/hookshot/hookshot/README.md new file mode 100644 index 0000000..53445c0 --- /dev/null +++ b/lp/hookshot/hookshot/README.md @@ -0,0 +1,122 @@ +# hookshot + +![Version: 0.1.13](https://img.shields.io/badge/Version-0.1.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.2.0](https://img.shields.io/badge/AppVersion-3.2.0-informational?style=flat-square) +Deploy a Matrix Hookshot instance to Kubernetes + +Status: Beta + +## About + +This chart creates a basic Hookshot deployment inside Kubernetes. + +# Installation + +You'll need to have the Helm repository added to your local environment: + +``` bash +helm repo add hookshot https://matrix-org.github.io/matrix-hookshot +helm repo update +``` + +Which should allow you to see the Hookshot chart in the repo: + +``` bash +helm search repo hookshot + +NAME CHART VERSION APP VERSION DESCRIPTION +matrix-org/hookshot 0.1.13 1.16.0 A Helm chart for Kubernetes +``` + +Before you can install, however, you'll need to make sure to configure Hookshot properly. + +# Configuration + +You'll need to create a `values.yaml` for your deployment of this chart. You can use the [included defaults](./values.yaml) as a starting point. + +## Helm Values + +To configure Hookshot-specific parameters, the value `.Values.hookshot.config` accepts an arbitrary YAML map as configuration. 
This gets templated into the container by [templates/configmap.yaml](./templates/configmap.yaml) - thus anything you can set in the [Example Configuration](https://matrix-org.github.io/matrix-hookshot/latest/setup/sample-configuration.html) can be set here.
+
+## Existing configuration
+
+If you have an existing configuration file for Hookshot, you can create a configmap like so:
+
+``` bash
+kubectl create --namespace "your hookshot namespace" configmap hookshot-custom-config --from-file=config.yml --from-file=registration.yml --from-file=passkey.pem
+```
+
+Note that the filenames must remain as listed, to match the templating done in [templates/configmap.yaml](./templates/configmap.yaml).
+
+Once created, you can set `.Values.hookshot.existingConfigMap` to `hookshot-custom-config` (or whichever name you chose for your ConfigMap) and set `.Values.hookshot.config` to `{}` or null to prevent confusion with the default parameters; a complete sketch follows the External access section below.
+
+# Installation
+
+Once you have your `values.yaml` file ready you can install the chart like this:
+
+``` bash
+helm install hookshot --create-namespace --namespace hookshot matrix-org/hookshot -f values.yaml
+```
+
+And upgrades can be done via:
+
+``` bash
+helm upgrade hookshot --namespace hookshot matrix-org/hookshot -f values.yaml
+```
+
+# External access
+
+You'll need to configure your Ingress connectivity according to your environment. This chart should be compatible with most Ingress controllers and has been tested successfully with [ingress-nginx](https://github.com/kubernetes/ingress-nginx) and EKS ALB. You should also ensure that you have a way to provision certificates, e.g. [cert-manager](https://cert-manager.io/), as HTTPS is required for appservice traffic.
+
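+For reference, here is a minimal `values.yaml` sketch that ties the above together: it points the chart at the ConfigMap created under "Existing configuration" and exposes the webhook listener through an Ingress. The hostname, ingress class and TLS secret name are placeholders to adapt to your environment:
+
+``` yaml
+hookshot:
+  # ConfigMap created with the kubectl command above
+  existingConfigMap: hookshot-custom-config
+  # Empty inline config so only the ConfigMap is used
+  config: {}
+ingress:
+  webhook:
+    enabled: true
+    className: nginx                 # placeholder ingress class
+    hosts:
+      - host: hookshot.example.com   # placeholder hostname
+        paths:
+          - path: /
+            pathType: Prefix
+            port: 9000               # matches service.webhook.port
+    tls:
+      - secretName: hookshot-tls     # e.g. issued by cert-manager
+        hosts:
+          - hookshot.example.com
+```
+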
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | Affinity settings for deployment |
+| autoscaling.enabled | bool | `false` | |
+| fullnameOverride | string | `""` | Full name override for helm chart |
+| hookshot.config | object | `{"bridge":{"bindAddress":"0.0.0.0","domain":"example.com","port":9002,"url":"https://example.com"},"generic":{"allowJsTransformationFunctions":true,"enableHttpGet":false,"enabled":true,"urlPrefix":"https://example.com/","userIdPrefix":"_webhooks_","waitForComplete":false},"listeners":[{"bindAddress":"0.0.0.0","port":9000,"resources":["webhooks","widgets"]},{"bindAddress":"0.0.0.0","port":9001,"resources":["metrics"]}],"logging":{"colorize":false,"json":false,"level":"info","timestampFormat":"HH:mm:ss:SSS"},"metrics":{"enabled":true},"passFile":"/data/passkey.pem","widgets":{"addToAdminRooms":false,"branding":{"widgetTitle":"Hookshot Configuration"},"publicUrl":"https://webhook-hookshot.example.com/widgetapi/v1/static","roomSetupWidget":{"addOnInvite":false},"setRoomName":false}}` | Raw Hookshot configuration. Gets templated into a YAML file and then loaded unless an existingConfigMap is specified. |
+| hookshot.existingConfigMap | string | `nil` | Name of existing ConfigMap with valid Hookshot configuration |
+| hookshot.passkey | string | `""` | |
+| hookshot.registration.as_token | string | `""` | |
+| hookshot.registration.hs_token | string | `""` | |
+| hookshot.registration.id | string | `"matrix-hookshot"` | |
+| hookshot.registration.namespaces.rooms | list | `[]` | |
+| hookshot.registration.namespaces.users | list | `[]` | |
+| hookshot.registration.rate_limited | bool | `false` | |
+| hookshot.registration.sender_localpart | string | `"hookshot"` | |
+| hookshot.registration.url | string | `"http://example.com"` | |
+| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Hookshot image |
+| image.repository | string | `"halfshot/matrix-hookshot"` | Repository to pull hookshot image from |
+| image.tag | string | `nil` | Image tag to pull. Defaults to chart's appVersion value as set in Chart.yaml |
+| imagePullSecrets | list | `[]` | List of names of k8s secrets to be used as ImagePullSecrets for the pod |
+| ingress.appservice.annotations | object | `{}` | Annotations for appservice ingress |
+| ingress.appservice.className | string | `""` | Ingress class name for appservice ingress |
+| ingress.appservice.enabled | bool | `false` | Enable ingress for appservice |
+| ingress.appservice.hosts | list | `[]` | Host configuration for appservice ingress |
+| ingress.appservice.tls | list | `[]` | TLS configuration for appservice ingress |
+| ingress.webhook.annotations | object | `{}` | Annotations for webhook ingress |
+| ingress.webhook.className | string | `""` | Ingress class name for webhook ingress |
+| ingress.webhook.enabled | bool | `false` | Enable ingress for webhook |
+| ingress.webhook.hosts | list | `[]` | Host configuration for webhook ingress |
+| ingress.webhook.tls | list | `[]` | TLS configuration for webhook ingress |
+| nameOverride | string | `""` | Name override for helm chart |
+| nodeSelector | object | `{}` | Node selector parameters |
+| podAnnotations | object | `{}` | Extra annotations for Hookshot pod |
+| podSecurityContext | object | `{}` | Pod security context settings |
+| replicaCount | int | `1` | Number of replicas to deploy. Consequences of using multiple Hookshot replicas currently unknown. |
+| resources | object | `{}` | Pod resource requests / limits |
+| securityContext | object | `{}` | Security context settings |
+| service.annotations | object | `{}` | Extra annotations for service |
+| service.appservice.port | int | `9002` | Appservice port as configured in container |
+| service.labels | object | `{}` | Extra labels for service |
+| service.metrics.port | int | `9001` | Metrics port as configured in container |
+| service.port | int | `80` | Port for Hookshot service |
+| service.type | string | `"ClusterIP"` | Service type for Hookshot service |
+| service.webhook.port | int | `9000` | Webhook port as configured in container |
+| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
+| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
+| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
+| tolerations | list | `[]` | Tolerations for deployment |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
\ No newline at end of file
diff --git a/lp/hookshot/hookshot/README.md.gotmpl b/lp/hookshot/hookshot/README.md.gotmpl
new file mode 100644
index 0000000..5640e57
--- /dev/null
+++ b/lp/hookshot/hookshot/README.md.gotmpl
@@ -0,0 +1,74 @@
+{{ template "chart.header" . }}
+{{ template "chart.deprecationWarning" . }}
+{{ template "chart.badgesSection" . }}
+{{ template "chart.description" . }}
+
+Status: Beta
+
+## About
+
+This chart creates a basic Hookshot deployment inside Kubernetes.
+
+# Installation
+
+You'll need to have the Helm repository added to your local environment:
+
+``` bash
+helm repo add hookshot https://matrix-org.github.io/matrix-hookshot
+helm repo update
+```
+
+Which should allow you to see the Hookshot chart in the repo:
+
+``` bash
+helm search repo hookshot
+
+NAME CHART VERSION APP VERSION DESCRIPTION
+matrix-org/hookshot 0.1.13 1.16.0 A Helm chart for Kubernetes
+```
+
+Before you can install, however, you'll need to make sure to configure Hookshot properly.
+
+# Configuration
+
+You'll need to create a `values.yaml` for your deployment of this chart. You can use the [included defaults](./values.yaml) as a starting point.
+
+## Helm Values
+
+To configure Hookshot-specific parameters, the value `.Values.hookshot.config` accepts an arbitrary YAML map as configuration. This gets templated into the container by [templates/configmap.yaml](./templates/configmap.yaml) - thus anything you can set in the [Example Configuration](https://matrix-org.github.io/matrix-hookshot/latest/setup/sample-configuration.html) can be set here.
+
+## Existing configuration
+
+If you have an existing configuration file for Hookshot, you can create a configmap like so:
+
+``` bash
+kubectl create --namespace "your hookshot namespace" configmap hookshot-custom-config --from-file=config.yml --from-file=registration.yml --from-file=passkey.pem
+```
+
+Note that the filenames must remain as listed, to match the templating done in [templates/configmap.yaml](./templates/configmap.yaml).
+
+Once created, you can set `.Values.hookshot.existingConfigMap` to `hookshot-custom-config` (or whichever name you chose for your ConfigMap) and set `.Values.hookshot.config` to `{}` or null to prevent confusion with the default parameters.
+
+# Installation
+
+Once you have your `values.yaml` file ready you can install the chart like this:
+
+``` bash
+helm install hookshot --create-namespace --namespace hookshot matrix-org/hookshot -f values.yaml
+```
+
+And upgrades can be done via:
+
+``` bash
+helm upgrade hookshot --namespace hookshot matrix-org/hookshot -f values.yaml
+```
+
+# External access
+
+You'll need to configure your Ingress connectivity according to your environment. This chart should be compatible with most Ingress controllers and has been tested successfully with [ingress-nginx](https://github.com/kubernetes/ingress-nginx) and EKS ALB. You should also ensure that you have a way to provision certificates, e.g. [cert-manager](https://cert-manager.io/), as HTTPS is required for appservice traffic.
+
+{{ template "chart.maintainersSection" . }}
+{{ template "chart.sourcesSection" . }}
+{{ template "chart.requirementsSection" . }}
+{{ template "chart.valuesSection" .
}} +{{ template "helm-docs.versionFooter" . }} \ No newline at end of file diff --git a/lp/hookshot/hookshot/templates/NOTES.txt b/lp/hookshot/hookshot/templates/NOTES.txt new file mode 100644 index 0000000..438e4d0 --- /dev/null +++ b/lp/hookshot/hookshot/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hookshot.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "hookshot.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hookshot.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "hookshot.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/lp/hookshot/hookshot/templates/_helpers.tpl b/lp/hookshot/hookshot/templates/_helpers.tpl new file mode 100644 index 0000000..6c5b3bb --- /dev/null +++ b/lp/hookshot/hookshot/templates/_helpers.tpl @@ -0,0 +1,85 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "hookshot.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "hookshot.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Helper for configmap name +*/}} +{{- define "hookshot.configMapName" -}} +{{- if .Values.hookshot.existingConfigMap }} +{{- printf "%s" .Values.hookshot.existingConfigMap -}} +{{- else }} +{{- printf "%s-config" (include "hookshot.fullname" .) | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "hookshot.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "hookshot.labels" -}} +helm.sh/chart: {{ include "hookshot.chart" . }} +{{ include "hookshot.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "hookshot.selectorLabels" -}} +app.kubernetes.io/name: {{ include "hookshot.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "hookshot.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "hookshot.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "hookshot.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} diff --git a/lp/hookshot/hookshot/templates/_pod.tpl b/lp/hookshot/hookshot/templates/_pod.tpl new file mode 100644 index 0000000..09879a1 --- /dev/null +++ b/lp/hookshot/hookshot/templates/_pod.tpl @@ -0,0 +1,160 @@ +{{- define "hookshot.pod" -}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +serviceAccountName: {{ template "hookshot.serviceAccountName" . }} +automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} +{{- if .Values.securityContext }} +securityContext: +{{ toYaml .Values.securityContext | indent 2 }} +{{- end }} +{{- if .Values.hostAliases }} +hostAliases: +{{ toYaml .Values.hostAliases | indent 2 }} +{{- end }} +{{- if .Values.priorityClassName }} +priorityClassName: {{ .Values.priorityClassName }} +{{- end }} +initContainers: + +{{- if .Values.image.pullSecrets }} +imagePullSecrets: +{{- $root := . }} +{{- range .Values.image.pullSecrets }} + - name: {{ tpl . $root }} +{{- end}} +{{- end }} +containers: + - name: {{ .Chart.Name }} + {{- if .Values.image.sha }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . }} + {{- end }} + {{- end}} +{{- if .Values.containerSecurityContext }} + securityContext: +{{- toYaml .Values.containerSecurityContext | nindent 6 }} +{{- end }} + volumeMounts: +{{- if or (and (not .Values.hookshot.existingConfigMap) (.Values.hookshot.config)) (.Values.hookshot.existingConfigMap) }} + - name: config + mountPath: "/data" +{{- end }} + ports: + - name: webhook + containerPort: 9000 + protocol: TCP + - name: metrics + containerPort: 9001 + protocol: TCP + - name: appservice + containerPort: 9002 + protocol: TCP + env: + + envFrom: + {{- if .Values.envFromSecret }} + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + - secretRef: + name: {{ template "hookshot.fullname" . 
}}-env + {{- end }} + {{- range .Values.envFromSecrets }} + - secretRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- range .Values.envFromConfigMaps }} + - configMapRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 6 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 6 }} +{{- if .Values.lifecycleHooks }} + lifecycle: {{ tpl (.Values.lifecycleHooks | toYaml) . | nindent 6 }} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 6 }} +{{- with .Values.extraContainers }} +{{ tpl . $ | indent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: +{{ toYaml . | indent 2 }} +{{- end }} +{{- $root := . }} +{{- with .Values.affinity }} +affinity: +{{ tpl (toYaml .) $root | indent 2 }} +{{- end }} +{{- with .Values.topologySpreadConstraints }} +topologySpreadConstraints: +{{ toYaml . | indent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: +{{ toYaml . | indent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ template "hookshot.configMapName" . }} +{{- $root := . }} +{{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + configMap: + name: {{ tpl .configMap $root }} + {{- if .items }} + items: {{ toYaml .items | nindent 6 }} + {{- end }} +{{- end }} + +{{- range .Values.extraSecretMounts }} +{{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} + {{- if .items }} + items: {{ toYaml .items | nindent 6 }} + {{- end }} +{{- else if .projected }} + - name: {{ .name }} + projected: {{- toYaml .projected | nindent 6 }} +{{- else if .csi }} + - name: {{ .name }} + csi: {{- toYaml .csi | nindent 6 }} +{{- end }} +{{- end }} +{{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + {{- if .existingClaim }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + path: {{ .hostPath }} + {{- else }} + emptyDir: {} + {{- end }} +{{- end }} +{{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} +{{- end -}} +{{- if .Values.extraContainerVolumes }} +{{ tpl (toYaml .Values.extraContainerVolumes) . | indent 2 }} +{{- end }} +{{- end }} diff --git a/lp/hookshot/hookshot/templates/configmap.yaml b/lp/hookshot/hookshot/templates/configmap.yaml new file mode 100644 index 0000000..139ce15 --- /dev/null +++ b/lp/hookshot/hookshot/templates/configmap.yaml @@ -0,0 +1,21 @@ +--- +{{- if not .Values.hookshot.existingConfigMap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "hookshot.configMapName" . }} + namespace: {{ template "hookshot.namespace" . }} + labels: + {{- include "hookshot.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +data: + config.yml: | +{{ toYaml .Values.hookshot.config | indent 4 }} + registration.yml: | +{{ toYaml .Values.hookshot.registration | indent 4 }} + passkey.pem: | +{{ .Values.hookshot.passkey | indent 4 }} +{{- end }} diff --git a/lp/hookshot/hookshot/templates/deployment.yaml b/lp/hookshot/hookshot/templates/deployment.yaml new file mode 100644 index 0000000..cf14766 --- /dev/null +++ b/lp/hookshot/hookshot/templates/deployment.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "hookshot.fullname" . }} + namespace: {{ template "hookshot.namespace" . }} + labels: + {{- include "hookshot.labels" . 
| nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "hookshot.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "hookshot.selectorLabels" . | nindent 8 }} + spec: + {{- include "hookshot.pod" . | nindent 6 }} \ No newline at end of file diff --git a/lp/hookshot/hookshot/templates/hpa.yaml b/lp/hookshot/hookshot/templates/hpa.yaml new file mode 100644 index 0000000..011e030 --- /dev/null +++ b/lp/hookshot/hookshot/templates/hpa.yaml @@ -0,0 +1,30 @@ +--- +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "hookshot.fullname" . }} + namespace: {{ template "hookshot.namespace" . }} + labels: + {{- include "hookshot.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "hookshot.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/lp/hookshot/hookshot/templates/ingress-appservice.yaml b/lp/hookshot/hookshot/templates/ingress-appservice.yaml new file mode 100644 index 0000000..8dc1efb --- /dev/null +++ b/lp/hookshot/hookshot/templates/ingress-appservice.yaml @@ -0,0 +1,63 @@ +--- +{{- if .Values.ingress.appservice.enabled -}} +{{- $fullName := include "hookshot.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.appservice.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.appservice.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.appservice.annotations "kubernetes.io/ingress.class" .Values.ingress.appservice.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-appservice + namespace: {{ template "hookshot.namespace" . }} + labels: + {{- include "hookshot.labels" . | nindent 4 }} + {{- with .Values.ingress.appservice.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.appservice.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.appservice.className }} + {{- end }} + {{- if .Values.ingress.appservice.tls }} + tls: + {{- range .Values.ingress.appservice.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.appservice.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ .port }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ .port }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/lp/hookshot/hookshot/templates/ingress.yaml b/lp/hookshot/hookshot/templates/ingress.yaml new file mode 100644 index 0000000..22fe1bf --- /dev/null +++ b/lp/hookshot/hookshot/templates/ingress.yaml @@ -0,0 +1,63 @@ +--- +{{- if .Values.ingress.webhook.enabled -}} +{{- $fullName := include "hookshot.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.webhook.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.webhook.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.webhook.annotations "kubernetes.io/ingress.class" .Values.ingress.webhook.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ template "hookshot.namespace" . }} + labels: + {{- include "hookshot.labels" . | nindent 4 }} + {{- with .Values.ingress.webhook.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.webhook.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.webhook.className }} + {{- end }} + {{- if .Values.ingress.webhook.tls }} + tls: + {{- range .Values.ingress.webhook.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.webhook.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ .port }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ .port }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/lp/hookshot/hookshot/templates/service.yaml b/lp/hookshot/hookshot/templates/service.yaml new file mode 100644 index 0000000..7c4bcc7 --- /dev/null +++ b/lp/hookshot/hookshot/templates/service.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "hookshot.fullname" . }} + namespace: {{ template "hookshot.namespace" . }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + {{- include "hookshot.labels" . | nindent 4 }} +{{- with .Values.service.labels }} +{{ toYaml . 
| indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.webhook.port }} + targetPort: webhook + protocol: TCP + name: webhook + - port: {{ .Values.service.metrics.port }} + targetPort: metrics + protocol: TCP + name: metrics + - port: {{ .Values.service.appservice.port }} + targetPort: appservice + protocol: TCP + name: appservice + selector: + {{- include "hookshot.selectorLabels" . | nindent 4 }} diff --git a/lp/hookshot/hookshot/templates/serviceaccount.yaml b/lp/hookshot/hookshot/templates/serviceaccount.yaml new file mode 100644 index 0000000..8f732d4 --- /dev/null +++ b/lp/hookshot/hookshot/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "hookshot.serviceAccountName" . }} + namespace: {{ template "hookshot.namespace" . }} + labels: + {{- include "hookshot.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/lp/hookshot/hookshot/templates/tests/test-connection.yaml b/lp/hookshot/hookshot/templates/tests/test-connection.yaml new file mode 100644 index 0000000..7e4d49f --- /dev/null +++ b/lp/hookshot/hookshot/templates/tests/test-connection.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "hookshot.fullname" . }}-test-connection" + labels: + {{- include "hookshot.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "hookshot.fullname" . }}:{{ .Values.service.webhook.port }}'] + restartPolicy: Never diff --git a/lp/hookshot/hookshot/values.yaml b/lp/hookshot/hookshot/values.yaml new file mode 100644 index 0000000..5e1953e --- /dev/null +++ b/lp/hookshot/hookshot/values.yaml @@ -0,0 +1,454 @@ +--- +# Note: This chart is released using the config.sample.yml file +# +# -- Number of replicas to deploy. Consequences of using multiple Hookshot replicas currently unknown. +replicaCount: 1 +image: + # -- Repository to pull hookshot image from + repository: halfshot/matrix-hookshot + # -- Pull policy for Hookshot image + pullPolicy: IfNotPresent + # -- Image tag to pull. Defaults to chart's appVersion value as set in Chart.yaml + tag: +# -- List of names of k8s secrets to be used as ImagePullSecrets for the pod +imagePullSecrets: [] +# -- Name override for helm chart +nameOverride: "" +# -- Full name override for helm chart +fullnameOverride: "" +serviceAccount: + # -- Specifies whether a service account should be created + create: true + # -- Annotations to add to the service account + annotations: {} + # -- The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template + name: "" +# -- Extra annotations for Hookshot pod +podAnnotations: {} +# -- Pod security context settings +podSecurityContext: {} +# fsGroup: 2000 + +# -- Security context settings +securityContext: {} +# capabilities: +# drop: +# - ALL +# readOnlyRootFilesystem: true +# runAsNonRoot: true +# runAsUser: 1000 + +service: + # -- Service type for Hookshot service + type: ClusterIP + # -- Port for Hookshot service + port: 80 + # -- Extra annotations for service + annotations: {} + # -- Extra labels for service + labels: {} + webhook: + # -- Webhook port as configured in container + port: 9000 + metrics: + # -- Metrics port as configured in container + port: 9001 + appservice: + # -- Appservice port as configured in container + port: 9002 +ingress: + webhook: + # -- Enable ingress for webhook + enabled: false + # -- Ingress class name for webhook ingress + className: "" + # -- Annotations for webhook ingress + annotations: {} + # -- Host configuration for webhook ingress + hosts: [] + # -- TLS configuration for webhook ingress + tls: [] + appservice: + # -- Enable ingress for appservice + enabled: false + # -- Ingress class name for appservice ingress + className: "" + # -- Annotations for appservice ingress + annotations: {} + # -- Host configuration for appservice ingress + hosts: [] + # -- TLS configuration for appservice ingress + tls: [] +# -- Pod resource requests / limits +resources: {} +# We usually recommend not to specify default resources and to leave this as a conscious +# choice for the user. This also increases chances charts run on environments with little +# resources, such as Minikube. If you do want to specify resources, uncomment the following +# lines, adjust them as necessary, and remove the curly braces after 'resources:'. +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +autoscaling: + enabled: false +# -- Node selector parameters +nodeSelector: {} +# -- Tolerations for deployment +tolerations: [] +# -- Affinity settings for deployment +affinity: {} +hookshot: + # -- Name of existing ConfigMap with valid Hookshot configuration + existingConfigMap: + # -- Raw Hookshot configuration. Gets templated into a YAML file and then loaded unless an existingConfigMap is specified. + config: + # This is an example configuration file + bridge: + # Basic homeserver configuration + domain: example.com + url: http://localhost:8008 + mediaUrl: https://example.com + port: 9993 + bindAddress: 127.0.0.1 + logging: + # Logging settings. You can have a severity debug,info,warn,error + level: info + colorize: true + json: false + timestampFormat: HH:mm:ss:SSS + passFile: ./passkey.pem + # A passkey used to encrypt tokens stored inside the bridge. + # Run openssl genpkey -out passkey.pem -outform PEM -algorithm RSA -pkeyopt rsa_keygen_bits:4096 to generate + listeners: + # HTTP Listener configuration. + # Bind resource endpoints to ports and addresses. + # 'port' must be specified. Each listener must listen on a unique port. + # 'bindAddress' will default to '127.0.0.1' if not specified, which may not be suited to Docker environments. 
+ # 'resources' may be any of webhooks, widgets, metrics, provisioning + - port: 9000 + bindAddress: 0.0.0.0 + resources: + - webhooks + - port: 9001 + bindAddress: 127.0.0.1 + resources: + - metrics + - provisioning + - port: 9002 + bindAddress: 0.0.0.0 + resources: + - widgets + registration: + #cache: + # # (Optional) Cache options for large scale deployments. + # # For encryption to work, this must be configured. + # redisUri: redis://localhost:6379 + + #encryption: + # # (Optional) Configuration for encryption support in the bridge. + # # If omitted, encryption support will be disabled. + # storagePath: + # # Path to the directory used to store encryption files. These files must persist between restarts of the service. + # ./cryptostore + + #permissions: + # # (Optional) Permissions for using the bridge. See docs/setup.md#permissions for help + # - actor: example.com + # services: + # - service: "*" + # level: admin + + #github: + # # (Optional) Configure this to enable GitHub support + # auth: + # # Authentication for the GitHub App. + # id: 123 + # privateKeyFile: github-key.pem + # webhook: + # # Webhook settings for the GitHub app. + # secret: secrettoken + # oauth: + # # (Optional) Settings for allowing users to sign in via OAuth. + # client_id: foo + # client_secret: bar + # redirect_uri: https://example.com/oauth/ + # defaultOptions: + # # (Optional) Default options for GitHub connections. + # showIssueRoomLink: false + # hotlinkIssues: + # prefix: "#" + # userIdPrefix: + # # (Optional) Prefix used when creating ghost users for GitHub accounts. + # _github_ + + #gitlab: + # # (Optional) Configure this to enable GitLab support + # instances: + # gitlab.com: + # url: https://gitlab.com + # webhook: + # secret: secrettoken + # publicUrl: https://example.com/hookshot/ + # userIdPrefix: + # # (Optional) Prefix used when creating ghost users for GitLab accounts. + # _gitlab_ + # commentDebounceMs: + # # (Optional) Aggregate comments by waiting this many milliseconds before posting them to Matrix. Defaults to 5000 (5 seconds) + # 5000 + + #jira: + # # (Optional) Configure this to enable Jira support. Only specify `url` if you are using an On Premise install (i.e. not atlassian.com) + # webhook: + # # Webhook settings for JIRA + # secret: secrettoken + # oauth: + # # (Optional) OAuth settings for connecting users to JIRA. See documentation for more information + # client_id: foo + # client_secret: bar + # redirect_uri: https://example.com/oauth/ + + #generic: + # # (Optional) Support for generic webhook events.
+ # #'allowJsTransformationFunctions' will allow users to write short transformation snippets in code, and thus is unsafe in untrusted environments + + # enabled: false + # outbound: false + # urlPrefix: https://example.com/webhook/ + # userIdPrefix: _webhooks_ + # allowJsTransformationFunctions: false + # waitForComplete: false + # enableHttpGet: false + # sendExpiryNotice: false + # requireExpiryTime: false + # maxExpiryTime: 30d + + #figma: + # # (Optional) Configure this to enable Figma support + # publicUrl: https://example.com/hookshot/ + # instances: + # your-instance: + # teamId: your-team-id + # accessToken: your-personal-access-token + # passcode: your-webhook-passcode + + #feeds: + # # (Optional) Configure this to enable RSS/Atom feed support + # enabled: false + # pollIntervalSeconds: 600 + # pollTimeoutSeconds: 30 + # pollConcurrency: 4 + + #bot: + # # (Optional) Define profile information for the bot user + # displayname: Hookshot Bot + # avatar: mxc://half-shot.uk/2876e89ccade4cb615e210c458e2a7a6883fe17d + + #serviceBots: + # # (Optional) Define additional bot users for specific services + # - localpart: feeds + # displayname: Feeds + # avatar: ./assets/feeds_avatar.png + # prefix: "!feeds" + # service: feeds + + #widgets: + # # (Optional) EXPERIMENTAL support for complementary widgets + # addToAdminRooms: false + # publicUrl: https://example.com/widgetapi/v1/static/ + # roomSetupWidget: + # addOnInvite: false + # disallowedIpRanges: + # - 127.0.0.0/8 + # - 10.0.0.0/8 + # - 172.16.0.0/12 + # - 192.168.0.0/16 + # - 100.64.0.0/10 + # - 192.0.0.0/24 + # - 169.254.0.0/16 + # - 192.88.99.0/24 + # - 198.18.0.0/15 + # - 192.0.2.0/24 + # - 198.51.100.0/24 + # - 203.0.113.0/24 + # - 224.0.0.0/4 + # - ::1/128 + # - fe80::/10 + # - fc00::/7 + # - 2001:db8::/32 + # - ff00::/8 + # - fec0::/10 + # branding: + # widgetTitle: Hookshot Configuration + + #provisioning: + # # (Optional) Provisioning API for integration managers + # secret: "!secretToken" + + #metrics: + # # (Optional) Prometheus metrics support + # enabled: true + + #sentry: + # # (Optional) Configure Sentry error reporting + # dsn: https://examplePublicKey@o0.ingest.sentry.io/0 + # environment: production + #github: + # # (Optional) Configure this to enable GitHub support + # auth: + # # Authentication for the GitHub App. + # id: 123 + # privateKeyFile: github-key.pem + # webhook: + # # Webhook settings for the GitHub app. + # secret: secrettoken + # oauth: + # # (Optional) Settings for allowing users to sign in via OAuth. + # client_id: foo + # client_secret: bar + # redirect_uri: https://example.com/oauth/ + # defaultOptions: + # # (Optional) Default options for GitHub connections. + # showIssueRoomLink: false + # hotlinkIssues: + # prefix: "#" + # userIdPrefix: + # # (Optional) Prefix used when creating ghost users for GitHub accounts. + # _github_ + + #gitlab: + # # (Optional) Configure this to enable GitLab support + # instances: + # gitlab.com: + # url: https://gitlab.com + # webhook: + # secret: secrettoken + # publicUrl: https://example.com/hookshot/ + # userIdPrefix: + # # (Optional) Prefix used when creating ghost users for GitLab accounts. + # _gitlab_ + # commentDebounceMs: + # # (Optional) Aggregate comments by waiting this many milliseconds before posting them to Matrix.
Defaults to 5000 (5 seconds) + # 5000 + + #figma: + # # (Optional) Configure this to enable Figma support + # publicUrl: https://example.com/hookshot/ + # instances: + # your-instance: + # teamId: your-team-id + # accessToken: your-personal-access-token + # passcode: your-webhook-passcode + + #jira: + # # (Optional) Configure this to enable Jira support. Only specify `url` if you are using an On Premise install (i.e. not atlassian.com) + # webhook: + # # Webhook settings for JIRA + # secret: secrettoken + # oauth: + # # (Optional) OAuth settings for connecting users to JIRA. See documentation for more information + # client_id: foo + # client_secret: bar + # redirect_uri: https://example.com/oauth/ + + #generic: + # # (Optional) Support for generic webhook events. + # #'allowJsTransformationFunctions' will allow users to write short transformation snippets in code, and thus is unsafe in untrusted environments + + # enabled: false + # enableHttpGet: false + # urlPrefix: https://example.com/webhook/ + # userIdPrefix: _webhooks_ + # allowJsTransformationFunctions: false + # waitForComplete: false + + #feeds: + # # (Optional) Configure this to enable RSS/Atom feed support + # enabled: false + # pollConcurrency: 4 + # pollIntervalSeconds: 600 + # pollTimeoutSeconds: 30 + + #provisioning: + # # (Optional) Provisioning API for integration managers + # secret: "!secretToken" + + #bot: + # # (Optional) Define profile information for the bot user + # displayname: Hookshot Bot + # avatar: mxc://half-shot.uk/2876e89ccade4cb615e210c458e2a7a6883fe17d + + #serviceBots: + # # (Optional) Define additional bot users for specific services + # - localpart: feeds + # displayname: Feeds + # avatar: ./assets/feeds_avatar.png + # prefix: "!feeds" + # service: feeds + + #metrics: + # # (Optional) Prometheus metrics support + # enabled: true + + #cache: + # # (Optional) Cache options for large scale deployments. + # # For encryption to work, this must be configured. + # redisUri: redis://localhost:6379 + + #queue: + # # (Optional) Message queue configuration options for large scale deployments. + # # For encryption to work, this must not be configured. + # redisUri: redis://localhost:6379 + + #widgets: + # # (Optional) EXPERIMENTAL support for complementary widgets + # addToAdminRooms: false + # disallowedIpRanges: + # - 127.0.0.0/8 + # - 10.0.0.0/8 + # - 172.16.0.0/12 + # - 192.168.0.0/16 + # - 100.64.0.0/10 + # - 192.0.0.0/24 + # - 169.254.0.0/16 + # - 192.88.99.0/24 + # - 198.18.0.0/15 + # - 192.0.2.0/24 + # - 198.51.100.0/24 + # - 203.0.113.0/24 + # - 224.0.0.0/4 + # - ::1/128 + # - fe80::/10 + # - fc00::/7 + # - 2001:db8::/32 + # - ff00::/8 + # - fec0::/10 + # roomSetupWidget: + # addOnInvite: false + # publicUrl: https://example.com/widgetapi/v1/static/ + # branding: + # widgetTitle: Hookshot Configuration + + #sentry: + # # (Optional) Configure Sentry error reporting + # dsn: https://examplePublicKey@o0.ingest.sentry.io/0 + # environment: production + + #permissions: + # # (Optional) Permissions for using the bridge.
See docs/setup.md#permissions for help + # - actor: example.com + # services: + # - service: "*" + # level: admin + id: matrix-hookshot + as_token: "" + hs_token: "" + namespaces: + rooms: [] + users: [] + sender_localpart: hookshot + url: "http://example.com" + rate_limited: false + passkey: "" diff --git a/lp/hookshot/output.yaml b/lp/hookshot/output.yaml new file mode 100644 index 0000000..9186862 --- /dev/null +++ b/lp/hookshot/output.yaml @@ -0,0 +1,172 @@ +--- +# Source: hookshot/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hookshot + namespace: matrix + labels: + helm.sh/chart: hookshot-0.1.16 + app.kubernetes.io/name: hookshot + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "6.0.2" + app.kubernetes.io/managed-by: Helm +--- +# Source: hookshot/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: hookshot-config + namespace: matrix + labels: + helm.sh/chart: hookshot-0.1.16 + app.kubernetes.io/name: hookshot + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "6.0.2" + app.kubernetes.io/managed-by: Helm +data: + config.yml: | + bridge: + bindAddress: 127.0.0.1 + domain: matrix-lp.allarddcs.nl + mediaUrl: https://matrix-lp.allarddcs.nl + port: 9993 + url: http://matrix-lp:8008 + listeners: + - bindAddress: 0.0.0.0 + port: 9000 + resources: + - webhooks + - bindAddress: 127.0.0.1 + port: 9001 + resources: + - metrics + - provisioning + - bindAddress: 0.0.0.0 + port: 9002 + resources: + - widgets + logging: + colorize: true + json: false + level: info + timestampFormat: HH:mm:ss:SSS + passFile: passkey.pem + registration.yml: | + as_token: "" + generic: + enabled: true + urlPrefix: https://hookshot-lp.allarddcs.nl + hs_token: "" + id: matrix-hookshot + namespaces: + rooms: [] + users: [] + rate_limited: false + sender_localpart: hookshot + url: http://example.com + passkey.pem: | +--- +apiVersion: v1 +kind: Service +metadata: + name: hookshot + namespace: matrix + labels: + helm.sh/chart: hookshot-0.1.16 + app.kubernetes.io/name: hookshot + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "6.0.2" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - port: 9000 + targetPort: webhook + protocol: TCP + name: webhook + - port: 9001 + targetPort: metrics + protocol: TCP + name: metrics + - port: 9002 + targetPort: appservice + protocol: TCP + name: appservice + selector: + app.kubernetes.io/name: hookshot + app.kubernetes.io/instance: release-name +--- +# Source: hookshot/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: hookshot + namespace: matrix + labels: + helm.sh/chart: hookshot-0.1.16 + app.kubernetes.io/name: hookshot + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "6.0.2" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: hookshot + app.kubernetes.io/instance: release-name + template: + metadata: + labels: + app.kubernetes.io/name: hookshot + app.kubernetes.io/instance: release-name + spec: + containers: + - name: hookshot + image: "halfshot/matrix-hookshot:6.0.2" + imagePullPolicy: IfNotPresent + volumeMounts: + - name: config + mountPath: "/data" + ports: + - name: webhook + containerPort: 9000 + protocol: TCP + - name: metrics + containerPort: 9001 + protocol: TCP + - name: appservice + containerPort: 9002 + protocol: TCP + env: + envFrom: + livenessProbe: + null + readinessProbe: + null + resources: + {} + 
volumes: + - name: config + configMap: + name: release-name-hookshot-config +--- +apiVersion: v1 +kind: Pod +metadata: + name: "hookshot-test-connection" + labels: + helm.sh/chart: hookshot-0.1.16 + app.kubernetes.io/name: hookshot + app.kubernetes.io/instance: release-name + app.kubernetes.io/version: "6.0.2" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['hookshot:9000'] + restartPolicy: Never diff --git a/lp/hookshot/values.yaml b/lp/hookshot/values.yaml new file mode 100644 index 0000000..25638ed --- /dev/null +++ b/lp/hookshot/values.yaml @@ -0,0 +1,244 @@ +--- +# Note: This chart is released using the config.sample.yml file +# +# -- Number of replicas to deploy. Consequences of using multiple Hookshot replicas currently unknown. +replicaCount: 1 +image: + # -- Repository to pull hookshot image from + repository: halfshot/matrix-hookshot + # -- Pull policy for Hookshot image + pullPolicy: IfNotPresent + # -- Image tag to pull. Defaults to chart's appVersion value as set in Chart.yaml + tag: +# -- List of names of k8s secrets to be used as ImagePullSecrets for the pod +imagePullSecrets: [] +# -- Name override for helm chart +nameOverride: "" +# -- Full name override for helm chart +fullnameOverride: "" +serviceAccount: + # -- Specifies whether a service account should be created + create: true + # -- Annotations to add to the service account + annotations: {} + # -- The name of the service account to use. If not set and create is true, a name is generated using the fullname template + name: "" +# -- Extra annotations for Hookshot pod +podAnnotations: {} +# -- Pod security context settings +podSecurityContext: {} +# fsGroup: 2000 + +# -- Security context settings +securityContext: {} +# capabilities: +# drop: +# - ALL +# readOnlyRootFilesystem: true +# runAsNonRoot: true +# runAsUser: 1000 + +service: + # -- Service type for Hookshot service + type: ClusterIP + # -- Port for Hookshot service + port: 80 + # -- Extra annotations for service + annotations: {} + # -- Extra labels for service + labels: {} + webhook: + # -- Webhook port as configured in container + port: 9000 + metrics: + # -- Metrics port as configured in container + port: 9001 + appservice: + # -- Appservice port as configured in container + port: 9002 +ingress: + webhook: + # -- Enable ingress for webhook + enabled: false + # -- Ingress class name for webhook ingress + className: "" + # -- Annotations for webhook ingress + annotations: {} + # -- Host configuration for webhook ingress + hosts: [] + # -- TLS configuration for webhook ingress + tls: [] + appservice: + # -- Enable ingress for appservice + enabled: false + # -- Ingress class name for appservice ingress + className: "" + # -- Annotations for appservice ingress + annotations: {} + # -- Host configuration for appservice ingress + hosts: [] + # -- TLS configuration for appservice ingress + tls: [] +# -- Pod resource requests / limits +resources: {} +# We usually recommend not to specify default resources and to leave this as a conscious +# choice for the user. This also increases chances charts run on environments with little +# resources, such as Minikube. If you do want to specify resources, uncomment the following +# lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +autoscaling: + enabled: false +# -- Node selector parameters +nodeSelector: {} +# -- Tolerations for deployment +tolerations: [] +# -- Affinity settings for deployment +affinity: {} +hookshot: + # -- Name of existing ConfigMap with valid Hookshot configuration + existingConfigMap: + # -- Raw Hookshot configuration. Gets templated into a YAML file and then loaded unless an existingConfigMap is specified. + config: + # This is an example configuration file + bridge: + # Basic homeserver configuration + domain: matrix-lp.allarddcs.nl + url: http://matrix-lp:8008 + mediaUrl: https://matrix-lp.allarddcs.nl + port: 9993 + bindAddress: 127.0.0.1 + passFile: passkey.pem + # A passkey used to encrypt tokens stored inside the bridge. + # Run openssl genpkey -out passkey.pem -outform PEM -algorithm RSA -pkeyopt rsa_keygen_bits:4096 to generate + logging: + # Logging settings. You can have a severity debug,info,warn,error + level: info + colorize: true + json: false + timestampFormat: HH:mm:ss:SSS + listeners: + # HTTP Listener configuration. + # Bind resource endpoints to ports and addresses. + # 'port' must be specified. Each listener must listen on a unique port. + # 'bindAddress' will default to '127.0.0.1' if not specified, which may not be suited to Docker environments. + # 'resources' may be any of webhooks, widgets, metrics, provisioning + - port: 9000 + bindAddress: 0.0.0.0 + resources: + - webhooks + - port: 9001 + bindAddress: 127.0.0.1 + resources: + - metrics + - provisioning + - port: 9002 + bindAddress: 0.0.0.0 + resources: + - widgets + registration: + + generic: + # # (Optional) Support for generic webhook events. + # #'allowJsTransformationFunctions' will allow users to write short transformation snippets in code, and thus is unsafe in untrusted environments + + enabled: true + # enableHttpGet: false + urlPrefix: https://hookshot-lp.allarddcs.nl + # userIdPrefix: _webhooks_ + # allowJsTransformationFunctions: false + # waitForComplete: false + + #feeds: + # # (Optional) Configure this to enable RSS/Atom feed support + # enabled: false + # pollConcurrency: 4 + # pollIntervalSeconds: 600 + # pollTimeoutSeconds: 30 + + #provisioning: + # # (Optional) Provisioning API for integration managers + # secret: "!secretToken" + + #bot: + # # (Optional) Define profile information for the bot user + # displayname: Hookshot Bot + # avatar: mxc://half-shot.uk/2876e89ccade4cb615e210c458e2a7a6883fe17d + + #serviceBots: + # # (Optional) Define additional bot users for specific services + # - localpart: feeds + # displayname: Feeds + # avatar: ./assets/feeds_avatar.png + # prefix: "!feeds" + # service: feeds + + #metrics: + # # (Optional) Prometheus metrics support + # enabled: true + + #cache: + # # (Optional) Cache options for large scale deployments. + # # For encryption to work, this must be configured. + # redisUri: redis://localhost:6379 + + #queue: + # # (Optional) Message queue configuration options for large scale deployments. + # # For encryption to work, this must not be configured. 
+ # redisUri: redis://localhost:6379 + + #widgets: + # # (Optional) EXPERIMENTAL support for complementary widgets + # addToAdminRooms: false + # disallowedIpRanges: + # - 127.0.0.0/8 + # - 10.0.0.0/8 + # - 172.16.0.0/12 + # - 192.168.0.0/16 + # - 100.64.0.0/10 + # - 192.0.0.0/24 + # - 169.254.0.0/16 + # - 192.88.99.0/24 + # - 198.18.0.0/15 + # - 192.0.2.0/24 + # - 198.51.100.0/24 + # - 203.0.113.0/24 + # - 224.0.0.0/4 + # - ::1/128 + # - fe80::/10 + # - fc00::/7 + # - 2001:db8::/32 + # - ff00::/8 + # - fec0::/10 + # roomSetupWidget: + # addOnInvite: false + # publicUrl: https://example.com/widgetapi/v1/static/ + # branding: + # widgetTitle: Hookshot Configuration + + #sentry: + # # (Optional) Configure Sentry error reporting + # dsn: https://examplePublicKey@o0.ingest.sentry.io/0 + # environment: production + + #permissions: + # # (Optional) Permissions for using the bridge. See docs/setup.md#permissions for help + # - actor: example.com + # services: + # - service: "*" + # level: admin + id: matrix-hookshot + as_token: "" + hs_token: "" + namespaces: + rooms: [] + users: [] + sender_localpart: hookshot + url: "http://example.com" + rate_limited: false + passkey: "" diff --git a/lp/matrix/-d b/lp/matrix/-d new file mode 100644 index 0000000..5d34749 --- /dev/null +++ b/lp/matrix/-d @@ -0,0 +1 @@ +{"errcode":"M_FORBIDDEN","error":"You are not a server admin"} \ No newline at end of file diff --git a/lp/matrix/README.md b/lp/matrix/README.md index 549fd09..62768ff 100644 --- a/lp/matrix/README.md +++ b/lp/matrix/README.md @@ -70,3 +70,59 @@ nc -zv coturn-lp.allarddcs.nl 5349 #check the certificate: kubectl describe secret coturn-cert -n matrix + +#HOOKSHOT + + +#generate the passkey and put it in a secret. The passkey is used to store other keys in encrypted form. + +openssl genpkey -out passkey.pem -outform PEM -algorithm RSA -pkeyopt rsa_keygen_bits:4096 +kubectl -n matrix create secret generic hookshot-passkey --from-file=passkey.pem + +#request an access token: + +curl -X POST "https://matrix-lp.allarddcs.nl/_matrix/client/v3/login" -H "Content-Type: application/json" -d '{ + "type": "m.login.password", + "user": "admin", + "password": "Matrix01@" + }' + +syt_YWRtaW4_laSGClQJFMQNKKjqyfVj_1XLp4x + +#create a room: + +curl -k -X POST \ + -H "Authorization: Bearer syt_YWRtaW4_bUqQKyFkonSgjkghnuxY_4IsleV" \ + -H "Content-Type: application/json" \ + -d '{"name":"harbor","preset":"private_chat"}' \ + "https://matrix-lp.allarddcs.nl/_matrix/client/v3/createRoom" + +{"room_id":"!AmeLKsUWrKBIkosFbY:matrix-lp.allarddcs.nl"} + +#add the hookshot bot to the room: + +curl -X POST \ + -H "Authorization: Bearer syt_YWRtaW4_bUqQKyFkonSgjkghnuxY_4IsleV" \ + -H "Content-Type: application/json" \ + "https://matrix-lp.allarddcs.nl/_synapse/admin/v1/join/!AmeLKsUWrKBIkosFbY:matrix-lp.allarddcs.nl" \ + -d '{ + "user_id": "@hookshot:matrix-lp.allarddcs.nl" + }' + +Note: because the bot has been added, the room's status changes to: managed by the appservice. +As a result you can no longer publish the room or invite members.
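+ +#optional check (assumes the room id returned by createRoom above and the same admin token): confirm the bot really joined before going further, using the standard joined_members endpoint: + +curl -k -H "Authorization: Bearer $ACCESS_TOKEN" \ + "https://matrix-lp.allarddcs.nl/_matrix/client/v3/rooms/!AmeLKsUWrKBIkosFbY:matrix-lp.allarddcs.nl/joined_members"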
+ +#add allardhook to the room: + +curl -k -X POST -H "Authorization: Bearer $ACCESS_TOKEN" \ + -H "Content-Type: application/json" \ + "https://matrix-lp.allarddcs.nl/_synapse/admin/v1/join/!BPcyrRUfYdBJrFVdlV:matrix-lp.allarddcs.nl" \ + -d '{ + "user_id": "@allard:matrix-lp.allarddcs.nl" + }' + +#restart hookshot + +#write the transformationFunction into a state event: + +curl -k -X PUT -H "Authorization: Bearer $ACCESS_TOKEN" -H "Content-Type: application/json" "https://matrix-lp.allarddcs.nl/_matrix/client/r0/rooms/!BPcyrRUfYdBJrFVdlV:matrix-lp.allarddcs.nl/state/uk.half-shot.matrix-hookshot.generic.hook/generic-1" -d @harbor-webhook.json diff --git a/lp/matrix/as-token.txt b/lp/matrix/as-token.txt new file mode 100644 index 0000000..26c3d37 --- /dev/null +++ b/lp/matrix/as-token.txt @@ -0,0 +1 @@ +d3c8fccbe082aa2a59da362b3805abe4c4cebcd7e822cdbd700d84e7c55c485f diff --git a/lp/matrix/create-passkey-secret.sh b/lp/matrix/create-passkey-secret.sh new file mode 100644 index 0000000..7dda7fb --- /dev/null +++ b/lp/matrix/create-passkey-secret.sh @@ -0,0 +1 @@ +microk8s kubectl -n matrix create secret generic hookshot-passkey --from-file=passkey.pem diff --git a/lp/matrix/harbor-webhook.bak b/lp/matrix/harbor-webhook.bak new file mode 100644 index 0000000..7e20744 --- /dev/null +++ b/lp/matrix/harbor-webhook.bak @@ -0,0 +1,5 @@ +{ + "name": "Harbor Webhook", + "webhookId": "harbor", + "transformationFunction": "try { const repo=data.repository||{}; const resources=(data.event_data && data.event_data.resources)||[]; let msg=`📦 Repository: ${repo.namespace||''}/${repo.name||''}\n📝 Type: ${data.type||''}\n👤 Operator: ${data.operator||''}\n`; if(resources.length>0){ msg+=`\n💠 Resources:\n`; resources.forEach((r,idx)=>{ msg+=`\n🔹 Resource ${idx+1}:\n`; msg+=` 🏷 Tag: ${r.tag||''}\n`; msg+=` 🆔 Digest: ${r.digest||''}\n`; msg+=` 🌐 URL: ${r.resource_url||''}\n`; }); } result={plain:msg, version:'v2'}; } catch(e){ result={plain:`Error processing webhook: ${e.message}`, version:'v2'}; }" +} diff --git a/lp/matrix/harbor-webhook.json b/lp/matrix/harbor-webhook.json new file mode 100644 index 0000000..84ee57e --- /dev/null +++ b/lp/matrix/harbor-webhook.json @@ -0,0 +1,5 @@ +{ + "name": "Harbor Webhook", + "webhookId": "harbor", + "transformationFunction": "const repo = data.repository || {}; const resources = (data.event_data && data.event_data.resources) || []; let msg = ''; msg += '📦 Repository: ' + (repo.namespace || '') + '/' + (repo.name || '') + '\\n'; msg += '📝 Type: ' + (data.type || '') + '\\n'; msg += '👤 Operator: ' + (data.operator || '') + '\\n'; msg += '\\n'; msg += '💠 Resources:\\n'; resources.forEach((r, i) => { msg += '\\n'; msg += '🔹 Resource ' + (i + 1) + '\\n'; msg += '🏷 Tag: ' + (r.tag || '') + '\\n'; msg += '🆔 Digest: ' + (r.digest || '') + '\\n'; msg += '🌐 URL: ' + (r.resource_url || '') + '\\n'; }); result = { plain: msg, version: 'v2' };" +} diff --git a/lp/matrix/hookshot.yaml b/lp/matrix/hookshot.yaml new file mode 100644 index 0000000..2dbed98 --- /dev/null +++ b/lp/matrix/hookshot.yaml @@ -0,0 +1,133 @@ +# =========================== +# Hookshot Deployment +# =========================== +apiVersion: apps/v1 +kind: Deployment +metadata: + name: matrix-hookshot + namespace: matrix +spec: + replicas: 1 + selector: + matchLabels: + app: matrix-hookshot + template: + metadata: + labels: + app: matrix-hookshot + spec: + containers: + - name: hookshot + image: halfshot/matrix-hookshot:latest + imagePullPolicy: Always + ports: + - containerPort: 9000 # webhooks + - containerPort: 
9001 # metrics + - containerPort: 9002 # widgets + - containerPort: 9003 # appservice + env: + # tell hookshot where to find files + - name: CONFIG_PATH + value: /data/config.yml + - name: REGISTRATION_PATH + value: /data/registration.yml + - name: TRANSFORM_PATH + value: /data/transformationFunction.js + volumeMounts: + - name: hookshot-data + mountPath: /data + - name: hookshot-registration + mountPath: /data/registration.yml + subPath: registration.yml + volumes: + - name: hookshot-data + persistentVolumeClaim: + claimName: hookshot-pvc + - name: hookshot-registration + secret: + secretName: matrix-hookshot-registration + +--- + +# =========================== +# Hookshot Service +# =========================== +apiVersion: v1 +kind: Service +metadata: + name: matrix-hookshot + namespace: matrix +spec: + selector: + app: matrix-hookshot + ports: + - name: webhooks + port: 9000 + targetPort: 9000 + - name: metrics + port: 9001 + targetPort: 9000 + - name: widgets + port: 9002 + targetPort: 9000 + - name: appservice + port: 9003 + targetPort: 9003 + - name: matrix + port: 9993 + targetPort: 9993 + +--- +# =========================== +# Hookshot IngressRoute +# =========================== +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: matrix-hookshot + namespace: matrix +spec: + entryPoints: + - websecure + routes: + - match: Host(`hookshot-lp.allarddcs.nl`) && PathPrefix(`/webhook`) + kind: Rule + services: + - name: matrix-hookshot + port: 9000 + tls: + certResolver: default +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: hookshot-pv +spec: + storageClassName: "" + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + mountOptions: + - hard + - nfsvers=4.1 + nfs: + server: 192.168.2.110 + path: /mnt/nfs_share/hookshot + readOnly: false +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hookshot-pvc + namespace: matrix +spec: + storageClassName: "" + volumeName: hookshot-pv + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 1Gi diff --git a/lp/matrix/hs-token.txt b/lp/matrix/hs-token.txt new file mode 100644 index 0000000..ab3cd70 --- /dev/null +++ b/lp/matrix/hs-token.txt @@ -0,0 +1 @@ +c31f7a18d3d2d79bd7a03e2794d966317155c409699cb6fd0922023cf45f9c3b diff --git a/lp/matrix/matrix.yaml b/lp/matrix/matrix.yaml index 866736a..06b2764 100755 --- a/lp/matrix/matrix.yaml +++ b/lp/matrix/matrix.yaml @@ -28,10 +28,16 @@ spec: volumeMounts: - mountPath: /data name: matrix + - name: hookshot-registration + mountPath: /appservices/hookshot-registration.yml + subPath: registration.yml volumes: - name: matrix persistentVolumeClaim: claimName: matrix-pvc + - name: hookshot-registration + secret: + secretName: matrix-hookshot-registration --- apiVersion: v1 kind: Service @@ -45,7 +51,7 @@ spec: port: 8008 selector: app: matrix - type: NodePort + type: ClusterIP --- apiVersion: traefik.io/v1alpha1 kind: IngressRoute diff --git a/lp/matrix/passkey.pem b/lp/matrix/passkey.pem new file mode 100644 index 0000000..a6201ff --- /dev/null +++ b/lp/matrix/passkey.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQCzd15wg6AJEMPP +Yvx8C/68ypBQMzprQvMNgwB53ZPzJidKwrMUIwT/hyjvyFRo8scx4mNb66ASYmfi +53P7apReX/yIibrcmDaVXxk4FdxVJ2xoK+OKV1zBbHsfFsyPVOqOOobiMbOuLxJK +c8Pj3Z1kmD62+Oq/dr6hVHF5gpocie2RK5gJY6DVCc4/Eh2K/Gzhm3plDbYr82h/ +RMz6nRzDKxtEO/yvekOmL5BBC88seBnd2RZf9Dgts1qDslb5jIvgWwrZEsruAxcj 
+BLlSGZ/yHTYdytTwX0nqCzqWKxjGTCwAvPDbmy4Y4D8zmaaTYHpvWPHbAkDb5vAo +eJuI5+j48bTSJhw5s1kOtJ5OdRY6n7W20YtqP1HAg3zt2kVesHOhXLB8sQSlvHQL +P5iF//i0qMw/CWbGERkzz1DPRuJNsXytBt6Gc+9yZ31Z3xbJTALG47V/ylNgAlnt +qCqCUfrtJD/Pq0xG0tSpv5uYcRfadkWd0wIdYsIb9m8/7jaa7FVKshzzGIXmT/aY +4AiVgP8RpkH3gIzoHYXwd3UD8D0RMeWCz94vwz0Ls6zCd5OX8kXo5dtvhUMW4XCQ +glmlx6BXvWbR8H0VS7aT7KjI23/Pef2VzWo94G7tC8wNaXhvni1bjB8ZyFA6Y4qB +JreQfo0tD/h3Z+XZmQXHPKpVEahHNQIDAQABAoICAE+ASa2+IV7itltglki9Z5Nq +iJN+nhp2BgWuSr+xkKDd/fn4VixNdyFbVp8iN4JmSienCizmLHr4bNzve0mc5S0J +iG1T1htysntpLVrTFFAErtlW1+2o1cY5sDENgIDeNSncc3QwYFCBObERp22B9v9h +EicsDZHToI8HQV16pZqO31a34RoHsG+vcFYwSDYRrIokvguWD2VY5qm6aLN8dglf +RyzrBf0WRHGSEG1ANf6VFaBZUrgfD+Pu1+IPVsglorE4o42V7Z+SbuVsakwcXeZ7 +WkXIXJw/Ghc78jAj103A4J1TFb+y9rC+ZkENh+UxkAtTc5B5ZxIDM9A53WzAj0s6 +FPSWUI6ttsj8sAN6zu/McQmW/ysBZclFP8V54lfCCT+02lz/bA5LYwPEIClMAtRP +nmFuS/fRSP+J2I8hKALhVtufdFT1H7cVcHXsKNMWn8rBZTvMVs1VDfeUPz3EYgd8 +eORSSY/nD6ObY2Y++fpP8ARiR+W+DJR2c4SNqwCdEN7I0pa7I/RJNozu6LHPJZjb +UOhUxySXUAPUR3WWG+Gfu9o2X2O8uRtw5Q7GokoZkeH+N1O6hCGZNUQqfj/l5qIu +iqQtxyU7Bf6RQ3vbFdwAp3G1zgtmzN8wxRgPT6G1CezOsi6AJDXQcfA+AoAdhRfa +W6j34JxNf9WiFWUueoDhAoIBAQDnLAB1vo+ZC6aVE+t60Lr8DVyWFpE/pm2pbC5R +mlkBQY/qRfM0q78UGeJVNxmUXPOdtidD3BKl+wHDsN60k3Z99gN8XKTYvB1YA3hw +C0mKnZi/4LoOtPt0zc+ZHgl4jkVsQayS6g2zkZAN+djOwIiPGo1+u3sJAOjT6ko9 +nWimHlQUSSHoLqlzw1At6Se+bbp6M08egsr8EXlLSnoY/zkAIu4SqwhyhsI47DBh +ZR2y7rV8EPcIaUP8V2GvGj43U8LojjavzUkxxWGpfeF3DlNKS5ULR0tkXmbJNW2W +znltrfB5T54HFhTxxVZ6wz9+6MRwC+mUY1oPSpbK70UkeNl3AoIBAQDGvbzrpaUJ +9XOFQSljyEMVb5U0rXnU1736ycIPDcSI4cZn9SMuz7cd3XKR4pw2K8y7WWGoyINV +GfSl5q+EEN5QbuqGoL15uNUw/Oto/UGKoOr5AwCxXfjw6cHpCVm1PN0P9NGFCdBK +Ogwfec6ReriYtDtFADtyWomqCEi1VCZsJ1bkf832wQ6b6SjnQzAJU9O+Is0D3pOo +ApTnW56sIIBEH5wgXnLChocv1NHBGPSa8M/OTYWpt6UQxj9npgX3YAzrlsQX4jV5 +JYZtMWtR8inOLLoHMztiGjTf3HRUZRpWNMSELoeD1LlC3IKjmyqxk25X8HmeqMQf +UOyQrB0Vd8+zAoIBAQC5vyZjdYnGbJEeShI07dDAxC+3vUxaO2zz7CPPl9iaZMax +dCE2GiX/jcjJtetMxd9NwtYL9aHaUjSZu91GdRKJpYope/tM1uIxHSDBzp/5zCW3 +T2fQd2Vw9govuu3bO13W3XI/3ebCeVypji7B+fwAGFLHGPeERYHO/PtIRNmYWNgw +MI8ZE50+khozuZsgrORgWifhmTBml9fBQoeW3EQ9stNelqTD+TnIKSe6qlfRtXKH +zM3hqUtiUAgCfXCuXhRHbjXMzoNYhYOyf54RXqZYFSEeHdwbOqsWU2LKAP+avF9z +6iBNC1Yq6ehR3d6AJ6HncfruK2Jq1QES8Gy7IIuDAoIBAQDCBo8gSBE+DcGAQMOV +xPepP8ydJxegMhgpdTvd0oUjVbiL2YlFtEiEAaROxLvZLx2rcnGjhE6jkLmGFhcY +ihdg00hnsxoIcejA8MsrLsUEAGImRUYx4xqcqLpFNnm8NPVlFJBQn+oX7V/Uguim +ovcBLkQG81kWJUntFDVXwhXY1PdCEXwVARmwIQtr6+5DzPEV2Yfp1Sy2g/63eH0K +cpiG6Y1VFKBZRVlJ2y4EpMZ5VcyrDogFz2J5Y/KW0EFW5xD2F7TN1Gx8H6SDihHK +aO9jd8sOKHg7KHaibumrUyFJNC7/FSX8EHvlyL5J17zwrIy3kfBKc101bZ3nCAFE +6jl9AoIBAQDTDryGqb5NCG4O6LaV+coCYpvbcKnD7jss497XI9Fsg4cucypZvNX6 +zl9P4TFPLTWdBrgYxFr/rVS9Aks9sL+9bBMo9f1tVipfDiJ7NmmwYSdzew1ANxnH +Sh2zfQcQVQsE3Y79bKY/5quJpUMxVJ+fgdSm4js+4ubQvDDGkpy24rpSgghD3IAd +TJVY7t34aXLrp7L0i57OkxCa0gDCpVIkW2KrG4O2N7Aumk3XFmPttRBuAnZZV+22 +SiovNua1o1B9RYWx2Q/F1oJJMfZV1qDyHe4UxO+6GKO9Z2ETBjIimFc/s/zqLDSx +BixkpeaGIwGU11xCAxH+NzqpvaqqAZ0P +-----END PRIVATE KEY----- diff --git a/lp/matrix/redis.yaml b/lp/matrix/redis.yaml new file mode 100755 index 0000000..30eb408 --- /dev/null +++ b/lp/matrix/redis.yaml @@ -0,0 +1,42 @@ +# =========================== +# Redis Deployment +# =========================== +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis + namespace: matrix +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + containers: + - name: redis + image: redis:7-alpine + ports: + - containerPort: 6379 + +--- + +# =========================== +# Redis Service +# 
=========================== +apiVersion: v1 +kind: Service +metadata: + name: redis + namespace: matrix +spec: + selector: + app: redis + ports: + - port: 6379 + targetPort: 6379 + + diff --git a/lp/matrix/registration-secret.yaml b/lp/matrix/registration-secret.yaml new file mode 100644 index 0000000..49ca0fd --- /dev/null +++ b/lp/matrix/registration-secret.yaml @@ -0,0 +1,22 @@ +# =========================== +# Hookshot Registration Secret +# =========================== +apiVersion: v1 +kind: Secret +metadata: + name: matrix-hookshot-registration + namespace: matrix +type: Opaque +stringData: + registration.yml: | + id: hookshot + url: http://matrix-hookshot.matrix.svc.cluster.local:9993 + as_token: d3c8fccbe082aa2a59da362b3805abe4c4cebcd7e822cdbd700d84e7c55c485f + hs_token: c31f7a18d3d2d79bd7a03e2794d966317155c409699cb6fd0922023cf45f9c3b + sender_localpart: hookshot + namespaces: + users: + - exclusive: false + regex: "^@(hookshot|_webhooks_.*):matrix-lp.allarddcs.nl$" + rooms: [] + aliases: [] diff --git a/lp/matrix/synapse-admin.yaml b/lp/matrix/synapse-admin.yaml new file mode 100644 index 0000000..f8c4363 --- /dev/null +++ b/lp/matrix/synapse-admin.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: synapse-admin + namespace: matrix +spec: + replicas: 1 + selector: + matchLabels: + app: synapse-admin + template: + metadata: + labels: + app: synapse-admin + spec: + containers: + - name: synapse-admin + image: awesometechnologies/synapse-admin:latest + ports: + - containerPort: 80 + readinessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 5 + timeoutSeconds: 3 + livenessProbe: + httpGet: + path: / + port: 80 + initialDelaySeconds: 15 + timeoutSeconds: 3 +--- +apiVersion: v1 +kind: Service +metadata: + name: synapse-admin + namespace: matrix +spec: + selector: + app: synapse-admin + ports: + - name: http + port: 80 + targetPort: 80 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: synapse-admin + namespace: matrix + annotations: + traefik.ingress.kubernetes.io/router.entrypoints: websecure +spec: + tls: + - hosts: + - synapse-admin.matrix-lp.allarddcs.nl + rules: + - host: synapse-admin.matrix-lp.allarddcs.nl + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: synapse-admin + port: + number: 80 diff --git a/lp/matrix/werkt/hookshot.yaml b/lp/matrix/werkt/hookshot.yaml new file mode 100644 index 0000000..c530cfb --- /dev/null +++ b/lp/matrix/werkt/hookshot.yaml @@ -0,0 +1,221 @@ +# =========================== +# Hookshot ConfigMap +# =========================== +apiVersion: v1 +kind: ConfigMap +metadata: + name: matrix-hookshot-config + namespace: matrix +data: + config.yml: | + bridge: + domain: matrix-lp.allarddcs.nl + url: http://matrix.matrix.svc.cluster.local:8008 + port: 9993 + bindAddress: 0.0.0.0 + + homeserver: + url: http://matrix.matrix.svc.cluster.local:8008 + domain: matrix-lp.allarddcs.nl + + appservice: + id: hookshot + as_token: d3c8fccbe082aa2a59da362b3805abe4c4cebcd7e822cdbd700d84e7c55c485f + hs_token: c31f7a18d3d2d79bd7a03e2794d966317155c409699cb6fd0922023cf45f9c3b + bot: + username: hookshot + displayname: Hookshot + + logging: + level: debug + + listeners: + - port: 9000 + bindAddress: 0.0.0.0 + resources: + - webhooks + - port: 9001 + bindAddress: 0.0.0.0 + resources: + - metrics + - port: 9002 + bindAddress: 0.0.0.0 + resources: + - widgets + - port: 9003 + bindAddress: 0.0.0.0 + resources: + - appservice + + # Redis for persistent token storage + cache: + redisUri: 
redis://redis:6379 + + generic: + enabled: true + urlPrefix: https://hookshot-lp.allarddcs.nl/webhook/ + userIdPrefix: _webhooks_ + includeHookBody: true + allowJsTransformationFunctions: true + + passFile: /data/passkey.pem + + connections: + - connectionType: uk.half-shot.matrix-hookshot.generic.hook + stateKey: generic-1 + roomId: "!BPcyrRUfYdBJrFVdlV:matrix-lp.allarddcs.nl" + state: + name: "Harbor Webhook" + webhookId: harbor + + + + +--- +# =========================== +# Hookshot Deployment +# =========================== +apiVersion: apps/v1 +kind: Deployment +metadata: + name: matrix-hookshot + namespace: matrix +spec: + replicas: 1 + selector: + matchLabels: + app: matrix-hookshot + template: + metadata: + labels: + app: matrix-hookshot + spec: + containers: + - name: hookshot + image: halfshot/matrix-hookshot:latest + ports: + - name: webhooks + containerPort: 9000 + - name: metrics + containerPort: 9001 + - name: widgets + containerPort: 9002 + - name: appservice + containerPort: 9003 + - name: matrix + containerPort: 9993 + volumeMounts: + - name: config + mountPath: /data/config.yml + subPath: config.yml + - name: registration + mountPath: /data/registration.yml + subPath: registration.yml + - name: hookshot-passkey + mountPath: /data/passkey.pem + subPath: passkey.pem + env: + - name: CONFIG_FILE + value: /data/config.yml + - name: REGISTRATION_FILE + value: /data/registration.yml + - name: NODE_TLS_REJECT_UNAUTHORIZED + value: "0" + - name: NODE_OPTIONS + value: "--dns-result-order=ipv4first" + volumes: + - name: config + configMap: + name: matrix-hookshot-config + - name: registration + secret: + secretName: matrix-hookshot-registration + - name: hookshot-passkey + secret: + secretName: hookshot-passkey + +--- + +# =========================== +# Hookshot Service +# =========================== +apiVersion: v1 +kind: Service +metadata: + name: matrix-hookshot + namespace: matrix +spec: + selector: + app: matrix-hookshot + ports: + - name: webhooks + port: 9000 + targetPort: 9000 + - name: metrics + port: 9001 + targetPort: 9000 + - name: widgets + port: 9002 + targetPort: 9000 + - name: appservice + port: 9003 + targetPort: 9003 + - name: matrix + port: 9993 + targetPort: 9993 + +--- +# =========================== +# Hookshot IngressRoute +# =========================== +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: matrix-hookshot + namespace: matrix +spec: + entryPoints: + - websecure + routes: + - match: Host(`hookshot-lp.allarddcs.nl`) && PathPrefix(`/webhook`) + kind: Rule + services: + - name: matrix-hookshot + port: 9000 + tls: + certResolver: default +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: hookshot-pv +spec: + storageClassName: "" + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + mountOptions: + - hard + - nfsvers=4.1 + nfs: + server: 192.168.2.110 + path: /mnt/nfs_share/hookshot/lp + readOnly: false +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hookshot-pvc + namespace: matrix +spec: + storageClassName: "" + volumeName: hookshot-pv + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 1Gi diff --git a/lp/matrix/werkt/matrix.yaml b/lp/matrix/werkt/matrix.yaml new file mode 100755 index 0000000..06b2764 --- /dev/null +++ b/lp/matrix/werkt/matrix.yaml @@ -0,0 +1,122 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: matrix + namespace: matrix + labels: + app: matrix +spec: + replicas: 1 + 
selector: + matchLabels: + app: matrix + template: + metadata: + labels: + app: matrix + spec: + containers: + - name: matrix + image: matrixdotorg/synapse:latest +# args: +# - generate + env: + - name: SYNAPSE_SERVER_NAME + value: "matrix-lp.allarddcs.nl" +# - name: SYNAPSE_REPORT_STATS +# value: "yes" + volumeMounts: + - mountPath: /data + name: matrix + - name: hookshot-registration + mountPath: /appservices/hookshot-registration.yml + subPath: registration.yml + volumes: + - name: matrix + persistentVolumeClaim: + claimName: matrix-pvc + - name: hookshot-registration + secret: + secretName: matrix-hookshot-registration +--- +apiVersion: v1 +kind: Service +metadata: + name: matrix + namespace: matrix +spec: + ports: + - name: http + targetPort: 8008 + port: 8008 + selector: + app: matrix + type: ClusterIP +--- +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: matrix-http + namespace: matrix +spec: + entryPoints: + - web + routes: + - match: Host(`matrix-lp.allarddcs.nl`) + kind: Rule + services: + - name: matrix + port: 8008 +--- +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: matrix-tls + namespace: matrix +spec: + entryPoints: + - websecure + routes: + - match: Host(`matrix-lp.allarddcs.nl`) + kind: Rule + services: + - name: matrix + port: 8008 + tls: + secretName: matrix-lp.allarddcs.nl-tls +# certResolver: letsencrypt +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: matrix-pv +spec: + storageClassName: "" + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + mountOptions: + - hard + - nfsvers=4.1 + nfs: + server: 192.168.2.110 + path: /mnt/nfs_share/matrix/lp + readOnly: false +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: matrix-pvc + namespace: matrix +spec: + storageClassName: "" + volumeName: matrix-pv + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: 1Gi + diff --git a/lp/matrix/werkt/registration-secret.yaml b/lp/matrix/werkt/registration-secret.yaml new file mode 100644 index 0000000..f702894 --- /dev/null +++ b/lp/matrix/werkt/registration-secret.yaml @@ -0,0 +1,22 @@ +# =========================== +# Hookshot Registration Secret +# =========================== +apiVersion: v1 +kind: Secret +metadata: + name: matrix-hookshot-registration + namespace: matrix +type: Opaque +stringData: + registration.yml: | + id: hookshot + url: http://matrix-hookshot.matrix.svc.cluster.local:9993 + as_token: d3c8fccbe082aa2a59da362b3805abe4c4cebcd7e822cdbd700d84e7c55c485f + hs_token: c31f7a18d3d2d79bd7a03e2794d966317155c409699cb6fd0922023cf45f9c3b + sender_localpart: hookshot + namespaces: + users: + - exclusive: true + regex: "^@(hookshot|_webhooks_.*):matrix-lp.allarddcs.nl$" + rooms: [] + aliases: [] diff --git a/lp/postgres15/postgres15.yaml b/lp/postgres15/postgres15.yaml new file mode 100755 index 0000000..72f1feb --- /dev/null +++ b/lp/postgres15/postgres15.yaml @@ -0,0 +1,84 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: postgres15-pv +spec: + storageClassName: "" + capacity: + storage: 2Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + mountOptions: + - hard + - nfsvers=4.1 + nfs: + server: 192.168.2.110 + path: /mnt/nfs_share/lp/postgres15 + readOnly: false +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres15-pvc + namespace: postgres +spec: + storageClassName: "" + volumeName: postgres15-pv + accessModes: + - ReadWriteMany + volumeMode: 
Filesystem + resources: + requests: + storage: 2Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres15 + namespace: postgres +spec: + serviceName: postgres15 + replicas: 1 + selector: + matchLabels: + app: postgres15 + template: + metadata: + labels: + app: postgres15 + spec: + containers: + - name: postgres15 + image: postgres:15 + ports: + - containerPort: 5432 + env: + - name: POSTGRES_DB + value: postgres + - name: POSTGRES_USER + value: postgres + - name: POSTGRES_PASSWORD + value: postgres + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: postgres + volumes: + - name: postgres + persistentVolumeClaim: + claimName: postgres15-pvc +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres15 + namespace: postgres + labels: + name: postgres15 +spec: + type: ClusterIP + ports: + - port: 5432 + name: postgres + selector: + app: postgres15 diff --git a/prod/matterbridge/matterbridge.yaml b/prod/matterbridge/matterbridge.yaml index e7bcf6d..8c5bc39 100644 --- a/prod/matterbridge/matterbridge.yaml +++ b/prod/matterbridge/matterbridge.yaml @@ -59,6 +59,7 @@ data: Server="mattermost-prod.allarddcs.nl" Token="xfxh83q14prftd61c4y4hiuw6w" Team="matrix" + UseTLS=true RemoteNickFormat="{NICK}" [[gateway]] @@ -66,7 +67,7 @@ data: enable=true [[gateway.inout]] account="matrix.my-matrix" - channel="!UDCHpOSdDiIbbhoBrb:matrix-lp.allarddcs.nl" + channel="!AlPdEMyMVsLhoRYPOn:matrix-lp.allarddcs.nl" [[gateway.inout]] account="mattermost.my-mattermost" channel="matrix" diff --git a/prod/mattermost/mattermost.yaml b/prod/mattermost/mattermost.yaml index f6ca211..fa35ade 100755 --- a/prod/mattermost/mattermost.yaml +++ b/prod/mattermost/mattermost.yaml @@ -74,7 +74,7 @@ spec: port: 8065 selector: app: mattermost - type: NodePort + type: ClusterIP --- apiVersion: traefik.io/v1alpha1 kind: IngressRoute diff --git a/temp.txt b/temp.txt new file mode 100644 index 0000000..75fe018 --- /dev/null +++ b/temp.txt @@ -0,0 +1,1442 @@ +#!/usr/bin/env python +# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import curses +import logging +import os +import sys +import time +import traceback +from types import TracebackType +from typing import ( + Any, + Awaitable, + Callable, + Dict, + Generator, + Iterable, + List, + NoReturn, + Optional, + Set, + Tuple, + Type, + TypeVar, + cast, +) + +import yaml +from typing_extensions import TypedDict + +from twisted.internet import defer, reactor as reactor_ + +from synapse.config.database import DatabaseConnectionConfig +from synapse.config.homeserver import HomeServerConfig +from synapse.logging.context import ( + LoggingContext, + make_deferred_yieldable, + run_in_background, +) +from synapse.notifier import ReplicationNotifier +from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn +from synapse.storage.databases.main import FilteringWorkerStore, PushRuleStore +from synapse.storage.databases.main.account_data import AccountDataWorkerStore +from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore +from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore +from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore +from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore +from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore +from synapse.storage.databases.main.event_federation import EventFederationWorkerStore +from synapse.storage.databases.main.event_push_actions import EventPushActionsStore +from synapse.storage.databases.main.events_bg_updates import ( + EventsBackgroundUpdatesStore, +) +from synapse.storage.databases.main.media_repository import ( + MediaRepositoryBackgroundUpdateStore, +) +from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore +from synapse.storage.databases.main.profile import ProfileWorkerStore +from synapse.storage.databases.main.pusher import ( + PusherBackgroundUpdatesStore, + PusherWorkerStore, +) +from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore +from synapse.storage.databases.main.registration import ( + RegistrationBackgroundUpdateStore, + find_max_generated_user_id_localpart, +) +from synapse.storage.databases.main.relations import RelationsWorkerStore +from synapse.storage.databases.main.room import RoomBackgroundUpdateStore +from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore +from synapse.storage.databases.main.search import SearchBackgroundUpdateStore +from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore +from synapse.storage.databases.main.stats import StatsStore +from synapse.storage.databases.main.user_directory import ( + UserDirectoryBackgroundUpdateStore, +) +from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore +from synapse.storage.engines import create_engine +from synapse.storage.prepare_database import prepare_database +from synapse.types import ISynapseReactor +from synapse.util import SYNAPSE_VERSION, Clock + +# Cast safety: Twisted does some naughty magic which replaces the +# twisted.internet.reactor module with a Reactor instance at runtime. +reactor = cast(ISynapseReactor, reactor_) +logger = logging.getLogger("synapse_port_db") + + +# SQLite doesn't have a dedicated boolean type (it stores True/False as 1/0). This means +# portdb will read sqlite bools as integers, then try to insert them into postgres +# boolean columns---which fails. 
Lacking some Python-parseable metaschema, we must +# specify which integer columns should be inserted as booleans into postgres. +BOOLEAN_COLUMNS = { + "access_tokens": ["used"], + "account_validity": ["email_sent"], + "device_lists_changes_in_room": ["converted_to_destinations"], + "device_lists_outbound_pokes": ["sent"], + "devices": ["hidden"], + "e2e_fallback_keys_json": ["used"], + "e2e_room_keys": ["is_verified"], + "event_edges": ["is_state"], + "events": ["processed", "outlier", "contains_url"], + "local_media_repository": ["safe_from_quarantine"], + "presence_list": ["accepted"], + "presence_stream": ["currently_active"], + "public_room_list_stream": ["visibility"], + "pushers": ["enabled"], + "redactions": ["have_censored"], + "room_stats_state": ["is_federatable"], + "rooms": ["is_public", "has_auth_chain_index"], + "users": ["shadow_banned", "approved", "locked"], + "un_partial_stated_event_stream": ["rejection_status_changed"], + "users_who_share_rooms": ["share_private"], + "per_user_experimental_features": ["enabled"], +} + + +# These tables are never deleted from in normal operation [*], so we can resume porting +# over rows from a previous attempt rather than starting from scratch. +# +# [*]: We do delete from many of these tables when purging a room, and +# presumably when purging old events. So we might e.g. +# +# 1. Run portdb and port half of some table. +# 2. Stop portdb. +# 3. Purge something, deleting some of the rows we've ported over. +# 4. Restart portdb. The rows deleted from sqlite are still present in postgres. +# +# But this isn't the end of the world: we should be able to repeat the purge +# on the postgres DB when porting completes. +APPEND_ONLY_TABLES = [ + "cache_invalidation_stream_by_instance", + "event_auth", + "event_edges", + "event_json", + "event_reference_hashes", + "event_search", + "event_to_state_groups", + "events", + "ex_outlier_stream", + "local_media_repository", + "local_media_repository_thumbnails", + "presence_stream", + "public_room_list_stream", + "push_rules_stream", + "received_transactions", + "redactions", + "rejections", + "remote_media_cache", + "remote_media_cache_thumbnails", + "room_memberships", + "room_names", + "rooms", + "sent_transactions", + "state_events", + "state_group_edges", + "state_groups", + "state_groups_state", + "stream_ordering_to_exterm", + "topics", + "transaction_id_to_pdu", + "un_partial_stated_event_stream", + "users", +] + + +IGNORED_TABLES = { + # We don't port these tables, as they're a faff and we can regenerate + # them anyway. + "user_directory", + "user_directory_search", + "user_directory_search_content", + "user_directory_search_docsize", + "user_directory_search_segdir", + "user_directory_search_segments", + "user_directory_search_stat", + "user_directory_search_pos", + "users_who_share_private_rooms", + "users_in_public_rooms", + # UI auth sessions have foreign keys so additional care needs to be taken, + # the sessions are transient anyway, so ignore them. + "ui_auth_sessions", + "ui_auth_sessions_credentials", + "ui_auth_sessions_ips", + # Ignore the worker locks table, as a) there shouldn't be any acquired locks + # after porting, and b) the circular foreign key constraints make it hard to + # port. + "worker_read_write_locks_mode", + "worker_read_write_locks", +} + + +# Error returned by the run function. Used at the top-level part of the script to +# handle errors and return codes. +end_error: Optional[str] = None +# The exec_info for the error, if any. 
If error is defined but not exec_info the script +# will show only the error message without the stacktrace, if exec_info is defined but +# not the error then the script will show nothing outside of what's printed in the run +# function. If both are defined, the script will print both the error and the stacktrace. +end_error_exec_info: Optional[ + Tuple[Type[BaseException], BaseException, TracebackType] +] = None + +R = TypeVar("R") + + +class Store( + EventPushActionsStore, + ClientIpBackgroundUpdateStore, + DeviceInboxBackgroundUpdateStore, + DeviceBackgroundUpdateStore, + EventsBackgroundUpdatesStore, + MediaRepositoryBackgroundUpdateStore, + RegistrationBackgroundUpdateStore, + RoomBackgroundUpdateStore, + RoomMemberBackgroundUpdateStore, + SearchBackgroundUpdateStore, + StateBackgroundUpdateStore, + MainStateBackgroundUpdateStore, + UserDirectoryBackgroundUpdateStore, + EndToEndKeyBackgroundStore, + EndToEndRoomKeyBackgroundStore, + StatsStore, + AccountDataWorkerStore, + FilteringWorkerStore, + ProfileWorkerStore, + PushRuleStore, + PusherWorkerStore, + PusherBackgroundUpdatesStore, + PresenceBackgroundUpdateStore, + ReceiptsBackgroundUpdateStore, + RelationsWorkerStore, + EventFederationWorkerStore, +): + def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: + return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) + + def execute_sql(self, sql: str, *args: object) -> Awaitable[List[Tuple]]: + def r(txn: LoggingTransaction) -> List[Tuple]: + txn.execute(sql, args) + return txn.fetchall() + + return self.db_pool.runInteraction("execute_sql", r) + + def insert_many_txn( + self, txn: LoggingTransaction, table: str, headers: List[str], rows: List[Tuple] + ) -> None: + sql = "INSERT INTO %s (%s) VALUES (%s)" % ( + table, + ", ".join(k for k in headers), + ", ".join("%s" for _ in headers), + ) + + try: + txn.executemany(sql, rows) + except Exception: + logger.exception("Failed to insert: %s", table) + raise + + # Note: the parent method is an `async def`. + def set_room_is_public(self, room_id: str, is_public: bool) -> NoReturn: + raise Exception( + "Attempt to set room_is_public during port_db: database not empty?" + ) + + +class MockHomeserver: + def __init__(self, config: HomeServerConfig): + self.clock = Clock(reactor) + self.config = config + self.hostname = config.server.server_name + self.version_string = SYNAPSE_VERSION + + def get_clock(self) -> Clock: + return self.clock + + def get_reactor(self) -> ISynapseReactor: + return reactor + + def get_instance_name(self) -> str: + return "master" + + def should_send_federation(self) -> bool: + return False + + def get_replication_notifier(self) -> ReplicationNotifier: + return ReplicationNotifier() + + +class Porter: + def __init__( + self, + sqlite_config: Dict[str, Any], + progress: "Progress", + batch_size: int, + hs_config: HomeServerConfig, + ): + self.sqlite_config = sqlite_config + self.progress = progress + self.batch_size = batch_size + self.hs_config = hs_config + + async def setup_table(self, table: str) -> Tuple[str, int, int, int, int]: + if table in APPEND_ONLY_TABLES: + # It's safe to just carry on inserting. 
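+ # port_from_sqlite3 holds a checkpoint per table: forward_rowid is the + # next rowid for the ascending sweep, backward_rowid the next one for + # the descending sweep; no row yet means the port of this table has + # never been started.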
+ row = await self.postgres_store.db_pool.simple_select_one( + table="port_from_sqlite3", + keyvalues={"table_name": table}, + retcols=("forward_rowid", "backward_rowid"), + allow_none=True, + ) + + total_to_port = None + if row is None: + if table == "sent_transactions": + ( + forward_chunk, + already_ported, + total_to_port, + ) = await self._setup_sent_transactions() + backward_chunk = 0 + else: + await self.postgres_store.db_pool.simple_insert( + table="port_from_sqlite3", + values={ + "table_name": table, + "forward_rowid": 1, + "backward_rowid": 0, + }, + ) + + forward_chunk = 1 + backward_chunk = 0 + already_ported = 0 + else: + forward_chunk, backward_chunk = row + + if total_to_port is None: + already_ported, total_to_port = await self._get_total_count_to_port( + table, forward_chunk, backward_chunk + ) + else: + + def delete_all(txn: LoggingTransaction) -> None: + txn.execute( + "DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,) + ) + txn.execute("TRUNCATE %s CASCADE" % (table,)) + + await self.postgres_store.execute(delete_all) + + await self.postgres_store.db_pool.simple_insert( + table="port_from_sqlite3", + values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0}, + ) + + forward_chunk = 1 + backward_chunk = 0 + + already_ported, total_to_port = await self._get_total_count_to_port( + table, forward_chunk, backward_chunk + ) + + return table, already_ported, total_to_port, forward_chunk, backward_chunk + + async def get_table_constraints(self) -> Dict[str, Set[str]]: + """Returns a map of tables that have foreign key constraints to tables they depend on.""" + + def _get_constraints(txn: LoggingTransaction) -> Dict[str, Set[str]]: + # We can pull the information about foreign key constraints out from + # the postgres schema tables. + sql = """ + SELECT DISTINCT + tc.table_name, + ccu.table_name AS foreign_table_name + FROM + information_schema.table_constraints AS tc + INNER JOIN information_schema.constraint_column_usage AS ccu + USING (table_schema, constraint_name) + WHERE tc.constraint_type = 'FOREIGN KEY' + AND tc.table_name != ccu.table_name; + """ + txn.execute(sql) + + results: Dict[str, Set[str]] = {} + for table, foreign_table in txn: + results.setdefault(table, set()).add(foreign_table) + return results + + return await self.postgres_store.db_pool.runInteraction( + "get_table_constraints", _get_constraints + ) + + async def handle_table( + self, + table: str, + postgres_size: int, + table_size: int, + forward_chunk: int, + backward_chunk: int, + ) -> None: + logger.info( + "Table %s: %i/%i (rows %i-%i) already ported", + table, + postgres_size, + table_size, + backward_chunk + 1, + forward_chunk - 1, + ) + + if not table_size: + return + + self.progress.add_table(table, postgres_size, table_size) + + if table == "event_search": + await self.handle_search_table( + postgres_size, table_size, forward_chunk, backward_chunk + ) + return + + if table in IGNORED_TABLES: + self.progress.update(table, table_size) # Mark table as done + return + + if table == "user_directory_stream_pos": + # We need to make sure there is a single row, `(X, null), as that is + # what synapse expects to be there. + await self.postgres_store.db_pool.simple_insert( + table=table, values={"stream_id": None} + ) + self.progress.update(table, table_size) # Mark table as done + return + + # We sweep over rowids in two directions: one forwards (rowids 1, 2, 3, ...) + # and another backwards (rowids 0, -1, -2, ...). 
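+ # Two cursors are needed because rowids are not always positive: where + # the rowid aliases a column such as events.stream_ordering, backfilled + # rows can sit at rowid <= 0, so an ascending scan from 1 alone would + # miss them.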
+ forward_select = ( + "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,) + ) + + backward_select = ( + "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid DESC LIMIT ?" + % (table,) + ) + + do_forward = [True] + do_backward = [True] + + while True: + + def r( + txn: LoggingTransaction, + ) -> Tuple[Optional[List[str]], List[Tuple], List[Tuple]]: + forward_rows = [] + backward_rows = [] + if do_forward[0]: + txn.execute(forward_select, (forward_chunk, self.batch_size)) + forward_rows = txn.fetchall() + if not forward_rows: + do_forward[0] = False + + if do_backward[0]: + txn.execute(backward_select, (backward_chunk, self.batch_size)) + backward_rows = txn.fetchall() + if not backward_rows: + do_backward[0] = False + + if forward_rows or backward_rows: + assert txn.description is not None + headers: Optional[List[str]] = [ + column[0] for column in txn.description + ] + else: + headers = None + + return headers, forward_rows, backward_rows + + headers, frows, brows = await self.sqlite_store.db_pool.runInteraction( + "select", r + ) + + if frows or brows: + assert headers is not None + if frows: + forward_chunk = max(row[0] for row in frows) + 1 + if brows: + backward_chunk = min(row[0] for row in brows) - 1 + + rows = frows + brows + rows = self._convert_rows(table, headers, rows) + + def insert(txn: LoggingTransaction) -> None: + assert headers is not None + self.postgres_store.insert_many_txn(txn, table, headers[1:], rows) + + self.postgres_store.db_pool.simple_update_one_txn( + txn, + table="port_from_sqlite3", + keyvalues={"table_name": table}, + updatevalues={ + "forward_rowid": forward_chunk, + "backward_rowid": backward_chunk, + }, + ) + + await self.postgres_store.execute(insert) + + postgres_size += len(rows) + + self.progress.update(table, postgres_size) + else: + return + + async def handle_search_table( + self, + postgres_size: int, + table_size: int, + forward_chunk: int, + backward_chunk: int, + ) -> None: + select = ( + "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering" + " FROM event_search as es" + " INNER JOIN events AS e USING (event_id, room_id)" + " WHERE es.rowid >= ?" + " ORDER BY es.rowid LIMIT ?" + ) + + while True: + + def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]: + txn.execute(select, (forward_chunk, self.batch_size)) + rows = txn.fetchall() + assert txn.description is not None + headers = [column[0] for column in txn.description] + + return headers, rows + + headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) + + if rows: + forward_chunk = rows[-1][0] + 1 + + # We have to treat event_search differently since it has a + # different structure in the two different databases. 
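+ # In SQLite event_search is a full-text-search virtual table holding the + # raw text in `value`; in postgres it is a plain table with a tsvector + # column, so every row is re-indexed with to_tsvector() as it is copied.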
+ def insert(txn: LoggingTransaction) -> None: + sql = ( + "INSERT INTO event_search (event_id, room_id, key," + " sender, vector, origin_server_ts, stream_ordering)" + " VALUES (?,?,?,?,to_tsvector('english', ?),?,?)" + ) + + rows_dict = [] + for row in rows: + d = dict(zip(headers, row)) + if "\0" in d["value"]: + logger.warning("dropping search row %s", d) + else: + rows_dict.append(d) + + txn.executemany( + sql, + [ + ( + row["event_id"], + row["room_id"], + row["key"], + row["sender"], + row["value"], + row["origin_server_ts"], + row["stream_ordering"], + ) + for row in rows_dict + ], + ) + + self.postgres_store.db_pool.simple_update_one_txn( + txn, + table="port_from_sqlite3", + keyvalues={"table_name": "event_search"}, + updatevalues={ + "forward_rowid": forward_chunk, + "backward_rowid": backward_chunk, + }, + ) + + await self.postgres_store.execute(insert) + + postgres_size += len(rows) + + self.progress.update("event_search", postgres_size) + + else: + return + + def build_db_store( + self, + db_config: DatabaseConnectionConfig, + allow_outdated_version: bool = False, + ) -> Store: + """Builds and returns a database store using the provided configuration. + + Args: + db_config: The database configuration + allow_outdated_version: True to suppress errors about the database server + version being too old to run a complete synapse + + Returns: + The built Store object. + """ + self.progress.set_state("Preparing %s" % db_config.config["name"]) + + engine = create_engine(db_config.config) + + hs = MockHomeserver(self.hs_config) + + with make_conn(db_config, engine, "portdb") as db_conn: + engine.check_database( + db_conn, allow_outdated_version=allow_outdated_version + ) + prepare_database(db_conn, engine, config=self.hs_config) + # Type safety: ignore that we're using Mock homeservers here. + store = Store(DatabasePool(hs, db_config, engine), db_conn, hs) # type: ignore[arg-type] + db_conn.commit() + + return store + + async def run_background_updates_on_postgres(self) -> None: + # Manually apply all background updates on the PostgreSQL database. + postgres_ready = ( + await self.postgres_store.db_pool.updates.has_completed_background_updates() + ) + + if not postgres_ready: + # Only say that we're running background updates when there are background + # updates to run. + self.progress.set_state("Running background updates on PostgreSQL") + + while not postgres_ready: + await self.postgres_store.db_pool.updates.do_next_background_update(True) + postgres_ready = await ( + self.postgres_store.db_pool.updates.has_completed_background_updates() + ) + + @staticmethod + def _is_sqlite_autovacuum_enabled(txn: LoggingTransaction) -> bool: + """ + Returns true if auto_vacuum is enabled in SQLite. + https://www.sqlite.org/pragma.html#pragma_auto_vacuum + + Vacuuming changes the rowids on rows in the database. + Auto-vacuuming is therefore dangerous when used in conjunction with this script. + + Note that the auto_vacuum setting can't be changed without performing + a VACUUM after trying to change the pragma. + """ + txn.execute("PRAGMA auto_vacuum") + row = txn.fetchone() + assert row is not None, "`PRAGMA auto_vacuum` did not give a row." + (autovacuum_setting,) = row + # 0 means off. 1 means full. 2 means incremental. + return autovacuum_setting != 0 + + async def run(self) -> None: + """Ports the SQLite database to a PostgreSQL database. + + When a fatal error is met, its message is assigned to the global "end_error" + variable. 
When this error comes with a stacktrace, its exec_info is assigned to + the global "end_error_exec_info" variable. + """ + global end_error + + try: + # we allow people to port away from outdated versions of sqlite. + self.sqlite_store = self.build_db_store( + DatabaseConnectionConfig("master-sqlite", self.sqlite_config), + allow_outdated_version=True, + ) + + # For safety, ensure auto_vacuums are disabled. + if await self.sqlite_store.db_pool.runInteraction( + "is_sqlite_autovacuum_enabled", self._is_sqlite_autovacuum_enabled + ): + end_error = ( + "auto_vacuum is enabled in the SQLite database." + " (This is not the default configuration.)\n" + " This script relies on rowids being consistent and must not" + " be used if the database could be vacuumed between re-runs.\n" + " To disable auto_vacuum, you need to stop Synapse and run the following SQL:\n" + " PRAGMA auto_vacuum=off;\n" + " VACUUM;" + ) + return + + # Check if all background updates are done, abort if not. + updates_complete = ( + await self.sqlite_store.db_pool.updates.has_completed_background_updates() + ) + if not updates_complete: + end_error = ( + "Pending background updates exist in the SQLite3 database." + " Please start Synapse again and wait until every update has finished" + " before running this script.\n" + ) + return + + self.postgres_store = self.build_db_store( + self.hs_config.database.get_single_database() + ) + + await self.run_background_updates_on_postgres() + + self.progress.set_state("Creating port tables") + + def create_port_table(txn: LoggingTransaction) -> None: + txn.execute( + "CREATE TABLE IF NOT EXISTS port_from_sqlite3 (" + " table_name varchar(100) NOT NULL UNIQUE," + " forward_rowid bigint NOT NULL," + " backward_rowid bigint NOT NULL" + ")" + ) + + # The old port script created a table with just a "rowid" column. + # We want people to be able to rerun this script from an old port + # so that they can pick up any missing events that were not + # ported across. + def alter_table(txn: LoggingTransaction) -> None: + txn.execute( + "ALTER TABLE IF EXISTS port_from_sqlite3" + " RENAME rowid TO forward_rowid" + ) + txn.execute( + "ALTER TABLE IF EXISTS port_from_sqlite3" + " ADD backward_rowid bigint NOT NULL DEFAULT 0" + ) + + try: + await self.postgres_store.db_pool.runInteraction( + "alter_table", alter_table + ) + except Exception: + # On Error Resume Next + pass + + await self.postgres_store.db_pool.runInteraction( + "create_port_table", create_port_table + ) + + # Step 2. Set up sequences + # + # We do this before porting the tables so that even if we fail half + # way through the postgres DB always have sequences that are greater + # than their respective tables. If we don't then creating the + # `DataStore` object will fail due to the inconsistency. 
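+ # Each sequence gets restarted at max(existing id) + 1 (e.g. + # ALTER SEQUENCE state_group_id_seq RESTART WITH <curr_id + 1>), so ids + # generated after the port can never collide with the ported rows.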
+ self.progress.set_state("Setting up sequence generators") + await self._setup_state_group_id_seq() + await self._setup_user_id_seq() + await self._setup_events_stream_seqs() + await self._setup_sequence( + "un_partial_stated_event_stream_sequence", + ("un_partial_stated_event_stream",), + ) + await self._setup_sequence( + "device_inbox_sequence", ("device_inbox", "device_federation_outbox") + ) + await self._setup_sequence( + "account_data_sequence", + ("room_account_data", "room_tags_revisions", "account_data"), + ) + await self._setup_sequence("receipts_sequence", ("receipts_linearized",)) + await self._setup_sequence("presence_stream_sequence", ("presence_stream",)) + await self._setup_auth_chain_sequence() + await self._setup_sequence( + "application_services_txn_id_seq", + ("application_services_txns",), + "txn_id", + ) + + # Step 3. Get tables. + self.progress.set_state("Fetching tables") + sqlite_tables = await self.sqlite_store.db_pool.simple_select_onecol( + table="sqlite_master", keyvalues={"type": "table"}, retcol="name" + ) + + postgres_tables = await self.postgres_store.db_pool.simple_select_onecol( + table="information_schema.tables", + keyvalues={}, + retcol="distinct table_name", + ) + + tables = set(sqlite_tables) & set(postgres_tables) + logger.info("Found %d tables", len(tables)) + + # Step 4. Figure out what still needs copying + self.progress.set_state("Checking on port progress") + setup_res = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background(self.setup_table, table) + for table in tables + if table not in ["schema_version", "applied_schema_deltas"] + and not table.startswith("sqlite_") + ], + consumeErrors=True, + ) + ) + # Map from table name to args passed to `handle_table`, i.e. a tuple + # of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`. + tables_to_port_info_map = { + r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES + } + + # Step 5. Do the copying. + # + # This is slightly convoluted as we need to ensure tables are ported + # in the correct order due to foreign key constraints. + self.progress.set_state("Copying to postgres") + + constraints = await self.get_table_constraints() + tables_ported = set() # type: Set[str] + + while tables_to_port_info_map: + # Pulls out all tables that are still to be ported and which + # only depend on tables that are already ported (if any). + tables_to_port = [ + table + for table in tables_to_port_info_map + if not constraints.get(table, set()) - tables_ported + ] + + await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self.handle_table, + table, + *tables_to_port_info_map.pop(table), + ) + for table in tables_to_port + ], + consumeErrors=True, + ) + ) + + tables_ported.update(tables_to_port) + + self.progress.done() + except Exception as e: + global end_error_exec_info + end_error = str(e) + # Type safety: we're in an exception handler, so the exc_info() tuple + # will not be (None, None, None). 
+ end_error_exec_info = sys.exc_info() # type: ignore[assignment] + logger.exception("") + finally: + reactor.stop() + + def _convert_rows( + self, table: str, headers: List[str], rows: List[Tuple] + ) -> List[Tuple]: + bool_col_names = BOOLEAN_COLUMNS.get(table, []) + + bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names] + + class BadValueException(Exception): + pass + + def conv(j: int, col: object) -> object: + if j in bool_cols: + return bool(col) + if isinstance(col, bytes): + return bytearray(col) + elif isinstance(col, str) and "\0" in col: + logger.warning( + "DROPPING ROW: NUL value in table %s col %s: %r", + table, + headers[j], + col, + ) + raise BadValueException() + return col + + outrows = [] + for row in rows: + try: + outrows.append( + tuple(conv(j, col) for j, col in enumerate(row) if j > 0) + ) + except BadValueException: + pass + + return outrows + + async def _setup_sent_transactions(self) -> Tuple[int, int, int]: + # Only save things from the last day + yesterday = int(time.time() * 1000) - 86400000 + + # And save the max transaction id from each destination + select = ( + "SELECT rowid, * FROM sent_transactions WHERE rowid IN (" + "SELECT max(rowid) FROM sent_transactions" + " GROUP BY destination" + ")" + ) + + def r(txn: LoggingTransaction) -> Tuple[List[str], List[Tuple]]: + txn.execute(select) + rows = txn.fetchall() + assert txn.description is not None + headers = [column[0] for column in txn.description] + + ts_ind = headers.index("ts") + + return headers, [r for r in rows if r[ts_ind] < yesterday] + + headers, rows = await self.sqlite_store.db_pool.runInteraction("select", r) + + rows = self._convert_rows("sent_transactions", headers, rows) + + inserted_rows = len(rows) + if inserted_rows: + max_inserted_rowid = max(r[0] for r in rows) + + def insert(txn: LoggingTransaction) -> None: + self.postgres_store.insert_many_txn( + txn, "sent_transactions", headers[1:], rows + ) + + await self.postgres_store.execute(insert) + else: + max_inserted_rowid = 0 + + def get_start_id(txn: LoggingTransaction) -> int: + txn.execute( + "SELECT rowid FROM sent_transactions WHERE ts >= ?" + " ORDER BY rowid ASC LIMIT 1", + (yesterday,), + ) + + rows = txn.fetchall() + if rows: + return rows[0][0] + else: + return 1 + + next_chunk = await self.sqlite_store.execute(get_start_id) + next_chunk = max(max_inserted_rowid + 1, next_chunk) + + await self.postgres_store.db_pool.simple_insert( + table="port_from_sqlite3", + values={ + "table_name": "sent_transactions", + "forward_rowid": next_chunk, + "backward_rowid": 0, + }, + ) + + def get_sent_table_size(txn: LoggingTransaction) -> int: + txn.execute( + "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,) + ) + result = txn.fetchone() + assert result is not None + return int(result[0]) + + remaining_count = await self.sqlite_store.execute(get_sent_table_size) + + total_count = remaining_count + inserted_rows + + return next_chunk, inserted_rows, total_count + + async def _get_remaining_count_to_port( + self, table: str, forward_chunk: int, backward_chunk: int + ) -> int: + frows = cast( + List[Tuple[int]], + await self.sqlite_store.execute_sql( + "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk + ), + ) + + brows = cast( + List[Tuple[int]], + await self.sqlite_store.execute_sql( + "SELECT count(*) FROM %s WHERE rowid <= ?" 
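A worked example of `_convert_rows` above: SQLite stores booleans as 0/1 integers while the Postgres schema uses real booleans, so columns listed in BOOLEAN_COLUMNS are cast with bool(), and rows containing NUL bytes are dropped because Postgres rejects NUL in text values. The `users`/`admin` entry below is hypothetical, purely for illustration:

    # Suppose BOOLEAN_COLUMNS = {"users": ["admin"]}  (hypothetical entry).
    headers = ["rowid", "name", "admin"]
    rows = [
        (1, "@alice:example.com", 1),     # admin=1  -> True
        (2, "@bob:example.com", 0),       # admin=0  -> False
        (3, "@mal\0ory:example.com", 0),  # NUL byte -> whole row dropped
    ]
    # _convert_rows("users", headers, rows) returns
    #     [("@alice:example.com", True), ("@bob:example.com", False)]
    # The leading rowid column (j == 0) is stripped from every row as well.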
% (table,), backward_chunk + ), + ) + + return frows[0][0] + brows[0][0] + + async def _get_already_ported_count(self, table: str) -> int: + rows = await self.postgres_store.execute_sql( + "SELECT count(*) FROM %s" % (table,) + ) + + return rows[0][0] + + async def _get_total_count_to_port( + self, table: str, forward_chunk: int, backward_chunk: int + ) -> Tuple[int, int]: + remaining, done = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self._get_remaining_count_to_port, + table, + forward_chunk, + backward_chunk, + ), + run_in_background(self._get_already_ported_count, table), + ], + ) + ) + + remaining = int(remaining) if remaining else 0 + done = int(done) if done else 0 + + return done, remaining + done + + async def _setup_state_group_id_seq(self) -> None: + curr_id: Optional[ + int + ] = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True + ) + + if not curr_id: + return + + def r(txn: LoggingTransaction) -> None: + assert curr_id is not None + next_id = curr_id + 1 + txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,)) + + await self.postgres_store.db_pool.runInteraction("setup_state_group_id_seq", r) + + async def _setup_user_id_seq(self) -> None: + curr_id = await self.sqlite_store.db_pool.runInteraction( + "setup_user_id_seq", find_max_generated_user_id_localpart + ) + + def r(txn: LoggingTransaction) -> None: + next_id = curr_id + 1 + txn.execute("ALTER SEQUENCE user_id_seq RESTART WITH %s", (next_id,)) + + await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r) + + async def _setup_events_stream_seqs(self) -> None: + """Set the event stream sequences to the correct values.""" + + # We get called before we've ported the events table, so we need to + # fetch the current positions from the SQLite store. 
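Every `_setup_*_seq` helper here follows the same pattern: rows are copied across with their original IDs, which never advances the corresponding Postgres sequence, so the sequence must be restarted one past the highest ported value or the first INSERT after the port would collide. A distilled sketch (the helper name is ours; `LoggingTransaction` is the script's existing import). Note the two-step %s: string formatting injects the sequence name, while the value travels as a query parameter, which psycopg2 interpolates client-side and which therefore works even in a DDL statement:

    def restart_sequence(txn: LoggingTransaction, name: str, current_max: int) -> None:
        # Restart one past the highest copied ID so the next nextval()
        # cannot hand out an ID that a ported row already uses.
        txn.execute("ALTER SEQUENCE %s RESTART WITH %%s" % (name,), (current_max + 1,))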
+ curr_forward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="events", keyvalues={}, retcol="MAX(stream_ordering)", allow_none=True + ) + + curr_backward_id = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="events", + keyvalues={}, + retcol="MAX(-MIN(stream_ordering), 1)", + allow_none=True, + ) + + def _setup_events_stream_seqs_set_pos(txn: LoggingTransaction) -> None: + if curr_forward_id: + txn.execute( + "ALTER SEQUENCE events_stream_seq RESTART WITH %s", + (curr_forward_id + 1,), + ) + + if curr_backward_id: + txn.execute( + "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", + (curr_backward_id + 1,), + ) + + await self.postgres_store.db_pool.runInteraction( + "_setup_events_stream_seqs", + _setup_events_stream_seqs_set_pos, + ) + + async def _setup_sequence( + self, + sequence_name: str, + stream_id_tables: Iterable[str], + column_name: str = "stream_id", + ) -> None: + """Set a sequence to the correct value.""" + current_stream_ids = [] + for stream_id_table in stream_id_tables: + max_stream_id = cast( + int, + await self.sqlite_store.db_pool.simple_select_one_onecol( + table=stream_id_table, + keyvalues={}, + retcol=f"COALESCE(MAX({column_name}), 1)", + allow_none=True, + ), + ) + current_stream_ids.append(max_stream_id) + + next_id = max(current_stream_ids) + 1 + + def r(txn: LoggingTransaction) -> None: + sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,) + txn.execute(sql + " %s", (next_id,)) + + await self.postgres_store.db_pool.runInteraction( + "_setup_%s" % (sequence_name,), r + ) + + async def _setup_auth_chain_sequence(self) -> None: + curr_chain_id: Optional[ + int + ] = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="event_auth_chains", + keyvalues={}, + retcol="MAX(chain_id)", + allow_none=True, + ) + + def r(txn: LoggingTransaction) -> None: + # Presumably there is at least one row in event_auth_chains. 
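The `MAX(-MIN(stream_ordering), 1)` retcol above is worth unpacking: backfilled events are stored with negative stream orderings, so the backfill sequence has to be sized from the absolute value of the minimum. A worked example (assuming, as elsewhere in Synapse, that values drawn from the backfill sequence are used negated):

    # If the events table holds stream orderings 1..1234 for forward
    # events and -1..-567 for backfilled events, then:
    #     curr_forward_id  = MAX(stream_ordering)          = 1234
    #     curr_backward_id = MAX(-MIN(stream_ordering), 1) = 567
    # so events_stream_seq restarts at 1235, events_backfill_stream_seq
    # restarts at 568, and the next backfilled event gets ordering -568.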
+ assert curr_chain_id is not None + txn.execute( + "ALTER SEQUENCE event_auth_chain_id RESTART WITH %s", + (curr_chain_id + 1,), + ) + + if curr_chain_id is not None: + await self.postgres_store.db_pool.runInteraction( + "_setup_event_auth_chain_id", + r, + ) + + +############################################## +# The following is simply UI stuff +############################################## + + +class TableProgress(TypedDict): + start: int + num_done: int + total: int + perc: int + + +class Progress: + """Used to report progress of the port""" + + def __init__(self) -> None: + self.tables: Dict[str, TableProgress] = {} + + self.start_time = int(time.time()) + + def add_table(self, table: str, cur: int, size: int) -> None: + self.tables[table] = { + "start": cur, + "num_done": cur, + "total": size, + "perc": int(cur * 100 / size), + } + + def update(self, table: str, num_done: int) -> None: + data = self.tables[table] + data["num_done"] = num_done + data["perc"] = int(num_done * 100 / data["total"]) + + def done(self) -> None: + pass + + def set_state(self, state: str) -> None: + pass + + +class CursesProgress(Progress): + """Reports progress to a curses window""" + + def __init__(self, stdscr: "curses.window"): + self.stdscr = stdscr + + curses.use_default_colors() + curses.curs_set(0) + + curses.init_pair(1, curses.COLOR_RED, -1) + curses.init_pair(2, curses.COLOR_GREEN, -1) + + self.last_update = 0.0 + + self.finished = False + + self.total_processed = 0 + self.total_remaining = 0 + + super().__init__() + + def update(self, table: str, num_done: int) -> None: + super().update(table, num_done) + + self.total_processed = 0 + self.total_remaining = 0 + for data in self.tables.values(): + self.total_processed += data["num_done"] - data["start"] + self.total_remaining += data["total"] - data["num_done"] + + self.render() + + def render(self, force: bool = False) -> None: + now = time.time() + + if not force and now - self.last_update < 0.2: + # reactor.callLater(1, self.render) + return + + self.stdscr.clear() + + rows, cols = self.stdscr.getmaxyx() + + duration = int(now) - int(self.start_time) + + minutes, seconds = divmod(duration, 60) + duration_str = "%02dm %02ds" % (minutes, seconds) + + if self.finished: + status = "Time spent: %s (Done!)" % (duration_str,) + else: + if self.total_processed > 0: + left = float(self.total_remaining) / self.total_processed + + est_remaining = (int(now) - self.start_time) * left + est_remaining_str = "%02dm %02ds remaining" % divmod(est_remaining, 60) + else: + est_remaining_str = "Unknown" + status = "Time spent: %s (est. 
remaining: %s)" % ( + duration_str, + est_remaining_str, + ) + + self.stdscr.addstr(0, 0, status, curses.A_BOLD) + + max_len = max(len(t) for t in self.tables.keys()) + + left_margin = 5 + middle_space = 1 + + items = sorted(self.tables.items(), key=lambda i: (i[1]["perc"], i[0])) + + for i, (table, data) in enumerate(items): + if i + 2 >= rows: + break + + perc = data["perc"] + + color = curses.color_pair(2) if perc == 100 else curses.color_pair(1) + + self.stdscr.addstr( + i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color + ) + + size = 20 + + progress = "[%s%s]" % ( + "#" * int(perc * size / 100), + " " * (size - int(perc * size / 100)), + ) + + self.stdscr.addstr( + i + 2, + left_margin + max_len + middle_space, + "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]), + ) + + if self.finished: + self.stdscr.addstr(rows - 1, 0, "Press any key to exit...") + + self.stdscr.refresh() + self.last_update = time.time() + + def done(self) -> None: + self.finished = True + self.render(True) + self.stdscr.getch() + + def set_state(self, state: str) -> None: + self.stdscr.clear() + self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD) + self.stdscr.refresh() + + +class TerminalProgress(Progress): + """Just prints progress to the terminal""" + + def update(self, table: str, num_done: int) -> None: + super().update(table, num_done) + + data = self.tables[table] + + print( + "%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"]) + ) + + def set_state(self, state: str) -> None: + print(state + "...") + + +############################################## +############################################## + + +def main() -> None: + parser = argparse.ArgumentParser( + description="A script to port an existing synapse SQLite database to" + " a new PostgreSQL database." + ) + parser.add_argument("-v", action="store_true") + parser.add_argument( + "--sqlite-database", + required=True, + help="The snapshot of the SQLite database file. This must not be" + " currently used by a running synapse server", + ) + parser.add_argument( + "--postgres-config", + type=argparse.FileType("r"), + required=True, + help="The database config file for the PostgreSQL database", + ) + parser.add_argument( + "--curses", action="store_true", help="display a curses based progress UI" + ) + + parser.add_argument( + "--batch-size", + type=int, + default=1000, + help="The number of rows to select from the SQLite table each" + " iteration [default=1000]", + ) + + args = parser.parse_args() + + logging.basicConfig( + level=logging.DEBUG if args.v else logging.INFO, + format="%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s", + filename="port-synapse.log" if args.curses else None, + ) + + if not os.path.isfile(args.sqlite_database): + sys.stderr.write( + "The sqlite database you specified does not exist, please check that you have the" + "correct path." 
+ ) + sys.exit(1) + + sqlite_config = { + "name": "sqlite3", + "args": { + "database": args.sqlite_database, + "cp_min": 1, + "cp_max": 1, + "check_same_thread": False, + }, + } + + hs_config = yaml.safe_load(args.postgres_config) + + if "database" not in hs_config: + sys.stderr.write("The configuration file must have a 'database' section.\n") + sys.exit(4) + + postgres_config = hs_config["database"] + + if "name" not in postgres_config: + sys.stderr.write("Malformed database config: no 'name'\n") + sys.exit(2) + if postgres_config["name"] != "psycopg2": + sys.stderr.write("Database must use the 'psycopg2' connector.\n") + sys.exit(3) + + # Don't run the background tasks that get started by the data stores. + hs_config["run_background_tasks_on"] = "some_other_process" + + config = HomeServerConfig() + config.parse_config_dict(hs_config, "", "") + + def start(stdscr: Optional["curses.window"] = None) -> None: + progress: Progress + if stdscr: + progress = CursesProgress(stdscr) + else: + progress = TerminalProgress() + + porter = Porter( + sqlite_config=sqlite_config, + progress=progress, + batch_size=args.batch_size, + hs_config=config, + ) + + @defer.inlineCallbacks + def run() -> Generator["defer.Deferred[Any]", Any, None]: + with LoggingContext("synapse_port_db_run"): + yield defer.ensureDeferred(porter.run()) + + reactor.callWhenRunning(run) + + reactor.run() + + if args.curses: + curses.wrapper(start) + else: + start() + + if end_error: + if end_error_exec_info: + exc_type, exc_value, exc_traceback = end_error_exec_info + traceback.print_exception(exc_type, exc_value, exc_traceback) + + sys.stderr.write(end_error) + + sys.exit(5) + + +if __name__ == "__main__": + main()
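The file passed via --postgres-config must contain a `database` section naming the psycopg2 connector; otherwise main() exits with status 4 (section missing), 2 (no `name`) or 3 (wrong connector), as above. A minimal example of the expected shape (credentials are placeholders):

    database:
      name: psycopg2
      args:
        user: synapse_user
        password: secret
        dbname: synapse
        host: localhost
        cp_min: 5
        cp_max: 10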