Compare commits
718 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
182b8c2283 | ||
|
|
a77ebd3b55 | ||
|
|
00114287e5 | ||
|
|
fc8b1193de | ||
|
|
c832b5d29e | ||
|
|
df777c63fe | ||
|
|
e9c9a67365 | ||
|
|
ac080edb02 | ||
|
|
5e580f9372 | ||
|
|
39de098461 | ||
|
|
531f232418 | ||
|
|
5f047d22f4 | ||
|
|
efdc558cba | ||
|
|
04bd1cfa41 | ||
|
|
aa2a2e12cc | ||
|
|
8f231424d1 | ||
|
|
d03aadb367 | ||
|
|
749cde13c4 | ||
|
|
0b43aab855 | ||
|
|
fbc94cfbfc | ||
|
|
e631b145b9 | ||
|
|
8cf0ae0994 | ||
|
|
a551bc5375 | ||
|
|
417053a6a2 | ||
|
|
a1495dd33d | ||
|
|
5f3db8e567 | ||
|
|
43ecd8b362 | ||
|
|
722a30812f | ||
|
|
06bed20a2a | ||
|
|
9a9eb57676 | ||
|
|
86567e7fa5 | ||
|
|
b7b022cc7b | ||
|
|
521014cd1f | ||
|
|
871f78b570 | ||
|
|
753fbc0c5c | ||
|
|
748277aa0e | ||
|
|
bf40a9ef6d | ||
|
|
733000eaa2 | ||
|
|
6a399a7250 | ||
|
|
7ba22f1a09 | ||
|
|
f54f950f81 | ||
|
|
4625711606 | ||
|
|
5735ea2b3c | ||
|
|
b597d0366a | ||
|
|
9c6dcc4a43 | ||
|
|
27c5464cb6 | ||
|
|
1dad7965d2 | ||
|
|
c14ca1d7fd | ||
|
|
2b9e7432b8 | ||
|
|
547747ff74 | ||
|
|
e5b137b331 | ||
|
|
9e554bdecd | ||
|
|
765b542264 | ||
|
|
182a095420 | ||
|
|
0865cffddf | ||
|
|
5a312b9900 | ||
|
|
af2b2f33c2 | ||
|
|
9aa08dfb9b | ||
|
|
b28c673133 | ||
|
|
9a545f176d | ||
|
|
65728eb6ab | ||
|
|
531e037974 | ||
|
|
a96467cb3e | ||
|
|
6e92a7d93d | ||
|
|
740e63da2b | ||
|
|
a69cae22dd | ||
|
|
8ea3c3c29e | ||
|
|
63ab739b3d | ||
|
|
58bb788034 | ||
|
|
9e633b37e7 | ||
|
|
bb6a4842bd | ||
|
|
246727995d | ||
|
|
202695096a | ||
|
|
afbab293a8 | ||
|
|
78faf888af | ||
|
|
5164c21923 | ||
|
|
edcd1a3c5b | ||
|
|
532ab9128f | ||
|
|
a3072aacc2 | ||
|
|
27296d8880 | ||
|
|
8549b9bc37 | ||
|
|
7632373097 | ||
|
|
23b0674ac0 | ||
|
|
01f0484a0e | ||
|
|
3ca9035fdb | ||
|
|
caaf9d26db | ||
|
|
eb521b2332 | ||
|
|
68c29ab99e | ||
|
|
f12b7f4319 | ||
|
|
7db331320a | ||
|
|
97ad8a85c3 | ||
|
|
6f588196cb | ||
|
|
20241c27ee | ||
|
|
05d6aea37f | ||
|
|
7e0e7860cd | ||
|
|
a0afd7b8ed | ||
|
|
500369ab2b | ||
|
|
dc26d5c0c8 | ||
|
|
0def02f604 | ||
|
|
0ffa9167da | ||
|
|
a110e8f241 | ||
|
|
491f363392 | ||
|
|
33a67bf7b4 | ||
|
|
1e6f583431 | ||
|
|
5e3412d735 | ||
|
|
e6e4cd63f3 | ||
|
|
f5da5f4ef0 | ||
|
|
9a202cc124 | ||
|
|
c305deab52 | ||
|
|
0daaf3b1ec | ||
|
|
8e21504bdb | ||
|
|
fcf1be52ac | ||
|
|
394bc9ceb8 | ||
|
|
e3786592b2 | ||
|
|
d6eaf8d3d9 | ||
|
|
b1c23336e3 | ||
|
|
44c5073dea | ||
|
|
b7593fac44 | ||
|
|
af116794c4 | ||
|
|
88c85e1d8a | ||
|
|
9322b3d07e | ||
|
|
55f5329817 | ||
|
|
79d92c30f8 | ||
|
|
73229501c2 | ||
|
|
32ca91a7c9 | ||
|
|
9e03ac084e | ||
|
|
082c51109d | ||
|
|
8f44c75dc3 | ||
|
|
234f0d75e8 | ||
|
|
564186a1f9 | ||
|
|
ccdb477dbb | ||
|
|
5f92f9e965 | ||
|
|
c2db4390bb | ||
|
|
11c21b5259 | ||
|
|
3cd9e17e3f | ||
|
|
1982ce796f | ||
|
|
825e18a551 | ||
|
|
9ff0128fb1 | ||
|
|
36c3617204 | ||
|
|
90a9db3a91 | ||
|
|
59d6795d9e | ||
|
|
2c07cf50fa | ||
|
|
cc0e525dc5 | ||
|
|
73bd973109 | ||
|
|
a7e501d874 | ||
|
|
4676f0595c | ||
|
|
1d3d70e8d6 | ||
|
|
bada88157e | ||
|
|
13f3137701 | ||
|
|
d3316ff6ff | ||
|
|
1b384e61b4 | ||
|
|
addea20cab | ||
|
|
fac23f2f57 | ||
|
|
bffe1ccb3d | ||
|
|
e577434fe6 | ||
|
|
5d1d9827e4 | ||
|
|
dd28ad20ef | ||
|
|
ef416ef60b | ||
|
|
95b3b55971 | ||
|
|
b3f32ae03e | ||
|
|
c7472174e5 | ||
|
|
2ad749354d | ||
|
|
4ed9d2ea22 | ||
|
|
280eb47de7 | ||
|
|
324a12b0ff | ||
|
|
a2543ccddc | ||
|
|
22666412c3 | ||
|
|
dd58044cdf | ||
|
|
10312d89d7 | ||
|
|
b4c0d877cb | ||
|
|
e95d56a5d0 | ||
|
|
90424e8329 | ||
|
|
1bfeb42a06 | ||
|
|
a936f92954 | ||
|
|
0bc514ec17 | ||
|
|
a2cf4001af | ||
|
|
cb4e12a68c | ||
|
|
a7f5124dfe | ||
|
|
ccbf71c5e7 | ||
|
|
04bf5f58d9 | ||
|
|
ab3f5956d4 | ||
|
|
c1fe8e583f | ||
|
|
fd166c4433 | ||
|
|
f29c7ba4f2 | ||
|
|
88869e9710 | ||
|
|
f8404ab043 | ||
|
|
9fa5d1ff9e | ||
|
|
483f353fd0 | ||
|
|
a11bf5b5c7 | ||
|
|
d4113ff753 | ||
|
|
1969f036fa | ||
|
|
8c90e01016 | ||
|
|
756c5c9b99 | ||
|
|
ee54b355af | ||
|
|
26cbbc0c56 | ||
|
|
f4f719d52a | ||
|
|
f2071d8b7e | ||
|
|
df88a55784 | ||
|
|
3ccbc626ff | ||
|
|
71a15cf222 | ||
|
|
26ddf769b1 | ||
|
|
3137387c0c | ||
|
|
fc142cfde8 | ||
|
|
b0503fa507 | ||
|
|
b86a97c9c0 | ||
|
|
eb6cd23772 | ||
|
|
efae1e7e6c | ||
|
|
19d55b840e | ||
|
|
cc0c1d05ab | ||
|
|
f088f65d5a | ||
|
|
5441b5a06b | ||
|
|
efc56c0a88 | ||
|
|
321fca2c0a | ||
|
|
bbd66e9cb0 | ||
|
|
eb0277146c | ||
|
|
10ee32ec48 | ||
|
|
bdb4be89ff | ||
|
|
61445e0b56 | ||
|
|
f15a010e0e | ||
|
|
58747004fe | ||
|
|
e7ff1eb66b | ||
|
|
4a00bd4797 | ||
|
|
2e6fc7e4a0 | ||
|
|
4a8f323be7 | ||
|
|
c7d82102ed | ||
|
|
068b861edc | ||
|
|
3c908c6a09 | ||
|
|
ba3805786c | ||
|
|
70afb197f1 | ||
|
|
d966e35054 | ||
|
|
1675570291 | ||
|
|
9b88de656e | ||
|
|
3d39b5653d | ||
|
|
eb5f7f64ad | ||
|
|
9fc0164c4d | ||
|
|
65eb520cca | ||
|
|
f7f07932b4 | ||
|
|
de52494039 | ||
|
|
4d87ee2bb6 | ||
|
|
d0ba0936ca | ||
|
|
b08556861f | ||
|
|
c96628ad49 | ||
|
|
a615882b3f | ||
|
|
2bcc8e0d30 | ||
|
|
de519edf78 | ||
|
|
caf47943c3 | ||
|
|
427ab12724 | ||
|
|
eba16c0cc3 | ||
|
|
a485de6359 | ||
|
|
1a985f7e82 | ||
|
|
7867411095 | ||
|
|
2f6ebd16c1 | ||
|
|
878b235614 | ||
|
|
75f9c6b0fb | ||
|
|
7c1e2bf96f | ||
|
|
181b44e117 | ||
|
|
f7793976fb | ||
|
|
8ffcd9b60a | ||
|
|
52d3c4d62d | ||
|
|
0fb3e75253 | ||
|
|
2c40e403c4 | ||
|
|
d1c519ed0d | ||
|
|
27470ef934 | ||
|
|
8a1da87702 | ||
|
|
c8d89f805b | ||
|
|
c9fceafc16 | ||
|
|
bbb9980941 | ||
|
|
da55d6f7cd | ||
|
|
eeacdc1359 | ||
|
|
ee1e92e1cb | ||
|
|
705802e584 | ||
|
|
b2e509f055 | ||
|
|
cca70764d4 | ||
|
|
3ac94710fb | ||
|
|
ca73a47785 | ||
|
|
1ef67fc8e9 | ||
|
|
8f3c2f4f3d | ||
|
|
e42b98ec17 | ||
|
|
efb318a979 | ||
|
|
3c0a82293c | ||
|
|
e867f31c31 | ||
|
|
aeb6da111b | ||
|
|
2736fa5202 | ||
|
|
4d3df867da | ||
|
|
62f78e4312 | ||
|
|
d223ac4675 | ||
|
|
c16404bb2d | ||
|
|
cf70933e21 | ||
|
|
46222e9352 | ||
|
|
212e94756b | ||
|
|
b42abbd4a2 | ||
|
|
730a55e721 | ||
|
|
06cf83b901 | ||
|
|
673e5af030 | ||
|
|
a0bc16c255 | ||
|
|
76b5234f7b | ||
|
|
928de47d1d | ||
|
|
274db6f606 | ||
|
|
89ca0ca927 | ||
|
|
8047008fa5 | ||
|
|
f914110626 | ||
|
|
5656fd0b96 | ||
|
|
c3d8c72302 | ||
|
|
1eefff9025 | ||
|
|
1dc7c7b0a4 | ||
|
|
011bac7b4f | ||
|
|
dc2d6e60d8 | ||
|
|
7809b6e50f | ||
|
|
f7f0370bf5 | ||
|
|
6300fc5364 | ||
|
|
16270cbd1a | ||
|
|
3b226dd2c0 | ||
|
|
4ac61d18ff | ||
|
|
fd7abdb8a4 | ||
|
|
92cd85b204 | ||
|
|
4bb7998208 | ||
|
|
91b22311af | ||
|
|
ddd00d4c25 | ||
|
|
428997f26a | ||
|
|
c9d35d8096 | ||
|
|
761b3bd591 | ||
|
|
a440e6f115 | ||
|
|
837b1a9a73 | ||
|
|
bed37184d1 | ||
|
|
785ed480bb | ||
|
|
d8c39c42a1 | ||
|
|
4b06138d35 | ||
|
|
bd5668d15d | ||
|
|
1d6c61cc5b | ||
|
|
ed22e53cb6 | ||
|
|
d18a34785c | ||
|
|
79fb8de7b7 | ||
|
|
07f5f3f1bb | ||
|
|
8fffa40502 | ||
|
|
6680b32579 | ||
|
|
af618f42bd | ||
|
|
aafcce871e | ||
|
|
71d1418559 | ||
|
|
e0678cc869 | ||
|
|
74ddf7114c | ||
|
|
837d4c1597 | ||
|
|
ccb85737f7 | ||
|
|
f9a4699e84 | ||
|
|
bab3aea8ff | ||
|
|
c52cf1fc3f | ||
|
|
3fe43a5b57 | ||
|
|
1a8b6d2fe7 | ||
|
|
570a4b7915 | ||
|
|
63859b81ad | ||
|
|
d8d13f8bf6 | ||
|
|
c3ce44e202 | ||
|
|
3372cdc0df | ||
|
|
82fc945d73 | ||
|
|
040bd52705 | ||
|
|
415cfcb72f | ||
|
|
2b0efb32c1 | ||
|
|
a3a4fdd7fc | ||
|
|
78f6bbf7fe | ||
|
|
43606d26e4 | ||
|
|
b77c409257 | ||
|
|
96f77a6275 | ||
|
|
2336e36314 | ||
|
|
9146c31abf | ||
|
|
bd4c431eb4 | ||
|
|
b620e5319a | ||
|
|
f12df8ded4 | ||
|
|
0ecd920ad9 | ||
|
|
b40be8c494 | ||
|
|
f7c5e64fbc | ||
|
|
6eea2526f6 | ||
|
|
be9db47276 | ||
|
|
35cb81518c | ||
|
|
4042b8f026 | ||
|
|
a3d1b2d671 | ||
|
|
eec8c41e20 | ||
|
|
4f9fe7245b | ||
|
|
6e1ae69691 | ||
|
|
65a1fcfda5 | ||
|
|
373e11495d | ||
|
|
8b6eac3c1c | ||
|
|
43bae7fb01 | ||
|
|
18ee1e2685 | ||
|
|
5b91b5f436 | ||
|
|
54749dfd1e | ||
|
|
f86212dfe1 | ||
|
|
9ed2e2b0ca | ||
|
|
a29cd622c3 | ||
|
|
6cea0139d1 | ||
|
|
45a6a930c9 | ||
|
|
22b273b145 | ||
|
|
ca71c88744 | ||
|
|
20b93e9fba | ||
|
|
05b29a7e9a | ||
|
|
913ef5c817 | ||
|
|
60534597e0 | ||
|
|
a7173b6bc9 | ||
|
|
6deb51428a | ||
|
|
2f00a642be | ||
|
|
4e47960440 | ||
|
|
67b54ac1eb | ||
|
|
0e82b6981f | ||
|
|
d6bf52c11f | ||
|
|
c1ac66f6e5 | ||
|
|
b9e4a66fdc | ||
|
|
9c363be16f | ||
|
|
affab384cf | ||
|
|
0fc546962e | ||
|
|
d215d96b9b | ||
|
|
327e873ef6 | ||
|
|
a2f65de1ce | ||
|
|
bc23129759 | ||
|
|
3e7b184ab4 | ||
|
|
fe0b0d1157 | ||
|
|
55b1c021ec | ||
|
|
21cf4cd2ce | ||
|
|
defc98ab0e | ||
|
|
74af03408f | ||
|
|
1d151d8fa6 | ||
|
|
e5aeced045 | ||
|
|
17d39143ac | ||
|
|
26c37ba824 | ||
|
|
d380cc31fa | ||
|
|
aa2fedee9d | ||
|
|
14fa0e478a | ||
|
|
ac878d46a5 | ||
|
|
6da0a473be | ||
|
|
2642ec85e5 | ||
|
|
26d2152a36 | ||
|
|
1cfd404321 | ||
|
|
207020b7a0 | ||
|
|
6ad9a5952e | ||
|
|
0511680fc5 | ||
|
|
ad14503e9f | ||
|
|
9221f25e35 | ||
|
|
95eec90a62 | ||
|
|
927cb51b5d | ||
|
|
9f4025fdfb | ||
|
|
b57336f6cf | ||
|
|
6e1c2fd7fd | ||
|
|
50e3b7cd5a | ||
|
|
8beda5b0ae | ||
|
|
9998ed177b | ||
|
|
e2db3d84d8 | ||
|
|
141a390105 | ||
|
|
78ad5d5879 | ||
|
|
2ddd38796d | ||
|
|
35b220d7a5 | ||
|
|
8093faee19 | ||
|
|
10a7bd2eff | ||
|
|
2f8a25ae26 | ||
|
|
19bf80dfaf | ||
|
|
fbfaac9859 | ||
|
|
0c3d0dd525 | ||
|
|
1388632562 | ||
|
|
771ecaf3e5 | ||
|
|
2000a8f3ed | ||
|
|
719cd5512c | ||
|
|
afb4536247 | ||
|
|
71b19e6582 | ||
|
|
f37cfda365 | ||
|
|
f63a841cb5 | ||
|
|
d469e802ad | ||
|
|
1702c07481 | ||
|
|
31c5aebe90 | ||
|
|
8cf84a6cf2 | ||
|
|
18336e4d0a | ||
|
|
abf297d095 | ||
|
|
061a350cc6 | ||
|
|
c85491cc71 | ||
|
|
8b794c2299 | ||
|
|
11b11375fd | ||
|
|
c728f1a694 | ||
|
|
28f9fa35e5 | ||
|
|
f8ea2ebf62 | ||
|
|
7575e8c1de | ||
|
|
395db5f1cf | ||
|
|
ee1acda7aa | ||
|
|
1150f4c438 | ||
|
|
f04b90d9c6 | ||
|
|
53463077df | ||
|
|
e326c5be4a | ||
|
|
e199dbc37b | ||
|
|
2e8bfcc74d | ||
|
|
ca53793e32 | ||
|
|
a5f31fbf4e | ||
|
|
40d47c9f44 | ||
|
|
67743b37bb | ||
|
|
36911d7ed6 | ||
|
|
5564154da2 | ||
|
|
27f9869b38 | ||
|
|
f274747af3 | ||
|
|
05832b8b4b | ||
|
|
b9ce2bf2dc | ||
|
|
5442459b2d | ||
|
|
f0466aaa56 | ||
|
|
50111e37da | ||
|
|
76682ebef0 | ||
|
|
705653465a | ||
|
|
8cd2fac9b9 | ||
|
|
b2d7f4f606 | ||
|
|
2dd31fa93f | ||
|
|
df20d4f100 | ||
|
|
3ddeb5fa94 | ||
|
|
70baed88f4 | ||
|
|
5ba0d594a2 | ||
|
|
6505c4054f | ||
|
|
e1c30a918b | ||
|
|
f812e208fa | ||
|
|
9e7526c191 | ||
|
|
07194e52cd | ||
|
|
2f8d825970 | ||
|
|
c44eb3a2c3 | ||
|
|
8207770369 | ||
|
|
365952bbe9 | ||
|
|
5404ebce1c | ||
|
|
13411f1830 | ||
|
|
43090c9873 | ||
|
|
34000fb9f0 | ||
|
|
c2f9c6a38d | ||
|
|
a5c97d4c24 | ||
|
|
9514b97ca0 | ||
|
|
22e84cc922 | ||
|
|
13b97296f5 | ||
|
|
d5f7e15dfb | ||
|
|
7bf7b1e71e | ||
|
|
7b17498722 | ||
|
|
3473633e43 | ||
|
|
f455b8a007 | ||
|
|
daabba12d3 | ||
|
|
61864d082f | ||
|
|
a7cd1e0ce6 | ||
|
|
0dd6d3a500 | ||
|
|
bdb906bf26 | ||
|
|
61da050fe8 | ||
|
|
83fe391796 | ||
|
|
37657fa6ad | ||
|
|
908a945b95 | ||
|
|
36c720227f | ||
|
|
c22c80d3b0 | ||
|
|
15af827cbc | ||
|
|
4a54c7ca87 | ||
|
|
7b8a0eadf3 | ||
|
|
9a01a0df8e | ||
|
|
ea2d77f536 | ||
|
|
e29003539b | ||
|
|
97bdb2dd64 | ||
|
|
40d446ba32 | ||
|
|
5fa743755d | ||
|
|
0f027fefb8 | ||
|
|
56acb3f281 | ||
|
|
5268185604 | ||
|
|
635c3627c9 | ||
|
|
009f7ddf84 | ||
|
|
4526618c32 | ||
|
|
6dfd46197d | ||
|
|
778471d3cc | ||
|
|
bbcf2990f6 | ||
|
|
ac30ab223b | ||
|
|
50e7b479b5 | ||
|
|
1367428499 | ||
|
|
e5de91cbe5 | ||
|
|
244260e34a | ||
|
|
575ed06225 | ||
|
|
b6fdc57888 | ||
|
|
758d7d89c2 | ||
|
|
2db31b54e8 | ||
|
|
99d16a37d5 | ||
|
|
449968bc4e | ||
|
|
b0a55593c1 | ||
|
|
17ef97c375 | ||
|
|
36e0ba0f06 | ||
|
|
b365a60c00 | ||
|
|
88afb756f5 | ||
|
|
e2d58c2959 | ||
|
|
3cfc333512 | ||
|
|
89da50dd37 | ||
|
|
9319314672 | ||
|
|
6d805ae941 | ||
|
|
8ba932aa36 | ||
|
|
b580f549a6 | ||
|
|
cb9c01d94b | ||
|
|
f9b0f6ae35 | ||
|
|
1b1ff05c81 | ||
|
|
7b465ce10b | ||
|
|
ee66395dfe | ||
|
|
31af6eeb76 | ||
|
|
e9a2d81bbe | ||
|
|
7d7f03da4f | ||
|
|
8966b62ec7 | ||
|
|
ec8d9b0da8 | ||
|
|
38ba1251ef | ||
|
|
005c46cb06 | ||
|
|
4b0ff07d70 | ||
|
|
f1e065a448 | ||
|
|
c82c6eaf34 | ||
|
|
b8f3759739 | ||
|
|
70aba1605c | ||
|
|
2c5aa84fe7 | ||
|
|
753f395b8d | ||
|
|
f22f11eb58 | ||
|
|
123f71cb86 | ||
|
|
22af45fb6e | ||
|
|
0849df524a | ||
|
|
31952afe1e | ||
|
|
83755e93dc | ||
|
|
0fbcc11f99 | ||
|
|
d431fac7de | ||
|
|
53ca9b0420 | ||
|
|
a8749f574a | ||
|
|
a9d839fd8f | ||
|
|
477d37f87d | ||
|
|
d2195411a6 | ||
|
|
1f5e6dbff6 | ||
|
|
09c0448186 | ||
|
|
b318bf64f4 | ||
|
|
af1d2c1603 | ||
|
|
1c11d3d08f | ||
|
|
a4a8f33df0 | ||
|
|
889cf03c1c | ||
|
|
0ac5b34f2d | ||
|
|
37304a9d92 | ||
|
|
4ad9886517 | ||
|
|
8e9d2b5314 | ||
|
|
7916a2352f | ||
|
|
2b92d0f119 | ||
|
|
961a9342fa | ||
|
|
3cde39c7ed | ||
|
|
09922c8dfa | ||
|
|
0390954a85 | ||
|
|
948fb795f2 | ||
|
|
452c8ea2d9 | ||
|
|
9c41090a7a | ||
|
|
59eee33767 | ||
|
|
cc5e60ed90 | ||
|
|
27bc493884 | ||
|
|
75a2b2d2ab | ||
|
|
0b7d8b4db0 | ||
|
|
d05cd7de0d | ||
|
|
b0068a333b | ||
|
|
d947c2db13 | ||
|
|
90e09c8c25 | ||
|
|
dbf59a7853 | ||
|
|
4d89e3beba | ||
|
|
5a88f41718 | ||
|
|
435956a272 | ||
|
|
7854885465 | ||
|
|
901ea6203e | ||
|
|
9217d00528 | ||
|
|
f234f894af | ||
|
|
4286edd78f | ||
|
|
334437f677 | ||
|
|
183c5cda14 | ||
|
|
45265453cb | ||
|
|
80a06272cc | ||
|
|
473213d14b | ||
|
|
d53e295569 | ||
|
|
18e2c610bc | ||
|
|
e0c68c1911 | ||
|
|
34729c4509 | ||
|
|
ca778b327b | ||
|
|
bde6169746 | ||
|
|
3dfbf2fffd | ||
|
|
34068ef633 | ||
|
|
e11729013f | ||
|
|
cceef054ac | ||
|
|
b8751e7add | ||
|
|
37344f99a7 | ||
|
|
61bcd8720d | ||
|
|
6801ff996e | ||
|
|
c8fc9a98bf | ||
|
|
52de5426ad | ||
|
|
e7d0a81bfe | ||
|
|
4f3223d3ad | ||
|
|
4829637b46 | ||
|
|
7f2494a26b | ||
|
|
f7b5fb55d7 | ||
|
|
2b6e54da1e | ||
|
|
1023916390 | ||
|
|
6a0e9d5c0a | ||
|
|
7b4d657a2d | ||
|
|
b7e86bf556 | ||
|
|
fa777bbd63 | ||
|
|
2e7b2c15bc | ||
|
|
9bc0fc8f05 | ||
|
|
b354d30fe9 | ||
|
|
a253e95b5a | ||
|
|
7e4c0d660a | ||
|
|
6a8bf2b074 | ||
|
|
16729ebffc | ||
|
|
f44d432b6a | ||
|
|
93ee418f65 | ||
|
|
cd6bda2113 | ||
|
|
4a007cea78 | ||
|
|
ab532b4432 | ||
|
|
ee98b91a29 | ||
|
|
0294143b22 | ||
|
|
2890798342 | ||
|
|
2d44852ec4 | ||
|
|
b9de5755d1 | ||
|
|
84463673e2 | ||
|
|
56efe9811d | ||
|
|
a6234e4507 | ||
|
|
e41b2f6ca9 | ||
|
|
8cf000198f | ||
|
|
cc6cbbfb07 | ||
|
|
10d7a3d585 | ||
|
|
864555bcf0 | ||
|
|
5d3bc8cfa5 | ||
|
|
b130608a78 | ||
|
|
6fa0a9762f | ||
|
|
17270e41fd | ||
|
|
4ac03293ed | ||
|
|
7c17e26480 | ||
|
|
1ac711c864 | ||
|
|
82c2adbc7b | ||
|
|
1dcf390ee9 | ||
|
|
a143e2581e | ||
|
|
d7bdc15e49 |
36
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@@ -1,36 +1,34 @@
|
||||
name: 🐛 Bug Report
|
||||
description: Create a report to help us improve EmbassyOS
|
||||
title: '[bug]: '
|
||||
description: Create a report to help us improve StartOS
|
||||
title: "[bug]: "
|
||||
labels: [Bug, Needs Triage]
|
||||
assignees:
|
||||
- dr-bonez
|
||||
- MattDHill
|
||||
body:
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Prerequisites
|
||||
description: Please confirm you have completed the following.
|
||||
options:
|
||||
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already report this problem, without success.
|
||||
- label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already report this problem.
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: EmbassyOS Version
|
||||
description: What version of EmbassyOS are you running?
|
||||
placeholder: e.g. 0.3.0
|
||||
label: Server Hardware
|
||||
description: On what hardware are you running StartOS? Please be as detailed as possible!
|
||||
placeholder: Pi (8GB) w/ 32GB microSD & Samsung T7 SSD
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: StartOS Version
|
||||
description: What version of StartOS are you running?
|
||||
placeholder: e.g. 0.3.4.3
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Device
|
||||
description: What device are you using to connect to Embassy?
|
||||
options:
|
||||
- Phone/tablet
|
||||
- Laptop/Desktop
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Device OS
|
||||
label: Client OS
|
||||
description: What operating system is your device running?
|
||||
options:
|
||||
- MacOS
|
||||
@@ -45,14 +43,14 @@ body:
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: Device OS Version
|
||||
label: Client OS Version
|
||||
description: What version is your device OS?
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Browser
|
||||
description: What browser are you using to connect to Embassy?
|
||||
description: What browser are you using to connect to your server?
|
||||
options:
|
||||
- Firefox
|
||||
- Brave
|
||||
|
||||
10
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
@@ -1,16 +1,16 @@
|
||||
name: 💡 Feature Request
|
||||
description: Suggest an idea for EmbassyOS
|
||||
title: '[feat]: '
|
||||
description: Suggest an idea for StartOS
|
||||
title: "[feat]: "
|
||||
labels: [Enhancement]
|
||||
assignees:
|
||||
- dr-bonez
|
||||
- MattDHill
|
||||
body:
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Prerequisites
|
||||
description: Please confirm you have completed the following.
|
||||
options:
|
||||
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already suggest this feature, without success.
|
||||
- label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already suggest this feature.
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
@@ -27,7 +27,7 @@ body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe Preferred Solution
|
||||
description: How you want this feature added to EmbassyOS?
|
||||
description: How you want this feature added to StartOS?
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe Alternatives
|
||||
|
||||
237
.github/workflows/startos-iso.yaml
vendored
Normal file
@@ -0,0 +1,237 @@
|
||||
name: Debian-based ISO and SquashFS
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
environment:
|
||||
type: choice
|
||||
description: Environment
|
||||
options:
|
||||
- NONE
|
||||
- dev
|
||||
- unstable
|
||||
- dev-unstable
|
||||
- docker
|
||||
- dev-docker
|
||||
- dev-unstable-docker
|
||||
runner:
|
||||
type: choice
|
||||
description: Runner
|
||||
options:
|
||||
- standard
|
||||
- fast
|
||||
platform:
|
||||
type: choice
|
||||
description: Platform
|
||||
options:
|
||||
- ALL
|
||||
- x86_64
|
||||
- x86_64-nonfree
|
||||
- aarch64
|
||||
- aarch64-nonfree
|
||||
- raspberrypi
|
||||
deploy:
|
||||
type: choice
|
||||
description: Deploy
|
||||
options:
|
||||
- NONE
|
||||
- alpha
|
||||
- beta
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- next/*
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
- next/*
|
||||
|
||||
env:
|
||||
NODEJS_VERSION: "18.15.0"
|
||||
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
|
||||
|
||||
jobs:
|
||||
compile:
|
||||
name: Compile Base Binaries
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
arch: >-
|
||||
${{
|
||||
fromJson('{
|
||||
"x86_64": ["x86_64"],
|
||||
"x86_64-nonfree": ["x86_64"],
|
||||
"aarch64": ["aarch64"],
|
||||
"aarch64-nonfree": ["aarch64"],
|
||||
"raspberrypi": ["aarch64"],
|
||||
"ALL": ["x86_64", "aarch64"]
|
||||
}')[github.event.inputs.platform || 'ALL']
|
||||
}}
|
||||
runs-on: ${{ fromJson('["ubuntu-22.04", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||
steps:
|
||||
- run: |
|
||||
sudo mount -t tmpfs tmpfs .
|
||||
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ env.NODEJS_VERSION }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
||||
- name: Make
|
||||
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: compiled-${{ matrix.arch }}.tar
|
||||
path: compiled-${{ matrix.arch }}.tar
|
||||
image:
|
||||
name: Build Image
|
||||
needs: [compile]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
platform: >-
|
||||
${{
|
||||
fromJson(
|
||||
format(
|
||||
'[
|
||||
["{0}"],
|
||||
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "raspberrypi"]
|
||||
]',
|
||||
github.event.inputs.platform || 'ALL'
|
||||
)
|
||||
)[(github.event.inputs.platform || 'ALL') == 'ALL']
|
||||
}}
|
||||
runs-on: >-
|
||||
${{
|
||||
fromJson(
|
||||
format(
|
||||
'["ubuntu-22.04", "{0}"]',
|
||||
fromJson('{
|
||||
"x86_64": "buildjet-8vcpu-ubuntu-2204",
|
||||
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
|
||||
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||
}')[matrix.platform]
|
||||
)
|
||||
)[github.event.inputs.runner == 'fast']
|
||||
}}
|
||||
env:
|
||||
ARCH: >-
|
||||
${{
|
||||
fromJson('{
|
||||
"x86_64": "x86_64",
|
||||
"x86_64-nonfree": "x86_64",
|
||||
"aarch64": "aarch64",
|
||||
"aarch64-nonfree": "aarch64",
|
||||
"raspberrypi": "aarch64",
|
||||
}')[matrix.platform]
|
||||
}}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y qemu-user-static
|
||||
wget https://deb.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.2-1_all.deb
|
||||
sha256sum ./debspawn_0.6.2-1_all.deb | grep 37ef27458cb1e35e8bce4d4f639b06b4b3866fc0b9191ec6b9bd157afd06a817
|
||||
sudo apt-get install -y ./debspawn_0.6.2-1_all.deb
|
||||
|
||||
- name: Configure debspawn
|
||||
run: |
|
||||
sudo mkdir -p /etc/debspawn/
|
||||
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
|
||||
sudo mkdir -p /var/tmp/debspawn
|
||||
|
||||
- run: sudo mount -t tmpfs tmpfs /var/tmp/debspawn
|
||||
if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree') }}
|
||||
|
||||
- name: Download compiled artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: compiled-${{ env.ARCH }}.tar
|
||||
|
||||
- name: Extract compiled artifacts
|
||||
run: tar -xvf compiled-${{ env.ARCH }}.tar
|
||||
|
||||
- name: Prevent rebuild of compiled artifacts
|
||||
run: |
|
||||
mkdir -p web/dist/raw
|
||||
PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar
|
||||
|
||||
- name: Run iso build
|
||||
run: PLATFORM=${{ matrix.platform }} make iso
|
||||
if: ${{ matrix.platform != 'raspberrypi' }}
|
||||
|
||||
- name: Run img build
|
||||
run: PLATFORM=${{ matrix.platform }} make img
|
||||
if: ${{ matrix.platform == 'raspberrypi' }}
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ matrix.platform }}.squashfs
|
||||
path: results/*.squashfs
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ matrix.platform }}.iso
|
||||
path: results/*.iso
|
||||
if: ${{ matrix.platform != 'raspberrypi' }}
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: ${{ matrix.platform }}.img
|
||||
path: results/*.img
|
||||
if: ${{ matrix.platform == 'raspberrypi' }}
|
||||
|
||||
- name: Upload OTA to registry
|
||||
run: >-
|
||||
PLATFORM=${{ matrix.platform }} make upload-ota TARGET="${{
|
||||
fromJson('{
|
||||
"alpha": "alpha-registry-x.start9.com",
|
||||
"beta": "beta-registry.start9.com",
|
||||
}')[github.event.inputs.deploy]
|
||||
}}" KEY="${{
|
||||
fromJson(
|
||||
format('{{
|
||||
"alpha": "{0}",
|
||||
"beta": "{1}",
|
||||
}}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
|
||||
)[github.event.inputs.deploy]
|
||||
}}"
|
||||
if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
|
||||
|
||||
index:
|
||||
if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
|
||||
needs: [image]
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- run: >-
|
||||
curl "https://${{
|
||||
fromJson('{
|
||||
"alpha": "alpha-registry-x.start9.com",
|
||||
"beta": "beta-registry.start9.com",
|
||||
}')[github.event.inputs.deploy]
|
||||
}}:8443/resync.cgi?key=${{
|
||||
fromJson(
|
||||
format('{{
|
||||
"alpha": "{0}",
|
||||
"beta": "{1}",
|
||||
}}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
|
||||
)[github.event.inputs.deploy]
|
||||
}}"
|
||||
31
.github/workflows/test.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: Automated Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- next/*
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
- next/*
|
||||
|
||||
env:
|
||||
NODEJS_VERSION: "18.15.0"
|
||||
ENVIRONMENT: dev-unstable
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Run Automated Tests
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ env.NODEJS_VERSION }}
|
||||
|
||||
- name: Build And Run Tests
|
||||
run: make test
|
||||
21
.gitignore
vendored
@@ -1,5 +1,8 @@
|
||||
.DS_Store
|
||||
.idea
|
||||
system-images/binfmt/binfmt.tar
|
||||
system-images/compat/compat.tar
|
||||
system-images/util/util.tar
|
||||
/*.img
|
||||
/*.img.gz
|
||||
/*.img.xz
|
||||
@@ -8,3 +11,21 @@
|
||||
/product_key.txt
|
||||
/*_product_key.txt
|
||||
.vscode/settings.json
|
||||
deploy_web.sh
|
||||
deploy_web.sh
|
||||
secrets.db
|
||||
.vscode/
|
||||
/cargo-deps/**/*
|
||||
/PLATFORM.txt
|
||||
/ENVIRONMENT.txt
|
||||
/GIT_HASH.txt
|
||||
/VERSION.txt
|
||||
/eos-*.tar.gz
|
||||
/*.deb
|
||||
/target
|
||||
/*.squashfs
|
||||
/results
|
||||
/dpkg-workdir
|
||||
/compiled.tar
|
||||
/compiled-*.tar
|
||||
/firmware
|
||||
3
.gitmodules
vendored
@@ -1,6 +1,3 @@
|
||||
[submodule "rpc-toolkit"]
|
||||
path = rpc-toolkit
|
||||
url = https://github.com/Start9Labs/rpc-toolkit.git
|
||||
[submodule "patch-db"]
|
||||
path = patch-db
|
||||
url = https://github.com/Start9Labs/patch-db.git
|
||||
|
||||
296
CONTRIBUTING.md
@@ -1,205 +1,119 @@
|
||||
<!-- omit in toc -->
|
||||
# Contributing to Embassy OS
|
||||
# Contributing to StartOS
|
||||
|
||||
First off, thanks for taking the time to contribute! ❤️
|
||||
|
||||
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
|
||||
|
||||
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
|
||||
> - Star the project
|
||||
> - Tweet about it
|
||||
> - Refer this project in your project's readme
|
||||
> - Mention the project at local meetups and tell your friends/colleagues
|
||||
> - Buy an [Embassy](https://start9labs.com)
|
||||
|
||||
<!-- omit in toc -->
|
||||
## Table of Contents
|
||||
|
||||
- [I Have a Question](#i-have-a-question)
|
||||
- [I Want To Contribute](#i-want-to-contribute)
|
||||
- [Reporting Bugs](#reporting-bugs)
|
||||
- [Suggesting Enhancements](#suggesting-enhancements)
|
||||
- [Your First Code Contribution](#your-first-code-contribution)
|
||||
- [Setting Up Your Development Environment](#setting-up-your-development-environment)
|
||||
- [Building The Image](#building-the-image)
|
||||
- [Improving The Documentation](#improving-the-documentation)
|
||||
- [Styleguides](#styleguides)
|
||||
- [Formatting](#formatting)
|
||||
- [Atomic Commits](#atomic-commits)
|
||||
- [Commit Messages](#commit-messages)
|
||||
- [Pull Requests](#pull-requests)
|
||||
- [Rebasing Changes](#rebasing-changes)
|
||||
- [Join The Discussion](#join-the-discussion)
|
||||
- [Join The Project Team](#join-the-project-team)
|
||||
This guide is for contributing to the StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://docs.start9.com/latest/developer-docs/). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
|
||||
|
||||
|
||||
## Collaboration
|
||||
|
||||
## I Have a Question
|
||||
- [Matrix](https://matrix.to/#/#community-dev:matrix.start9labs.com)
|
||||
- [Telegram](https://t.me/start9_labs/47471)
|
||||
|
||||
> If you want to ask a question, we assume that you have read the available [Documentation](https://docs.start9labs.com).
|
||||
|
||||
Before you ask a question, it is best to search for existing [Issues](https://github.com/Start9Labs/embassy-os/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
|
||||
|
||||
If you then still feel the need to ask a question and need clarification, we recommend the following:
|
||||
|
||||
- Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new).
|
||||
- Provide as much context as you can about what you're running into.
|
||||
- Provide project and platform versions, depending on what seems relevant.
|
||||
|
||||
We will then take care of the issue as soon as possible.
|
||||
|
||||
<!--
|
||||
You might want to create a separate issue tag for questions and include it in this description. People should then tag their issues accordingly.
|
||||
|
||||
Depending on how large the project is, you may want to outsource the questioning, e.g. to Stack Overflow or Gitter. You may add additional contact and information possibilities:
|
||||
- IRC
|
||||
- Slack
|
||||
- Gitter
|
||||
- Stack Overflow tag
|
||||
- Blog
|
||||
- FAQ
|
||||
- Roadmap
|
||||
- E-Mail List
|
||||
- Forum
|
||||
-->
|
||||
|
||||
## I Want To Contribute
|
||||
|
||||
> ### Legal Notice <!-- omit in toc -->
|
||||
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
|
||||
|
||||
### Reporting Bugs
|
||||
|
||||
<!-- omit in toc -->
|
||||
#### Before Submitting a Bug Report
|
||||
|
||||
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
|
||||
|
||||
- Make sure that you are using the latest version.
|
||||
- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](https://start9.com/latest/user-manual). If you are looking for support, you might want to check [this section](#i-have-a-question)).
|
||||
- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/Start9Labs/embassy-os/issues?q=label%3Abug).
|
||||
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
|
||||
- Collect information about the bug:
|
||||
- Stack trace (Traceback)
|
||||
- Client OS, Platform and Version (Windows/Linux/macOS/iOS/Android, Firefox/Tor Browser/Consulate)
|
||||
- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
|
||||
- Possibly your input and the output
|
||||
- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
|
||||
|
||||
<!-- omit in toc -->
|
||||
#### How Do I Submit a Good Bug Report?
|
||||
|
||||
> You must never report security related issues, vulnerabilities or bugs to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to <security@start9labs.com>.
|
||||
<!-- You may add a PGP key to allow the messages to be sent encrypted as well. -->
|
||||
|
||||
We use GitHub issues to track bugs and errors. If you run into an issue with the project:
|
||||
|
||||
- Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new/choose) selecting the appropriate type.
|
||||
- Explain the behavior you would expect and the actual behavior.
|
||||
- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
|
||||
- Provide the information you collected in the previous section.
|
||||
|
||||
Once it's filed:
|
||||
|
||||
- The project team will label the issue accordingly.
|
||||
- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `Question`. Bugs with the `Question` tag will not be addressed until they are answered.
|
||||
- If the team is able to reproduce the issue, it will be marked a scoping level tag, as well as possibly other tags (such as `Security`), and the issue will be left to be [implemented by someone](#your-first-code-contribution).
|
||||
|
||||
<!-- You might want to create an issue template for bugs and errors that can be used as a guide and that defines the structure of the information to be included. If you do so, reference it here in the description. -->
|
||||
|
||||
|
||||
### Suggesting Enhancements
|
||||
|
||||
This section guides you through submitting an enhancement suggestion for Embassy OS, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
|
||||
|
||||
<!-- omit in toc -->
|
||||
#### Before Submitting an Enhancement
|
||||
|
||||
- Make sure that you are using the latest version.
|
||||
- Read the [documentation](https://start9.com/latest/user-manual) carefully and find out if the functionality is already covered, maybe by an individual configuration.
|
||||
- Perform a [search](https://github.com/Start9Labs/embassy-os/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
|
||||
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
|
||||
|
||||
<!-- omit in toc -->
|
||||
#### How Do I Submit a Good Enhancement Suggestion?
|
||||
|
||||
Enhancement suggestions are tracked as [GitHub issues](https://github.com/Start9Labs/embassy-os/issues).
|
||||
|
||||
- Use a **clear and descriptive title** for the issue to identify the suggestion.
|
||||
- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
|
||||
- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
|
||||
- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. <!-- this should only be included if the project has a GUI -->
|
||||
- **Explain why this enhancement would be useful** to most Embassy OS users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
|
||||
|
||||
<!-- You might want to create an issue template for enhancement suggestions that can be used as a guide and that defines the structure of the information to be included. If you do so, reference it here in the description. -->
|
||||
|
||||
### Project Structure
|
||||
EmbassyOS is composed of the following components. Please visit the README for each component to understand the dependency requirements and installation instructions.
|
||||
- [`ui`](ui/README.md) (Typescript Ionic Angular) is the code that is deployed to the browser to provide the user interface for EmbassyOS.
|
||||
- [`backend`] (backend/README.md) (Rust) is a command line utility, daemon, and software development kit that sets up and manages services and their environments, provides the interface for the ui, manages system state, and provides utilities for packaging services for EmbassyOS.
|
||||
- `patch-db` - A diff based data store that is used to synchronize data between the front and backend.
|
||||
- Notably, `patch-db` has a [client](patch-db/client/README.md) with its own dependency and installation requirements.
|
||||
- `rpc-toolkit` - A library for generating an rpc server with cli bindings from Rust functions.
|
||||
- `system-images` - (Docker, Rust) A suite of utility Docker images that are preloaded with EmbassyOS to assist with functions relating to services (eg. configuration, backups, health checks).
|
||||
- [`setup-wizard`] (ui/README.md)- Code for the user interface that is displayed during the setup and recovery process for EmbassyOS.
|
||||
- [`diagnostic-ui`] (diagnostic-ui/README.md) - Code for the user interface that is displayed when something has gone wrong with starting up EmbassyOS, which provides helpful debugging tools.
|
||||
### Your First Code Contribution
|
||||
|
||||
#### Setting up your development environment
|
||||
|
||||
First, clone the EmbassyOS repository and from the project root, pull in the submodules for dependent libraries.
|
||||
## Project Structure
|
||||
|
||||
```bash
|
||||
/
|
||||
├── assets/
|
||||
├── core/
|
||||
├── build/
|
||||
├── debian/
|
||||
├── web/
|
||||
├── image-recipe/
|
||||
├── patch-db
|
||||
└── system-images/
|
||||
```
|
||||
git clone https://github.com/Start9Labs/embassy-os.git
|
||||
#### assets
|
||||
screenshots for the StartOS README
|
||||
|
||||
#### core
|
||||
An API, daemon (startd), CLI (start-cli), and SDK (start-sdk) that together provide the core functionality of StartOS.
|
||||
|
||||
#### build
|
||||
Auxiliary files and scripts to include in deployed StartOS images
|
||||
|
||||
#### debian
|
||||
Maintainer scripts for the StartOS Debian package
|
||||
|
||||
#### web
|
||||
Web UIs served under various conditions and used to interact with StartOS APIs.
|
||||
|
||||
#### image-recipe
|
||||
Scripts for building StartOS images
|
||||
|
||||
#### patch-db (submodule)
|
||||
A diff based data store used to synchronize data between the web interfaces and server.
|
||||
|
||||
#### system-images
|
||||
Docker images that assist with creating backups.
|
||||
|
||||
## Environment Setup
|
||||
|
||||
#### Clone the StartOS repository
|
||||
```sh
|
||||
git clone https://github.com/Start9Labs/start-os.git
|
||||
cd start-os
|
||||
```
|
||||
|
||||
#### Load the PatchDB submodule
|
||||
```sh
|
||||
git submodule update --init --recursive
|
||||
```
|
||||
|
||||
Depending on which component of the ecosystem you are interested in contributing to, follow the installation requirements listed in that component's README (linked [above](#project-structure))
|
||||
#### Continue to your project of interest for additional instructions:
|
||||
- [`core`](core/README.md)
|
||||
- [`web-interfaces`](web-interfaces/README.md)
|
||||
- [`build`](build/README.md)
|
||||
- [`patch-db`](https://github.com/Start9Labs/patch-db)
|
||||
|
||||
#### Building The Image
|
||||
This step is for setting up an environment in which to test your code changes if you do not yet have a EmbassyOS.
|
||||
## Building
|
||||
This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components. To build any specific component, simply run `make <TARGET>` replacing `<TARGET>` with the name of the target you'd like to build
|
||||
|
||||
- Requirements
|
||||
- `ext4fs` (available if running on the Linux kernel)
|
||||
- [Docker](https://docs.docker.com/get-docker/)
|
||||
- GNU Make
|
||||
- Building
|
||||
- see setup instructions [here](build/README.md)
|
||||
- run `make` from the project root
|
||||
### Requirements
|
||||
- [GNU Make](https://www.gnu.org/software/make/)
|
||||
- [Docker](https://docs.docker.com/get-docker/)
|
||||
- [NodeJS v18.15.0](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
|
||||
- [sed](https://www.gnu.org/software/sed/)
|
||||
- [grep](https://www.gnu.org/software/grep/)
|
||||
- [awk](https://www.gnu.org/software/gawk/)
|
||||
- [jq](https://jqlang.github.io/jq/)
|
||||
- [gzip](https://www.gnu.org/software/gzip/)
|
||||
- [brotli](https://github.com/google/brotli)
|
||||
|
||||
### Improving The Documentation
|
||||
You can find the repository for Start9's documentation [here](https://github.com/Start9Labs/documentation). If there is something you would like to see added, let us know, or create an issue yourself. Welcome are contributions for lacking or incorrect information, broken links, requested additions, or general style improvements.
|
||||
### Environment variables
|
||||
- `PLATFORM`: which platform you would like to build for. Must be one of `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `raspberrypi`
|
||||
- NOTE: `nonfree` images are for including `nonfree` firmware packages in the built ISO
|
||||
- `ENVIRONMENT`: a hyphen separated set of feature flags to enable
|
||||
- `dev`: enables password ssh (INSECURE!) and does not compress frontends
|
||||
- `unstable`: enables assertions that will cause errors on unexpected inconsistencies that are undesirable in production use either for performance or reliability reasons
|
||||
- `docker`: use `docker` instead of `podman`
|
||||
- `GIT_BRANCH_AS_HASH`: set to `1` to use the current git branch name as the git hash so that the project does not need to be rebuilt on each commit
|
||||
|
||||
Contributions in the form of setup guides for integrations with external applications are highly encouraged. If you struggled through a process and would like to share your steps with others, check out the docs for each [service](https://github.com/Start9Labs/documentation/blob/master/source/user-manuals/available-services/index.rst) we support. The wrapper repos contain sections for adding integration guides, such as this [one](https://github.com/Start9Labs/bitcoind-wrapper/tree/master/docs). These not only help out others in the community, but inform how we can create a more seamless and intuitive experience.
|
||||
|
||||
## Styleguides
|
||||
### Formatting
|
||||
Each component of EmbassyOS contains its own style guide. Code must be formatted with the formatter designated for each component. These are outlined within each component folder's README.
|
||||
|
||||
### Atomic Commits
|
||||
Commits [should be atomic](https://en.wikipedia.org/wiki/Atomic_commit#Atomic_commit_convention) and diffs should be easy to read.
|
||||
Do not mix any formatting fixes or code moves with actual code changes.
|
||||
|
||||
### Commit Messages
|
||||
If a commit touches only 1 component, prefix the message with the affected component. i.e. `backend: update to tokio v0.3`.
|
||||
|
||||
### Pull Requests
|
||||
The body of a pull request should contain sufficient description of what the changes do, as well as a justification.
|
||||
You should include references to any relevant [issues](https://github.com/Start9Labs/embassy-os/issues).
|
||||
|
||||
### Rebasing Changes
|
||||
When a pull request conflicts with the target branch, you may be asked to rebase it on top of the current target branch. The git rebase command will take care of rebuilding your commits on top of the new base.
|
||||
|
||||
This project aims to have a clean git history, where code changes are only made in non-merge commits. This simplifies auditability because merge commits can be assumed to not contain arbitrary code changes.
|
||||
|
||||
## Join The Discussion
|
||||
Current or aspiring contributors? Join our community developer [Matrix channel](https://matrix.to/#/#community-dev:matrix.start9labs.com).
|
||||
|
||||
Just interested in or using the project? Join our community [Telegram](https://t.me/start9_labs) or [Matrix](https://matrix.to/#/#community:matrix.start9labs.com).
|
||||
|
||||
## Join The Project Team
|
||||
Interested in becoming a part of the Start9 Labs team? Send an email to <jobs@start9labs.com>
|
||||
|
||||
<!-- omit in toc -->
|
||||
## Attribution
|
||||
This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
|
||||
### Useful Make Targets
|
||||
- `iso`: Create a full `.iso` image
|
||||
- Only possible from Debian
|
||||
- Not available for `PLATFORM=raspberrypi`
|
||||
- Additional Requirements:
|
||||
- [debspawn](https://github.com/lkhq/debspawn)
|
||||
- `img`: Create a full `.img` image
|
||||
- Only possible from Debian
|
||||
- Only available for `PLATFORM=raspberrypi`
|
||||
- Additional Requirements:
|
||||
- [debspawn](https://github.com/lkhq/debspawn)
|
||||
- `format`: Run automatic code formatting for the project
|
||||
- Additional Requirements:
|
||||
- [rust](https://rustup.rs/)
|
||||
- `test`: Run automated tests for the project
|
||||
- Additional Requirements:
|
||||
- [rust](https://rustup.rs/)
|
||||
- `update`: Deploy the current working project to a device over ssh as if through an over-the-air update
|
||||
- Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
|
||||
- `reflash`: Deploy the current working project to a device over ssh as if using a live `iso` image to reflash it
|
||||
- Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
|
||||
- `update-overlay`: Deploy the current working project to a device over ssh to the in-memory overlay without restarting it
|
||||
- WARNING: changes will be reverted after the device is rebooted
|
||||
- WARNING: changes to `init` will not take effect as the device is already initialized
|
||||
- Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
|
||||
- `wormhole`: Deploy the `startbox` to a device using [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
|
||||
- When the build it complete will emit a command to paste into the shell of the device to upgrade it
|
||||
- Additional Requirements:
|
||||
- [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
|
||||
- `clean`: Delete all compiled artifacts
|
||||
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2023 Start9 Labs, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
25
LICENSE.md
@@ -1,25 +0,0 @@
|
||||
# START9 PERSONAL USE LICENSE v1.0
|
||||
|
||||
This license governs the use of the accompanying Software. If you use the Software, you accept this license. If you do not accept the license, do not use the Software.
|
||||
|
||||
1. **Definitions.**
|
||||
1. “Licensor” means the copyright owner, Start9 Labs, Inc, or its successor(s) in interest, or a future assignee of the copyright.
|
||||
2. “Source Code” means the preferred form of the Software for making modifications to it.
|
||||
3. “Object Code” means any non-source form of the Software, including the machine-language output by a compiler or assembler.
|
||||
4. “Distribute” means to convey or to publish and generally has the same meaning here as under U.S. Copyright law.
|
||||
5. “Sell” means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/support services related to the Software), a product or service whose value derives, entirely or substantially, from the functionality of the Software.
|
||||
|
||||
2. **Grant of Rights.** Subject to the terms of this license, the Licensor grants you, the licensee, a non-exclusive, worldwide, royalty-free copyright license to:
|
||||
1. Access, audit, copy, modify, compile, or distribute the Source Code or modifications to the Source Code.
|
||||
2. Run, test, or otherwise use the Object Code.
|
||||
|
||||
3. **Limitations.**
|
||||
1. The grant of rights under the License will NOT include, and the License does NOT grant you the right to:
|
||||
1. Sell the Software or any derivative works based thereon.
|
||||
2. Distribute the Object Code.
|
||||
2. If you Distribute the Source Code, or if permission is separately granted to Distribute the Object Code, you expressly undertake not to remove, or modify, in any manner, the copyright notices attached to the Source Code, and displayed in any output of the Object Code when run, and to reproduce these notices, in an identical manner, in any distributed copies of the Software together with a copy of this license. If you Distribute a modified copy of the Software, or a derivative work based thereon, the work must carry prominent notices stating that you modified it, and giving a relevant date.
|
||||
3. The terms of this license will apply to anyone who comes into possession of a copy of the Software, and any modifications or derivative works based thereon, made by anyone.
|
||||
|
||||
4. **Contributions.** You hereby grant to Licensor a perpetual, irrevocable, worldwide, non-exclusive, royalty-free license to use and exploit any modifications or derivative works based on the Source Code of which you are the author.
|
||||
|
||||
5. **Disclaimer.** THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. LICENSOR HAS NO OBLIGATION TO SUPPORT RECIPIENTS OF THE SOFTWARE.
|
||||
263
Makefile
@@ -1,73 +1,238 @@
|
||||
EMBASSY_BINS := backend/target/aarch64-unknown-linux-gnu/release/embassyd backend/target/aarch64-unknown-linux-gnu/release/embassy-init backend/target/aarch64-unknown-linux-gnu/release/embassy-cli backend/target/aarch64-unknown-linux-gnu/release/embassy-sdk
|
||||
EMBASSY_UIS := frontend/dist/ui frontend/dist/setup-wizard frontend/dist/diagnostic-ui
|
||||
EMBASSY_SRC := raspios.img product_key.txt $(EMBASSY_BINS) backend/embassyd.service backend/embassy-init.service $(EMBASSY_UIS) $(shell find build)
|
||||
COMPAT_SRC := $(shell find system-images/compat/src)
|
||||
UTILS_SRC := $(shell find system-images/utils/Dockerfile)
|
||||
BACKEND_SRC := $(shell find backend/src) $(shell find patch-db/*/src) $(shell find rpc-toolkit/*/src) backend/Cargo.toml backend/Cargo.lock
|
||||
FRONTEND_SRC := $(shell find frontend/projects) $(shell find frontend/assets)
|
||||
PATCH_DB_CLIENT_SRC = $(shell find patch-db/client -not -path patch-db/client/dist)
|
||||
GIT_REFS := $(shell find .git/refs/heads)
|
||||
TMP_FILE := $(shell mktemp)
|
||||
PLATFORM_FILE := $(shell ./check-platform.sh)
|
||||
ENVIRONMENT_FILE := $(shell ./check-environment.sh)
|
||||
GIT_HASH_FILE := $(shell ./check-git-hash.sh)
|
||||
VERSION_FILE := $(shell ./check-version.sh)
|
||||
BASENAME := $(shell ./basename.sh)
|
||||
PLATFORM := $(shell if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)
|
||||
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
|
||||
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
|
||||
BINS := core/target/$(ARCH)-unknown-linux-gnu/release/startbox core/target/aarch64-unknown-linux-musl/release/container-init core/target/x86_64-unknown-linux-musl/release/container-init
|
||||
WEB_UIS := web/dist/raw/ui web/dist/raw/setup-wizard web/dist/raw/diagnostic-ui web/dist/raw/install-wizard
|
||||
FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
|
||||
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
|
||||
DEBIAN_SRC := $(shell git ls-files debian/)
|
||||
IMAGE_RECIPE_SRC := $(shell git ls-files image-recipe/)
|
||||
STARTD_SRC := core/startos/startd.service $(BUILD_SRC)
|
||||
COMPAT_SRC := $(shell git ls-files system-images/compat/)
|
||||
UTILS_SRC := $(shell git ls-files system-images/utils/)
|
||||
BINFMT_SRC := $(shell git ls-files system-images/binfmt/)
|
||||
CORE_SRC := $(shell git ls-files core) $(shell git ls-files --recurse-submodules patch-db) web/dist/static web/patchdb-ui-seed.json $(GIT_HASH_FILE)
|
||||
WEB_SHARED_SRC := $(shell git ls-files web/projects/shared) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules web/config.json patch-db/client/dist web/patchdb-ui-seed.json
|
||||
WEB_UI_SRC := $(shell git ls-files web/projects/ui)
|
||||
WEB_SETUP_WIZARD_SRC := $(shell git ls-files web/projects/setup-wizard)
|
||||
WEB_DIAGNOSTIC_UI_SRC := $(shell git ls-files web/projects/diagnostic-ui)
|
||||
WEB_INSTALL_WIZARD_SRC := $(shell git ls-files web/projects/install-wizard)
|
||||
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
|
||||
GZIP_BIN := $(shell which pigz || which gzip)
|
||||
TAR_BIN := $(shell which gtar || which tar)
|
||||
COMPILED_TARGETS := $(BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar
|
||||
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console; fi') $(PLATFORM_FILE)
|
||||
|
||||
ifeq ($(REMOTE),)
|
||||
mkdir = mkdir -p $1
|
||||
rm = rm -rf $1
|
||||
cp = cp -r $1 $2
|
||||
ln = ln -sf $1 $2
|
||||
else
|
||||
ifeq ($(SSHPASS),)
|
||||
ssh = ssh $(REMOTE) $1
|
||||
else
|
||||
ssh = sshpass -p $(SSHPASS) ssh $(REMOTE) $1
|
||||
endif
|
||||
mkdir = $(call ssh,'sudo mkdir -p $1')
|
||||
rm = $(call ssh,'sudo rm -rf $1')
|
||||
ln = $(call ssh,'sudo ln -sf $1 $2')
|
||||
define cp
|
||||
$(TAR_BIN) --transform "s|^$1|x|" -czv -f- $1 | $(call ssh,"sudo tar --transform 's|^x|$2|' -xzv -f- -C /")
|
||||
endef
|
||||
endif
|
||||
|
||||
.DELETE_ON_ERROR:
|
||||
|
||||
all: eos.img
|
||||
.PHONY: all metadata install clean format sdk snapshots uis ui reflash deb $(IMAGE_TYPE) squashfs sudo wormhole test
|
||||
|
||||
gzip: eos.img
|
||||
gzip -k eos.img
|
||||
all: $(ALL_TARGETS)
|
||||
|
||||
metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
|
||||
|
||||
sudo:
|
||||
sudo true
|
||||
|
||||
clean:
|
||||
rm -f eos.img
|
||||
rm -f ubuntu.img
|
||||
rm -f product_key.txt
|
||||
rm -f system-images/**/*.tar
|
||||
sudo rm -f $(EMBASSY_BINS)
|
||||
rm -rf frontend/node_modules
|
||||
rm -rf frontend/dist
|
||||
rm -rf system-images/compat/target
|
||||
rm -rf core/target
|
||||
rm -rf web/.angular
|
||||
rm -f web/config.json
|
||||
rm -rf web/node_modules
|
||||
rm -rf web/dist
|
||||
rm -rf patch-db/client/node_modules
|
||||
rm -rf patch-db/client/dist
|
||||
rm -rf patch-db/target
|
||||
rm -rf cargo-deps
|
||||
rm -rf dpkg-workdir
|
||||
rm -rf image-recipe/deb
|
||||
rm -rf results
|
||||
rm -rf build/lib/firmware
|
||||
rm -f ENVIRONMENT.txt
|
||||
rm -f PLATFORM.txt
|
||||
rm -f GIT_HASH.txt
|
||||
rm -f VERSION.txt
|
||||
|
||||
eos.img: $(EMBASSY_SRC) system-images/compat/compat.tar system-images/utils/utils.tar
|
||||
! test -f eos.img || rm eos.img
|
||||
if [ "$(NO_KEY)" = "1" ]; then NO_KEY=1 ./build/make-image.sh; else ./build/make-image.sh; fi
|
||||
format:
|
||||
cd core && cargo +nightly fmt
|
||||
|
||||
system-images/compat/compat.tar: $(COMPAT_SRC)
|
||||
cd system-images/compat && ./build.sh
|
||||
cd system-images/compat && DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/compat --platform=linux/arm64 -o type=docker,dest=compat.tar .
|
||||
test: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||
cd core && cargo build && cargo test
|
||||
|
||||
system-images/utils/utils.tar: $(UTILS_SRC)
|
||||
cd system-images/utils && DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/utils --platform=linux/arm64 -o type=docker,dest=utils.tar .
|
||||
sdk:
|
||||
cd core && ./install-sdk.sh
|
||||
|
||||
raspios.img:
|
||||
wget https://downloads.raspberrypi.org/raspios_lite_arm64/images/raspios_lite_arm64-2022-01-28/2022-01-28-raspios-bullseye-arm64-lite.zip
|
||||
unzip 2022-01-28-raspios-bullseye-arm64-lite.zip
|
||||
mv 2022-01-28-raspios-bullseye-arm64-lite.img raspios.img
|
||||
deb: results/$(BASENAME).deb
|
||||
|
||||
product_key.txt:
|
||||
$(shell which echo) -n "X" > product_key.txt
|
||||
cat /dev/urandom | base32 | head -c11 | tr '[:upper:]' '[:lower:]' >> product_key.txt
|
||||
if [ "$(KEY)" != "" ]; then $(shell which echo) -n "$(KEY)" > product_key.txt; fi
|
||||
echo >> product_key.txt
|
||||
debian/control: build/lib/depends build/lib/conflicts
|
||||
./debuild/control.sh
|
||||
|
||||
$(EMBASSY_BINS): $(BACKEND_SRC)
|
||||
cd backend && ./build-prod.sh
|
||||
results/$(BASENAME).deb: dpkg-build.sh $(DEBIAN_SRC) $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
|
||||
PLATFORM=$(PLATFORM) ./dpkg-build.sh
|
||||
|
||||
frontend/node_modules: frontend/package.json
|
||||
npm --prefix frontend ci
|
||||
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
|
||||
|
||||
$(EMBASSY_UIS): $(FRONTEND_SRC) frontend/node_modules patch-db/client patch-db/client/dist frontend/config.json
|
||||
npm --prefix frontend run build:all
|
||||
squashfs: results/$(BASENAME).squashfs
|
||||
|
||||
frontend/config.json: .git/HEAD $(GIT_REFS)
|
||||
jq '.useMocks = false' frontend/config-sample.json > frontend/config.json
|
||||
npm --prefix frontend run-script build-config
|
||||
results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_SRC) results/$(BASENAME).deb
|
||||
./image-recipe/run-local-build.sh "results/$(BASENAME).deb"
|
||||
|
||||
# For creating os images. DO NOT USE
|
||||
install: $(ALL_TARGETS)
|
||||
$(call mkdir,$(DESTDIR)/usr/bin)
|
||||
$(call cp,core/target/$(ARCH)-unknown-linux-gnu/release/startbox,$(DESTDIR)/usr/bin/startbox)
|
||||
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
|
||||
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
|
||||
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk)
|
||||
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-deno)
|
||||
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/avahi-alias)
|
||||
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/embassy-cli)
|
||||
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
|
||||
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then $(call cp,cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); fi
|
||||
|
||||
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||
$(call cp,core/startos/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
|
||||
|
||||
$(call mkdir,$(DESTDIR)/usr/lib)
|
||||
$(call rm,$(DESTDIR)/usr/lib/startos)
|
||||
$(call cp,build/lib,$(DESTDIR)/usr/lib/startos)
|
||||
|
||||
$(call cp,PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
|
||||
$(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
|
||||
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
|
||||
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
|
||||
|
||||
$(call mkdir,$(DESTDIR)/usr/lib/startos/container)
|
||||
$(call cp,core/target/aarch64-unknown-linux-musl/release/container-init,$(DESTDIR)/usr/lib/startos/container/container-init.arm64)
|
||||
$(call cp,core/target/x86_64-unknown-linux-musl/release/container-init,$(DESTDIR)/usr/lib/startos/container/container-init.amd64)
|
||||
|
||||
$(call mkdir,$(DESTDIR)/usr/lib/startos/system-images)
|
||||
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/compat.tar)
|
||||
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/utils.tar)
|
||||
$(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/binfmt.tar)
|
||||
|
||||
$(call cp,firmware/$(PLATFORM),$(DESTDIR)/usr/lib/startos/firmware)
|
||||
|
||||
update-overlay: $(ALL_TARGETS)
|
||||
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
|
||||
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
|
||||
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||
@if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
|
||||
$(call ssh,"sudo systemctl stop startd")
|
||||
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
|
||||
$(call ssh,"sudo systemctl start startd")
|
||||
|
||||
wormhole: core/target/$(ARCH)-unknown-linux-gnu/release/startbox
|
||||
@wormhole send core/target/$(ARCH)-unknown-linux-gnu/release/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
|
||||
|
||||
update: $(ALL_TARGETS)
|
||||
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
|
||||
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
|
||||
$(call ssh,'sudo NO_SYNC=1 /media/embassy/next/usr/lib/startos/scripts/chroot-and-upgrade "apt-get install -y $(shell cat ./build/lib/depends)"')
|
||||
|
||||
emulate-reflash: $(ALL_TARGETS)
|
||||
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
|
||||
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
|
||||
$(call ssh,"sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot")
|
||||
|
||||
upload-ota: results/$(BASENAME).squashfs
|
||||
TARGET=$(TARGET) KEY=$(KEY) ./upload-ota.sh
|
||||
|
||||
build/lib/depends build/lib/conflicts: build/dpkg-deps/*
|
||||
build/dpkg-deps/generate.sh
|
||||
|
||||
$(FIRMWARE_ROMS): build/lib/firmware.json download-firmware.sh $(PLATFORM_FILE)
|
||||
./download-firmware.sh $(PLATFORM)
|
||||
|
||||
system-images/compat/docker-images/$(ARCH).tar: $(COMPAT_SRC) core/Cargo.lock
|
||||
cd system-images/compat && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
|
||||
|
||||
system-images/utils/docker-images/$(ARCH).tar: $(UTILS_SRC)
|
||||
cd system-images/utils && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
|
||||
|
||||
system-images/binfmt/docker-images/$(ARCH).tar: $(BINFMT_SRC)
|
||||
cd system-images/binfmt && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
|
||||
|
||||
snapshots: core/snapshot-creator/Cargo.toml
|
||||
cd core/ && ARCH=aarch64 ./build-v8-snapshot.sh
|
||||
cd core/ && ARCH=x86_64 ./build-v8-snapshot.sh
|
||||
|
||||
$(BINS): $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||
cd core && ARCH=$(ARCH) ./build-prod.sh
|
||||
touch $(BINS)
|
||||
|
||||
web/node_modules: web/package.json
|
||||
npm --prefix web ci
|
||||
|
||||
web/dist/raw/ui: $(WEB_UI_SRC) $(WEB_SHARED_SRC)
|
||||
npm --prefix web run build:ui
|
||||
|
||||
web/dist/raw/setup-wizard: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC)
|
||||
npm --prefix web run build:setup
|
||||
|
||||
web/dist/raw/diagnostic-ui: $(WEB_DIAGNOSTIC_UI_SRC) $(WEB_SHARED_SRC)
|
||||
npm --prefix web run build:dui
|
||||
|
||||
web/dist/raw/install-wizard: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC)
|
||||
npm --prefix web run build:install-wiz
|
||||
|
||||
web/dist/static: $(WEB_UIS) $(ENVIRONMENT_FILE)
|
||||
./compress-uis.sh
|
||||
|
||||
web/config.json: $(GIT_HASH_FILE) web/config-sample.json
|
||||
jq '.useMocks = false' web/config-sample.json | jq '.gitHash = "$(shell cat GIT_HASH.txt)"' > web/config.json
|
||||
|
||||
web/patchdb-ui-seed.json: web/package.json
|
||||
jq '."ack-welcome" = $(shell jq '.version' web/package.json)' web/patchdb-ui-seed.json > ui-seed.tmp
|
||||
mv ui-seed.tmp web/patchdb-ui-seed.json
|
||||
|
||||
patch-db/client/node_modules: patch-db/client/package.json
|
||||
npm --prefix patch-db/client install
|
||||
npm --prefix patch-db/client ci
|
||||
|
||||
patch-db/client/dist: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules
|
||||
! test -d patch-db/client/dist || rm -rf patch-db/client/dist
|
||||
npm --prefix patch-db/client run build
|
||||
npm --prefix web run build:deps
|
||||
|
||||
# this is a convenience step to build all frontends - it is not referenced elsewhere in this file
|
||||
frontend: frontend/node_modules $(EMBASSY_UIS)
|
||||
# used by github actions
|
||||
compiled-$(ARCH).tar: $(COMPILED_TARGETS) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
|
||||
tar -cvf $@ $^
|
||||
|
||||
# this is a convenience step to build all web uis - it is not referenced elsewhere in this file
|
||||
uis: $(WEB_UIS)
|
||||
|
||||
# this is a convenience step to build the UI
|
||||
ui: web/dist/raw/ui
|
||||
|
||||
cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep:
|
||||
ARCH=aarch64 ./build-cargo-dep.sh pi-beep
|
||||
|
||||
cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console:
|
||||
ARCH=$(ARCH) ./build-cargo-dep.sh tokio-console
|
||||
123
README.md
@@ -1,49 +1,82 @@
|
||||
# EmbassyOS
|
||||
[](https://github.com/Start9Labs/embassy-os/releases)
|
||||
[](https://matrix.to/#/#community:matrix.start9labs.com)
|
||||
[](https://t.me/start9_labs)
|
||||
[](https://docs.start9labs.com)
|
||||
[](https://matrix.to/#/#community-dev:matrix.start9labs.com)
|
||||
[](https://start9labs.com)
|
||||
|
||||
[](http://mastodon.start9labs.com)
|
||||
[](https://twitter.com/start9labs)
|
||||
|
||||
### _Welcome to the era of Sovereign Computing_ ###
|
||||
|
||||
EmbassyOS is a browser-based, graphical operating system for a personal server. EmbassyOS facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services. It is the most advanced, secure, reliable, and user friendly personal server OS in the world.
|
||||
|
||||
## Running EmbassyOS
|
||||
There are multiple ways to get your hands on EmbassyOS.
|
||||
|
||||
### :moneybag: Buy an Embassy
|
||||
This is the most convenient option. Simply [buy an Embassy](https://start9.com) from Start9 and plug it in. Depending on where you live, shipping costs and import duties will vary.
|
||||
|
||||
### :construction_worker: Build your own Embassy
|
||||
While not as convenient as buying an Embassy, this option is easier than you might imagine, and there are 4 reasons why you might prefer it:
|
||||
1. You already have a Raspberry Pi and would like to re-purpose it.
|
||||
1. You want to save on shipping costs.
|
||||
1. You prefer not to divulge your physical address.
|
||||
1. You just like building things.
|
||||
|
||||
To pursue this option, follow this [guide](https://start9.com/latest/diy).
|
||||
|
||||
### :hammer_and_wrench: Build EmbassyOS from Source
|
||||
|
||||
EmbassyOS can be built from source, for personal use, for free.
|
||||
A detailed guide for doing so can be found [here](https://github.com/Start9Labs/embassy-os/blob/master/build/README.md).
|
||||
|
||||
## :heart: Contributing
|
||||
There are multiple ways to contribute: work directly on EmbassyOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://github.com/Start9Labs/embassy-os/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## UI Screenshots
|
||||
<div align="center">
|
||||
<img src="web/projects/shared/assets/img/icon.png" alt="StartOS Logo" width="16%" />
|
||||
<h1 style="margin-top: 0;">StartOS</h1>
|
||||
<a href="https://github.com/Start9Labs/start-os/releases">
|
||||
<img alt="GitHub release (with filter)" src="https://img.shields.io/github/v/release/start9labs/start-os?logo=github">
|
||||
</a>
|
||||
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
|
||||
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
|
||||
</a>
|
||||
<a href="https://heyapollo.com/product/startos">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
|
||||
</a>
|
||||
<a href="https://twitter.com/start9labs">
|
||||
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
|
||||
</a>
|
||||
<a href="https://matrix.to/#/#community:matrix.start9labs.com">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
|
||||
</a>
|
||||
<a href="https://t.me/start9_labs">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
|
||||
</a>
|
||||
<a href="https://docs.start9.com">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
|
||||
</a>
|
||||
<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
|
||||
</a>
|
||||
<a href="https://start9.com">
|
||||
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
|
||||
</a>
|
||||
</div>
|
||||
<br />
|
||||
<div align="center">
|
||||
<h3>
|
||||
Welcome to the era of Sovereign Computing
|
||||
</h3>
|
||||
<p>
|
||||
StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
|
||||
</p>
|
||||
</div>
|
||||
<br />
|
||||
<p align="center">
|
||||
<img src="assets/EmbassyOS.png" alt="EmbassyOS" width="65%">
|
||||
<img src="assets/StartOS.png" alt="StartOS" width="85%">
|
||||
</p>
|
||||
<br />
|
||||
|
||||
## Running StartOS
|
||||
> [!WARNING]
|
||||
> StartOS is in beta. It lacks features. It doesn't always work perfectly. Start9 servers are not plug and play. Using them properly requires some effort and patience. Please do not use StartOS or purchase a server if you are unable or unwilling to follow instructions and learn new concepts.
|
||||
|
||||
### 💰 Buy a Start9 server
|
||||
This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.
|
||||
|
||||
### 👷 Build your own server
|
||||
This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
|
||||
1. You already have hardware
|
||||
1. You want to save on shipping costs
|
||||
1. You prefer not to divulge your physical address
|
||||
1. You just like building things
|
||||
|
||||
To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
|
||||
|
||||
## ❤️ Contributing
|
||||
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).
|
||||
|
||||
To report security issues, please email our security team - security@start9.com.
|
||||
|
||||
## 🌎 Marketplace
|
||||
There are dozens of services available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/)
|
||||
|
||||
## 🖥️ User Interface Screenshots
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/eos-services.png" alt="Embassy Services" width="45%">
|
||||
<img src="assets/eos-preferences.png" alt="Embassy Preferences" width="45%">
|
||||
</p>
|
||||
<p align="center">
|
||||
<img src="assets/eos-bitcoind-health-check.png" alt="Embassy Bitcoin Health Checks" width="45%"> <img src="assets/eos-logs.png" alt="Embassy Logs" width="45%">
|
||||
<img src="assets/registry.png" alt="StartOS Marketplace" width="49%">
|
||||
<img src="assets/community.png" alt="StartOS Community Registry" width="49%">
|
||||
<img src="assets/c-lightning.png" alt="StartOS NextCloud Service" width="49%">
|
||||
<img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%">
|
||||
<img src="assets/nextcloud.png" alt="StartOS System Settings" width="49%">
|
||||
<img src="assets/system.png" alt="StartOS System Settings" width="49%">
|
||||
<img src="assets/welcome.png" alt="StartOS System Settings" width="49%">
|
||||
<img src="assets/logs.png" alt="StartOS System Settings" width="49%">
|
||||
</p>
|
||||
|
||||
|
Before Width: | Height: | Size: 285 KiB |
BIN
assets/StartOS.png
Normal file
|
After Width: | Height: | Size: 2.1 MiB |
BIN
assets/btcpay.png
Normal file
|
After Width: | Height: | Size: 396 KiB |
BIN
assets/c-lightning.png
Normal file
|
After Width: | Height: | Size: 402 KiB |
BIN
assets/community.png
Normal file
|
After Width: | Height: | Size: 591 KiB |
|
Before Width: | Height: | Size: 334 KiB |
|
Before Width: | Height: | Size: 1.2 MiB |
|
Before Width: | Height: | Size: 347 KiB |
|
Before Width: | Height: | Size: 599 KiB |
BIN
assets/logs.png
Normal file
|
After Width: | Height: | Size: 1.6 MiB |
BIN
assets/nextcloud.png
Normal file
|
After Width: | Height: | Size: 319 KiB |
BIN
assets/registry.png
Normal file
|
After Width: | Height: | Size: 521 KiB |
BIN
assets/system.png
Normal file
|
After Width: | Height: | Size: 331 KiB |
BIN
assets/welcome.png
Normal file
|
After Width: | Height: | Size: 402 KiB |
4239
backend/Cargo.lock
generated
@@ -1,139 +0,0 @@
|
||||
[package]
|
||||
authors = ["Aiden McClelland <me@drbonez.dev>"]
|
||||
description = "The core of the Start9 Embassy Operating System"
|
||||
documentation = "https://docs.rs/embassy-os"
|
||||
edition = "2018"
|
||||
keywords = [
|
||||
"self-hosted",
|
||||
"raspberry-pi",
|
||||
"privacy",
|
||||
"bitcoin",
|
||||
"full-node",
|
||||
"lightning",
|
||||
]
|
||||
name = "embassy-os"
|
||||
readme = "README.md"
|
||||
repository = "https://github.com/Start9Labs/embassy-os"
|
||||
version = "0.3.0-rev.2"
|
||||
|
||||
[lib]
|
||||
name = "embassy"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "embassyd"
|
||||
path = "src/bin/embassyd.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "embassy-init"
|
||||
path = "src/bin/embassy-init.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "embassy-sdk"
|
||||
path = "src/bin/embassy-sdk.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "embassy-cli"
|
||||
path = "src/bin/embassy-cli.rs"
|
||||
|
||||
[features]
|
||||
avahi = ["avahi-sys"]
|
||||
beta = []
|
||||
default = ["avahi", "sound", "metal"]
|
||||
metal = []
|
||||
sound = []
|
||||
unstable = ["patch-db/unstable"]
|
||||
|
||||
[dependencies]
|
||||
aes = { version = "0.7.5", features = ["ctr"] }
|
||||
async-trait = "0.1.51"
|
||||
avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [
|
||||
"dynamic",
|
||||
], optional = true }
|
||||
base32 = "0.4.0"
|
||||
base64 = "0.13.0"
|
||||
basic-cookies = "0.1.4"
|
||||
bollard = "0.11.0"
|
||||
chrono = { version = "0.4.19", features = ["serde"] }
|
||||
clap = "2.33"
|
||||
color-eyre = "0.5"
|
||||
cookie_store = "0.15.0"
|
||||
digest = "0.9.0"
|
||||
divrem = "1.0.0"
|
||||
ed25519-dalek = { version = "1.0.1", features = ["serde"] }
|
||||
emver = { version = "0.1.6", features = ["serde"] }
|
||||
fd-lock-rs = "0.1.3"
|
||||
futures = "0.3.17"
|
||||
git-version = "0.3.5"
|
||||
hex = "0.4.3"
|
||||
hmac = "0.11.0"
|
||||
http = "0.2.5"
|
||||
hyper = "0.14.13"
|
||||
hyper-ws-listener = { git = "https://github.com/Start9Labs/hyper-ws-listener.git", branch = "main" }
|
||||
imbl = "1.0.1"
|
||||
indexmap = { version = "1.7.0", features = ["serde"] }
|
||||
isocountry = "0.3.2"
|
||||
itertools = "0.10.1"
|
||||
jsonpath_lib = "0.3.0"
|
||||
lazy_static = "1.4"
|
||||
libc = "0.2.103"
|
||||
log = "0.4.14"
|
||||
nix = "0.23.0"
|
||||
nom = "7.0.0"
|
||||
num = "0.4.0"
|
||||
num_enum = "0.5.4"
|
||||
openssh-keys = "0.5.0"
|
||||
openssl = { version = "0.10.36", features = ["vendored"] }
|
||||
patch-db = { version = "*", path = "../patch-db/patch-db", features = [
|
||||
"trace",
|
||||
] }
|
||||
pbkdf2 = "0.9.0"
|
||||
pin-project = "1.0.8"
|
||||
platforms = "1.1.0"
|
||||
prettytable-rs = "0.8.0"
|
||||
proptest = "1.0.0"
|
||||
proptest-derive = "0.3.0"
|
||||
rand = "0.7.3"
|
||||
regex = "1.5.4"
|
||||
reqwest = { version = "0.11.4", features = ["stream", "json", "socks"] }
|
||||
reqwest_cookie_store = "0.2.0"
|
||||
rpassword = "5.0.1"
|
||||
rpc-toolkit = { version = "*", path = "../rpc-toolkit/rpc-toolkit" }
|
||||
rust-argon2 = "0.8.3"
|
||||
scopeguard = "1.1" # because avahi-sys fucks your shit up
|
||||
serde = { version = "1.0.130", features = ["derive", "rc"] }
|
||||
serde_cbor = { package = "ciborium", version = "0.2.0" }
|
||||
serde_json = "1.0.68"
|
||||
serde_toml = { package = "toml", version = "0.5.8" }
|
||||
serde_yaml = "0.8.21"
|
||||
sha2 = "0.9.8"
|
||||
simple-logging = "2.0"
|
||||
sqlx = { version = "0.5.11", features = [
|
||||
"chrono",
|
||||
"offline",
|
||||
"runtime-tokio-rustls",
|
||||
"sqlite",
|
||||
] }
|
||||
stderrlog = "0.5.1"
|
||||
tar = "0.4.37"
|
||||
thiserror = "1.0.29"
|
||||
tokio = { version = "1.15.0", features = ["full"] }
|
||||
tokio-compat-02 = "0.2.0"
|
||||
tokio-stream = { version = "0.1.7", features = ["io-util", "sync"] }
|
||||
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
|
||||
tokio-tungstenite = "0.14.0"
|
||||
tokio-util = { version = "0.6.8", features = ["io"] }
|
||||
torut = "0.2.0"
|
||||
tracing = "0.1"
|
||||
tracing-error = "0.1"
|
||||
tracing-futures = "0.2"
|
||||
tracing-subscriber = "0.2"
|
||||
typed-builder = "0.9.1"
|
||||
url = { version = "2.2.2", features = ["serde"] }
|
||||
|
||||
[dependencies.serde_with]
|
||||
features = ["macros", "json"]
|
||||
version = "1.10.0"
|
||||
|
||||
[profile.dev.package.backtrace]
|
||||
opt-level = 3
|
||||
@@ -1,35 +0,0 @@
|
||||
# EmbassyOS Backend
|
||||
|
||||
- Requirements:
|
||||
- [Install Rust](https://rustup.rs)
|
||||
- Recommended: [rust-analyzer](https://rust-analyzer.github.io/)
|
||||
- [Docker](https://docs.docker.com/get-docker/)
|
||||
- [Rust ARM64 Build Container](https://github.com/Start9Labs/rust-arm-builder)
|
||||
- Scripts (run withing the `./backend` directory)
|
||||
- `build-prod.sh` - compiles a release build of the artifacts for running on ARM64
|
||||
- `build-dev.sh` - compiles a development build of the artifacts for running on ARM64
|
||||
- A Linux computer or VM
|
||||
|
||||
## Structure
|
||||
|
||||
The EmbassyOS backend is broken up into 4 different binaries:
|
||||
|
||||
- embassyd: This is the main workhorse of EmbassyOS - any new functionality you want will likely go here
|
||||
- embassy-init: This is the component responsible for allowing you to set up your device, and handles system initialization on startup
|
||||
- embassy-cli: This is a CLI tool that will allow you to issue commands to embassyd and control it similarly to the UI
|
||||
- embassy-sdk: This is a CLI tool that aids in building and packaging services you wish to deploy to the Embassy
|
||||
|
||||
Finally there is a library `embassy` that supports all four of these tools.
|
||||
|
||||
See [here](/backend/Cargo.toml) for details.
|
||||
|
||||
## Building
|
||||
|
||||
You can build the entire operating system image using `make` from the root of the EmbassyOS project. This will subsequently invoke the build scripts above to actually create the requisite binaries and put them onto the final operating system image.
|
||||
|
||||
## Questions
|
||||
|
||||
If you have questions about how various pieces of the backend system work. Open an issue and tag the following people
|
||||
|
||||
- dr-bonez
|
||||
- ProofOfKeags
|
||||
@@ -1,16 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
shopt -s expand_aliases
|
||||
|
||||
if [ "$0" != "./build-dev.sh" ]; then
|
||||
>&2 echo "Must be run from backend directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
alias 'rust-arm64-builder'='docker run --rm -it -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
|
||||
|
||||
cd ..
|
||||
rust-arm64-builder sh -c "(cd backend && cargo build)"
|
||||
cd backend
|
||||
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
shopt -s expand_aliases
|
||||
|
||||
if [ "$0" != "./build-portable-dev.sh" ]; then
|
||||
>&2 echo "Must be run from backend directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
alias 'rust-musl-builder'='docker run --rm -it -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
|
||||
|
||||
cd ..
|
||||
rust-musl-builder sh -c "(cd backend && cargo +beta build --target=x86_64-unknown-linux-musl --no-default-features)"
|
||||
cd backend
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
shopt -s expand_aliases
|
||||
|
||||
if [ "$0" != "./build-portable.sh" ]; then
|
||||
>&2 echo "Must be run from backend directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
alias 'rust-musl-builder'='docker run --rm -it -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
|
||||
|
||||
cd ..
|
||||
rust-musl-builder sh -c "(cd backend && cargo +beta build --release --target=x86_64-unknown-linux-musl --no-default-features)"
|
||||
cd backend
|
||||
@@ -1,28 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
shopt -s expand_aliases
|
||||
|
||||
if [ "$0" != "./build-prod.sh" ]; then
|
||||
>&2 echo "Must be run from backend directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
alias 'rust-arm64-builder'='docker run --rm -it -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
|
||||
|
||||
cd ..
|
||||
if [[ "$ENVIRONMENT" =~ (^|-)unstable($|-) ]]; then
|
||||
if [[ "$ENVIRONMENT" =~ (^|-)beta($|-) ]]; then
|
||||
rust-arm64-builder sh -c "(cd backend && cargo build --release --features beta,unstable)"
|
||||
else
|
||||
rust-arm64-builder sh -c "(cd backend && cargo build --release --features unstable)"
|
||||
fi
|
||||
else
|
||||
if [[ "$ENVIRONMENT" =~ (^|-)beta($|-) ]]; then
|
||||
rust-arm64-builder sh -c "(cd backend && cargo build --release --features beta)"
|
||||
else
|
||||
rust-arm64-builder sh -c "(cd backend && cargo build --release)"
|
||||
fi
|
||||
fi
|
||||
cd backend
|
||||
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd
|
||||
@@ -1,12 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Enter the backend directory, copy over the built EmbassyOS binaries and systemd services, edit the nginx config, then create the .ssh directory
|
||||
|
||||
cp target/aarch64-unknown-linux-gnu/release/embassy-init /mnt/usr/local/bin
|
||||
cp target/aarch64-unknown-linux-gnu/release/embassyd /mnt/usr/local/bin
|
||||
cp target/aarch64-unknown-linux-gnu/release/embassy-cli /mnt/usr/local/bin
|
||||
cp *.service /mnt/etc/systemd/system/
|
||||
|
||||
echo "application/wasm wasm;" | sudo tee -a "/mnt/etc/nginx/mime.types"
|
||||
|
||||
mkdir -p /mnt/root/.ssh
|
||||
@@ -1,14 +0,0 @@
|
||||
[Unit]
|
||||
Description=Embassy Init
|
||||
After=network.target
|
||||
Requires=network.target
|
||||
Wants=avahi-daemon.service nginx.service tor.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
Environment=RUST_LOG=embassy_init=debug,embassy=debug
|
||||
ExecStart=/usr/local/bin/embassy-init
|
||||
RemainAfterExit=true
|
||||
|
||||
[Install]
|
||||
WantedBy=embassyd.service
|
||||
@@ -1,14 +0,0 @@
|
||||
[Unit]
|
||||
Description=Embassy Daemon
|
||||
After=embassy-init.service
|
||||
Requires=embassy-init.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
Environment=RUST_LOG=embassyd=debug,embassy=debug
|
||||
ExecStart=/usr/local/bin/embassyd
|
||||
Restart=always
|
||||
RestartSec=3
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,11 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
shopt -s expand_aliases
|
||||
|
||||
if [ "$0" != "./install-sdk.sh" ]; then
|
||||
>&2 echo "Must be run from backend directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cargo install --bin=embassy-sdk --path=. --no-default-features
|
||||
@@ -1,58 +0,0 @@
|
||||
-- Add migration script here
|
||||
CREATE TABLE IF NOT EXISTS tor
|
||||
(
|
||||
package TEXT NOT NULL,
|
||||
interface TEXT NOT NULL,
|
||||
key BLOB NOT NULL CHECK (length(key) = 64),
|
||||
PRIMARY KEY (package, interface)
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS session
|
||||
(
|
||||
id TEXT NOT NULL PRIMARY KEY,
|
||||
logged_in TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
logged_out TIMESTAMP,
|
||||
last_active TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
user_agent TEXT,
|
||||
metadata TEXT NOT NULL DEFAULT 'null'
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS account
|
||||
(
|
||||
id INTEGER PRIMARY KEY CHECK (id = 0),
|
||||
password TEXT NOT NULL,
|
||||
tor_key BLOB NOT NULL CHECK (length(tor_key) = 64)
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS ssh_keys
|
||||
(
|
||||
fingerprint TEXT NOT NULL,
|
||||
openssh_pubkey TEXT NOT NULL,
|
||||
created_at TEXT NOT NULL,
|
||||
PRIMARY KEY (fingerprint)
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS certificates
|
||||
(
|
||||
id INTEGER PRIMARY KEY, -- Root = 0, Int = 1, Other = 2..
|
||||
priv_key_pem TEXT NOT NULL,
|
||||
certificate_pem TEXT NOT NULL,
|
||||
lookup_string TEXT UNIQUE,
|
||||
created_at TEXT,
|
||||
updated_at TEXT
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS notifications
|
||||
(
|
||||
id INTEGER PRIMARY KEY,
|
||||
package_id TEXT,
|
||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
code INTEGER NOT NULL,
|
||||
level TEXT NOT NULL,
|
||||
title TEXT NOT NULL,
|
||||
message TEXT NOT NULL,
|
||||
data TEXT
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS cifs_shares
|
||||
(
|
||||
id INTEGER PRIMARY KEY,
|
||||
hostname TEXT NOT NULL,
|
||||
path TEXT NOT NULL,
|
||||
username TEXT NOT NULL,
|
||||
password TEXT
|
||||
);
|
||||
@@ -1,705 +0,0 @@
|
||||
{
|
||||
"db": "SQLite",
|
||||
"10350f5a16f1b2a6ce91672ae5dc6acc46691bd8f901861545ec83c326a8ccef": {
|
||||
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES (?, ?, ?)",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"118d59de5cf930d5a3b5667b2220e9a3d593bd84276beb2b76c93b2694b0fd72": {
|
||||
"query": "INSERT INTO session (id, user_agent, metadata) VALUES (?, ?, ?)",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"165daa7d6a60cb42122373b2c5ac7d39399bcc99992f0002ee7bfef50a8daceb": {
|
||||
"query": "DELETE FROM certificates WHERE id = 0 OR id = 1;",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"177c4b9cc7901a3b906e5969b86b1c11e6acbfb8e86e98f197d7333030b17964": {
|
||||
"query": "DELETE FROM notifications WHERE id = ?",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"1b2242afa55e730b37b00929b656d80940b457ec86c234ddd0de917bd8872611": {
|
||||
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES (?, ?, ?, ?) RETURNING id AS \"id: u32\"",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id: u32",
|
||||
"ordinal": 0,
|
||||
"type_info": "Int64"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 4
|
||||
},
|
||||
"nullable": [
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"1eee1fdc793919c391008854407143d7a11b4668486c11a760b49af49992f9f8": {
|
||||
"query": "REPLACE INTO tor (package, interface, key) VALUES (?, 'main', ?)",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"2932aa02735b6422fca4ba889abfb3de8598178d4690076dc278898753d9df62": {
|
||||
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = ?",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"3502e58f2ab48fb4566d21c920c096f81acfa3ff0d02f970626a4dcd67bac71d": {
|
||||
"query": "SELECT tor_key FROM account",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "tor_key",
|
||||
"ordinal": 0,
|
||||
"type_info": "Blob"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"3e57a0e52b69f33e9411c13b03a5d82c5856d63f0375eb4c23b255a09c54f8b1": {
|
||||
"query": "SELECT key FROM tor WHERE package = ? AND interface = ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "key",
|
||||
"ordinal": 0,
|
||||
"type_info": "Blob"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": [
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": {
|
||||
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "logged_in",
|
||||
"ordinal": 1,
|
||||
"type_info": "Datetime"
|
||||
},
|
||||
{
|
||||
"name": "logged_out",
|
||||
"ordinal": 2,
|
||||
"type_info": "Datetime"
|
||||
},
|
||||
{
|
||||
"name": "last_active",
|
||||
"ordinal": 3,
|
||||
"type_info": "Datetime"
|
||||
},
|
||||
{
|
||||
"name": "user_agent",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "metadata",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false,
|
||||
true,
|
||||
false,
|
||||
true,
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"530192a2a530ee6b92e5b98e1eb1bf6d1426c7b0cb2578593a367cb0bf2c3ca8": {
|
||||
"query": "UPDATE certificates SET priv_key_pem = ?, certificate_pem = ?, updated_at = datetime('now') WHERE lookup_string = ?",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"56b986f2a2b7091d9c3acdd78f75d9842242de1f4da8f3672f2793d9fb256928": {
|
||||
"query": "DELETE FROM tor WHERE package = ?",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"5b114c450073f77f466c980a2541293f30087b57301c379630326e5e5c2fb792": {
|
||||
"query": "REPLACE INTO tor (package, interface, key) VALUES (?, ?, ?)",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"5c47da44b9c84468e95a13fc47301989900f130b3b5899d1ee6664df3ed812ac": {
|
||||
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, ?, ?, NULL, datetime('now'), datetime('now'))",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": {
|
||||
"query": "SELECT password FROM account",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "password",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"63785dc5f193ea31e6f641a910c75857ccd288a3f6e9c4f704331531e4f0689f": {
|
||||
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = ? AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"6440354d73a67c041ea29508b43b5f309d45837a44f1a562051ad540d894c7d6": {
|
||||
"query": "DELETE FROM ssh_keys WHERE fingerprint = ?",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"65e6c3fbb138da5cf385af096fdd3c062b6e826e12a8a4b23e16fcc773004c29": {
|
||||
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < ? ORDER BY id DESC LIMIT ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Int64"
|
||||
},
|
||||
{
|
||||
"name": "package_id",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "created_at",
|
||||
"ordinal": 2,
|
||||
"type_info": "Datetime"
|
||||
},
|
||||
{
|
||||
"name": "code",
|
||||
"ordinal": 3,
|
||||
"type_info": "Int64"
|
||||
},
|
||||
{
|
||||
"name": "level",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "title",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "message",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "data",
|
||||
"ordinal": 7,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
true
|
||||
]
|
||||
}
|
||||
},
|
||||
"668f39c868f90cdbcc635858bac9e55ed73192ed2aec5c52dcfba9800a7a4a41": {
|
||||
"query": "SELECT id AS \"id: u32\", hostname, path, username, password FROM cifs_shares",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id: u32",
|
||||
"ordinal": 0,
|
||||
"type_info": "Int64"
|
||||
},
|
||||
{
|
||||
"name": "hostname",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "path",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "username",
|
||||
"ordinal": 3,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "password",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
true
|
||||
]
|
||||
}
|
||||
},
|
||||
"6b9abc9e079cff975f8a7f07ff70548c7877ecae3be0d0f2d3f439a6713326c0": {
|
||||
"query": "DELETE FROM notifications WHERE id < ?",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"6c96d76bffcc5f03290d8d8544a58521345ed2a843a509b17bbcd6257bb81821": {
|
||||
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "priv_key_pem",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "certificate_pem",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"7d548d2472fa3707bd17364b4800e229b9c2b1c0a22e245bf4e635b9b16b8c24": {
|
||||
"query": "INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (?, ?, ?, datetime('now'), datetime('now'))",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"8595651866e7db772260bd79e19d55b7271fd795b82a99821c935a9237c1aa16": {
|
||||
"query": "SELECT interface, key FROM tor WHERE package = ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "interface",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "key",
|
||||
"ordinal": 1,
|
||||
"type_info": "Blob"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"9496e17a73672ac3675e02efa7c4bf8bd479b866c0d31fa1e3a85ef159310a57": {
|
||||
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "priv_key_pem",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "certificate_pem",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"9fcedab1ba34daa2c6ae97c5953c09821b35b55be75b0c66045ab31a2cf4553e": {
|
||||
"query": "REPLACE INTO account (id, password, tor_key) VALUES (?, ?, ?)",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"a1cbaac36d8e14c8c3e7276237c4824bff18861f91b0b08aa5791704c492acb7": {
|
||||
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, ?, ?, NULL, datetime('now'), datetime('now'))",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"a4e7162322b28508310b9de7ebc891e619b881ff6d3ea09eba13da39626ab12f": {
|
||||
"query": "UPDATE cifs_shares SET hostname = ?, path = ?, username = ?, password = ? WHERE id = ?",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 5
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e": {
|
||||
"query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "fingerprint",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "openssh_pubkey",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "created_at",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false,
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"abfdeea8cd10343b85f647d7abc5dc3bd0b5891101b143485938192ee3b8c907": {
|
||||
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Int64"
|
||||
},
|
||||
{
|
||||
"name": "package_id",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "created_at",
|
||||
"ordinal": 2,
|
||||
"type_info": "Datetime"
|
||||
},
|
||||
{
|
||||
"name": "code",
|
||||
"ordinal": 3,
|
||||
"type_info": "Int64"
|
||||
},
|
||||
{
|
||||
"name": "level",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "title",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "message",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "data",
|
||||
"ordinal": 7,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
true
|
||||
]
|
||||
}
|
||||
},
|
||||
"b376d9e77e0861a9af2d1081ca48d14e83abc5a1546213d15bb570972c403beb": {
|
||||
"query": "-- Add migration script here\nCREATE TABLE IF NOT EXISTS tor\n(\n package TEXT NOT NULL,\n interface TEXT NOT NULL,\n key BLOB NOT NULL CHECK (length(key) = 64),\n PRIMARY KEY (package, interface)\n);\nCREATE TABLE IF NOT EXISTS session\n(\n id TEXT NOT NULL PRIMARY KEY,\n logged_in TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n logged_out TIMESTAMP,\n last_active TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n user_agent TEXT,\n metadata TEXT NOT NULL DEFAULT 'null'\n);\nCREATE TABLE IF NOT EXISTS account\n(\n id INTEGER PRIMARY KEY CHECK (id = 0),\n password TEXT NOT NULL,\n tor_key BLOB NOT NULL CHECK (length(tor_key) = 64)\n);\nCREATE TABLE IF NOT EXISTS ssh_keys\n(\n fingerprint TEXT NOT NULL,\n openssh_pubkey TEXT NOT NULL,\n created_at TEXT NOT NULL,\n PRIMARY KEY (fingerprint)\n);\nCREATE TABLE IF NOT EXISTS certificates\n(\n id INTEGER PRIMARY KEY, -- Root = 0, Int = 1, Other = 2..\n priv_key_pem TEXT NOT NULL,\n certificate_pem TEXT NOT NULL,\n lookup_string TEXT UNIQUE,\n created_at TEXT,\n updated_at TEXT\n);\nCREATE TABLE IF NOT EXISTS notifications\n(\n id INTEGER PRIMARY KEY,\n package_id TEXT,\n created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n code INTEGER NOT NULL,\n level TEXT NOT NULL,\n title TEXT NOT NULL,\n message TEXT NOT NULL,\n data TEXT\n);\nCREATE TABLE IF NOT EXISTS cifs_shares\n(\n id INTEGER PRIMARY KEY,\n hostname TEXT NOT NULL,\n path TEXT NOT NULL,\n username TEXT NOT NULL,\n password TEXT\n);",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"cc33fe2958fe7caeac6999a217f918a68b45ad596664170b4d07671c6ea49566": {
|
||||
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "hostname",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "path",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "username",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "password",
|
||||
"ordinal": 3,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
true
|
||||
]
|
||||
}
|
||||
},
|
||||
"d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": {
|
||||
"query": "SELECT openssh_pubkey FROM ssh_keys",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "openssh_pubkey",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"d54bd5b53f8c760e1f8cde604aa8b1bdc66e4e025a636bc44ffbcd788b5168fd": {
|
||||
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES (?, ?, ?, ?, ?, ?)",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 6
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"d79d608ceb862c15b741a6040044c6dd54a837a3a0c5594d15a6041c7bc68ea8": {
|
||||
"query": "INSERT OR IGNORE INTO tor (package, interface, key) VALUES (?, ?, ?)",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
},
|
||||
"de2a5e90798d606047ab8180c044baac05469c0cdf151316bd58ee8c7196fdef": {
|
||||
"query": "SELECT * FROM ssh_keys WHERE fingerprint = ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "fingerprint",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "openssh_pubkey",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "created_at",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false,
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"ed848affa5bf92997cd441e3a50b3616b6724df3884bd9d199b3225e0bea8a54": {
|
||||
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "priv_key_pem",
|
||||
"ordinal": 0,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "certificate_pem",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false
|
||||
]
|
||||
}
|
||||
},
|
||||
"f63c8c5a8754b34a49ef5d67802fa2b72aa409bbec92ecc6901492092974b71a": {
|
||||
"query": "DELETE FROM cifs_shares WHERE id = ?",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": []
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,354 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::net::Ipv4Addr;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use bollard::container::RemoveContainerOptions;
|
||||
use futures::future::Either as EitherFuture;
|
||||
use nix::sys::signal;
|
||||
use nix::unistd::Pid;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::id::{Id, ImageId};
|
||||
use crate::s9pk::manifest::{PackageId, SYSTEM_PACKAGE_ID};
|
||||
use crate::util::serde::{Duration as SerdeDuration, IoFormat};
|
||||
use crate::util::Version;
|
||||
use crate::volume::{VolumeId, Volumes};
|
||||
use crate::{Error, ResultExt, HOST_IP};
|
||||
|
||||
pub const NET_TLD: &str = "embassy";
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
pub static ref SYSTEM_IMAGES: BTreeSet<ImageId> = {
|
||||
let mut set = BTreeSet::new();
|
||||
|
||||
set.insert("compat".parse().unwrap());
|
||||
set.insert("utils".parse().unwrap());
|
||||
|
||||
set
|
||||
};
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct DockerAction {
|
||||
pub image: ImageId,
|
||||
#[serde(default)]
|
||||
pub system: bool,
|
||||
pub entrypoint: String,
|
||||
#[serde(default)]
|
||||
pub args: Vec<String>,
|
||||
#[serde(default)]
|
||||
pub mounts: BTreeMap<VolumeId, PathBuf>,
|
||||
#[serde(default)]
|
||||
pub io_format: Option<IoFormat>,
|
||||
#[serde(default)]
|
||||
pub inject: bool,
|
||||
#[serde(default)]
|
||||
pub shm_size_mb: Option<usize>, // TODO: use postfix sizing? like 1k vs 1m vs 1g
|
||||
#[serde(default)]
|
||||
pub sigterm_timeout: Option<SerdeDuration>,
|
||||
}
|
||||
impl DockerAction {
|
||||
pub fn validate(
|
||||
&self,
|
||||
volumes: &Volumes,
|
||||
image_ids: &BTreeSet<ImageId>,
|
||||
expected_io: bool,
|
||||
) -> Result<(), color_eyre::eyre::Report> {
|
||||
for (volume, _) in &self.mounts {
|
||||
if !volumes.contains_key(volume) && !matches!(&volume, &VolumeId::Backup) {
|
||||
color_eyre::eyre::bail!("unknown volume: {}", volume);
|
||||
}
|
||||
}
|
||||
if self.system {
|
||||
if !SYSTEM_IMAGES.contains(&self.image) {
|
||||
color_eyre::eyre::bail!("unknown system image: {}", self.image);
|
||||
}
|
||||
} else {
|
||||
if !image_ids.contains(&self.image) {
|
||||
color_eyre::eyre::bail!("image for {} not contained in package", self.image);
|
||||
}
|
||||
}
|
||||
if expected_io && self.io_format.is_none() {
|
||||
color_eyre::eyre::bail!("expected io-format");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, input))]
|
||||
pub async fn execute<I: Serialize, O: for<'de> Deserialize<'de>>(
|
||||
&self,
|
||||
ctx: &RpcContext,
|
||||
pkg_id: &PackageId,
|
||||
pkg_version: &Version,
|
||||
name: Option<&str>,
|
||||
volumes: &Volumes,
|
||||
input: Option<I>,
|
||||
allow_inject: bool,
|
||||
timeout: Option<Duration>,
|
||||
) -> Result<Result<O, (i32, String)>, Error> {
|
||||
let mut cmd = tokio::process::Command::new("docker");
|
||||
if self.inject && allow_inject {
|
||||
cmd.arg("exec");
|
||||
} else {
|
||||
let container_name = Self::container_name(pkg_id, name);
|
||||
cmd.arg("run")
|
||||
.arg("--rm")
|
||||
.arg("--network=start9")
|
||||
.arg(format!("--add-host=embassy:{}", Ipv4Addr::from(HOST_IP)))
|
||||
.arg("--name")
|
||||
.arg(&container_name)
|
||||
.arg(format!("--hostname={}", &container_name))
|
||||
.arg("--no-healthcheck");
|
||||
match ctx
|
||||
.docker
|
||||
.remove_container(
|
||||
&container_name,
|
||||
Some(RemoveContainerOptions {
|
||||
v: false,
|
||||
force: true,
|
||||
link: false,
|
||||
}),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(()) | Err(bollard::errors::Error::DockerResponseNotFoundError { .. }) => Ok(()),
|
||||
Err(e) => Err(e),
|
||||
}?;
|
||||
}
|
||||
cmd.args(
|
||||
self.docker_args(ctx, pkg_id, pkg_version, volumes, allow_inject)
|
||||
.await,
|
||||
);
|
||||
let input_buf = if let (Some(input), Some(format)) = (&input, &self.io_format) {
|
||||
cmd.stdin(std::process::Stdio::piped());
|
||||
Some(format.to_vec(input)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
cmd.stdout(std::process::Stdio::piped());
|
||||
cmd.stderr(std::process::Stdio::piped());
|
||||
tracing::trace!(
|
||||
"{}",
|
||||
format!("{:?}", cmd)
|
||||
.split(r#"" ""#)
|
||||
.collect::<Vec<&str>>()
|
||||
.join(" ")
|
||||
);
|
||||
let mut handle = cmd.spawn().with_kind(crate::ErrorKind::Docker)?;
|
||||
let id = handle.id();
|
||||
let timeout_fut = if let Some(timeout) = timeout {
|
||||
EitherFuture::Right(async move {
|
||||
tokio::time::sleep(timeout).await;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
} else {
|
||||
EitherFuture::Left(futures::future::pending::<Result<_, Error>>())
|
||||
};
|
||||
if let (Some(input), Some(mut stdin)) = (&input_buf, handle.stdin.take()) {
|
||||
use tokio::io::AsyncWriteExt;
|
||||
stdin
|
||||
.write_all(input)
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::Docker)?;
|
||||
stdin.flush().await?;
|
||||
stdin.shutdown().await?;
|
||||
drop(stdin);
|
||||
}
|
||||
enum Race<T> {
|
||||
Done(T),
|
||||
TimedOut,
|
||||
}
|
||||
let res = tokio::select! {
|
||||
res = handle.wait_with_output() => Race::Done(res.with_kind(crate::ErrorKind::Docker)?),
|
||||
res = timeout_fut => {
|
||||
res?;
|
||||
Race::TimedOut
|
||||
},
|
||||
};
|
||||
let res = match res {
|
||||
Race::Done(x) => x,
|
||||
Race::TimedOut => {
|
||||
if let Some(id) = id {
|
||||
signal::kill(Pid::from_raw(id as i32), signal::SIGKILL)
|
||||
.with_kind(crate::ErrorKind::Docker)?;
|
||||
}
|
||||
return Ok(Err((143, "Timed out. Retrying soon...".to_owned())));
|
||||
}
|
||||
};
|
||||
Ok(if res.status.success() || res.status.code() == Some(143) {
|
||||
Ok(if let Some(format) = self.io_format {
|
||||
match format.from_slice(&res.stdout) {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
tracing::warn!(
|
||||
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
|
||||
format,
|
||||
e
|
||||
);
|
||||
serde_json::from_value(String::from_utf8(res.stdout)?.into())
|
||||
.with_kind(crate::ErrorKind::Deserialization)?
|
||||
}
|
||||
}
|
||||
} else if res.stdout.is_empty() {
|
||||
serde_json::from_value(Value::Null).with_kind(crate::ErrorKind::Deserialization)?
|
||||
} else {
|
||||
serde_json::from_value(String::from_utf8(res.stdout)?.into())
|
||||
.with_kind(crate::ErrorKind::Deserialization)?
|
||||
})
|
||||
} else {
|
||||
Err((
|
||||
res.status.code().unwrap_or_default(),
|
||||
String::from_utf8(res.stderr)?,
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, input))]
|
||||
pub async fn sandboxed<I: Serialize, O: for<'de> Deserialize<'de>>(
|
||||
&self,
|
||||
ctx: &RpcContext,
|
||||
pkg_id: &PackageId,
|
||||
pkg_version: &Version,
|
||||
volumes: &Volumes,
|
||||
input: Option<I>,
|
||||
timeout: Option<Duration>,
|
||||
) -> Result<Result<O, (i32, String)>, Error> {
|
||||
let mut cmd = tokio::process::Command::new("docker");
|
||||
cmd.arg("run").arg("--rm").arg("--network=none");
|
||||
cmd.args(
|
||||
self.docker_args(ctx, pkg_id, pkg_version, &volumes.to_readonly(), false)
|
||||
.await,
|
||||
);
|
||||
let input_buf = if let (Some(input), Some(format)) = (&input, &self.io_format) {
|
||||
cmd.stdin(std::process::Stdio::piped());
|
||||
Some(format.to_vec(input)?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
cmd.stdout(std::process::Stdio::piped());
|
||||
cmd.stderr(std::process::Stdio::piped());
|
||||
let mut handle = cmd.spawn().with_kind(crate::ErrorKind::Docker)?;
|
||||
if let (Some(input), Some(stdin)) = (&input_buf, &mut handle.stdin) {
|
||||
use tokio::io::AsyncWriteExt;
|
||||
stdin
|
||||
.write_all(input)
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::Docker)?;
|
||||
}
|
||||
let res = handle
|
||||
.wait_with_output()
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::Docker)?;
|
||||
Ok(if res.status.success() || res.status.code() == Some(143) {
|
||||
Ok(if let Some(format) = &self.io_format {
|
||||
match format.from_slice(&res.stdout) {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
tracing::warn!(
|
||||
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
|
||||
format,
|
||||
e
|
||||
);
|
||||
serde_json::from_value(String::from_utf8(res.stdout)?.into())
|
||||
.with_kind(crate::ErrorKind::Deserialization)?
|
||||
}
|
||||
}
|
||||
} else if res.stdout.is_empty() {
|
||||
serde_json::from_value(Value::Null).with_kind(crate::ErrorKind::Deserialization)?
|
||||
} else {
|
||||
serde_json::from_value(String::from_utf8(res.stdout)?.into())
|
||||
.with_kind(crate::ErrorKind::Deserialization)?
|
||||
})
|
||||
} else {
|
||||
Err((
|
||||
res.status.code().unwrap_or_default(),
|
||||
String::from_utf8(res.stderr)?,
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
pub fn container_name(pkg_id: &PackageId, name: Option<&str>) -> String {
|
||||
if let Some(name) = name {
|
||||
format!("{}_{}.{}", pkg_id, name, NET_TLD)
|
||||
} else {
|
||||
format!("{}.{}", pkg_id, NET_TLD)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn uncontainer_name(name: &str) -> Option<(PackageId<&str>, Option<&str>)> {
|
||||
let (pre_tld, _) = name.split_once(".")?;
|
||||
if pre_tld.contains('_') {
|
||||
let (pkg, name) = name.split_once("_")?;
|
||||
Some((Id::try_from(pkg).ok()?.into(), Some(name)))
|
||||
} else {
|
||||
Some((Id::try_from(pre_tld).ok()?.into(), None))
|
||||
}
|
||||
}
|
||||
|
||||
async fn docker_args(
|
||||
&self,
|
||||
ctx: &RpcContext,
|
||||
pkg_id: &PackageId,
|
||||
pkg_version: &Version,
|
||||
volumes: &Volumes,
|
||||
allow_inject: bool,
|
||||
) -> Vec<Cow<'_, OsStr>> {
|
||||
let mut res = Vec::with_capacity(
|
||||
(2 * self.mounts.len()) // --mount <MOUNT_ARG>
|
||||
+ (2 * self.shm_size_mb.is_some() as usize) // --shm-size <SHM_SIZE>
|
||||
+ 5 // --interactive --log-driver=journald --entrypoint <ENTRYPOINT> <IMAGE>
|
||||
+ self.args.len(), // [ARG...]
|
||||
);
|
||||
for (volume_id, dst) in &self.mounts {
|
||||
let volume = if let Some(v) = volumes.get(volume_id) {
|
||||
v
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
let src = volume.path_for(ctx, pkg_id, pkg_version, volume_id);
|
||||
if let Err(e) = tokio::fs::metadata(&src).await {
|
||||
tracing::warn!("{} not mounted to container: {}", src.display(), e);
|
||||
continue;
|
||||
}
|
||||
res.push(OsStr::new("--mount").into());
|
||||
res.push(
|
||||
OsString::from(format!(
|
||||
"type=bind,src={},dst={}{}",
|
||||
src.display(),
|
||||
dst.display(),
|
||||
if volume.readonly() { ",readonly" } else { "" }
|
||||
))
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if let Some(shm_size_mb) = self.shm_size_mb {
|
||||
res.push(OsStr::new("--shm-size").into());
|
||||
res.push(OsString::from(format!("{}m", shm_size_mb)).into());
|
||||
}
|
||||
res.push(OsStr::new("--interactive").into());
|
||||
if self.inject && allow_inject {
|
||||
res.push(OsString::from(Self::container_name(pkg_id, None)).into());
|
||||
res.push(OsStr::new(&self.entrypoint).into());
|
||||
} else {
|
||||
res.push(OsStr::new("--log-driver=journald").into());
|
||||
res.push(OsStr::new("--entrypoint").into());
|
||||
res.push(OsStr::new(&self.entrypoint).into());
|
||||
if self.system {
|
||||
res.push(OsString::from(self.image.for_package(SYSTEM_PACKAGE_ID, None)).into());
|
||||
} else {
|
||||
res.push(OsString::from(self.image.for_package(pkg_id, Some(pkg_version))).into());
|
||||
}
|
||||
}
|
||||
res.extend(self.args.iter().map(|s| OsStr::new(s).into()));
|
||||
|
||||
res
|
||||
}
|
||||
}
|
||||
@@ -1,290 +0,0 @@
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::ArgMatches;
|
||||
use color_eyre::eyre::eyre;
|
||||
use indexmap::IndexSet;
|
||||
use patch_db::HasModel;
|
||||
use rpc_toolkit::command;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::instrument;
|
||||
|
||||
use self::docker::DockerAction;
|
||||
use crate::config::{Config, ConfigSpec};
|
||||
use crate::context::RpcContext;
|
||||
use crate::id::{Id, ImageId, InvalidId};
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
|
||||
use crate::util::Version;
|
||||
use crate::volume::Volumes;
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
pub mod docker;
|
||||
|
||||
// TODO: create RPC endpoint that looks up the appropriate action and calls `execute`
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
|
||||
pub struct ActionId<S: AsRef<str> = String>(Id<S>);
|
||||
impl FromStr for ActionId {
|
||||
type Err = InvalidId;
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Ok(ActionId(Id::try_from(s.to_owned())?))
|
||||
}
|
||||
}
|
||||
impl From<ActionId> for String {
|
||||
fn from(value: ActionId) -> Self {
|
||||
value.0.into()
|
||||
}
|
||||
}
|
||||
impl<S: AsRef<str>> AsRef<ActionId<S>> for ActionId<S> {
|
||||
fn as_ref(&self) -> &ActionId<S> {
|
||||
self
|
||||
}
|
||||
}
|
||||
impl<S: AsRef<str>> std::fmt::Display for ActionId<S> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", &self.0)
|
||||
}
|
||||
}
|
||||
impl<S: AsRef<str>> AsRef<str> for ActionId<S> {
|
||||
fn as_ref(&self) -> &str {
|
||||
self.0.as_ref()
|
||||
}
|
||||
}
|
||||
impl<S: AsRef<str>> AsRef<Path> for ActionId<S> {
|
||||
fn as_ref(&self) -> &Path {
|
||||
self.0.as_ref().as_ref()
|
||||
}
|
||||
}
|
||||
impl<'de, S> Deserialize<'de> for ActionId<S>
|
||||
where
|
||||
S: AsRef<str>,
|
||||
Id<S>: Deserialize<'de>,
|
||||
{
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::de::Deserializer<'de>,
|
||||
{
|
||||
Ok(ActionId(Deserialize::deserialize(deserializer)?))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
|
||||
pub struct Actions(pub BTreeMap<ActionId, Action>);
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(tag = "version")]
|
||||
pub enum ActionResult {
|
||||
#[serde(rename = "0")]
|
||||
V0(ActionResultV0),
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ActionResultV0 {
|
||||
pub message: String,
|
||||
pub value: Option<String>,
|
||||
pub copyable: bool,
|
||||
pub qr: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum DockerStatus {
|
||||
Running,
|
||||
Stopped,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct Action {
|
||||
pub name: String,
|
||||
pub description: String,
|
||||
#[serde(default)]
|
||||
pub warning: Option<String>,
|
||||
pub implementation: ActionImplementation,
|
||||
pub allowed_statuses: IndexSet<DockerStatus>,
|
||||
#[serde(default)]
|
||||
pub input_spec: ConfigSpec,
|
||||
}
|
||||
impl Action {
|
||||
#[instrument]
|
||||
pub fn validate(&self, volumes: &Volumes, image_ids: &BTreeSet<ImageId>) -> Result<(), Error> {
|
||||
self.implementation
|
||||
.validate(volumes, image_ids, true)
|
||||
.with_ctx(|_| {
|
||||
(
|
||||
crate::ErrorKind::ValidateS9pk,
|
||||
format!("Action {}", self.name),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx))]
|
||||
pub async fn execute(
|
||||
&self,
|
||||
ctx: &RpcContext,
|
||||
pkg_id: &PackageId,
|
||||
pkg_version: &Version,
|
||||
action_id: &ActionId,
|
||||
volumes: &Volumes,
|
||||
input: Option<Config>,
|
||||
) -> Result<ActionResult, Error> {
|
||||
if let Some(ref input) = input {
|
||||
self.input_spec
|
||||
.matches(&input)
|
||||
.with_kind(crate::ErrorKind::ConfigSpecViolation)?;
|
||||
}
|
||||
self.implementation
|
||||
.execute(
|
||||
ctx,
|
||||
pkg_id,
|
||||
pkg_version,
|
||||
Some(&format!("{}Action", action_id)),
|
||||
volumes,
|
||||
input,
|
||||
true,
|
||||
None,
|
||||
)
|
||||
.await?
|
||||
.map_err(|e| Error::new(eyre!("{}", e.1), crate::ErrorKind::Action))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[serde(tag = "type")]
|
||||
pub enum ActionImplementation {
|
||||
Docker(DockerAction),
|
||||
}
|
||||
impl ActionImplementation {
|
||||
#[instrument]
|
||||
pub fn validate(
|
||||
&self,
|
||||
volumes: &Volumes,
|
||||
image_ids: &BTreeSet<ImageId>,
|
||||
expected_io: bool,
|
||||
) -> Result<(), color_eyre::eyre::Report> {
|
||||
match self {
|
||||
ActionImplementation::Docker(action) => {
|
||||
action.validate(volumes, image_ids, expected_io)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, input))]
|
||||
pub async fn execute<I: Serialize, O: for<'de> Deserialize<'de>>(
|
||||
&self,
|
||||
ctx: &RpcContext,
|
||||
pkg_id: &PackageId,
|
||||
pkg_version: &Version,
|
||||
name: Option<&str>,
|
||||
volumes: &Volumes,
|
||||
input: Option<I>,
|
||||
allow_inject: bool,
|
||||
timeout: Option<Duration>,
|
||||
) -> Result<Result<O, (i32, String)>, Error> {
|
||||
match self {
|
||||
ActionImplementation::Docker(action) => {
|
||||
action
|
||||
.execute(
|
||||
ctx,
|
||||
pkg_id,
|
||||
pkg_version,
|
||||
name,
|
||||
volumes,
|
||||
input,
|
||||
allow_inject,
|
||||
timeout,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
#[instrument(skip(ctx, input))]
|
||||
pub async fn sandboxed<I: Serialize, O: for<'de> Deserialize<'de>>(
|
||||
&self,
|
||||
ctx: &RpcContext,
|
||||
pkg_id: &PackageId,
|
||||
pkg_version: &Version,
|
||||
volumes: &Volumes,
|
||||
input: Option<I>,
|
||||
timeout: Option<Duration>,
|
||||
) -> Result<Result<O, (i32, String)>, Error> {
|
||||
match self {
|
||||
ActionImplementation::Docker(action) => {
|
||||
action
|
||||
.sandboxed(ctx, pkg_id, pkg_version, volumes, input, timeout)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn display_action_result(action_result: ActionResult, matches: &ArgMatches<'_>) {
|
||||
if matches.is_present("format") {
|
||||
return display_serializable(action_result, matches);
|
||||
}
|
||||
match action_result {
|
||||
ActionResult::V0(ar) => {
|
||||
println!(
|
||||
"{}: {}",
|
||||
ar.message,
|
||||
serde_json::to_string(&ar.value).unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[command(about = "Executes an action", display(display_action_result))]
|
||||
#[instrument(skip(ctx))]
|
||||
pub async fn action(
|
||||
#[context] ctx: RpcContext,
|
||||
#[arg(rename = "id")] pkg_id: PackageId,
|
||||
#[arg(rename = "action-id")] action_id: ActionId,
|
||||
#[arg(stdin, parse(parse_stdin_deserializable))] input: Option<Config>,
|
||||
#[allow(unused_variables)]
|
||||
#[arg(long = "format")]
|
||||
format: Option<IoFormat>,
|
||||
) -> Result<ActionResult, Error> {
|
||||
let mut db = ctx.db.handle();
|
||||
let manifest = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&pkg_id)
|
||||
.and_then(|p| p.installed())
|
||||
.expect(&mut db)
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::NotFound)?
|
||||
.manifest()
|
||||
.get(&mut db, true)
|
||||
.await?
|
||||
.to_owned();
|
||||
if let Some(action) = manifest.actions.0.get(&action_id) {
|
||||
action
|
||||
.execute(
|
||||
&ctx,
|
||||
&manifest.id,
|
||||
&manifest.version,
|
||||
&action_id,
|
||||
&manifest.volumes,
|
||||
input,
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
Err(Error::new(
|
||||
eyre!("Action not found in manifest"),
|
||||
crate::ErrorKind::NotFound,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct NoOutput;
|
||||
impl<'de> Deserialize<'de> for NoOutput {
|
||||
fn deserialize<D>(_: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
Ok(NoOutput)
|
||||
}
|
||||
}
|
||||
@@ -1,397 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use chrono::Utc;
|
||||
use color_eyre::eyre::eyre;
|
||||
use openssl::pkey::{PKey, Private};
|
||||
use openssl::x509::X509;
|
||||
use patch_db::{DbHandle, LockType, PatchDbHandle, Revision};
|
||||
use rpc_toolkit::command;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use torut::onion::TorSecretKeyV3;
|
||||
use tracing::instrument;
|
||||
|
||||
use super::target::BackupTargetId;
|
||||
use super::PackageBackupReport;
|
||||
use crate::auth::check_password_against_db;
|
||||
use crate::backup::{BackupReport, ServerBackupReport};
|
||||
use crate::context::RpcContext;
|
||||
use crate::db::util::WithRevision;
|
||||
use crate::disk::mount::backup::BackupMountGuard;
|
||||
use crate::disk::mount::filesystem::ReadWrite;
|
||||
use crate::disk::mount::guard::TmpMountGuard;
|
||||
use crate::notifications::NotificationLevel;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::status::MainStatus;
|
||||
use crate::util::serde::IoFormat;
|
||||
use crate::util::{display_none, AtomicFile};
|
||||
use crate::version::VersionT;
|
||||
use crate::Error;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct OsBackup {
|
||||
pub tor_key: TorSecretKeyV3,
|
||||
pub root_ca_key: PKey<Private>,
|
||||
pub root_ca_cert: X509,
|
||||
pub ui: Value,
|
||||
}
|
||||
impl<'de> Deserialize<'de> for OsBackup {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
#[derive(Deserialize)]
|
||||
#[serde(rename = "kebab-case")]
|
||||
struct OsBackupDe {
|
||||
tor_key: String,
|
||||
root_ca_key: String,
|
||||
root_ca_cert: String,
|
||||
ui: Value,
|
||||
}
|
||||
let int = OsBackupDe::deserialize(deserializer)?;
|
||||
let key_vec = base32::decode(base32::Alphabet::RFC4648 { padding: true }, &int.tor_key)
|
||||
.ok_or_else(|| {
|
||||
serde::de::Error::invalid_value(
|
||||
serde::de::Unexpected::Str(&int.tor_key),
|
||||
&"an RFC4648 encoded string",
|
||||
)
|
||||
})?;
|
||||
if key_vec.len() != 64 {
|
||||
return Err(serde::de::Error::invalid_value(
|
||||
serde::de::Unexpected::Str(&int.tor_key),
|
||||
&"a 64 byte value encoded as an RFC4648 string",
|
||||
));
|
||||
}
|
||||
let mut key_slice = [0; 64];
|
||||
key_slice.clone_from_slice(&key_vec);
|
||||
Ok(OsBackup {
|
||||
tor_key: TorSecretKeyV3::from(key_slice),
|
||||
root_ca_key: PKey::<Private>::private_key_from_pem(int.root_ca_key.as_bytes())
|
||||
.map_err(serde::de::Error::custom)?,
|
||||
root_ca_cert: X509::from_pem(int.root_ca_cert.as_bytes())
|
||||
.map_err(serde::de::Error::custom)?,
|
||||
ui: int.ui,
|
||||
})
|
||||
}
|
||||
}
|
||||
impl Serialize for OsBackup {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename = "kebab-case")]
|
||||
struct OsBackupSer<'a> {
|
||||
tor_key: String,
|
||||
root_ca_key: String,
|
||||
root_ca_cert: String,
|
||||
ui: &'a Value,
|
||||
}
|
||||
OsBackupSer {
|
||||
tor_key: base32::encode(
|
||||
base32::Alphabet::RFC4648 { padding: true },
|
||||
&self.tor_key.as_bytes(),
|
||||
),
|
||||
root_ca_key: String::from_utf8(
|
||||
self.root_ca_key
|
||||
.private_key_to_pem_pkcs8()
|
||||
.map_err(serde::ser::Error::custom)?,
|
||||
)
|
||||
.map_err(serde::ser::Error::custom)?,
|
||||
root_ca_cert: String::from_utf8(
|
||||
self.root_ca_cert
|
||||
.to_pem()
|
||||
.map_err(serde::ser::Error::custom)?,
|
||||
)
|
||||
.map_err(serde::ser::Error::custom)?,
|
||||
ui: &self.ui,
|
||||
}
|
||||
.serialize(serializer)
|
||||
}
|
||||
}
|
||||
|
||||
#[command(rename = "create", display(display_none))]
|
||||
#[instrument(skip(ctx, old_password, password))]
|
||||
pub async fn backup_all(
|
||||
#[context] ctx: RpcContext,
|
||||
#[arg(rename = "target-id")] target_id: BackupTargetId,
|
||||
#[arg(rename = "old-password", long = "old-password")] old_password: Option<String>,
|
||||
#[arg] password: String,
|
||||
) -> Result<WithRevision<()>, Error> {
|
||||
let mut db = ctx.db.handle();
|
||||
check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?;
|
||||
let fs = target_id
|
||||
.load(&mut ctx.secret_store.acquire().await?)
|
||||
.await?;
|
||||
let mut backup_guard = BackupMountGuard::mount(
|
||||
TmpMountGuard::mount(&fs, ReadWrite).await?,
|
||||
old_password.as_ref().unwrap_or(&password),
|
||||
)
|
||||
.await?;
|
||||
if old_password.is_some() {
|
||||
backup_guard.change_password(&password)?;
|
||||
}
|
||||
let revision = assure_backing_up(&mut db).await?;
|
||||
tokio::task::spawn(async move {
|
||||
let backup_res = perform_backup(&ctx, &mut db, backup_guard).await;
|
||||
let status_model = crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.status_info()
|
||||
.backing_up();
|
||||
status_model
|
||||
.clone()
|
||||
.lock(&mut db, LockType::Write)
|
||||
.await
|
||||
.expect("failed to lock server status");
|
||||
match backup_res {
|
||||
Ok(report) if report.iter().all(|(_, rep)| rep.error.is_none()) => ctx
|
||||
.notification_manager
|
||||
.notify(
|
||||
&mut db,
|
||||
None,
|
||||
NotificationLevel::Success,
|
||||
"Backup Complete".to_owned(),
|
||||
"Your backup has completed".to_owned(),
|
||||
BackupReport {
|
||||
server: ServerBackupReport {
|
||||
attempted: true,
|
||||
error: None,
|
||||
},
|
||||
packages: report,
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.expect("failed to send notification"),
|
||||
Ok(report) => ctx
|
||||
.notification_manager
|
||||
.notify(
|
||||
&mut db,
|
||||
None,
|
||||
NotificationLevel::Warning,
|
||||
"Backup Complete".to_owned(),
|
||||
"Your backup has completed, but some package(s) failed to backup".to_owned(),
|
||||
BackupReport {
|
||||
server: ServerBackupReport {
|
||||
attempted: true,
|
||||
error: None,
|
||||
},
|
||||
packages: report,
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.expect("failed to send notification"),
|
||||
Err(e) => {
|
||||
tracing::error!("Backup Failed: {}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
ctx.notification_manager
|
||||
.notify(
|
||||
&mut db,
|
||||
None,
|
||||
NotificationLevel::Error,
|
||||
"Backup Failed".to_owned(),
|
||||
"Your backup failed to complete.".to_owned(),
|
||||
BackupReport {
|
||||
server: ServerBackupReport {
|
||||
attempted: true,
|
||||
error: Some(e.to_string()),
|
||||
},
|
||||
packages: BTreeMap::new(),
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.expect("failed to send notification");
|
||||
}
|
||||
}
|
||||
status_model
|
||||
.put(&mut db, &false)
|
||||
.await
|
||||
.expect("failed to change server status");
|
||||
});
|
||||
Ok(WithRevision {
|
||||
response: (),
|
||||
revision,
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(db))]
|
||||
async fn assure_backing_up(db: &mut PatchDbHandle) -> Result<Option<Arc<Revision>>, Error> {
|
||||
let mut tx = db.begin().await?;
|
||||
let mut backing_up = crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.status_info()
|
||||
.backing_up()
|
||||
.get_mut(&mut tx)
|
||||
.await?;
|
||||
|
||||
if *backing_up {
|
||||
return Err(Error::new(
|
||||
eyre!("Server is already backing up!"),
|
||||
crate::ErrorKind::InvalidRequest,
|
||||
));
|
||||
}
|
||||
*backing_up = true;
|
||||
backing_up.save(&mut tx).await?;
|
||||
Ok(tx.commit(None).await?)
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, db, backup_guard))]
|
||||
async fn perform_backup<Db: DbHandle>(
|
||||
ctx: &RpcContext,
|
||||
mut db: Db,
|
||||
mut backup_guard: BackupMountGuard<TmpMountGuard>,
|
||||
) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> {
|
||||
let mut backup_report = BTreeMap::new();
|
||||
|
||||
for package_id in crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.keys(&mut db, false)
|
||||
.await?
|
||||
{
|
||||
let mut tx = db.begin().await?; // for lock scope
|
||||
let installed_model = if let Some(installed_model) = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&package_id)
|
||||
.and_then(|m| m.installed())
|
||||
.check(&mut tx)
|
||||
.await?
|
||||
{
|
||||
installed_model
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
let main_status_model = installed_model.clone().status().main();
|
||||
|
||||
main_status_model.lock(&mut tx, LockType::Write).await?;
|
||||
let (started, health) = match main_status_model.get(&mut tx, true).await?.into_owned() {
|
||||
MainStatus::Starting => (Some(Utc::now()), Default::default()),
|
||||
MainStatus::Running { started, health } => (Some(started), health.clone()),
|
||||
MainStatus::Stopped | MainStatus::Stopping => (None, Default::default()),
|
||||
MainStatus::BackingUp { .. } => {
|
||||
backup_report.insert(
|
||||
package_id,
|
||||
PackageBackupReport {
|
||||
error: Some(
|
||||
"Can't do backup because service is in a backing up state".to_owned(),
|
||||
),
|
||||
},
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
main_status_model
|
||||
.put(
|
||||
&mut tx,
|
||||
&MainStatus::BackingUp {
|
||||
started,
|
||||
health: health.clone(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
tx.save().await?; // drop locks
|
||||
|
||||
let manifest = installed_model
|
||||
.clone()
|
||||
.manifest()
|
||||
.get(&mut db, false)
|
||||
.await?;
|
||||
|
||||
ctx.managers
|
||||
.get(&(manifest.id.clone(), manifest.version.clone()))
|
||||
.await
|
||||
.ok_or_else(|| {
|
||||
Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest)
|
||||
})?
|
||||
.synchronize()
|
||||
.await;
|
||||
|
||||
let mut tx = db.begin().await?;
|
||||
|
||||
installed_model.lock(&mut tx, LockType::Write).await?;
|
||||
|
||||
let guard = backup_guard.mount_package_backup(&package_id).await?;
|
||||
let res = manifest
|
||||
.backup
|
||||
.create(
|
||||
ctx,
|
||||
&package_id,
|
||||
&manifest.title,
|
||||
&manifest.version,
|
||||
&manifest.interfaces,
|
||||
&manifest.volumes,
|
||||
)
|
||||
.await;
|
||||
guard.unmount().await?;
|
||||
backup_report.insert(
|
||||
package_id.clone(),
|
||||
PackageBackupReport {
|
||||
error: res.as_ref().err().map(|e| e.to_string()),
|
||||
},
|
||||
);
|
||||
|
||||
if let Ok(pkg_meta) = res {
|
||||
installed_model
|
||||
.last_backup()
|
||||
.put(&mut tx, &Some(pkg_meta.timestamp))
|
||||
.await?;
|
||||
backup_guard
|
||||
.metadata
|
||||
.package_backups
|
||||
.insert(package_id, pkg_meta);
|
||||
}
|
||||
|
||||
main_status_model
|
||||
.put(
|
||||
&mut tx,
|
||||
&match started {
|
||||
Some(started) => MainStatus::Running { started, health },
|
||||
None => MainStatus::Stopped,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
tx.save().await?;
|
||||
}
|
||||
|
||||
crate::db::DatabaseModel::new()
|
||||
.lock(&mut db, LockType::Write)
|
||||
.await?;
|
||||
|
||||
let (root_ca_key, root_ca_cert) = ctx.net_controller.ssl.export_root_ca().await?;
|
||||
let mut os_backup_file = AtomicFile::new(backup_guard.as_ref().join("os-backup.cbor")).await?;
|
||||
os_backup_file
|
||||
.write_all(
|
||||
&IoFormat::Cbor.to_vec(&OsBackup {
|
||||
tor_key: ctx.net_controller.tor.embassyd_tor_key().await,
|
||||
root_ca_key,
|
||||
root_ca_cert,
|
||||
ui: crate::db::DatabaseModel::new()
|
||||
.ui()
|
||||
.get(&mut db, true)
|
||||
.await?
|
||||
.into_owned(),
|
||||
})?,
|
||||
)
|
||||
.await?;
|
||||
os_backup_file.save().await?;
|
||||
|
||||
let timestamp = Some(Utc::now());
|
||||
|
||||
backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into();
|
||||
backup_guard.unencrypted_metadata.full = true;
|
||||
backup_guard.metadata.version = crate::version::Current::new().semver().into();
|
||||
backup_guard.metadata.timestamp = timestamp;
|
||||
|
||||
backup_guard.save_and_unmount().await?;
|
||||
|
||||
crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.last_backup()
|
||||
.put(&mut db, ×tamp)
|
||||
.await?;
|
||||
|
||||
Ok(backup_report)
|
||||
}
|
||||
@@ -1,425 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::ArgMatches;
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::FutureExt;
|
||||
use openssl::x509::X509;
|
||||
use patch_db::{DbHandle, PatchDbHandle, Revision};
|
||||
use rpc_toolkit::command;
|
||||
use tokio::fs::File;
|
||||
use tokio::task::JoinHandle;
|
||||
use torut::onion::OnionAddressV3;
|
||||
use tracing::instrument;
|
||||
|
||||
use super::target::BackupTargetId;
|
||||
use crate::backup::backup_bulk::OsBackup;
|
||||
use crate::context::{RpcContext, SetupContext};
|
||||
use crate::db::model::{PackageDataEntry, StaticFiles};
|
||||
use crate::db::util::WithRevision;
|
||||
use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard};
|
||||
use crate::disk::mount::filesystem::ReadOnly;
|
||||
use crate::disk::mount::guard::TmpMountGuard;
|
||||
use crate::install::progress::InstallProgress;
|
||||
use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR};
|
||||
use crate::net::ssl::SslManager;
|
||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
||||
use crate::s9pk::reader::S9pkReader;
|
||||
use crate::setup::RecoveryStatus;
|
||||
use crate::util::display_none;
|
||||
use crate::util::io::dir_size;
|
||||
use crate::util::serde::IoFormat;
|
||||
use crate::volume::{backup_dir, BACKUP_DIR, PKG_VOLUME_DIR};
|
||||
use crate::{auth::check_password_against_db, notifications::NotificationLevel};
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<Vec<PackageId>, Error> {
|
||||
arg.split(',')
|
||||
.map(|s| s.trim().parse().map_err(Error::from))
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[command(rename = "restore", display(display_none))]
|
||||
#[instrument(skip(ctx, old_password, password))]
|
||||
pub async fn restore_packages_rpc(
|
||||
#[context] ctx: RpcContext,
|
||||
#[arg(parse(parse_comma_separated))] ids: Vec<PackageId>,
|
||||
#[arg(rename = "target-id")] target_id: BackupTargetId,
|
||||
#[arg(rename = "old-password", long = "old-password")] old_password: Option<String>,
|
||||
#[arg] password: String,
|
||||
) -> Result<WithRevision<()>, Error> {
|
||||
let mut db = ctx.db.handle();
|
||||
check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?;
|
||||
let fs = target_id
|
||||
.load(&mut ctx.secret_store.acquire().await?)
|
||||
.await?;
|
||||
let mut backup_guard = BackupMountGuard::mount(
|
||||
TmpMountGuard::mount(&fs, ReadOnly).await?,
|
||||
old_password.as_ref().unwrap_or(&password),
|
||||
)
|
||||
.await?;
|
||||
if old_password.is_some() {
|
||||
backup_guard.change_password(&password)?;
|
||||
}
|
||||
|
||||
let (revision, backup_guard, tasks, _) =
|
||||
restore_packages(&ctx, &mut db, backup_guard, ids).await?;
|
||||
|
||||
tokio::spawn(async {
|
||||
futures::future::join_all(tasks).await;
|
||||
if let Err(e) = backup_guard.unmount().await {
|
||||
tracing::error!("Error unmounting backup drive: {}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
}
|
||||
});
|
||||
|
||||
Ok(WithRevision {
|
||||
response: (),
|
||||
revision,
|
||||
})
|
||||
}
|
||||
|
||||
async fn approximate_progress(
|
||||
rpc_ctx: &RpcContext,
|
||||
progress: &mut ProgressInfo,
|
||||
) -> Result<(), Error> {
|
||||
for (id, size) in &mut progress.target_volume_size {
|
||||
let dir = rpc_ctx.datadir.join(PKG_VOLUME_DIR).join(id).join("data");
|
||||
if tokio::fs::metadata(&dir).await.is_err() {
|
||||
*size = 0;
|
||||
} else {
|
||||
*size = dir_size(&dir).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn approximate_progress_loop(
|
||||
ctx: &SetupContext,
|
||||
rpc_ctx: &RpcContext,
|
||||
mut starting_info: ProgressInfo,
|
||||
) {
|
||||
loop {
|
||||
if let Err(e) = approximate_progress(rpc_ctx, &mut starting_info).await {
|
||||
tracing::error!("Failed to approximate restore progress: {}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
} else {
|
||||
*ctx.recovery_status.write().await = Some(Ok(starting_info.flatten()));
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct ProgressInfo {
|
||||
package_installs: BTreeMap<PackageId, Arc<InstallProgress>>,
|
||||
src_volume_size: BTreeMap<PackageId, u64>,
|
||||
target_volume_size: BTreeMap<PackageId, u64>,
|
||||
}
|
||||
impl ProgressInfo {
|
||||
fn flatten(&self) -> RecoveryStatus {
|
||||
let mut total_bytes = 0;
|
||||
let mut bytes_transferred = 0;
|
||||
|
||||
for progress in self.package_installs.values() {
|
||||
total_bytes += ((progress.size.unwrap_or(0) as f64) * 2.2) as u64;
|
||||
bytes_transferred += progress.downloaded.load(Ordering::SeqCst);
|
||||
bytes_transferred += ((progress.validated.load(Ordering::SeqCst) as f64) * 0.2) as u64;
|
||||
bytes_transferred += progress.unpacked.load(Ordering::SeqCst);
|
||||
}
|
||||
|
||||
for size in self.src_volume_size.values() {
|
||||
total_bytes += *size;
|
||||
}
|
||||
|
||||
for size in self.target_volume_size.values() {
|
||||
bytes_transferred += *size;
|
||||
}
|
||||
|
||||
if bytes_transferred > total_bytes {
|
||||
bytes_transferred = total_bytes;
|
||||
}
|
||||
|
||||
RecoveryStatus {
|
||||
total_bytes,
|
||||
bytes_transferred,
|
||||
complete: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx))]
|
||||
pub async fn recover_full_embassy(
|
||||
ctx: SetupContext,
|
||||
disk_guid: Arc<String>,
|
||||
embassy_password: String,
|
||||
recovery_source: TmpMountGuard,
|
||||
recovery_password: Option<String>,
|
||||
) -> Result<(OnionAddressV3, X509, BoxFuture<'static, Result<(), Error>>), Error> {
|
||||
let backup_guard = BackupMountGuard::mount(
|
||||
recovery_source,
|
||||
recovery_password.as_deref().unwrap_or_default(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let os_backup_path = backup_guard.as_ref().join("os-backup.cbor");
|
||||
let os_backup: OsBackup =
|
||||
IoFormat::Cbor.from_slice(&tokio::fs::read(&os_backup_path).await.with_ctx(|_| {
|
||||
(
|
||||
crate::ErrorKind::Filesystem,
|
||||
os_backup_path.display().to_string(),
|
||||
)
|
||||
})?)?;
|
||||
|
||||
let password = argon2::hash_encoded(
|
||||
embassy_password.as_bytes(),
|
||||
&rand::random::<[u8; 16]>()[..],
|
||||
&argon2::Config::default(),
|
||||
)
|
||||
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
|
||||
let key_vec = os_backup.tor_key.as_bytes().to_vec();
|
||||
let secret_store = ctx.secret_store().await?;
|
||||
sqlx::query!(
|
||||
"REPLACE INTO account (id, password, tor_key) VALUES (?, ?, ?)",
|
||||
0,
|
||||
password,
|
||||
key_vec,
|
||||
)
|
||||
.execute(&mut secret_store.acquire().await?)
|
||||
.await?;
|
||||
|
||||
SslManager::import_root_ca(
|
||||
secret_store.clone(),
|
||||
os_backup.root_ca_key,
|
||||
os_backup.root_ca_cert.clone(),
|
||||
)
|
||||
.await?;
|
||||
secret_store.close().await;
|
||||
|
||||
Ok((
|
||||
os_backup.tor_key.public().get_onion_address(),
|
||||
os_backup.root_ca_cert,
|
||||
async move {
|
||||
let rpc_ctx = RpcContext::init(ctx.config_path.as_ref(), disk_guid).await?;
|
||||
let mut db = rpc_ctx.db.handle();
|
||||
|
||||
let ids = backup_guard
|
||||
.metadata
|
||||
.package_backups
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect();
|
||||
let (_, backup_guard, tasks, progress_info) = restore_packages(
|
||||
&rpc_ctx,
|
||||
&mut db,
|
||||
backup_guard,
|
||||
ids,
|
||||
)
|
||||
.await?;
|
||||
|
||||
tokio::select! {
|
||||
res = futures::future::join_all(tasks) => {
|
||||
for res in res {
|
||||
match res.with_kind(crate::ErrorKind::Unknown) {
|
||||
Ok((Ok(_), _)) => (),
|
||||
Ok((Err(err), package_id)) => {
|
||||
if let Err(err) = rpc_ctx.notification_manager.notify(
|
||||
&mut db,
|
||||
Some(package_id.clone()),
|
||||
NotificationLevel::Error,
|
||||
"Restoration Failure".to_string(), format!("Error restoring package {}: {}", package_id,err), (), None).await{
|
||||
tracing::error!("Failed to notify: {}", err);
|
||||
tracing::debug!("{:?}", err);
|
||||
};
|
||||
tracing::error!("Error restoring package {}: {}", package_id, err);
|
||||
tracing::debug!("{:?}", err);
|
||||
},
|
||||
Err(e) => {
|
||||
if let Err(err) = rpc_ctx.notification_manager.notify(
|
||||
&mut db,
|
||||
None,
|
||||
NotificationLevel::Error,
|
||||
"Restoration Failure".to_string(), format!("Error restoring ?: {}", e), (), None).await {
|
||||
|
||||
tracing::error!("Failed to notify: {}", err);
|
||||
tracing::debug!("{:?}", err);
|
||||
}
|
||||
tracing::error!("Error restoring packages: {}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
},
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
_ = approximate_progress_loop(&ctx, &rpc_ctx, progress_info) => unreachable!(concat!(module_path!(), "::approximate_progress_loop should not terminate")),
|
||||
}
|
||||
|
||||
backup_guard.unmount().await?;
|
||||
rpc_ctx.shutdown().await
|
||||
}.boxed()
|
||||
))
|
||||
}
|
||||
|
||||
async fn restore_packages(
|
||||
ctx: &RpcContext,
|
||||
db: &mut PatchDbHandle,
|
||||
backup_guard: BackupMountGuard<TmpMountGuard>,
|
||||
ids: Vec<PackageId>,
|
||||
) -> Result<
|
||||
(
|
||||
Option<Arc<Revision>>,
|
||||
BackupMountGuard<TmpMountGuard>,
|
||||
Vec<JoinHandle<(Result<(), Error>, PackageId)>>,
|
||||
ProgressInfo,
|
||||
),
|
||||
Error,
|
||||
> {
|
||||
let (revision, guards) = assure_restoring(ctx, db, ids, &backup_guard).await?;
|
||||
|
||||
let mut progress_info = ProgressInfo::default();
|
||||
|
||||
let mut tasks = Vec::with_capacity(guards.len());
|
||||
for (manifest, guard) in guards {
|
||||
let id = manifest.id.clone();
|
||||
let (progress, task) = restore_package(ctx.clone(), manifest, guard).await?;
|
||||
progress_info.package_installs.insert(id.clone(), progress);
|
||||
progress_info
|
||||
.src_volume_size
|
||||
.insert(id.clone(), dir_size(backup_dir(&id)).await?);
|
||||
progress_info.target_volume_size.insert(id.clone(), 0);
|
||||
let package_id = id.clone();
|
||||
tasks.push(tokio::spawn(
|
||||
async move {
|
||||
if let Err(e) = task.await {
|
||||
tracing::error!("Error restoring package {}: {}", id, e);
|
||||
tracing::debug!("{:?}", e);
|
||||
Err(e)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
.map(|x| (x, package_id)),
|
||||
));
|
||||
}
|
||||
|
||||
Ok((revision, backup_guard, tasks, progress_info))
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, db, backup_guard))]
|
||||
async fn assure_restoring(
|
||||
ctx: &RpcContext,
|
||||
db: &mut PatchDbHandle,
|
||||
ids: Vec<PackageId>,
|
||||
backup_guard: &BackupMountGuard<TmpMountGuard>,
|
||||
) -> Result<
|
||||
(
|
||||
Option<Arc<Revision>>,
|
||||
Vec<(Manifest, PackageBackupMountGuard)>,
|
||||
),
|
||||
Error,
|
||||
> {
|
||||
let mut tx = db.begin().await?;
|
||||
|
||||
let mut guards = Vec::with_capacity(ids.len());
|
||||
|
||||
for id in ids {
|
||||
let mut model = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&id)
|
||||
.get_mut(&mut tx)
|
||||
.await?;
|
||||
|
||||
if !model.is_none() {
|
||||
return Err(Error::new(
|
||||
eyre!("Can't restore over existing package: {}", id),
|
||||
crate::ErrorKind::InvalidRequest,
|
||||
));
|
||||
}
|
||||
|
||||
let guard = backup_guard.mount_package_backup(&id).await?;
|
||||
let s9pk_path = Path::new(BACKUP_DIR).join(&id).join(format!("{}.s9pk", id));
|
||||
let mut rdr = S9pkReader::open(&s9pk_path, false).await?;
|
||||
|
||||
let manifest = rdr.manifest().await?;
|
||||
let version = manifest.version.clone();
|
||||
let progress = InstallProgress::new(Some(tokio::fs::metadata(&s9pk_path).await?.len()));
|
||||
|
||||
let public_dir_path = ctx
|
||||
.datadir
|
||||
.join(PKG_PUBLIC_DIR)
|
||||
.join(&id)
|
||||
.join(version.as_str());
|
||||
tokio::fs::create_dir_all(&public_dir_path).await?;
|
||||
|
||||
let license_path = public_dir_path.join("LICENSE.md");
|
||||
let mut dst = File::create(&license_path).await?;
|
||||
tokio::io::copy(&mut rdr.license().await?, &mut dst).await?;
|
||||
dst.sync_all().await?;
|
||||
|
||||
let instructions_path = public_dir_path.join("INSTRUCTIONS.md");
|
||||
let mut dst = File::create(&instructions_path).await?;
|
||||
tokio::io::copy(&mut rdr.instructions().await?, &mut dst).await?;
|
||||
dst.sync_all().await?;
|
||||
|
||||
let icon_path = Path::new("icon").with_extension(&manifest.assets.icon_type());
|
||||
let icon_path = public_dir_path.join(&icon_path);
|
||||
let mut dst = File::create(&icon_path).await?;
|
||||
tokio::io::copy(&mut rdr.icon().await?, &mut dst).await?;
|
||||
dst.sync_all().await?;
|
||||
|
||||
*model = Some(PackageDataEntry::Restoring {
|
||||
install_progress: progress.clone(),
|
||||
static_files: StaticFiles::local(&id, &version, manifest.assets.icon_type()),
|
||||
manifest: manifest.clone(),
|
||||
});
|
||||
model.save(&mut tx).await?;
|
||||
|
||||
guards.push((manifest, guard));
|
||||
}
|
||||
|
||||
Ok((tx.commit(None).await?, guards))
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, guard))]
|
||||
async fn restore_package<'a>(
|
||||
ctx: RpcContext,
|
||||
manifest: Manifest,
|
||||
guard: PackageBackupMountGuard,
|
||||
) -> Result<(Arc<InstallProgress>, BoxFuture<'static, Result<(), Error>>), Error> {
|
||||
let s9pk_path = Path::new(BACKUP_DIR)
|
||||
.join(&manifest.id)
|
||||
.join(format!("{}.s9pk", manifest.id));
|
||||
let len = tokio::fs::metadata(&s9pk_path)
|
||||
.await
|
||||
.with_ctx(|_| {
|
||||
(
|
||||
crate::ErrorKind::Filesystem,
|
||||
s9pk_path.display().to_string(),
|
||||
)
|
||||
})?
|
||||
.len();
|
||||
let file = File::open(&s9pk_path).await.with_ctx(|_| {
|
||||
(
|
||||
crate::ErrorKind::Filesystem,
|
||||
s9pk_path.display().to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
let progress = InstallProgress::new(Some(len));
|
||||
|
||||
Ok((
|
||||
progress.clone(),
|
||||
async move {
|
||||
download_install_s9pk(&ctx, &manifest, None, progress, file).await?;
|
||||
|
||||
guard.unmount().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
.boxed(),
|
||||
))
|
||||
}
|
||||
@@ -1,240 +0,0 @@
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use embassy::context::rpc::RpcContextConfig;
|
||||
use embassy::context::{DiagnosticContext, SetupContext};
|
||||
use embassy::disk::fsck::RepairStrategy;
|
||||
use embassy::disk::main::DEFAULT_PASSWORD;
|
||||
use embassy::disk::REPAIR_DISK_PATH;
|
||||
use embassy::hostname::get_product_key;
|
||||
use embassy::middleware::cors::cors;
|
||||
use embassy::middleware::diagnostic::diagnostic;
|
||||
use embassy::middleware::encrypt::encrypt;
|
||||
#[cfg(feature = "avahi")]
|
||||
use embassy::net::mdns::MdnsController;
|
||||
use embassy::shutdown::Shutdown;
|
||||
use embassy::sound::CHIME;
|
||||
use embassy::util::logger::EmbassyLogger;
|
||||
use embassy::util::Invoke;
|
||||
use embassy::{Error, ResultExt};
|
||||
use http::StatusCode;
|
||||
use rpc_toolkit::rpc_server;
|
||||
use tokio::process::Command;
|
||||
use tracing::instrument;
|
||||
|
||||
/// `rpc_server!` status hook: serve every RPC result with HTTP 200,
/// regardless of the RPC-level result code.
fn status_fn(_: i32) -> StatusCode {
    StatusCode::OK
}
|
||||
|
||||
/// First-boot vs normal-boot dispatch.
///
/// If `/embassy-os/disk.guid` does not exist the device has never been set
/// up: serve the setup wizard (via nginx) until the setup API signals
/// shutdown. Otherwise, import/repair the data disk identified by the guid
/// and run normal initialization.
#[instrument]
async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
    if tokio::fs::metadata("/embassy-os/disk.guid").await.is_err() {
        // --- Setup path: no disk guid on record yet. ---
        #[cfg(feature = "avahi")]
        let _mdns = MdnsController::init();
        // Point nginx at the setup-wizard UI and reload it.
        tokio::fs::write(
            "/etc/nginx/sites-available/default",
            include_str!("../nginx/setup-wizard.conf"),
        )
        .await
        .with_ctx(|_| {
            (
                embassy::ErrorKind::Filesystem,
                "/etc/nginx/sites-available/default",
            )
        })?;
        Command::new("systemctl")
            .arg("reload")
            .arg("nginx")
            .invoke(embassy::ErrorKind::Nginx)
            .await?;
        let ctx = SetupContext::init(cfg_path).await?;
        // The encrypt middleware pulls the product key lazily from the context.
        let keysource_ctx = ctx.clone();
        let keysource = move || {
            let ctx = keysource_ctx.clone();
            async move { ctx.product_key().await }
        };
        let encrypt = encrypt(keysource);
        tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
        CHIME.play().await?;
        // Serve the setup API until the setup flow requests shutdown.
        rpc_server!({
            command: embassy::setup_api,
            context: ctx.clone(),
            status: status_fn,
            middleware: [
                cors,
                encrypt,
            ]
        })
        .with_graceful_shutdown({
            let mut shutdown = ctx.shutdown.subscribe();
            async move {
                shutdown.recv().await.expect("context dropped");
            }
        })
        .await
        .with_kind(embassy::ErrorKind::Network)?;
    } else {
        // --- Normal boot: a disk guid is on record. ---
        let cfg = RpcContextConfig::load(cfg_path).await?;
        let guid_string = tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
            .await?;
        let guid = guid_string.trim();
        // A marker file left by a prior failure escalates fsck to aggressive.
        let reboot = embassy::disk::main::import(
            guid,
            cfg.datadir(),
            if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
                RepairStrategy::Aggressive
            } else {
                RepairStrategy::Preen
            },
            DEFAULT_PASSWORD,
        )
        .await?;
        // Repair marker is one-shot: consume it now that import has run.
        if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
            tokio::fs::remove_file(REPAIR_DISK_PATH)
                .await
                .with_ctx(|_| (embassy::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
        }
        // Import may require a reboot (e.g. after repair): export cleanly
        // and reboot the machine; this call does not return on success.
        if reboot.0 {
            embassy::disk::main::export(guid, cfg.datadir()).await?;
            Command::new("reboot")
                .invoke(embassy::ErrorKind::Unknown)
                .await?;
        }
        tracing::info!("Loaded Disk");
        embassy::init::init(&cfg, &get_product_key().await?).await?;
    }

    Ok(())
}
|
||||
|
||||
async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
|
||||
let script = path.as_ref();
|
||||
if script.exists() {
|
||||
match Command::new("/bin/bash").arg(script).spawn() {
|
||||
Ok(mut c) => {
|
||||
if let Err(e) = c.wait().await {
|
||||
tracing::error!("Error Running {}: {}", script.display(), e);
|
||||
tracing::debug!("{:?}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Error Running {}: {}", script.display(), e);
|
||||
tracing::debug!("{:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Boot driver: runs pre/post-init hook scripts around `setup_or_init`.
///
/// On setup/init failure it falls back to serving the diagnostic API (so the
/// user can see what went wrong) until that context signals shutdown; the
/// received `Shutdown` (if any) is propagated to `main` for execution.
#[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
    embassy::sound::BEP.play().await?;

    // User-supplied hook; failures inside are logged, not fatal.
    run_script_if_exists("/embassy-os/preinit.sh").await;

    let res = if let Err(e) = setup_or_init(cfg_path).await {
        // --- Failure path: bring up the diagnostic UI/API. ---
        async {
            tracing::error!("{}", e.source);
            // NOTE(review): uses `{}` here while the other binary uses
            // `{:?}` for the debug line — confirm which is intended.
            tracing::debug!("{}", e.source);
            embassy::sound::BEETHOVEN.play().await?;
            #[cfg(feature = "avahi")]
            let _mdns = MdnsController::init();
            tokio::fs::write(
                "/etc/nginx/sites-available/default",
                include_str!("../nginx/diagnostic-ui.conf"),
            )
            .await
            .with_ctx(|_| {
                (
                    embassy::ErrorKind::Filesystem,
                    "/etc/nginx/sites-available/default",
                )
            })?;
            Command::new("systemctl")
                .arg("reload")
                .arg("nginx")
                .invoke(embassy::ErrorKind::Nginx)
                .await?;
            // Diagnostic context carries the original error plus the disk
            // guid when one exists (setup may have failed before or after
            // the disk was provisioned).
            let ctx = DiagnosticContext::init(
                cfg_path,
                if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() {
                    Some(Arc::new(
                        tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
                            .await?
                            .trim()
                            .to_owned(),
                    ))
                } else {
                    None
                },
                e,
            )
            .await?;
            // Subscribe before starting the server so the shutdown value
            // sent while the server is up cannot be missed.
            let mut shutdown_recv = ctx.shutdown.subscribe();
            rpc_server!({
                command: embassy::diagnostic_api,
                context: ctx.clone(),
                status: status_fn,
                middleware: [
                    cors,
                    diagnostic,
                ]
            })
            .with_graceful_shutdown({
                let mut shutdown = ctx.shutdown.subscribe();
                async move {
                    shutdown.recv().await.expect("context dropped");
                }
            })
            .await
            .with_kind(embassy::ErrorKind::Network)?;

            // Propagate whatever shutdown action the diagnostic API chose.
            Ok::<_, Error>(
                shutdown_recv
                    .recv()
                    .await
                    .with_kind(embassy::ErrorKind::Network)?,
            )
        }
        .await
    } else {
        Ok(None)
    };

    // Post-init hook runs on both the success and the failure path.
    run_script_if_exists("/embassy-os/postinit.sh").await;

    res
}
|
||||
|
||||
fn main() {
|
||||
let matches = clap::App::new("embassyd")
|
||||
.arg(
|
||||
clap::Arg::with_name("config")
|
||||
.short("c")
|
||||
.long("config")
|
||||
.takes_value(true),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
EmbassyLogger::init();
|
||||
|
||||
let cfg_path = matches.value_of("config");
|
||||
let res = {
|
||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.expect("failed to initialize runtime");
|
||||
rt.block_on(inner_main(cfg_path))
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(Some(shutdown)) => shutdown.execute(),
|
||||
Ok(None) => (),
|
||||
Err(e) => {
|
||||
eprintln!("{}", e.source);
|
||||
tracing::debug!("{:?}", e.source);
|
||||
drop(e.source);
|
||||
std::process::exit(e.kind as i32)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,377 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use embassy::context::{DiagnosticContext, RpcContext};
|
||||
use embassy::core::rpc_continuations::RequestGuid;
|
||||
use embassy::db::subscribe;
|
||||
use embassy::middleware::auth::auth;
|
||||
use embassy::middleware::cors::cors;
|
||||
use embassy::middleware::diagnostic::diagnostic;
|
||||
#[cfg(feature = "avahi")]
|
||||
use embassy::net::mdns::MdnsController;
|
||||
use embassy::net::tor::tor_health_check;
|
||||
use embassy::shutdown::Shutdown;
|
||||
use embassy::system::launch_metrics_task;
|
||||
use embassy::util::logger::EmbassyLogger;
|
||||
use embassy::util::{daemon, Invoke};
|
||||
use embassy::{static_server, Error, ErrorKind, ResultExt};
|
||||
use futures::{FutureExt, TryFutureExt};
|
||||
use reqwest::{Client, Proxy};
|
||||
use rpc_toolkit::hyper::{Body, Response, Server, StatusCode};
|
||||
use rpc_toolkit::rpc_server;
|
||||
use tokio::process::Command;
|
||||
use tokio::signal::unix::signal;
|
||||
use tracing::instrument;
|
||||
|
||||
/// `rpc_server!` status hook: serve every RPC result with HTTP 200,
/// regardless of the RPC-level result code.
fn status_fn(_: i32) -> StatusCode {
    StatusCode::OK
}
|
||||
|
||||
fn err_to_500(e: Error) -> Response<Body> {
|
||||
tracing::error!("{}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.body(Body::empty())
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Main daemon loop: brings up every long-running service (RPC server,
/// metrics, revision cache, websocket/continuation server, static file
/// server, Tor health daemon), waits for all of them to shut down via the
/// shared shutdown channel, then tears down the RPC context.
///
/// Returns the `Shutdown` action (reboot/poweroff/...) requested through
/// the shutdown channel, if any, for `main` to execute.
#[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
    let (rpc_ctx, shutdown) = {
        embassy::hostname::sync_hostname().await?;
        let rpc_ctx = RpcContext::init(
            cfg_path,
            Arc::new(
                tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
                    .await?
                    .trim()
                    .to_owned(),
            ),
        )
        .await?;
        // Subscribe before any service starts so no shutdown value is missed.
        let mut shutdown_recv = rpc_ctx.shutdown.subscribe();

        // SIGINT/SIGQUIT/SIGTERM all translate into a `None` shutdown
        // (graceful stop without a follow-up action).
        let sig_handler_ctx = rpc_ctx.clone();
        let sig_handler = tokio::spawn(async move {
            use tokio::signal::unix::SignalKind;
            futures::future::select_all(
                [
                    SignalKind::interrupt(),
                    SignalKind::quit(),
                    SignalKind::terminate(),
                ]
                .iter()
                .map(|s| {
                    async move {
                        signal(*s)
                            .expect(&format!("register {:?} handler", s))
                            .recv()
                            .await
                    }
                    .boxed()
                }),
            )
            .await;
            sig_handler_ctx
                .shutdown
                .send(None)
                .map_err(|_| ())
                .expect("send shutdown signal");
        });

        rpc_ctx.set_nginx_conf(&mut rpc_ctx.db.handle()).await?;
        let auth = auth(rpc_ctx.clone());
        let ctx = rpc_ctx.clone();
        // Main JSON-RPC API server (cors + auth middleware).
        let server = rpc_server!({
            command: embassy::main_api,
            context: ctx,
            status: status_fn,
            middleware: [
                cors,
                auth,
            ]
        })
        .with_graceful_shutdown({
            let mut shutdown = rpc_ctx.shutdown.subscribe();
            async move {
                shutdown.recv().await.expect("context dropped");
            }
        });

        // Periodic system-metrics sampler.
        let metrics_ctx = rpc_ctx.clone();
        let metrics_task = tokio::spawn(async move {
            launch_metrics_task(&metrics_ctx.metrics_cache, || {
                metrics_ctx.shutdown.subscribe()
            })
            .await
        });

        // Keeps a bounded in-memory window of recent db revisions so
        // websocket clients can catch up without a full resync.
        let rev_cache_ctx = rpc_ctx.clone();
        let revision_cache_task = tokio::spawn(async move {
            let mut sub = rev_cache_ctx.db.subscribe();
            let mut shutdown = rev_cache_ctx.shutdown.subscribe();
            loop {
                let rev = match tokio::select! {
                    a = sub.recv() => a,
                    _ = shutdown.recv() => break,
                } {
                    Ok(a) => a,
                    Err(_) => {
                        // Subscription lagged/closed: the cache is no longer
                        // contiguous, so drop it entirely.
                        rev_cache_ctx.revision_cache.write().await.truncate(0);
                        continue;
                    }
                }; // TODO: handle falling behind
                let mut cache = rev_cache_ctx.revision_cache.write().await;
                cache.push_back(rev);
                if cache.len() > rev_cache_ctx.revision_cache_size {
                    cache.pop_front();
                }
            }
        });

        // Raw hyper server for websocket db subscriptions and one-shot RPC
        // continuations (long downloads etc.) addressed by request GUID.
        let ws_ctx = rpc_ctx.clone();
        let ws_server = {
            let builder = Server::bind(&ws_ctx.bind_ws);

            let make_svc = ::rpc_toolkit::hyper::service::make_service_fn(move |_| {
                let ctx = ws_ctx.clone();
                async move {
                    Ok::<_, ::rpc_toolkit::hyper::Error>(::rpc_toolkit::hyper::service::service_fn(
                        move |req| {
                            let ctx = ctx.clone();
                            async move {
                                tracing::debug!("Request to {}", req.uri().path());
                                match req.uri().path() {
                                    "/ws/db" => {
                                        Ok(subscribe(ctx, req).await.unwrap_or_else(err_to_500))
                                    }
                                    path if path.starts_with("/rest/rpc/") => {
                                        match RequestGuid::from(
                                            path.strip_prefix("/rest/rpc/").unwrap(),
                                        ) {
                                            None => {
                                                tracing::debug!("No Guid Path");
                                                Response::builder()
                                                    .status(StatusCode::BAD_REQUEST)
                                                    .body(Body::empty())
                                            }
                                            Some(guid) => {
                                                // Continuations are single-use:
                                                // `remove` consumes the handler.
                                                match ctx
                                                    .rpc_stream_continuations
                                                    .lock()
                                                    .await
                                                    .remove(&guid)
                                                {
                                                    None => Response::builder()
                                                        .status(StatusCode::NOT_FOUND)
                                                        .body(Body::empty()),
                                                    Some(cont) => match (cont.handler)(req).await {
                                                        Ok(r) => Ok(r),
                                                        Err(e) => Response::builder()
                                                            .status(
                                                                StatusCode::INTERNAL_SERVER_ERROR,
                                                            )
                                                            .body(Body::from(format!("{}", e))),
                                                    },
                                                }
                                            }
                                        }
                                    }
                                    _ => Response::builder()
                                        .status(StatusCode::NOT_FOUND)
                                        .body(Body::empty()),
                                }
                            }
                        },
                    ))
                }
            });
            builder.serve(make_svc)
        }
        .with_graceful_shutdown({
            let mut shutdown = rpc_ctx.shutdown.subscribe();
            async move {
                shutdown.recv().await.expect("context dropped");
            }
        });

        // Static file server for the web UI assets.
        let file_server_ctx = rpc_ctx.clone();
        let file_server = {
            static_server::init(file_server_ctx, {
                let mut shutdown = rpc_ctx.shutdown.subscribe();
                async move {
                    shutdown.recv().await.expect("context dropped");
                }
            })
        };

        // HTTP client routed through the local Tor SOCKS proxy, used to
        // probe our own hidden service every 5 minutes.
        // NOTE(review): `Proxy::http` only proxies plain-HTTP requests —
        // confirm https is never needed here, else use `Proxy::all`.
        // NOTE(review): `crate::ErrorKind` in a bin target — should this be
        // `embassy::ErrorKind`? confirm it resolves.
        let tor_health_ctx = rpc_ctx.clone();
        let tor_client = Client::builder()
            .proxy(
                Proxy::http(format!(
                    "socks5h://{}:{}",
                    rpc_ctx.tor_socks.ip(),
                    rpc_ctx.tor_socks.port()
                ))
                .with_kind(crate::ErrorKind::Network)?,
            )
            .build()
            .with_kind(crate::ErrorKind::Network)?;
        let tor_health_daemon = daemon(
            move || {
                let ctx = tor_health_ctx.clone();
                let client = tor_client.clone();
                async move { tor_health_check(&client, &ctx.net_controller.tor).await }
            },
            Duration::from_secs(300),
            rpc_ctx.shutdown.subscribe(),
        );

        embassy::sound::CHIME.play().await?;

        // Run every service to completion; the first error aborts the join.
        futures::try_join!(
            server
                .map_err(|e| Error::new(e, ErrorKind::Network))
                .map_ok(|_| tracing::debug!("RPC Server Shutdown")),
            metrics_task
                .map_err(|e| Error::new(
                    eyre!("{}", e).wrap_err("Metrics daemon panicked!"),
                    ErrorKind::Unknown
                ))
                .map_ok(|_| tracing::debug!("Metrics daemon Shutdown")),
            revision_cache_task
                .map_err(|e| Error::new(
                    eyre!("{}", e).wrap_err("Revision Cache daemon panicked!"),
                    ErrorKind::Unknown
                ))
                .map_ok(|_| tracing::debug!("Revision Cache daemon Shutdown")),
            ws_server
                .map_err(|e| Error::new(e, ErrorKind::Network))
                .map_ok(|_| tracing::debug!("WebSocket Server Shutdown")),
            file_server
                .map_err(|e| Error::new(e, ErrorKind::Network))
                .map_ok(|_| tracing::debug!("Static File Server Shutdown")),
            tor_health_daemon
                .map_err(|e| Error::new(
                    e.wrap_err("Tor Health daemon panicked!"),
                    ErrorKind::Unknown
                ))
                .map_ok(|_| tracing::debug!("Tor Health daemon Shutdown")),
        )?;

        // All services stopped: fetch the requested shutdown action.
        let mut shutdown = shutdown_recv
            .recv()
            .await
            .with_kind(crate::ErrorKind::Unknown)?;

        sig_handler.abort();

        // Drop the db handle held by the shutdown action so the context's
        // own teardown below is not blocked on it.
        if let Some(shutdown) = &mut shutdown {
            drop(shutdown.db_handle.take());
        }

        (rpc_ctx, shutdown)
    };
    rpc_ctx.shutdown().await?;

    Ok(shutdown)
}
|
||||
|
||||
/// Process entry point for the main daemon.
///
/// Runs `inner_main`; if it fails, falls back (still on the same runtime) to
/// serving the diagnostic API so the user can inspect the error, and only
/// then resolves to an outcome that is executed or reported below.
fn main() {
    let matches = clap::App::new("embassyd")
        .arg(
            clap::Arg::with_name("config")
                .short("c")
                .long("config")
                .takes_value(true),
        )
        .get_matches();

    EmbassyLogger::init();

    let cfg_path = matches.value_of("config");

    // Scope the runtime so it is dropped before any shutdown action runs.
    let res = {
        let rt = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .expect("failed to initialize runtime");
        rt.block_on(async {
            match inner_main(cfg_path).await {
                Ok(a) => Ok(a),
                Err(e) => {
                    // --- Failure path: bring up the diagnostic UI/API. ---
                    // (Immediately-invoked async closure so `?` works.)
                    (|| async {
                        tracing::error!("{}", e.source);
                        tracing::debug!("{:?}", e.source);
                        embassy::sound::BEETHOVEN.play().await?;
                        #[cfg(feature = "avahi")]
                        let _mdns = MdnsController::init();
                        tokio::fs::write(
                            "/etc/nginx/sites-available/default",
                            include_str!("../nginx/diagnostic-ui.conf"),
                        )
                        .await
                        .with_ctx(|_| {
                            (
                                embassy::ErrorKind::Filesystem,
                                "/etc/nginx/sites-available/default",
                            )
                        })?;
                        Command::new("systemctl")
                            .arg("reload")
                            .arg("nginx")
                            .invoke(embassy::ErrorKind::Nginx)
                            .await?;
                        // Diagnostic context carries the original error plus
                        // the disk guid when one exists.
                        let ctx = DiagnosticContext::init(
                            cfg_path,
                            if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() {
                                Some(Arc::new(
                                    tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
                                        .await?
                                        .trim()
                                        .to_owned(),
                                ))
                            } else {
                                None
                            },
                            e,
                        )
                        .await?;
                        rpc_server!({
                            command: embassy::diagnostic_api,
                            context: ctx.clone(),
                            status: status_fn,
                            middleware: [
                                cors,
                                diagnostic,
                            ]
                        })
                        .with_graceful_shutdown({
                            let mut shutdown = ctx.shutdown.subscribe();
                            async move {
                                shutdown.recv().await.expect("context dropped");
                            }
                        })
                        .await
                        .with_kind(embassy::ErrorKind::Network)?;
                        // Diagnostic mode never requests a shutdown action.
                        Ok::<_, Error>(None)
                    })()
                    .await
                }
            }
        })
    };

    match res {
        Ok(None) => (),
        Ok(Some(s)) => s.execute(),
        Err(e) => {
            eprintln!("{}", e.source);
            tracing::debug!("{:?}", e.source);
            drop(e.source);
            std::process::exit(e.kind as i32)
        }
    }
}
|
||||
@@ -1,570 +0,0 @@
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::future::{BoxFuture, FutureExt};
|
||||
use indexmap::IndexSet;
|
||||
use itertools::Itertools;
|
||||
use patch_db::{DbHandle, LockType};
|
||||
use rand::SeedableRng;
|
||||
use regex::Regex;
|
||||
use rpc_toolkit::command;
|
||||
use serde_json::Value;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::db::model::CurrentDependencyInfo;
|
||||
use crate::db::util::WithRevision;
|
||||
use crate::dependencies::{
|
||||
add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
|
||||
BreakageRes, DependencyError, DependencyErrors, TaggedDependencyError,
|
||||
};
|
||||
use crate::install::cleanup::remove_from_current_dependents_lists;
|
||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
||||
use crate::util::display_none;
|
||||
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
|
||||
use crate::{Error, ResultExt as _};
|
||||
|
||||
pub mod action;
|
||||
pub mod spec;
|
||||
pub mod util;
|
||||
|
||||
pub use spec::{ConfigSpec, Defaultable};
|
||||
use util::NumRange;
|
||||
|
||||
use self::action::ConfigRes;
|
||||
use self::spec::{PackagePointerSpec, ValueSpecPointer};
|
||||
|
||||
pub type Config = serde_json::Map<String, Value>;
|
||||
/// Maps a value to a static, human-readable type name.
pub trait TypeOf {
    /// Returns the type name of `self` (e.g. "list", "boolean").
    fn type_of(&self) -> &'static str;
}
|
||||
impl TypeOf for Value {
|
||||
fn type_of(&self) -> &'static str {
|
||||
match self {
|
||||
Value::Array(_) => "list",
|
||||
Value::Bool(_) => "boolean",
|
||||
Value::Null => "null",
|
||||
Value::Number(_) => "number",
|
||||
Value::Object(_) => "object",
|
||||
Value::String(_) => "string",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors that can occur while generating, validating, or applying a
/// package configuration.
#[derive(Debug, thiserror::Error)]
pub enum ConfigurationError {
    /// A config operation exceeded its allotted time.
    #[error("Timeout Error")]
    TimeoutError(#[from] TimeoutError),
    /// A value failed spec validation; carries the path to the offender.
    #[error("No Match: {0}")]
    NoMatch(#[from] NoMatchWithPath),
    /// Underlying system failure; the inner error's kind is preserved
    /// when converting to `Error`.
    #[error("System Error: {0}")]
    SystemError(Error),
    /// Access denied for the given spec pointer.
    #[error("Permission Denied: {0}")]
    PermissionDenied(ValueSpecPointer),
}
|
||||
impl From<ConfigurationError> for Error {
|
||||
fn from(err: ConfigurationError) -> Self {
|
||||
let kind = match &err {
|
||||
ConfigurationError::SystemError(e) => e.kind,
|
||||
_ => crate::ErrorKind::ConfigGen,
|
||||
};
|
||||
crate::Error::new(err, kind)
|
||||
}
|
||||
}
|
||||
|
||||
/// Marker error: a config operation did not finish in time.
#[derive(Clone, Copy, Debug, thiserror::Error)]
#[error("Timeout Error")]
pub struct TimeoutError;
|
||||
|
||||
/// A `MatchError` annotated with the path of the offending config value.
///
/// Segments are stored innermost-first (see `prepend`); `Display` renders
/// them reversed, dot-separated, so they read outermost-first.
#[derive(Clone, Debug, thiserror::Error)]
pub struct NoMatchWithPath {
    /// Path segments from the failing value up toward the config root.
    pub path: Vec<String>,
    /// The underlying validation failure.
    pub error: MatchError,
}
|
||||
impl NoMatchWithPath {
|
||||
pub fn new(error: MatchError) -> Self {
|
||||
NoMatchWithPath {
|
||||
path: Vec::new(),
|
||||
error,
|
||||
}
|
||||
}
|
||||
pub fn prepend(mut self, seg: String) -> Self {
|
||||
self.path.push(seg);
|
||||
self
|
||||
}
|
||||
}
|
||||
impl std::fmt::Display for NoMatchWithPath {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}: {}", self.path.iter().rev().join("."), self.error)
|
||||
}
|
||||
}
|
||||
impl From<NoMatchWithPath> for Error {
|
||||
fn from(e: NoMatchWithPath) -> Self {
|
||||
ConfigurationError::from(e).into()
|
||||
}
|
||||
}
|
||||
|
||||
/// The ways a config value can fail to match its spec; the `#[error]`
/// strings are the user-facing messages.
#[derive(Clone, Debug, thiserror::Error)]
pub enum MatchError {
    #[error("String {0:?} Does Not Match Pattern {1}")]
    Pattern(String, Regex),
    #[error("String {0:?} Is Not In Enum {1:?}")]
    Enum(String, IndexSet<String>),
    #[error("Field Is Not Nullable")]
    NotNullable,
    #[error("Length Mismatch: expected {0}, actual: {1}")]
    LengthMismatch(NumRange<usize>, usize),
    #[error("Invalid Type: expected {0}, actual: {1}")]
    InvalidType(&'static str, &'static str),
    #[error("Number Out Of Range: expected {0}, actual: {1}")]
    OutOfRange(NumRange<f64>, f64),
    #[error("Number Is Not Integral: {0}")]
    NonIntegral(f64),
    #[error("Variant {0:?} Is Not In Union {1:?}")]
    Union(String, IndexSet<String>),
    #[error("Variant Is Missing Tag {0:?}")]
    MissingTag(String),
    #[error("Property {0:?} Of Variant {1:?} Conflicts With Union Tag")]
    PropertyMatchesUnionTag(String, String),
    #[error("Name of Property {0:?} Conflicts With Map Tag Name")]
    PropertyNameMatchesMapTag(String),
    #[error("Pointer Is Invalid: {0}")]
    InvalidPointer(spec::ValueSpecPointer),
    #[error("Object Key Is Invalid: {0}")]
    InvalidKey(String),
    #[error("Value In List Is Not Unique")]
    ListUniquenessViolation,
}
|
||||
|
||||
#[command(rename = "config-spec", cli_only, blocking, display(display_none))]
|
||||
pub fn verify_spec(#[arg] path: PathBuf) -> Result<(), Error> {
|
||||
let mut file = std::fs::File::open(&path)?;
|
||||
let format = match path.extension().and_then(|s| s.to_str()) {
|
||||
Some("yaml") | Some("yml") => IoFormat::Yaml,
|
||||
Some("json") => IoFormat::Json,
|
||||
Some("toml") => IoFormat::Toml,
|
||||
Some("cbor") => IoFormat::Cbor,
|
||||
_ => {
|
||||
return Err(Error::new(
|
||||
eyre!("Unknown file format. Expected one of yaml, json, toml, cbor."),
|
||||
crate::ErrorKind::Deserialization,
|
||||
));
|
||||
}
|
||||
};
|
||||
let _: ConfigSpec = format.from_reader(&mut file)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Parent `config` command: performs no work itself, just forwards the
/// package id to the `get`/`set` subcommands as parent data.
#[command(subcommands(get, set))]
pub fn config(#[arg] id: PackageId) -> Result<PackageId, Error> {
    Ok(id)
}
|
||||
|
||||
/// `config get`: returns the package's current config and spec by invoking
/// its config-get action.
///
/// Fails with `NotFound` if the package is not installed or declares no
/// config action.
#[command(display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn get(
    #[context] ctx: RpcContext,
    #[parent_data] id: PackageId,
    // `format` only affects CLI display (handled by the command macro).
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<ConfigRes, Error> {
    let mut db = ctx.db.handle();
    // Resolve the installed-package model; absence maps to NotFound.
    let pkg_model = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .and_then(|m| m.installed())
        .expect(&mut db)
        .await
        .with_kind(crate::ErrorKind::NotFound)?;
    let action = pkg_model
        .clone()
        .manifest()
        .config()
        .get(&mut db, true)
        .await?
        .to_owned()
        .ok_or_else(|| Error::new(eyre!("{} has no config", id), crate::ErrorKind::NotFound))?;
    let version = pkg_model
        .clone()
        .manifest()
        .version()
        .get(&mut db, true)
        .await?;
    let volumes = pkg_model.manifest().volumes().get(&mut db, true).await?;
    // Delegate to the package's own config-get action.
    action.get(&ctx, &id, &*version, &*volumes).await
}
|
||||
|
||||
/// `config set` CLI entry: bundles the arguments for `set_impl` (real run)
/// and `set_dry` (dry run).
///
/// * `config` - new config read from stdin; `None` means reuse/regenerate
///   (see `configure_rec`)
/// * `timeout` - optional cap forwarded to the config action
/// * `expire_id` - revision-expiry tag forwarded to the db commit
#[command(
    subcommands(self(set_impl(async, context(RpcContext))), set_dry),
    display(display_none)
)]
#[instrument]
pub fn set(
    #[parent_data] id: PackageId,
    // `format` only affects stdin parsing/CLI display via the macro.
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
    #[arg(long = "timeout")] timeout: Option<crate::util::serde::Duration>,
    #[arg(stdin, parse(parse_stdin_deserializable))] config: Option<Config>,
    #[arg(rename = "expire-id", long = "expire-id")] expire_id: Option<String>,
) -> Result<(PackageId, Option<Config>, Option<Duration>, Option<String>), Error> {
    // Unwrap the serde Duration newtype to a std Duration for the subcommands.
    Ok((id, config, timeout.map(|d| *d), expire_id))
}
|
||||
|
||||
/// Dry-run variant of `config set`: applies the configuration inside a db
/// transaction to collect the dependency breakages it would cause, then
/// ABORTS the transaction so nothing persists.
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn set_dry(
    #[context] ctx: RpcContext,
    #[parent_data] (id, config, timeout, _): (
        PackageId,
        Option<Config>,
        Option<Duration>,
        Option<String>,
    ),
) -> Result<BreakageRes, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    let mut breakages = BTreeMap::new();
    configure(
        &ctx,
        &mut tx,
        &id,
        config,
        &timeout,
        true, // dry_run
        &mut BTreeMap::new(),
        &mut breakages,
    )
    .await?;
    // Mirror the real path's "configured" flag inside the doomed
    // transaction; the abort below discards it along with everything else.
    crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .expect(&mut tx)
        .await?
        .installed()
        .expect(&mut tx)
        .await?
        .status()
        .configured()
        .put(&mut tx, &true)
        .await?;
    tx.abort().await?; // dry run: throw away all changes
    Ok(BreakageRes(breakages))
}
|
||||
|
||||
/// Real `config set`: applies the configuration inside a db transaction and
/// commits it, returning the resulting db revision.
#[instrument(skip(ctx))]
pub async fn set_impl(
    ctx: RpcContext,
    (id, config, timeout, expire_id): (PackageId, Option<Config>, Option<Duration>, Option<String>),
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    let mut breakages = BTreeMap::new();
    // Breakages are collected but not returned here (unlike `set_dry`).
    configure(
        &ctx,
        &mut tx,
        &id,
        config,
        &timeout,
        false, // not a dry run: the config action really executes
        &mut BTreeMap::new(),
        &mut breakages,
    )
    .await?;
    Ok(WithRevision {
        response: (),
        revision: tx.commit(expire_id).await?,
    })
}
|
||||
|
||||
/// Applies a configuration to package `id` via `configure_rec`, then marks
/// the package as configured in the db.
///
/// * `config` - `None` means reuse the old config or generate from the spec
/// * `dry_run` - when true the package's config action is not executed
/// * `overrides` - cache of configs already applied during this pass
/// * `breakages` - accumulates dependency errors caused by this change
#[instrument(skip(ctx, db))]
pub async fn configure<Db: DbHandle>(
    ctx: &RpcContext,
    db: &mut Db,
    id: &PackageId,
    config: Option<Config>,
    timeout: &Option<Duration>,
    dry_run: bool,
    overrides: &mut BTreeMap<PackageId, Config>,
    breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
) -> Result<(), Error> {
    configure_rec(ctx, db, id, config, timeout, dry_run, overrides, breakages).await?;
    // Flip the "configured" status flag only after the recursive
    // configuration has fully succeeded.
    crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .expect(db)
        .await?
        .installed()
        .expect(db)
        .await?
        .status()
        .configured()
        .put(db, &true)
        .await?;
    Ok(())
}
|
||||
|
||||
#[instrument(skip(ctx, db))]
|
||||
pub fn configure_rec<'a, Db: DbHandle>(
|
||||
ctx: &'a RpcContext,
|
||||
db: &'a mut Db,
|
||||
id: &'a PackageId,
|
||||
config: Option<Config>,
|
||||
timeout: &'a Option<Duration>,
|
||||
dry_run: bool,
|
||||
overrides: &'a mut BTreeMap<PackageId, Config>,
|
||||
breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
|
||||
) -> BoxFuture<'a, Result<(), Error>> {
|
||||
async move {
|
||||
crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.lock(db, LockType::Write)
|
||||
.await?;
|
||||
// fetch data from db
|
||||
let pkg_model = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(id)
|
||||
.and_then(|m| m.installed())
|
||||
.expect(db)
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::NotFound)?;
|
||||
let action = pkg_model
|
||||
.clone()
|
||||
.manifest()
|
||||
.config()
|
||||
.get(db, true)
|
||||
.await?
|
||||
.to_owned()
|
||||
.ok_or_else(|| Error::new(eyre!("{} has no config", id), crate::ErrorKind::NotFound))?;
|
||||
let version = pkg_model.clone().manifest().version().get(db, true).await?;
|
||||
let dependencies = pkg_model
|
||||
.clone()
|
||||
.manifest()
|
||||
.dependencies()
|
||||
.get(db, true)
|
||||
.await?;
|
||||
let volumes = pkg_model.clone().manifest().volumes().get(db, true).await?;
|
||||
let is_needs_config = !*pkg_model
|
||||
.clone()
|
||||
.status()
|
||||
.configured()
|
||||
.get(db, true)
|
||||
.await?;
|
||||
|
||||
// get current config and current spec
|
||||
let ConfigRes {
|
||||
config: old_config,
|
||||
spec,
|
||||
} = action.get(ctx, id, &*version, &*volumes).await?;
|
||||
|
||||
// determine new config to use
|
||||
let mut config = if let Some(config) = config.or_else(|| old_config.clone()) {
|
||||
config
|
||||
} else {
|
||||
spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)?
|
||||
};
|
||||
|
||||
let manifest = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(id)
|
||||
.and_then(|m| m.installed())
|
||||
.map::<_, Manifest>(|i| i.manifest())
|
||||
.expect(db)
|
||||
.await?
|
||||
.get(db, true)
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::NotFound)?;
|
||||
|
||||
spec.validate(&*manifest)?;
|
||||
spec.matches(&config)?; // check that new config matches spec
|
||||
spec.update(ctx, db, &*manifest, &*overrides, &mut config)
|
||||
.await?; // dereference pointers in the new config
|
||||
|
||||
// create backreferences to pointers
|
||||
let mut sys = pkg_model.clone().system_pointers().get_mut(db).await?;
|
||||
sys.truncate(0);
|
||||
let mut current_dependencies: BTreeMap<PackageId, CurrentDependencyInfo> = dependencies
|
||||
.0
|
||||
.iter()
|
||||
.filter_map(|(id, info)| {
|
||||
if info.requirement.required() {
|
||||
Some((id.clone(), CurrentDependencyInfo::default()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
for ptr in spec.pointers(&config)? {
|
||||
match ptr {
|
||||
ValueSpecPointer::Package(pkg_ptr) => {
|
||||
if let Some(current_dependency) =
|
||||
current_dependencies.get_mut(pkg_ptr.package_id())
|
||||
{
|
||||
current_dependency.pointers.push(pkg_ptr);
|
||||
} else {
|
||||
current_dependencies.insert(
|
||||
pkg_ptr.package_id().to_owned(),
|
||||
CurrentDependencyInfo {
|
||||
pointers: vec![pkg_ptr],
|
||||
health_checks: BTreeSet::new(),
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
ValueSpecPointer::System(s) => sys.push(s),
|
||||
}
|
||||
}
|
||||
sys.save(db).await?;
|
||||
|
||||
let signal = if !dry_run {
|
||||
// run config action
|
||||
let res = action
|
||||
.set(ctx, id, &*version, &*dependencies, &*volumes, &config)
|
||||
.await?;
|
||||
|
||||
// track dependencies with no pointers
|
||||
for (package_id, health_checks) in res.depends_on.into_iter() {
|
||||
if let Some(current_dependency) = current_dependencies.get_mut(&package_id) {
|
||||
current_dependency.health_checks.extend(health_checks);
|
||||
} else {
|
||||
current_dependencies.insert(
|
||||
package_id,
|
||||
CurrentDependencyInfo {
|
||||
pointers: Vec::new(),
|
||||
health_checks,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// track dependency health checks
|
||||
current_dependencies = current_dependencies
|
||||
.into_iter()
|
||||
.filter(|(dep_id, _)| {
|
||||
if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) {
|
||||
tracing::warn!("Illegal dependency specified: {}", dep_id);
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
res.signal
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// update dependencies
|
||||
let mut deps = pkg_model.clone().current_dependencies().get_mut(db).await?;
|
||||
remove_from_current_dependents_lists(db, id, deps.keys()).await?; // remove previous
|
||||
add_dependent_to_current_dependents_lists(db, id, ¤t_dependencies).await?; // add new
|
||||
current_dependencies.remove(id);
|
||||
*deps = current_dependencies.clone();
|
||||
deps.save(db).await?;
|
||||
let mut errs = pkg_model
|
||||
.clone()
|
||||
.status()
|
||||
.dependency_errors()
|
||||
.get_mut(db)
|
||||
.await?;
|
||||
*errs = DependencyErrors::init(ctx, db, &*manifest, ¤t_dependencies).await?;
|
||||
errs.save(db).await?;
|
||||
|
||||
// cache current config for dependents
|
||||
overrides.insert(id.clone(), config.clone());
|
||||
|
||||
// handle dependents
|
||||
let dependents = pkg_model.clone().current_dependents().get(db, true).await?;
|
||||
let prev = if is_needs_config { None } else { old_config }
|
||||
.map(Value::Object)
|
||||
.unwrap_or_default();
|
||||
let next = Value::Object(config.clone());
|
||||
for (dependent, dep_info) in dependents.iter().filter(|(dep_id, _)| dep_id != &id) {
|
||||
// check if config passes dependent check
|
||||
let dependent_model = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(dependent)
|
||||
.and_then(|pkg| pkg.installed())
|
||||
.expect(db)
|
||||
.await?;
|
||||
if let Some(cfg) = &*dependent_model
|
||||
.clone()
|
||||
.manifest()
|
||||
.dependencies()
|
||||
.idx_model(id)
|
||||
.expect(db)
|
||||
.await?
|
||||
.config()
|
||||
.get(db, true)
|
||||
.await?
|
||||
{
|
||||
let manifest = dependent_model.clone().manifest().get(db, true).await?;
|
||||
if let Err(error) = cfg
|
||||
.check(
|
||||
ctx,
|
||||
dependent,
|
||||
&manifest.version,
|
||||
&manifest.volumes,
|
||||
&config,
|
||||
)
|
||||
.await?
|
||||
{
|
||||
let dep_err = DependencyError::ConfigUnsatisfied { error };
|
||||
break_transitive(db, dependent, id, dep_err, breakages).await?;
|
||||
}
|
||||
|
||||
// handle backreferences
|
||||
for ptr in &dep_info.pointers {
|
||||
if let PackagePointerSpec::Config(cfg_ptr) = ptr {
|
||||
if cfg_ptr.select(&next) != cfg_ptr.select(&prev) {
|
||||
if let Err(e) = configure_rec(
|
||||
ctx, db, dependent, None, timeout, dry_run, overrides, breakages,
|
||||
)
|
||||
.await
|
||||
{
|
||||
if e.kind == crate::ErrorKind::ConfigRulesViolation {
|
||||
break_transitive(
|
||||
db,
|
||||
dependent,
|
||||
id,
|
||||
DependencyError::ConfigUnsatisfied {
|
||||
error: format!("{}", e),
|
||||
},
|
||||
breakages,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
heal_all_dependents_transitive(ctx, db, id).await?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(signal) = signal {
|
||||
match ctx.managers.get(&(id.clone(), version.clone())).await {
|
||||
None => {
|
||||
// in theory this should never happen, which indicates this function should be moved behind the
|
||||
// Manager interface
|
||||
return Err(Error::new(
|
||||
eyre!("Manager Not Found for package being configured"),
|
||||
crate::ErrorKind::Incoherent,
|
||||
));
|
||||
}
|
||||
Some(m) => {
|
||||
m.signal(&signal).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
@@ -1,350 +0,0 @@
|
||||
use std::collections::{BTreeMap, VecDeque};
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||
use std::ops::Deref;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use bollard::Docker;
|
||||
use color_eyre::eyre::eyre;
|
||||
use patch_db::json_ptr::JsonPointer;
|
||||
use patch_db::{DbHandle, LockType, PatchDb, Revision};
|
||||
use reqwest::Url;
|
||||
use rpc_toolkit::url::Host;
|
||||
use rpc_toolkit::Context;
|
||||
use serde::Deserialize;
|
||||
use sqlx::sqlite::SqliteConnectOptions;
|
||||
use sqlx::SqlitePool;
|
||||
use tokio::fs::File;
|
||||
use tokio::process::Command;
|
||||
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
|
||||
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
|
||||
use crate::hostname::{derive_hostname, derive_id, get_product_key};
|
||||
use crate::install::cleanup::{cleanup_failed, uninstall};
|
||||
use crate::manager::ManagerMap;
|
||||
use crate::middleware::auth::HashSessionToken;
|
||||
use crate::net::tor::os_key;
|
||||
use crate::net::wifi::WpaCli;
|
||||
use crate::net::NetController;
|
||||
use crate::notifications::NotificationManager;
|
||||
use crate::setup::password_hash;
|
||||
use crate::shutdown::Shutdown;
|
||||
use crate::status::{MainStatus, Status};
|
||||
use crate::util::io::from_yaml_async_reader;
|
||||
use crate::util::{AsyncFileExt, Invoke};
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
/// On-disk configuration for the main RPC daemon, deserialized from a
/// kebab-case YAML file (see `RpcContextConfig::load`). Every field is
/// optional; defaults are applied at the point of use.
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig {
    // JSON-RPC listener address (defaults to 127.0.0.1:5959 in `RpcContext::init`).
    pub bind_rpc: Option<SocketAddr>,
    // WebSocket listener address (defaults to 127.0.0.1:5960).
    pub bind_ws: Option<SocketAddr>,
    // Static-file server address (defaults to 127.0.0.1:5961).
    pub bind_static: Option<SocketAddr>,
    // Tor control port (defaults to 127.0.0.1:9051).
    pub tor_control: Option<SocketAddr>,
    // Tor SOCKS5 proxy (defaults to 127.0.0.1:9050).
    pub tor_socks: Option<SocketAddr>,
    // Capacity of the in-memory patch-db revision cache (defaults to 512).
    pub revision_cache_size: Option<usize>,
    // Data directory root (defaults to /embassy-data, see `datadir()`).
    pub datadir: Option<PathBuf>,
    // Presumably a remote log sink URL — not consumed in this view; verify against callers.
    pub log_server: Option<Url>,
}
|
||||
impl RpcContextConfig {
    /// Load the config from `path`, falling back to `crate::CONFIG_PATH`
    /// when no path is supplied, and to `Self::default()` when the file
    /// does not exist.
    pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
        let cfg_path = path
            .as_ref()
            .map(|p| p.as_ref())
            .unwrap_or(Path::new(crate::CONFIG_PATH));
        if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
        {
            from_yaml_async_reader(f).await
        } else {
            Ok(Self::default())
        }
    }
    /// Data directory root, defaulting to `/embassy-data`.
    pub fn datadir(&self) -> &Path {
        self.datadir
            .as_deref()
            .unwrap_or_else(|| Path::new("/embassy-data"))
    }
    /// Open (and initialize on first boot) the main patch-db at
    /// `<datadir>/main/embassy.db`. The server id and hostname are derived
    /// from `product_key`; the OS key and password hash come from
    /// `secret_store` and seed the initial `Database` document.
    pub async fn db(&self, secret_store: &SqlitePool, product_key: &str) -> Result<PatchDb, Error> {
        let sid = derive_id(product_key);
        let hostname = derive_hostname(&sid);
        let db_path = self.datadir().join("main").join("embassy.db");
        let db = PatchDb::open(&db_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
        // A missing root pointer means the db has never been initialized.
        if !db.exists(&<JsonPointer>::default()).await? {
            db.put(
                &<JsonPointer>::default(),
                &Database::init(
                    sid,
                    &hostname,
                    &os_key(&mut secret_store.acquire().await?).await?,
                    password_hash(&mut secret_store.acquire().await?).await?,
                ),
                None,
            )
            .await?;
        }
        Ok(db)
    }
    /// Open the sqlite secret store at `<datadir>/main/secrets.db`,
    /// creating it if missing and running any pending migrations.
    #[instrument]
    pub async fn secret_store(&self) -> Result<SqlitePool, Error> {
        let secret_store = SqlitePool::connect_with(
            SqliteConnectOptions::new()
                .filename(self.datadir().join("main").join("secrets.db"))
                .create_if_missing(true)
                // Wait up to 30s on a locked database instead of failing fast.
                .busy_timeout(Duration::from_secs(30)),
        )
        .await?;
        sqlx::migrate!()
            .run(&secret_store)
            .await
            .with_kind(crate::ErrorKind::Database)?;
        Ok(secret_store)
    }
}
|
||||
|
||||
/// Shared daemon-wide state, held behind the `Arc` inside `RpcContext`.
/// Constructed once in `RpcContext::init`.
pub struct RpcContextSeed {
    // Set by `RpcContext::shutdown`; checked in `Deref`/`Drop` under the
    // "unstable" feature to catch use-after-shutdown.
    is_closed: AtomicBool,
    pub bind_rpc: SocketAddr,
    pub bind_ws: SocketAddr,
    pub bind_static: SocketAddr,
    pub datadir: PathBuf,
    pub disk_guid: Arc<String>,
    pub db: PatchDb,
    pub secret_store: SqlitePool,
    pub docker: Docker,
    pub net_controller: NetController,
    pub managers: ManagerMap,
    // Max entries retained in `revision_cache`.
    pub revision_cache_size: usize,
    pub revision_cache: RwLock<VecDeque<Arc<Revision>>>,
    pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
    // Broadcast used to signal a system shutdown/restart to listeners.
    pub shutdown: broadcast::Sender<Option<Shutdown>>,
    pub tor_socks: SocketAddr,
    pub notification_manager: NotificationManager,
    // Per-session kill channels for authed websockets; see
    // `subscribe_to_session_kill` in the db module.
    pub open_authed_websockets: Mutex<BTreeMap<HashSessionToken, Vec<oneshot::Sender<()>>>>,
    pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
    pub wifi_manager: Arc<RwLock<WpaCli>>,
}
|
||||
|
||||
/// Cheaply clonable handle to the daemon-wide state (`RpcContextSeed`).
#[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
    /// Bring up the full runtime context: load config, open the secret
    /// store and patch-db, connect to docker, start the net controller,
    /// clean up state left by a previous run, then initialize package
    /// managers. Order matters: cleanup runs before managers start.
    #[instrument(skip(cfg_path))]
    pub async fn init<P: AsRef<Path>>(
        cfg_path: Option<P>,
        disk_guid: Arc<String>,
    ) -> Result<Self, Error> {
        let base = RpcContextConfig::load(cfg_path).await?;
        tracing::info!("Loaded Config");
        // Default Tor SOCKS proxy: 127.0.0.1:9050.
        let tor_proxy = base.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
            Ipv4Addr::new(127, 0, 0, 1),
            9050,
        )));
        let (shutdown, _) = tokio::sync::broadcast::channel(1);
        let secret_store = base.secret_store().await?;
        tracing::info!("Opened Sqlite DB");
        let db = base.db(&secret_store, &get_product_key().await?).await?;
        tracing::info!("Opened PatchDB");
        let docker = Docker::connect_with_unix_defaults()?;
        tracing::info!("Connected to Docker");
        let net_controller = NetController::init(
            ([127, 0, 0, 1], 80).into(),
            crate::net::tor::os_key(&mut secret_store.acquire().await?).await?,
            base.tor_control
                .unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
            secret_store.clone(),
            None,
        )
        .await?;
        tracing::info!("Initialized Net Controller");
        let managers = ManagerMap::default();
        let metrics_cache = RwLock::new(None);
        let notification_manager = NotificationManager::new(secret_store.clone());
        tracing::info!("Initialized Notification Manager");
        let seed = Arc::new(RpcContextSeed {
            is_closed: AtomicBool::new(false),
            bind_rpc: base.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
            bind_ws: base.bind_ws.unwrap_or(([127, 0, 0, 1], 5960).into()),
            bind_static: base.bind_static.unwrap_or(([127, 0, 0, 1], 5961).into()),
            datadir: base.datadir().to_path_buf(),
            disk_guid,
            db,
            secret_store,
            docker,
            net_controller,
            managers,
            revision_cache_size: base.revision_cache_size.unwrap_or(512),
            revision_cache: RwLock::new(VecDeque::new()),
            metrics_cache,
            shutdown,
            tor_socks: tor_proxy,
            notification_manager,
            open_authed_websockets: Mutex::new(BTreeMap::new()),
            rpc_stream_continuations: Mutex::new(BTreeMap::new()),
            wifi_manager: Arc::new(RwLock::new(WpaCli::init("wlan0".to_string()))),
        });

        let res = Self(seed);
        // Recover packages stuck mid-install/-remove before managers attach.
        res.cleanup().await?;
        tracing::info!("Cleaned up transient states");
        res.managers
            .init(
                &res,
                &mut res.db.handle(),
                &mut res.secret_store.acquire().await?,
            )
            .await?;
        tracing::info!("Initialized Package Managers");
        Ok(res)
    }
    /// Regenerate the main-UI nginx site config from the LAN/Tor addresses
    /// stored in the db, then reload nginx.
    #[instrument(skip(self, db))]
    pub async fn set_nginx_conf<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
        tokio::fs::write("/etc/nginx/sites-available/default", {
            let info = crate::db::DatabaseModel::new()
                .server_info()
                .get(db, true)
                .await?;
            format!(
                include_str!("../nginx/main-ui.conf.template"),
                lan_hostname = info.lan_address.host_str().unwrap(),
                tor_hostname = info.tor_address.host_str().unwrap(),
            )
        })
        .await
        .with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                "/etc/nginx/sites-available/default",
            )
        })?;
        Command::new("systemctl")
            .arg("reload")
            .arg("nginx")
            .invoke(crate::ErrorKind::Nginx)
            .await?;
        Ok(())
    }
    /// Tear down managers and the secret store, then mark the context
    /// closed (consumed by the unstable-feature checks in Deref/Drop).
    #[instrument(skip(self))]
    pub async fn shutdown(self) -> Result<(), Error> {
        self.managers.empty().await?;
        self.secret_store.close().await;
        self.is_closed.store(true, Ordering::SeqCst);
        Ok(())
    }
    /// Repair package state left by an unclean exit: abort half-finished
    /// installs/updates/restores, finish pending removals, and reset
    /// running/backing-up packages to Starting/Stopped as appropriate.
    /// Failures are logged per package and do not abort the loop.
    #[instrument(skip(self))]
    pub async fn cleanup(&self) -> Result<(), Error> {
        let mut db = self.db.handle();
        crate::db::DatabaseModel::new()
            .package_data()
            .lock(&mut db, LockType::Write)
            .await?;
        for package_id in crate::db::DatabaseModel::new()
            .package_data()
            .keys(&mut db, true)
            .await?
        {
            if let Err(e) = async {
                let mut pde = crate::db::DatabaseModel::new()
                    .package_data()
                    .idx_model(&package_id)
                    .get_mut(&mut db)
                    .await?;
                match pde.as_mut().ok_or_else(|| {
                    Error::new(
                        eyre!("Node does not exist: /package-data/{}", package_id),
                        crate::ErrorKind::Database,
                    )
                })? {
                    // Interrupted transitions cannot be resumed — roll them back.
                    PackageDataEntry::Installing { .. }
                    | PackageDataEntry::Restoring { .. }
                    | PackageDataEntry::Updating { .. } => {
                        cleanup_failed(self, &mut db, &package_id).await?;
                    }
                    // An interrupted removal is completed, not rolled back.
                    PackageDataEntry::Removing { .. } => {
                        uninstall(
                            self,
                            &mut db,
                            &mut self.secret_store.acquire().await?,
                            &package_id,
                        )
                        .await?;
                    }
                    PackageDataEntry::Installed {
                        installed:
                            InstalledPackageDataEntry {
                                status: Status { main, .. },
                                ..
                            },
                        ..
                    } => {
                        let new_main = match std::mem::replace(
                            main,
                            MainStatus::Stopped, /* placeholder */
                        ) {
                            // A backup that had `started` set was running before;
                            // bring it back up, otherwise leave it stopped.
                            MainStatus::BackingUp { started, .. } => {
                                if let Some(_) = started {
                                    MainStatus::Starting
                                } else {
                                    MainStatus::Stopped
                                }
                            }
                            // "Running" from before the restart must be restarted.
                            MainStatus::Running { .. } => MainStatus::Starting,
                            a => a,
                        };
                        *main = new_main;

                        pde.save(&mut db).await?;
                    }
                }
                Ok::<_, Error>(())
            }
            .await
            {
                tracing::error!("Failed to clean up package {}: {}", package_id, e);
                tracing::debug!("{:?}", e);
            }
        }
        Ok(())
    }
}
|
||||
impl Context for RpcContext {
|
||||
fn host(&self) -> Host<&str> {
|
||||
match self.0.bind_rpc.ip() {
|
||||
IpAddr::V4(a) => Host::Ipv4(a),
|
||||
IpAddr::V6(a) => Host::Ipv6(a),
|
||||
}
|
||||
}
|
||||
fn port(&self) -> u16 {
|
||||
self.0.bind_rpc.port()
|
||||
}
|
||||
}
|
||||
impl Deref for RpcContext {
    type Target = RpcContextSeed;
    fn deref(&self) -> &Self::Target {
        // Debug aid, compiled only with the "unstable" feature: panic with a
        // span trace if the context is used after `shutdown()` set `is_closed`.
        #[cfg(feature = "unstable")]
        if self.0.is_closed.load(Ordering::SeqCst) {
            panic!(
                "RpcContext used after shutdown! {}",
                tracing_error::SpanTrace::capture()
            );
        }
        &*self.0
    }
}
|
||||
impl Drop for RpcContext {
    fn drop(&mut self) {
        // Debug aid, compiled only with the "unstable" feature: after
        // shutdown, log how many clones of the seed are still alive
        // (minus the one being dropped) to help find leaked handles.
        #[cfg(feature = "unstable")]
        if self.0.is_closed.load(Ordering::SeqCst) {
            tracing::info!(
                "RpcContext dropped. {} left.",
                Arc::strong_count(&self.0) - 1
            );
        }
    }
}
|
||||
@@ -1,67 +0,0 @@
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use clap::ArgMatches;
|
||||
use color_eyre::eyre::eyre;
|
||||
use rpc_toolkit::Context;
|
||||
use serde::Deserialize;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
/// Configuration for the SDK (CLI) context, deserialized from the same
/// kebab-case YAML config file used by the daemon.
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SdkContextConfig {
    // Location of the developer signing key; defaults to `.developer_key`
    // next to the config file (see `SdkContext::init`).
    pub developer_key_path: Option<PathBuf>,
}
|
||||
|
||||
/// Resolved, immutable state behind `SdkContext`.
#[derive(Debug)]
pub struct SdkContextSeed {
    // Resolved path to the developer signing key (defaults applied in init).
    pub developer_key_path: PathBuf,
}
|
||||
|
||||
/// Cheaply clonable context for `embassy-sdk` CLI commands.
#[derive(Debug, Clone)]
pub struct SdkContext(Arc<SdkContextSeed>);
impl SdkContext {
    /// Build the context from CLI arguments, reading the YAML config file
    /// if it exists. The developer key path defaults to `.developer_key`
    /// in the config file's directory.
    /// BLOCKING
    #[instrument(skip(matches))]
    pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
        let cfg_path = Path::new(matches.value_of("config").unwrap_or(crate::CONFIG_PATH));
        let base = if cfg_path.exists() {
            serde_yaml::from_reader(
                File::open(cfg_path)
                    .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?,
            )
            .with_kind(crate::ErrorKind::Deserialization)?
        } else {
            SdkContextConfig::default()
        };
        Ok(SdkContext(Arc::new(SdkContextSeed {
            developer_key_path: base.developer_key_path.unwrap_or_else(|| {
                cfg_path
                    .parent()
                    .unwrap_or(Path::new("/"))
                    .join(".developer_key")
            }),
        })))
    }
    /// Load the ed25519 developer keypair from disk. Errors with
    /// `Uninitialized` if the key file does not exist.
    /// BLOCKING
    #[instrument]
    pub fn developer_key(&self) -> Result<ed25519_dalek::Keypair, Error> {
        if !self.developer_key_path.exists() {
            return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized));
        }
        // The file must contain exactly KEYPAIR_LENGTH raw bytes.
        let mut keypair_buf = [0; ed25519_dalek::KEYPAIR_LENGTH];
        File::open(&self.developer_key_path)?.read_exact(&mut keypair_buf)?;
        Ok(ed25519_dalek::Keypair::from_bytes(&keypair_buf)?)
    }
}
|
||||
impl std::ops::Deref for SdkContext {
|
||||
type Target = SdkContextSeed;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&*self.0
|
||||
}
|
||||
}
|
||||
// Uses the trait's default host/port behavior — nothing overridden here.
impl Context for SdkContext {}
|
||||
@@ -1,169 +0,0 @@
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::ops::Deref;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use patch_db::json_ptr::JsonPointer;
|
||||
use patch_db::PatchDb;
|
||||
use rpc_toolkit::yajrc::RpcError;
|
||||
use rpc_toolkit::Context;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::sqlite::SqliteConnectOptions;
|
||||
use sqlx::SqlitePool;
|
||||
use tokio::fs::File;
|
||||
use tokio::sync::broadcast::Sender;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::instrument;
|
||||
use url::Host;
|
||||
|
||||
use crate::db::model::Database;
|
||||
use crate::hostname::{derive_hostname, derive_id, get_product_key};
|
||||
use crate::net::tor::os_key;
|
||||
use crate::setup::{password_hash, RecoveryStatus};
|
||||
use crate::util::io::from_yaml_async_reader;
|
||||
use crate::util::AsyncFileExt;
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
/// Outcome of a completed setup, handed back to the setup UI
/// (serialized kebab-case over RPC).
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetupResult {
    pub tor_address: String,
    pub lan_address: String,
    // Root CA — presumably PEM-encoded; not constructed in this view, verify at the producer.
    pub root_ca: String,
}
|
||||
|
||||
/// Configuration for the setup daemon, deserialized from the same
/// kebab-case YAML config file. Both fields optional.
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetupContextConfig {
    // RPC listener (defaults to 127.0.0.1:5959 in `SetupContext::init`).
    pub bind_rpc: Option<SocketAddr>,
    // Data directory root (defaults to /embassy-data, see `datadir()`).
    pub datadir: Option<PathBuf>,
}
|
||||
impl SetupContextConfig {
    /// Load the config from `path`, falling back to `crate::CONFIG_PATH`
    /// when no path is supplied, and to `Self::default()` when the file
    /// does not exist. Mirrors `RpcContextConfig::load`.
    #[instrument(skip(path))]
    pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
        let cfg_path = path
            .as_ref()
            .map(|p| p.as_ref())
            .unwrap_or(Path::new(crate::CONFIG_PATH));
        if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
        {
            from_yaml_async_reader(f).await
        } else {
            Ok(Self::default())
        }
    }
    /// Data directory root, defaulting to `/embassy-data`.
    pub fn datadir(&self) -> &Path {
        self.datadir
            .as_deref()
            .unwrap_or_else(|| Path::new("/embassy-data"))
    }
}
|
||||
|
||||
/// Shared state for the setup daemon, held behind the `Arc` in `SetupContext`.
pub struct SetupContextSeed {
    // Config file path actually used, if any (kept for later reference).
    pub config_path: Option<PathBuf>,
    pub bind_rpc: SocketAddr,
    // Broadcast used to signal the setup process to shut down.
    pub shutdown: Sender<()>,
    pub datadir: PathBuf,
    // Drive selected for v2 data migration — semantics presumed from the name; verify at callers.
    pub selected_v2_drive: RwLock<Option<PathBuf>>,
    // Memoized product key; populated lazily by `product_key()`.
    pub cached_product_key: RwLock<Option<Arc<String>>>,
    // Progress/outcome of an in-flight recovery, if one is running.
    pub recovery_status: RwLock<Option<Result<RecoveryStatus, RpcError>>>,
    pub setup_result: RwLock<Option<(Arc<String>, SetupResult)>>,
}
|
||||
|
||||
/// Cheaply clonable handle to the setup daemon's shared state.
#[derive(Clone)]
pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext {
    /// Build the setup context from the optional config file path,
    /// applying defaults for anything unspecified.
    #[instrument(skip(path))]
    pub async fn init<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
        let cfg = SetupContextConfig::load(path.as_ref()).await?;
        let (shutdown, _) = tokio::sync::broadcast::channel(1);
        let datadir = cfg.datadir().to_owned();
        Ok(Self(Arc::new(SetupContextSeed {
            config_path: path.as_ref().map(|p| p.as_ref().to_owned()),
            bind_rpc: cfg.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
            shutdown,
            datadir,
            selected_v2_drive: RwLock::new(None),
            cached_product_key: RwLock::new(None),
            recovery_status: RwLock::new(None),
            setup_result: RwLock::new(None),
        })))
    }
    /// Open (and initialize on first boot) the main patch-db at
    /// `<datadir>/main/embassy.db`. Mirrors `RpcContextConfig::db`, except
    /// the product key is obtained via `self.product_key()`.
    #[instrument(skip(self))]
    pub async fn db(&self, secret_store: &SqlitePool) -> Result<PatchDb, Error> {
        let db_path = self.datadir.join("main").join("embassy.db");
        let db = PatchDb::open(&db_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
        // A missing root pointer means the db has never been initialized.
        if !db.exists(&<JsonPointer>::default()).await? {
            let pkey = self.product_key().await?;
            let sid = derive_id(&*pkey);
            let hostname = derive_hostname(&sid);
            db.put(
                &<JsonPointer>::default(),
                &Database::init(
                    sid,
                    &hostname,
                    &os_key(&mut secret_store.acquire().await?).await?,
                    password_hash(&mut secret_store.acquire().await?).await?,
                ),
                None,
            )
            .await?;
        }
        Ok(db)
    }
    /// Open the sqlite secret store at `<datadir>/main/secrets.db`,
    /// creating it if missing and running pending migrations.
    #[instrument(skip(self))]
    pub async fn secret_store(&self) -> Result<SqlitePool, Error> {
        let secret_store = SqlitePool::connect_with(
            SqliteConnectOptions::new()
                .filename(self.datadir.join("main").join("secrets.db"))
                .create_if_missing(true)
                .busy_timeout(Duration::from_secs(30)),
        )
        .await?;
        sqlx::migrate!()
            .run(&secret_store)
            .await
            .with_kind(crate::ErrorKind::Database)?;
        Ok(secret_store)
    }
    /// Return the product key, reading it from the system at most once and
    /// caching the result in `cached_product_key`.
    #[instrument(skip(self))]
    pub async fn product_key(&self) -> Result<Arc<String>, Error> {
        Ok(
            // The read guard is scoped to this block and dropped before the
            // write path below, avoiding holding read + write locks together.
            if let Some(k) = {
                let guard = self.cached_product_key.read().await;
                let res = guard.clone();
                drop(guard);
                res
            } {
                k
            } else {
                let k = Arc::new(get_product_key().await?);
                *self.cached_product_key.write().await = Some(k.clone());
                k
            },
        )
    }
}
|
||||
|
||||
impl Context for SetupContext {
|
||||
fn host(&self) -> Host<&str> {
|
||||
match self.0.bind_rpc.ip() {
|
||||
IpAddr::V4(a) => Host::Ipv4(a),
|
||||
IpAddr::V6(a) => Host::Ipv6(a),
|
||||
}
|
||||
}
|
||||
fn port(&self) -> u16 {
|
||||
self.0.bind_rpc.port()
|
||||
}
|
||||
}
|
||||
impl Deref for SetupContext {
|
||||
type Target = SetupContextSeed;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&*self.0
|
||||
}
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use patch_db::{DbHandle, LockType};
|
||||
use rpc_toolkit::command;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::db::util::WithRevision;
|
||||
use crate::dependencies::{
|
||||
break_all_dependents_transitive, heal_all_dependents_transitive, BreakageRes, DependencyError,
|
||||
TaggedDependencyError,
|
||||
};
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::status::MainStatus;
|
||||
use crate::util::display_none;
|
||||
use crate::util::serde::display_serializable;
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
/// RPC command: start an installed package. Marks it `Starting` in the db,
/// heals its dependents, commits, then asks the package's manager to
/// synchronize toward the new desired state.
#[command(display(display_none))]
#[instrument(skip(ctx))]
pub async fn start(
    #[context] ctx: RpcContext,
    #[arg] id: PackageId,
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    // Write-lock the whole package-data subtree for the transaction.
    crate::db::DatabaseModel::new()
        .package_data()
        .lock(&mut tx, LockType::Write)
        .await?;
    let installed = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .and_then(|pkg| pkg.installed())
        .expect(&mut tx)
        .await
        .with_ctx(|_| {
            (
                crate::ErrorKind::NotFound,
                format!("{} is not installed", id),
            )
        })?;
    installed.lock(&mut tx, LockType::Read).await?;
    // Version is needed after commit to look up the manager.
    let version = installed
        .clone()
        .manifest()
        .version()
        .get(&mut tx, true)
        .await?
        .to_owned();
    let mut status = installed.status().main().get_mut(&mut tx).await?;

    *status = MainStatus::Starting;
    status.save(&mut tx).await?;
    // Dependents whose errors were caused by this package being down can recover.
    heal_all_dependents_transitive(&ctx, &mut tx, &id).await?;

    let revision = tx.commit(None).await?;

    // Only after the db reflects "Starting" does the manager act on it.
    ctx.managers
        .get(&(id, version))
        .await
        .ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
        .synchronize()
        .await;

    Ok(WithRevision {
        revision,
        response: (),
    })
}
|
||||
|
||||
/// Shared logic for `stop` and its dry run: mark the package `Stopping`
/// and record/break all transitive dependents into `breakages`.
#[instrument(skip(db))]
async fn stop_common<Db: DbHandle>(
    db: &mut Db,
    id: &PackageId,
    breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
) -> Result<(), Error> {
    let mut tx = db.begin().await?;
    let mut status = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .and_then(|pkg| pkg.installed())
        .expect(&mut tx)
        .await
        .with_ctx(|_| {
            (
                crate::ErrorKind::NotFound,
                format!("{} is not installed", id),
            )
        })?
        .status()
        .main()
        .get_mut(&mut tx)
        .await?;

    *status = MainStatus::Stopping;
    status.save(&mut tx).await?;
    // Persist the status change into the outer handle before walking
    // dependents, which is done against `db` rather than the inner tx.
    tx.save().await?;
    break_all_dependents_transitive(db, &id, DependencyError::NotRunning, breakages).await?;

    Ok(())
}
|
||||
|
||||
/// RPC command: stop a package. The parent command only captures the id;
/// the work happens in `stop_impl` (or `stop_dry` for the dry run).
#[command(subcommands(self(stop_impl(async)), stop_dry), display(display_none))]
pub fn stop(#[arg] id: PackageId) -> Result<PackageId, Error> {
    Ok(id)
}
|
||||
|
||||
/// Dry-run variant of `stop`: compute which dependents would break,
/// then roll the transaction back so nothing is persisted.
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn stop_dry(
    #[context] ctx: RpcContext,
    #[parent_data] id: PackageId,
) -> Result<BreakageRes, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;

    let mut breakages = BTreeMap::new();
    stop_common(&mut tx, &id, &mut breakages).await?;

    // Discard all changes — this is a simulation only.
    tx.abort().await?;

    Ok(BreakageRes(breakages))
}
|
||||
|
||||
/// Real implementation of `stop`: apply the status change and dependency
/// breakage, then commit, returning the resulting db revision.
#[instrument(skip(ctx))]
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;

    // Breakages are computed but discarded here (unlike `stop_dry`).
    stop_common(&mut tx, &id, &mut BTreeMap::new()).await?;

    Ok(WithRevision {
        revision: tx.commit(None).await?,
        response: (),
    })
}
|
||||
@@ -1,53 +0,0 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use futures::future::BoxFuture;
|
||||
use http::{Request, Response};
|
||||
use hyper::Body;
|
||||
use rand::RngCore;
|
||||
|
||||
/// Opaque request identifier: 64 characters of unpadded RFC 4648 base32
/// (see `RequestGuid::new` / `RequestGuid::from`).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
pub struct RequestGuid<T: AsRef<str> = String>(T);
|
||||
impl RequestGuid {
|
||||
pub fn new() -> Self {
|
||||
let mut buf = [0; 40];
|
||||
rand::thread_rng().fill_bytes(&mut buf);
|
||||
RequestGuid(base32::encode(
|
||||
base32::Alphabet::RFC4648 { padding: false },
|
||||
&buf,
|
||||
))
|
||||
}
|
||||
|
||||
pub fn from(r: &str) -> Option<RequestGuid> {
|
||||
if r.len() != 64 {
|
||||
return None;
|
||||
}
|
||||
for c in r.chars() {
|
||||
if !(c >= 'A' && c <= 'Z' || c >= '2' && c <= '7') {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
Some(RequestGuid(r.to_owned()))
|
||||
}
|
||||
}
|
||||
// NOTE(review): this test only prints the round-trip result and never
// asserts `is_some()` — it passes even if parsing fails; consider tightening.
#[test]
fn parse_guid() {
    println!(
        "{:?}",
        RequestGuid::from(&format!("{}", RequestGuid::new()))
    )
}
|
||||
|
||||
impl<T: AsRef<str>> std::fmt::Display for RequestGuid<T> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.0.as_ref().fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
/// A deferred HTTP handler registered under a `RequestGuid`, to be invoked
/// when the follow-up request arrives.
pub struct RpcContinuation {
    // Creation time — presumably used to expire stale continuations; not visible here, verify.
    pub created_at: Instant,
    // One-shot handler consuming the follow-up request.
    pub handler: Box<
        dyn FnOnce(Request<Body>) -> BoxFuture<'static, Result<Response<Body>, crate::Error>>
            + Send
            + Sync,
    >,
}
|
||||
@@ -1,279 +0,0 @@
|
||||
pub mod model;
|
||||
pub mod package;
|
||||
pub mod util;
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::future::Future;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::{FutureExt, SinkExt, StreamExt};
|
||||
use patch_db::json_ptr::JsonPointer;
|
||||
use patch_db::{Dump, Revision};
|
||||
use rpc_toolkit::command;
|
||||
use rpc_toolkit::hyper::upgrade::Upgraded;
|
||||
use rpc_toolkit::hyper::{Body, Error as HyperError, Request, Response};
|
||||
use rpc_toolkit::yajrc::{GenericRpcMethod, RpcError, RpcResponse};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use tokio::sync::{broadcast, oneshot};
|
||||
use tokio::task::JoinError;
|
||||
use tokio_tungstenite::tungstenite::Message;
|
||||
use tokio_tungstenite::WebSocketStream;
|
||||
use tracing::instrument;
|
||||
|
||||
pub use self::model::DatabaseModel;
|
||||
use self::util::WithRevision;
|
||||
use crate::context::RpcContext;
|
||||
use crate::middleware::auth::{HasValidSession, HashSessionToken};
|
||||
use crate::util::serde::{display_serializable, IoFormat};
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
/// Serve a patch-db sync WebSocket. The client's first text frame must be a
/// JSON string holding its cookie header; the "session" cookie is validated
/// before any data flows. On success the initial dump is sent, then
/// revisions stream until the socket closes or the session is killed.
#[instrument(skip(ctx, ws_fut))]
async fn ws_handler<
    WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
>(
    ctx: RpcContext,
    ws_fut: WSFut,
) -> Result<(), Error> {
    // Snapshot + subscription are taken together so no revision is missed
    // between the dump and the streaming phase.
    let (dump, sub) = ctx.db.dump_and_sub().await;
    let mut stream = ws_fut
        .await
        .with_kind(crate::ErrorKind::Network)?
        .with_kind(crate::ErrorKind::Unknown)?;

    let (has_valid_session, token) = loop {
        // Non-text frames are ignored; keep reading until a text frame arrives.
        if let Some(Message::Text(cookie)) = stream
            .next()
            .await
            .transpose()
            .with_kind(crate::ErrorKind::Network)?
        {
            // The frame is a JSON-encoded string containing the raw cookie header.
            let cookie_str = serde_json::from_str::<Cow<str>>(&cookie)
                .with_kind(crate::ErrorKind::Deserialization)?;

            let id = basic_cookies::Cookie::parse(&cookie_str)
                .with_kind(crate::ErrorKind::Authorization)?
                .into_iter()
                .find(|c| c.get_name() == "session")
                .ok_or_else(|| {
                    Error::new(eyre!("UNAUTHORIZED"), crate::ErrorKind::Authorization)
                })?;
            let authenticated_session = HashSessionToken::from_cookie(&id);
            match HasValidSession::from_session(&authenticated_session, &ctx).await {
                Err(e) => {
                    // Report the auth failure as an RPC error frame, then
                    // close cleanly (Ok) — the client is simply unauthorized.
                    stream
                        .send(Message::Text(
                            serde_json::to_string(
                                &RpcResponse::<GenericRpcMethod<String>>::from_result(Err::<
                                    _,
                                    RpcError,
                                >(
                                    e.into()
                                )),
                            )
                            .with_kind(crate::ErrorKind::Serialization)?,
                        ))
                        .await
                        .with_kind(crate::ErrorKind::Network)?;
                    return Ok(());
                }
                Ok(has_validation) => break (has_validation, authenticated_session),
            }
        }
    };
    // Register for forced-logout notification before any data is sent.
    let kill = subscribe_to_session_kill(&ctx, token).await;
    send_dump(has_valid_session, &mut stream, dump).await?;

    deal_with_messages(has_valid_session, kill, sub, stream).await?;
    Ok(())
}
|
||||
|
||||
async fn subscribe_to_session_kill(
|
||||
ctx: &RpcContext,
|
||||
token: HashSessionToken,
|
||||
) -> oneshot::Receiver<()> {
|
||||
let (send, recv) = oneshot::channel();
|
||||
let mut guard = ctx.open_authed_websockets.lock().await;
|
||||
if !guard.contains_key(&token) {
|
||||
guard.insert(token, vec![send]);
|
||||
} else {
|
||||
guard.get_mut(&token).unwrap().push(send);
|
||||
}
|
||||
recv
|
||||
}
|
||||
|
||||
#[instrument(skip(_has_valid_authentication, kill, sub, stream))]
/// Main event loop for an authenticated subscription socket.
///
/// Races four events each iteration: session kill, a new db revision to
/// forward, an inbound client frame, and a 10-second keepalive timer.
/// `_has_valid_authentication` is a type-level proof of auth; it is never
/// read — holding it is the point.
async fn deal_with_messages(
    _has_valid_authentication: HasValidSession,
    mut kill: oneshot::Receiver<()>,
    mut sub: broadcast::Receiver<Arc<Revision>>,
    mut stream: WebSocketStream<Upgraded>,
) -> Result<(), Error> {
    loop {
        futures::select! {
            _ = (&mut kill).fuse() => {
                // Session terminated elsewhere (e.g. logout): stop serving.
                tracing::info!("Closing WebSocket: Reason: Session Terminated");
                return Ok(())
            }
            new_rev = sub.recv().fuse() => {
                // A broadcast lag error here (receiver fell behind) is fatal
                // for this socket — mapped to a Database error.
                let rev = new_rev.with_kind(crate::ErrorKind::Database)?;
                // Forward the revision wrapped as a successful JSON-RPC response.
                stream
                    .send(Message::Text(
                        serde_json::to_string(
                            &RpcResponse::<GenericRpcMethod<String>>::from_result(Ok::<_, RpcError>(
                                serde_json::to_value(&rev).with_kind(crate::ErrorKind::Serialization)?,
                            )),
                        )
                        .with_kind(crate::ErrorKind::Serialization)?,
                    ))
                    .await
                    .with_kind(crate::ErrorKind::Network)?;
            }
            message = stream.next().fuse() => {
                let message = message.transpose().with_kind(crate::ErrorKind::Network)?;
                match message {
                    Some(Message::Ping(a)) => {
                        // Echo the ping payload back per the WebSocket protocol.
                        stream
                            .send(Message::Pong(a))
                            .await
                            .with_kind(crate::ErrorKind::Network)?;
                    }
                    Some(Message::Close(frame)) => {
                        // Client-initiated close; log the reason if provided.
                        if let Some(reason) = frame.as_ref() {
                            tracing::info!("Closing WebSocket: Reason: {} {}", reason.code, reason.reason);
                        } else {
                            tracing::info!("Closing WebSocket: Reason: Unknown");
                        }
                        return Ok(())
                    }
                    None => {
                        // Stream exhausted without a close frame.
                        tracing::info!("Closing WebSocket: Stream Finished");
                        return Ok(())
                    }
                    // Other frame types (text, binary, pong) are ignored.
                    _ => (),
                }
            }
            _ = tokio::time::sleep(Duration::from_secs(10)).fuse() => {
                // Idle keepalive: ping every 10s so proxies don't drop us.
                stream
                    .send(Message::Ping(Vec::new()))
                    .await
                    .with_kind(crate::ErrorKind::Network)?;
            }
        }
    }
}
|
||||
|
||||
async fn send_dump(
|
||||
_has_valid_authentication: HasValidSession,
|
||||
stream: &mut WebSocketStream<Upgraded>,
|
||||
dump: Dump,
|
||||
) -> Result<(), Error> {
|
||||
stream
|
||||
.send(Message::Text(
|
||||
serde_json::to_string(&RpcResponse::<GenericRpcMethod<String>>::from_result(Ok::<
|
||||
_,
|
||||
RpcError,
|
||||
>(
|
||||
serde_json::to_value(&dump).with_kind(crate::ErrorKind::Serialization)?,
|
||||
)))
|
||||
.with_kind(crate::ErrorKind::Serialization)?,
|
||||
))
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::Network)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<Body>, Error> {
|
||||
let (parts, body) = req.into_parts();
|
||||
let req = Request::from_parts(parts, body);
|
||||
let (res, ws_fut) = hyper_ws_listener::create_ws(req).with_kind(crate::ErrorKind::Network)?;
|
||||
if let Some(ws_fut) = ws_fut {
|
||||
tokio::task::spawn(async move {
|
||||
match ws_handler(ctx, ws_fut).await {
|
||||
Ok(()) => (),
|
||||
Err(e) => {
|
||||
tracing::error!("WebSocket Closed: {}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
#[command(subcommands(revisions, dump, put))]
/// Parent `db` CLI command: performs no work itself, only dispatches to
/// its subcommands (`revisions`, `dump`, `put`).
pub fn db() -> Result<(), RpcError> {
    Ok(())
}
|
||||
|
||||
#[derive(Deserialize, Serialize)]
#[serde(untagged)]
/// Response of the `db.revisions` command: either the incremental revision
/// list the client asked for, or — when the cache no longer reaches back far
/// enough — a full dump to resynchronize from. Untagged so clients receive
/// the bare array/object.
pub enum RevisionsRes {
    Revisions(Vec<Arc<Revision>>),
    Dump(Dump),
}
|
||||
|
||||
#[command(display(display_serializable))]
/// Returns all revisions newer than `since`, or a full dump when the
/// in-memory revision cache has already evicted that range.
pub async fn revisions(
    #[context] ctx: RpcContext,
    #[arg] since: u64,
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<RevisionsRes, RpcError> {
    let cache = ctx.revision_cache.read().await;
    // The cache can satisfy the request only if its oldest entry is at or
    // before the first revision the client is missing (since + 1).
    if cache
        .front()
        .map(|rev| rev.id <= since + 1)
        .unwrap_or(false)
    {
        Ok(RevisionsRes::Revisions(
            cache
                .iter()
                // Skip revisions the client already has.
                .skip_while(|rev| rev.id < since + 1)
                .cloned()
                .collect(),
        ))
    } else {
        // Release the read lock before the (potentially slow) full dump.
        drop(cache);
        Ok(RevisionsRes::Dump(ctx.db.dump().await))
    }
}
|
||||
|
||||
#[command(display(display_serializable))]
/// Returns a full dump of the current database state.
/// `format` only affects CLI display and is intentionally unused here.
pub async fn dump(
    #[context] ctx: RpcContext,
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<Dump, RpcError> {
    Ok(ctx.db.dump().await)
}
|
||||
|
||||
#[command(subcommands(ui))]
/// Parent `db put` command: dispatches to its only subcommand, `ui`.
pub fn put() -> Result<(), RpcError> {
    Ok(())
}
|
||||
|
||||
#[command(display(display_serializable))]
#[instrument(skip(ctx))]
/// Writes `value` at `pointer` inside the database's `/ui` subtree.
///
/// The caller-supplied pointer is prefixed with "/ui" so this command can
/// never modify any other part of the database.
pub async fn ui(
    #[context] ctx: RpcContext,
    #[arg] pointer: JsonPointer,
    #[arg] value: Value,
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<WithRevision<()>, Error> {
    let ptr = "/ui"
        .parse::<JsonPointer>()
        .with_kind(crate::ErrorKind::Database)?
        + &pointer;
    Ok(WithRevision {
        response: (),
        // The resulting revision lets subscribed clients apply this change
        // incrementally.
        revision: ctx.db.put(&ptr, &value, None).await?,
    })
}
|
||||
@@ -1,316 +0,0 @@
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use emver::VersionRange;
|
||||
use isocountry::CountryCode;
|
||||
use patch_db::json_ptr::JsonPointer;
|
||||
use patch_db::{HasModel, Map, MapModel, OptionModel};
|
||||
use reqwest::Url;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use torut::onion::TorSecretKeyV3;
|
||||
|
||||
use crate::config::spec::{PackagePointerSpec, SystemPointerSpec};
|
||||
use crate::install::progress::InstallProgress;
|
||||
use crate::net::interface::InterfaceId;
|
||||
use crate::s9pk::manifest::{Manifest, ManifestModel, PackageId};
|
||||
use crate::status::health_check::HealthCheckId;
|
||||
use crate::status::Status;
|
||||
use crate::util::Version;
|
||||
use crate::version::{Current, VersionT};
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// Root document of the patch-db database.
pub struct Database {
    // System-wide server state and identity.
    #[model]
    pub server_info: ServerInfo,
    // Per-package state keyed by package id.
    #[model]
    pub package_data: AllPackageData,
    // Packages found in a backup/recovery that are not (yet) reinstalled.
    #[model]
    pub recovered_packages: BTreeMap<PackageId, RecoveredPackageInfo>,
    // Free-form JSON owned by the front end (written via `db put ui`).
    pub ui: Value,
}
|
||||
impl Database {
    /// Builds the initial database document for a freshly set-up server.
    ///
    /// `id` is the server's unique identifier, `hostname` forms the `.local`
    /// LAN address, `tor_key` yields the onion address, and `password_hash`
    /// is the already-hashed admin password.
    pub fn init(
        id: String,
        hostname: &str,
        tor_key: &TorSecretKeyV3,
        password_hash: String,
    ) -> Self {
        // TODO
        Database {
            server_info: ServerInfo {
                id,
                version: Current::new().semver().into(),
                last_backup: None,
                last_wifi_region: None,
                eos_version_compat: Current::new().compat().clone(),
                // unwrap is safe: these formats always produce valid URLs.
                lan_address: format!("https://{}.local", hostname).parse().unwrap(),
                tor_address: format!("http://{}", tor_key.public().get_onion_address())
                    .parse()
                    .unwrap(),
                status_info: ServerStatus {
                    backing_up: false,
                    updated: false,
                    update_progress: None,
                },
                wifi: WifiInfo {
                    ssids: Vec::new(),
                    connected: None,
                    selected: None,
                },
                unread_notification_count: 0,
                connection_addresses: ConnectionAddresses {
                    tor: Vec::new(),
                    clearnet: Vec::new(),
                },
                password_hash,
            },
            package_data: AllPackageData::default(),
            recovered_packages: BTreeMap::new(),
            ui: Value::Object(Default::default()),
        }
    }
}
|
||||
impl DatabaseModel {
    /// Model rooted at the top of the database: the default (empty) JSON
    /// pointer addresses the whole document.
    pub fn new() -> Self {
        Self::from(JsonPointer::default())
    }
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// Identity, addressing, and status of the server itself.
pub struct ServerInfo {
    pub id: String,
    pub version: Version,
    pub last_backup: Option<DateTime<Utc>>,
    /// Used in the wifi to determine the region to set the system to
    pub last_wifi_region: Option<CountryCode>,
    // Range of EOS versions this data is compatible with.
    pub eos_version_compat: VersionRange,
    pub lan_address: Url,
    pub tor_address: Url,
    #[model]
    #[serde(default)]
    pub status_info: ServerStatus,
    pub wifi: WifiInfo,
    pub unread_notification_count: u64,
    pub connection_addresses: ConnectionAddresses,
    // Hash of the admin password (already hashed before reaching the db).
    pub password_hash: String,
}
|
||||
|
||||
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// Transient server activity flags surfaced to the UI.
pub struct ServerStatus {
    // True while a backup is in progress.
    pub backing_up: bool,
    // True once an OS update has been applied and a reboot is pending.
    pub updated: bool,
    // Present only while an OS update download is in flight.
    #[model]
    pub update_progress: Option<UpdateProgress>,
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// Download progress of an in-flight OS update, in bytes.
pub struct UpdateProgress {
    // Total size; None when the server did not report a Content-Length.
    pub size: Option<u64>,
    pub downloaded: u64,
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Known wifi networks and the current connection state.
pub struct WifiInfo {
    // All SSIDs the server knows credentials for.
    pub ssids: Vec<String>,
    // SSID chosen to auto-connect to, if any.
    pub selected: Option<String>,
    // SSID currently connected, if any.
    pub connected: Option<String>,
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Human-readable hardware description (display strings, not parsed values).
pub struct ServerSpecs {
    pub cpu: String,
    pub disk: String,
    pub memory: String,
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Addresses the server is reachable at, grouped by transport.
pub struct ConnectionAddresses {
    pub tor: Vec<String>,
    pub clearnet: Vec<String>,
}
|
||||
|
||||
#[derive(Debug, Default, Deserialize, Serialize)]
/// Map of every package's state, keyed by package id. Newtype so patch-db
/// can treat it as a `Map` model.
pub struct AllPackageData(pub BTreeMap<PackageId, PackageDataEntry>);
impl Map for AllPackageData {
    type Key = PackageId;
    type Value = PackageDataEntry;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
impl HasModel for AllPackageData {
    // Indexable by key in patch-db pointer paths.
    type Model = MapModel<Self>;
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// URLs of a package's static assets as served by this host.
pub struct StaticFiles {
    license: String,
    instructions: String,
    icon: String,
}
impl StaticFiles {
    /// Builds the locally-served asset paths for a given package version.
    /// `icon_type` is the icon's file extension (e.g. "png").
    pub fn local(id: &PackageId, version: &Version, icon_type: &str) -> Self {
        StaticFiles {
            license: format!("/public/package-data/{}/{}/LICENSE.md", id, version),
            instructions: format!("/public/package-data/{}/{}/INSTRUCTIONS.md", id, version),
            icon: format!("/public/package-data/{}/{}/icon.{}", id, version, icon_type),
        }
    }
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(tag = "state")]
#[serde(rename_all = "kebab-case")]
/// Lifecycle state machine of a single package. The serde tag "state"
/// exposes the variant name (kebab-case) to clients.
pub enum PackageDataEntry {
    /// Fresh install in progress; no previous installation exists.
    #[serde(rename_all = "kebab-case")]
    Installing {
        static_files: StaticFiles,
        manifest: Manifest,
        install_progress: Arc<InstallProgress>,
    },
    /// New version installing while the old one (`installed`) keeps running.
    #[serde(rename_all = "kebab-case")]
    Updating {
        static_files: StaticFiles,
        manifest: Manifest,
        installed: InstalledPackageDataEntry,
        install_progress: Arc<InstallProgress>,
    },
    /// Being restored from a backup.
    #[serde(rename_all = "kebab-case")]
    Restoring {
        static_files: StaticFiles,
        manifest: Manifest,
        install_progress: Arc<InstallProgress>,
    },
    /// Uninstall in progress; `removing` is the entry being torn down.
    #[serde(rename_all = "kebab-case")]
    Removing {
        static_files: StaticFiles,
        manifest: Manifest,
        removing: InstalledPackageDataEntry,
    },
    /// Steady state: fully installed.
    #[serde(rename_all = "kebab-case")]
    Installed {
        static_files: StaticFiles,
        manifest: Manifest,
        installed: InstalledPackageDataEntry,
    },
}
|
||||
impl PackageDataEntry {
|
||||
pub fn installed(&self) -> Option<&InstalledPackageDataEntry> {
|
||||
match self {
|
||||
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
|
||||
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
|
||||
}
|
||||
}
|
||||
pub fn installed_mut(&mut self) -> Option<&mut InstalledPackageDataEntry> {
|
||||
match self {
|
||||
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
|
||||
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
|
||||
}
|
||||
}
|
||||
pub fn into_installed(self) -> Option<InstalledPackageDataEntry> {
|
||||
match self {
|
||||
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
|
||||
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
|
||||
}
|
||||
}
|
||||
pub fn manifest(self) -> Manifest {
|
||||
match self {
|
||||
PackageDataEntry::Installing { manifest, .. } => manifest,
|
||||
PackageDataEntry::Updating { manifest, .. } => manifest,
|
||||
PackageDataEntry::Restoring { manifest, .. } => manifest,
|
||||
PackageDataEntry::Removing { manifest, .. } => manifest,
|
||||
PackageDataEntry::Installed { manifest, .. } => manifest,
|
||||
}
|
||||
}
|
||||
}
|
||||
impl PackageDataEntryModel {
    // Typed accessors into the underlying JSON; the child keys below must
    // match the kebab-case serde field names of PackageDataEntry.
    /// "installed" field — present only in Updating/Installed states.
    pub fn installed(self) -> OptionModel<InstalledPackageDataEntry> {
        self.0.child("installed").into()
    }
    /// "removing" field — present only in the Removing state.
    pub fn removing(self) -> OptionModel<InstalledPackageDataEntry> {
        self.0.child("removing").into()
    }
    /// "install-progress" field — present while installing/updating/restoring.
    pub fn install_progress(self) -> OptionModel<InstallProgress> {
        self.0.child("install-progress").into()
    }
    /// "manifest" field — present in every state.
    pub fn manifest(self) -> ManifestModel {
        self.0.child("manifest").into()
    }
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// Full state of a successfully installed package.
pub struct InstalledPackageDataEntry {
    #[model]
    pub status: Status,
    // Marketplace this package was installed from, if any.
    pub marketplace_url: Option<Url>,
    #[serde(default)]
    #[serde(with = "crate::util::serde::ed25519_pubkey")]
    pub developer_key: ed25519_dalek::PublicKey,
    #[model]
    pub manifest: Manifest,
    pub last_backup: Option<DateTime<Utc>>,
    // Config values this package pulls from system-level sources.
    pub system_pointers: Vec<SystemPointerSpec>,
    // Static dependency metadata declared by the manifest.
    #[model]
    pub dependency_info: BTreeMap<PackageId, StaticDependencyInfo>,
    // Packages currently depending on this one.
    #[model]
    pub current_dependents: BTreeMap<PackageId, CurrentDependencyInfo>,
    // Packages this one currently depends on.
    #[model]
    pub current_dependencies: BTreeMap<PackageId, CurrentDependencyInfo>,
    #[model]
    pub interface_addresses: InterfaceAddressMap,
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// Dependency metadata known at install time (from the manifest).
pub struct StaticDependencyInfo {
    pub manifest: Option<Manifest>,
    // URL/path of the dependency's icon for display.
    pub icon: String,
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// How a live dependency edge is actually being used.
pub struct CurrentDependencyInfo {
    // Config pointers the dependent reads from the dependency.
    pub pointers: Vec<PackagePointerSpec>,
    // Health checks of the dependency that the dependent relies on.
    pub health_checks: BTreeSet<HealthCheckId>,
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
/// Per-interface addresses, keyed by interface id. Newtype so patch-db can
/// treat it as a `Map` model.
pub struct InterfaceAddressMap(pub BTreeMap<InterfaceId, InterfaceAddresses>);
impl Map for InterfaceAddressMap {
    type Key = InterfaceId;
    type Value = InterfaceAddresses;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
impl HasModel for InterfaceAddressMap {
    type Model = MapModel<Self>;
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// Addresses a single package interface is exposed at, per transport.
pub struct InterfaceAddresses {
    #[model]
    pub tor_address: Option<String>,
    #[model]
    pub lan_address: Option<String>,
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// Display info for a package found during recovery but not yet reinstalled.
pub struct RecoveredPackageInfo {
    pub title: String,
    pub icon: String,
    pub version: Version,
}
|
||||
@@ -1,25 +0,0 @@
|
||||
use patch_db::DbHandle;
|
||||
|
||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
||||
use crate::Error;
|
||||
|
||||
/// Lists the ids of all packages present in `package-data`, in any lifecycle
/// state (installing, installed, removing, ...).
pub async fn get_packages<Db: DbHandle>(db: &mut Db) -> Result<Vec<PackageId>, Error> {
    let packages = crate::db::DatabaseModel::new()
        .package_data()
        .get(db, false) // false: no db lock taken for this read
        .await?;
    Ok(packages.0.keys().cloned().collect())
}
|
||||
|
||||
/// Fetches the manifest for `pkg`, or `None` when the package has no entry
/// in `package-data` at all. Works in every lifecycle state, since each
/// state carries a manifest.
pub async fn get_manifest<Db: DbHandle>(
    db: &mut Db,
    pkg: &PackageId,
) -> Result<Option<Manifest>, Error> {
    let mpde = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(pkg)
        .get(db, false) // false: no db lock taken for this read
        .await?
        .into_owned();
    Ok(mpde.map(|pde| pde.manifest()))
}
|
||||
@@ -1,10 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use patch_db::Revision;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
/// Pairs an RPC response payload with the db revision its mutation produced
/// (None when the call changed nothing), so subscribed clients can apply
/// the change incrementally.
pub struct WithRevision<T> {
    pub response: T,
    pub revision: Option<Arc<Revision>>,
}
|
||||
@@ -1,911 +0,0 @@
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::BTreeMap;
|
||||
use std::time::Duration;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use emver::VersionRange;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::FutureExt;
|
||||
use patch_db::{DbHandle, HasModel, LockType, Map, MapModel, PatchDbHandle};
|
||||
use rand::SeedableRng;
|
||||
use rpc_toolkit::command;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::action::{ActionImplementation, NoOutput};
|
||||
use crate::config::action::ConfigRes;
|
||||
use crate::config::spec::PackagePointerSpec;
|
||||
use crate::config::{Config, ConfigSpec};
|
||||
use crate::context::RpcContext;
|
||||
use crate::db::model::{CurrentDependencyInfo, InstalledPackageDataEntry};
|
||||
use crate::error::ResultExt;
|
||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
||||
use crate::status::health_check::{HealthCheckId, HealthCheckResult};
|
||||
use crate::status::{MainStatus, Status};
|
||||
use crate::util::serde::display_serializable;
|
||||
use crate::util::{display_none, Version};
|
||||
use crate::volume::Volumes;
|
||||
use crate::Error;
|
||||
|
||||
#[command(subcommands(configure))]
/// Parent `dependency` CLI command: dispatches to its subcommands.
pub fn dependency() -> Result<(), Error> {
    Ok(())
}
|
||||
|
||||
#[derive(Clone, Debug, thiserror::Error, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[serde(tag = "type")]
/// Reason a dependency is currently unsatisfied. Variants are ordered from
/// most to least fundamental; see `cmp_priority` and `try_heal`, which walk
/// this ladder in order.
pub enum DependencyError {
    NotInstalled, // { "type": "not-installed" }
    #[serde(rename_all = "kebab-case")]
    IncorrectVersion {
        expected: VersionRange,
        received: Version,
    }, // { "type": "incorrect-version", "expected": "0.1.0", "received": "^0.2.0" }
    #[serde(rename_all = "kebab-case")]
    ConfigUnsatisfied {
        error: String,
    }, // { "type": "config-unsatisfied", "error": "Bitcoin Core must have pruning set to manual." }
    NotRunning, // { "type": "not-running" }
    #[serde(rename_all = "kebab-case")]
    HealthChecksFailed {
        failures: BTreeMap<HealthCheckId, HealthCheckResult>,
    }, // { "type": "health-checks-failed", "checks": { "rpc": { "time": "2021-05-11T18:21:29Z", "result": "starting" } } }
    #[serde(rename_all = "kebab-case")]
    // The dependency itself has unsatisfied dependencies.
    Transitive, // { "type": "transitive" }
}
|
||||
|
||||
impl DependencyError {
    /// Orders errors by severity: NotInstalled > IncorrectVersion >
    /// ConfigUnsatisfied > NotRunning > HealthChecksFailed > Transitive.
    /// Greater = more severe. Variants' payloads are ignored.
    pub fn cmp_priority(&self, other: &DependencyError) -> std::cmp::Ordering {
        use std::cmp::Ordering::*;

        use DependencyError::*;
        match (self, other) {
            (NotInstalled, NotInstalled) => Equal,
            (NotInstalled, _) => Greater,
            (_, NotInstalled) => Less,
            (IncorrectVersion { .. }, IncorrectVersion { .. }) => Equal,
            (IncorrectVersion { .. }, _) => Greater,
            (_, IncorrectVersion { .. }) => Less,
            (ConfigUnsatisfied { .. }, ConfigUnsatisfied { .. }) => Equal,
            (ConfigUnsatisfied { .. }, _) => Greater,
            (_, ConfigUnsatisfied { .. }) => Less,
            (NotRunning, NotRunning) => Equal,
            (NotRunning, _) => Greater,
            (_, NotRunning) => Less,
            (HealthChecksFailed { .. }, HealthChecksFailed { .. }) => Equal,
            (HealthChecksFailed { .. }, _) => Greater,
            (_, HealthChecksFailed { .. }) => Less,
            (Transitive, Transitive) => Equal,
        }
    }
    /// Combines two errors, keeping the more severe one; when both are the
    /// same variant, payloads are merged (config errors concatenated,
    /// health-check failure maps unioned).
    pub fn merge_with(self, other: DependencyError) -> DependencyError {
        match (self, other) {
            (DependencyError::NotInstalled, _) | (_, DependencyError::NotInstalled) => {
                DependencyError::NotInstalled
            }
            (DependencyError::IncorrectVersion { expected, received }, _)
            | (_, DependencyError::IncorrectVersion { expected, received }) => {
                DependencyError::IncorrectVersion { expected, received }
            }
            (
                DependencyError::ConfigUnsatisfied { error: e0 },
                DependencyError::ConfigUnsatisfied { error: e1 },
            ) => DependencyError::ConfigUnsatisfied {
                error: e0 + "\n" + &e1,
            },
            (DependencyError::ConfigUnsatisfied { error }, _)
            | (_, DependencyError::ConfigUnsatisfied { error }) => {
                DependencyError::ConfigUnsatisfied { error }
            }
            (DependencyError::NotRunning, _) | (_, DependencyError::NotRunning) => {
                DependencyError::NotRunning
            }
            (
                DependencyError::HealthChecksFailed { failures: f0 },
                DependencyError::HealthChecksFailed { failures: f1 },
            ) => DependencyError::HealthChecksFailed {
                // On duplicate check ids, f1's result wins (later insert).
                failures: f0.into_iter().chain(f1.into_iter()).collect(),
            },
            (DependencyError::HealthChecksFailed { failures }, _)
            | (_, DependencyError::HealthChecksFailed { failures }) => {
                DependencyError::HealthChecksFailed { failures }
            }
            (DependencyError::Transitive, _) => DependencyError::Transitive,
        }
    }
    #[instrument(skip(ctx, db))]
    /// Re-checks whether `self` still applies, walking down the severity
    /// ladder: each variant re-verifies its own condition against the live
    /// db state and, if cleared, recurses into the next (less severe)
    /// variant. Returns `Ok(None)` when the dependency is fully satisfied,
    /// or `Ok(Some(err))` with the most severe remaining error.
    ///
    /// Boxed because the recursion would otherwise make the future type
    /// infinitely sized.
    pub fn try_heal<'a, Db: DbHandle>(
        self,
        ctx: &'a RpcContext,
        db: &'a mut Db,
        id: &'a PackageId,
        dependency: &'a PackageId,
        mut dependency_config: Option<Config>,
        info: &'a DepInfo,
    ) -> BoxFuture<'a, Result<Option<Self>, Error>> {
        async move {
            Ok(match self {
                DependencyError::NotInstalled => {
                    // Installed now? Then move on to the version check.
                    if crate::db::DatabaseModel::new()
                        .package_data()
                        .idx_model(dependency)
                        .and_then(|m| m.installed())
                        .exists(db, true)
                        .await?
                    {
                        DependencyError::IncorrectVersion {
                            expected: info.version.clone(),
                            received: Default::default(),
                        }
                        .try_heal(ctx, db, id, dependency, dependency_config, info)
                        .await?
                    } else {
                        Some(DependencyError::NotInstalled)
                    }
                }
                DependencyError::IncorrectVersion { expected, .. } => {
                    let version: Version = crate::db::DatabaseModel::new()
                        .package_data()
                        .idx_model(dependency)
                        .and_then(|m| m.installed())
                        .map(|m| m.manifest().version())
                        .get(db, true)
                        .await?
                        .into_owned()
                        // Missing entry falls back to the default version,
                        // which will fail the range check below.
                        .unwrap_or_default();
                    if version.satisfies(&expected) {
                        DependencyError::ConfigUnsatisfied {
                            error: String::new(),
                        }
                        .try_heal(ctx, db, id, dependency, dependency_config, info)
                        .await?
                    } else {
                        Some(DependencyError::IncorrectVersion {
                            expected,
                            received: version,
                        })
                    }
                }
                DependencyError::ConfigUnsatisfied { .. } => {
                    // Need both manifests to evaluate the config check rules.
                    let dependent_manifest = crate::db::DatabaseModel::new()
                        .package_data()
                        .idx_model(id)
                        .and_then(|m| m.installed())
                        .map::<_, Manifest>(|m| m.manifest())
                        .expect(db)
                        .await?
                        .get(db, true)
                        .await?;
                    let dependency_manifest = crate::db::DatabaseModel::new()
                        .package_data()
                        .idx_model(dependency)
                        .and_then(|m| m.installed())
                        .map::<_, Manifest>(|m| m.manifest())
                        .expect(db)
                        .await?
                        .get(db, true)
                        .await?;
                    // Use the caller-provided config if given; otherwise
                    // fetch the dependency's current config.
                    let dependency_config = if let Some(cfg) = dependency_config.take() {
                        cfg
                    } else if let Some(cfg_info) = &dependency_manifest.config {
                        cfg_info
                            .get(
                                ctx,
                                dependency,
                                &dependency_manifest.version,
                                &dependency_manifest.volumes,
                            )
                            .await?
                            .config
                            .unwrap_or_default()
                    } else {
                        Config::default()
                    };
                    if let Some(cfg_req) = &info.config {
                        if let Err(error) = cfg_req
                            .check(
                                ctx,
                                id,
                                &dependent_manifest.version,
                                &dependent_manifest.volumes,
                                &dependency_config,
                            )
                            .await?
                        {
                            return Ok(Some(DependencyError::ConfigUnsatisfied { error }));
                        }
                    }
                    DependencyError::NotRunning
                        .try_heal(ctx, db, id, dependency, Some(dependency_config), info)
                        .await?
                }
                DependencyError::NotRunning => {
                    let status = crate::db::DatabaseModel::new()
                        .package_data()
                        .idx_model(dependency)
                        .and_then(|m| m.installed())
                        .map::<_, Status>(|m| m.status())
                        .expect(db)
                        .await?
                        .get(db, true)
                        .await?;
                    if status.main.running() {
                        DependencyError::HealthChecksFailed {
                            failures: BTreeMap::new(),
                        }
                        .try_heal(ctx, db, id, dependency, dependency_config, info)
                        .await?
                    } else {
                        Some(DependencyError::NotRunning)
                    }
                }
                DependencyError::HealthChecksFailed { .. } => {
                    let status = crate::db::DatabaseModel::new()
                        .package_data()
                        .idx_model(dependency)
                        .and_then(|m| m.installed())
                        .map::<_, Status>(|m| m.status())
                        .expect(db)
                        .await?
                        .get(db, true)
                        .await?
                        .into_owned();
                    match status.main {
                        MainStatus::BackingUp {
                            started: Some(_),
                            health,
                        }
                        | MainStatus::Running { health, .. } => {
                            // Only failures of checks this dependent actually
                            // relies on (per current_dependencies) count.
                            let mut failures = BTreeMap::new();
                            for (check, res) in health {
                                if !matches!(res, HealthCheckResult::Success)
                                    && crate::db::DatabaseModel::new()
                                        .package_data()
                                        .idx_model(id)
                                        .and_then(|m| m.installed())
                                        .and_then::<_, CurrentDependencyInfo>(|m| {
                                            m.current_dependencies().idx_model(dependency)
                                        })
                                        .get(db, true)
                                        .await?
                                        .into_owned()
                                        .map(|i| i.health_checks)
                                        .unwrap_or_default()
                                        .contains(&check)
                                {
                                    failures.insert(check.clone(), res.clone());
                                }
                            }
                            if !failures.is_empty() {
                                Some(DependencyError::HealthChecksFailed { failures })
                            } else {
                                DependencyError::Transitive
                                    .try_heal(ctx, db, id, dependency, dependency_config, info)
                                    .await?
                            }
                        }
                        // Starting is treated optimistically: skip straight
                        // to the transitive check.
                        MainStatus::Starting => {
                            DependencyError::Transitive
                                .try_heal(ctx, db, id, dependency, dependency_config, info)
                                .await?
                        }
                        _ => return Ok(Some(DependencyError::NotRunning)),
                    }
                }
                DependencyError::Transitive => {
                    // Satisfied only if the dependency itself has no
                    // outstanding dependency errors.
                    if crate::db::DatabaseModel::new()
                        .package_data()
                        .idx_model(dependency)
                        .and_then(|m| m.installed())
                        .map::<_, DependencyErrors>(|m| m.status().dependency_errors())
                        .get(db, true)
                        .await?
                        .into_owned()
                        .unwrap_or_default()
                        .0
                        .is_empty()
                    {
                        None
                    } else {
                        Some(DependencyError::Transitive)
                    }
                }
            })
        }
        .boxed()
    }
}
|
||||
impl std::fmt::Display for DependencyError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
DependencyError::NotInstalled => write!(f, "Not Installed"),
|
||||
DependencyError::IncorrectVersion { expected, received } => write!(
|
||||
f,
|
||||
"Incorrect Version: Expected {}, Received {}",
|
||||
expected,
|
||||
received.as_str()
|
||||
),
|
||||
DependencyError::ConfigUnsatisfied { error } => {
|
||||
write!(f, "Configuration Requirements Not Satisfied: {}", error)
|
||||
}
|
||||
DependencyError::NotRunning => write!(f, "Not Running"),
|
||||
DependencyError::HealthChecksFailed { failures } => {
|
||||
write!(f, "Failed Health Check(s): ")?;
|
||||
let mut comma = false;
|
||||
for (check, res) in failures {
|
||||
if !comma {
|
||||
comma = true;
|
||||
} else {
|
||||
write!(f, ", ")?;
|
||||
}
|
||||
write!(f, "{}: {}", check, res)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
DependencyError::Transitive => {
|
||||
write!(f, "Dependency Error(s)")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// A dependency error together with the package id it concerns.
pub struct TaggedDependencyError {
    pub dependency: PackageId,
    pub error: DependencyError,
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Map of packages that would break (and why) if a proposed action proceeds.
pub struct BreakageRes(pub BTreeMap<PackageId, TaggedDependencyError>);
|
||||
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
/// A package's declared dependencies, keyed by the dependency's id.
/// Newtype so patch-db can treat it as a `Map` model.
pub struct Dependencies(pub BTreeMap<PackageId, DepInfo>);
impl Map for Dependencies {
    type Key = PackageId;
    type Value = DepInfo;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
impl HasModel for Dependencies {
    type Model = MapModel<Self>;
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
#[serde(tag = "type")]
/// How strongly a dependency is required. `how` is display text telling the
/// user how to opt in or out.
pub enum DependencyRequirement {
    OptIn { how: String },
    OptOut { how: String },
    Required,
}
|
||||
impl DependencyRequirement {
|
||||
pub fn required(&self) -> bool {
|
||||
matches!(self, &DependencyRequirement::Required)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
/// Manifest-declared description of one dependency edge.
pub struct DepInfo {
    // Version range of the dependency this package accepts.
    pub version: VersionRange,
    pub requirement: DependencyRequirement,
    pub description: Option<String>,
    #[serde(default)]
    #[model]
    // Optional config check / auto-configure hooks for the dependency.
    pub config: Option<DependencyConfig>,
}
|
||||
impl DepInfo {
    /// Checks whether this dependency is currently satisfied for
    /// `dependent_id`.
    ///
    /// Starts from the worst case (`NotInstalled`) and lets
    /// `DependencyError::try_heal` walk the error toward resolution; whatever
    /// error remains (if any) is returned as the inner `Err`.
    /// `dependency_config` may be supplied to skip a re-fetch; `None` means
    /// try_heal fetches it itself (see the parameter comment).
    /// The outer `Result` carries database/RPC failures only.
    pub async fn satisfied<Db: DbHandle>(
        &self,
        ctx: &RpcContext,
        db: &mut Db,
        dependency_id: &PackageId,
        dependency_config: Option<Config>, // fetch if none
        dependent_id: &PackageId,
    ) -> Result<Result<(), DependencyError>, Error> {
        Ok(
            if let Some(err) = DependencyError::NotInstalled
                .try_heal(
                    ctx,
                    db,
                    dependent_id,
                    dependency_id,
                    dependency_config,
                    self,
                )
                .await?
            {
                Err(err)
            } else {
                Ok(())
            },
        )
    }
}
|
||||
|
||||
/// Hooks a dependent declares for validating / rewriting its dependency's
/// configuration.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct DependencyConfig {
    // Action that validates the dependency's current config against the
    // dependent's requirements.
    check: ActionImplementation,
    // Action that rewrites the dependency's config to satisfy the dependent.
    auto_configure: ActionImplementation,
}
|
||||
impl DependencyConfig {
    /// Runs the dependent's `check` action against `dependency_config` in a
    /// sandbox. The inner `Err(String)` is the human-readable rule violation;
    /// the outer `Err` is an execution failure.
    pub async fn check(
        &self,
        ctx: &RpcContext,
        dependent_id: &PackageId,
        dependent_version: &Version,
        dependent_volumes: &Volumes,
        dependency_config: &Config,
    ) -> Result<Result<NoOutput, String>, Error> {
        Ok(self
            .check
            .sandboxed(
                ctx,
                dependent_id,
                dependent_version,
                dependent_volumes,
                Some(dependency_config),
                None,
            )
            .await?
            // Drop the error code, keep only the message for the caller.
            .map_err(|(_, e)| e))
    }
    /// Runs the dependent's `auto-configure` action on `old`, returning the
    /// rewritten config. Action failures surface as `AutoConfigure` errors.
    pub async fn auto_configure(
        &self,
        ctx: &RpcContext,
        dependent_id: &PackageId,
        dependent_version: &Version,
        dependent_volumes: &Volumes,
        old: &Config,
    ) -> Result<Config, Error> {
        self.auto_configure
            .sandboxed(
                ctx,
                dependent_id,
                dependent_version,
                dependent_volumes,
                Some(old),
                None,
            )
            .await?
            .map_err(|e| Error::new(eyre!("{}", e.1), crate::ErrorKind::AutoConfigure))
    }
}
|
||||
|
||||
#[command(
|
||||
subcommands(self(configure_impl(async)), configure_dry),
|
||||
display(display_none)
|
||||
)]
|
||||
pub async fn configure(
|
||||
#[arg(rename = "dependent-id")] dependent_id: PackageId,
|
||||
#[arg(rename = "dependency-id")] dependency_id: PackageId,
|
||||
) -> Result<(PackageId, PackageId), Error> {
|
||||
Ok((dependent_id, dependency_id))
|
||||
}
|
||||
|
||||
pub async fn configure_impl(
|
||||
ctx: RpcContext,
|
||||
(pkg_id, dep_id): (PackageId, PackageId),
|
||||
) -> Result<(), Error> {
|
||||
let mut db = ctx.db.handle();
|
||||
let ConfigDryRes {
|
||||
old_config: _,
|
||||
new_config,
|
||||
spec: _,
|
||||
} = configure_logic(ctx.clone(), &mut db, (pkg_id, dep_id.clone())).await?;
|
||||
Ok(crate::config::configure(
|
||||
&ctx,
|
||||
&mut db,
|
||||
&dep_id,
|
||||
Some(new_config),
|
||||
&Some(Duration::from_secs(3).into()),
|
||||
false,
|
||||
&mut BTreeMap::new(),
|
||||
&mut BTreeMap::new(),
|
||||
)
|
||||
.await?)
|
||||
}
|
||||
|
||||
/// Result of a dry-run dependency auto-configuration.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct ConfigDryRes {
    // Dependency's config before rewriting (or a spec-generated default).
    pub old_config: Config,
    // Config after the dependent's auto-configure action ran.
    pub new_config: Config,
    // Spec used to validate/generate the dependency's config.
    pub spec: ConfigSpec,
}
|
||||
|
||||
#[command(rename = "dry", display(display_serializable))]
|
||||
#[instrument(skip(ctx))]
|
||||
pub async fn configure_dry(
|
||||
#[context] ctx: RpcContext,
|
||||
#[parent_data] (pkg_id, dependency_id): (PackageId, PackageId),
|
||||
) -> Result<ConfigDryRes, Error> {
|
||||
let mut db = ctx.db.handle();
|
||||
configure_logic(ctx, &mut db, (pkg_id, dependency_id)).await
|
||||
}
|
||||
|
||||
/// Core of the dependency auto-configure flow. Looks up both packages,
/// fetches the dependency's current config (or generates a default from its
/// spec), runs the dependent's auto-configure action over it, and returns
/// old config, new config, and spec — without persisting anything.
pub async fn configure_logic(
    ctx: RpcContext,
    db: &mut PatchDbHandle,
    (pkg_id, dependency_id): (PackageId, PackageId),
) -> Result<ConfigDryRes, Error> {
    // Take a read lock over package-data for the duration of the lookups.
    crate::db::DatabaseModel::new()
        .package_data()
        .lock(db, LockType::Read)
        .await?;
    // Dependent package must be installed.
    let pkg_model = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&pkg_id)
        .and_then(|m| m.installed())
        .expect(db)
        .await
        .with_kind(crate::ErrorKind::NotFound)?;
    let pkg_version = pkg_model.clone().manifest().version().get(db, true).await?;
    let pkg_volumes = pkg_model.clone().manifest().volumes().get(db, true).await?;
    // Dependency package must also be installed.
    let dependency_model = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&dependency_id)
        .and_then(|m| m.installed())
        .expect(db)
        .await
        .with_kind(crate::ErrorKind::NotFound)?;
    // The dependency must expose a config action, or there is nothing to
    // auto-configure.
    let dependency_config_action = dependency_model
        .clone()
        .manifest()
        .config()
        .get(db, true)
        .await?
        .to_owned()
        .ok_or_else(|| {
            Error::new(
                eyre!("{} has no config", dependency_id),
                crate::ErrorKind::NotFound,
            )
        })?;
    let dependency_version = dependency_model
        .clone()
        .manifest()
        .version()
        .get(db, true)
        .await?;
    let dependency_volumes = dependency_model
        .clone()
        .manifest()
        .volumes()
        .get(db, true)
        .await?;
    let dependencies = pkg_model
        .clone()
        .manifest()
        .dependencies()
        .get(db, true)
        .await?;

    // The dependent must declare the dependency, and that declaration must
    // carry a DependencyConfig (check/auto-configure hooks).
    let dependency = dependencies
        .get(&dependency_id)
        .ok_or_else(|| {
            Error::new(
                eyre!(
                    "dependency for {} not found in the manifest for {}",
                    dependency_id,
                    pkg_id
                ),
                crate::ErrorKind::NotFound,
            )
        })?
        .config
        .as_ref()
        .ok_or_else(|| {
            Error::new(
                eyre!(
                    "dependency config for {} not found on {}",
                    dependency_id,
                    pkg_id
                ),
                crate::ErrorKind::NotFound,
            )
        })?;
    // Ask the dependency for its current config + spec.
    let ConfigRes {
        config: maybe_config,
        spec,
    } = dependency_config_action
        .get(
            &ctx,
            &dependency_id,
            &*dependency_version,
            &*dependency_volumes,
        )
        .await?;

    // If the dependency has never been configured, generate a default config
    // from its spec (10s generation timeout).
    let old_config = if let Some(config) = maybe_config {
        config
    } else {
        spec.gen(
            &mut rand::rngs::StdRng::from_entropy(),
            &Some(Duration::new(10, 0)),
        )?
    };

    // Run the dependent's auto-configure action, sandboxed, over the old
    // config to produce the proposed new config.
    let new_config = dependency
        .auto_configure
        .sandboxed(
            &ctx,
            &pkg_id,
            &pkg_version,
            &pkg_volumes,
            Some(&old_config),
            None,
        )
        .await?
        .map_err(|e| Error::new(eyre!("{}", e.1), crate::ErrorKind::AutoConfigure))?;

    Ok(ConfigDryRes {
        old_config,
        new_config,
        spec,
    })
}
|
||||
|
||||
/// Records `dependent_id` in the `current-dependents` map of every package
/// listed in `current_dependencies` that is currently installed.
#[instrument(skip(db, current_dependencies))]
pub async fn add_dependent_to_current_dependents_lists<
    'a,
    Db: DbHandle,
    I: IntoIterator<Item = (&'a PackageId, &'a CurrentDependencyInfo)>,
>(
    db: &mut Db,
    dependent_id: &PackageId,
    current_dependencies: I,
) -> Result<(), Error> {
    for (dependency, dep_info) in current_dependencies {
        // Not-installed dependencies have no current-dependents map to
        // update; they are silently skipped rather than treated as errors.
        if let Some(dependency_model) = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(&dependency)
            .and_then(|pkg| pkg.installed())
            .check(db)
            .await?
        {
            dependency_model
                .current_dependents()
                .idx_model(dependent_id)
                .put(db, &dep_info)
                .await?;
        }
    }
    Ok(())
}
|
||||
|
||||
/// Per-dependency error state of an installed package.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct DependencyErrors(pub BTreeMap<PackageId, DependencyError>);
impl Map for DependencyErrors {
    type Key = PackageId;
    type Value = DependencyError;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        // Straight delegation to the underlying BTreeMap.
        self.0.get(key)
    }
}
impl HasModel for DependencyErrors {
    // Modeled as a map so individual entries are addressable in patch-db.
    type Model = MapModel<Self>;
}
|
||||
impl DependencyErrors {
    /// Builds the initial dependency-error map for a package: every current
    /// dependency that is also declared in the manifest is checked via
    /// `DepInfo::satisfied`, and any failures are recorded.
    pub async fn init<Db: DbHandle>(
        ctx: &RpcContext,
        db: &mut Db,
        manifest: &Manifest,
        current_dependencies: &BTreeMap<PackageId, CurrentDependencyInfo>,
    ) -> Result<DependencyErrors, Error> {
        let mut res = BTreeMap::new();
        // Intersect the live dependency set with the manifest declarations;
        // dependencies absent from the manifest are ignored.
        for (dependency_id, info) in current_dependencies.keys().filter_map(|dependency_id| {
            manifest
                .dependencies
                .0
                .get(dependency_id)
                .map(|info| (dependency_id, info))
        }) {
            if let Err(e) = info
                .satisfied(ctx, db, dependency_id, None, &manifest.id)
                .await?
            {
                res.insert(dependency_id.clone(), e);
            }
        }
        Ok(DependencyErrors(res))
    }
}
|
||||
impl std::fmt::Display for DependencyErrors {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{{ ")?;
|
||||
for (idx, (id, err)) in self.0.iter().enumerate() {
|
||||
write!(f, "{}: {}", id, err)?;
|
||||
if idx < self.0.len() - 1 {
|
||||
// not last
|
||||
write!(f, ", ")?;
|
||||
}
|
||||
}
|
||||
write!(f, " }}")
|
||||
}
|
||||
}
|
||||
|
||||
/// Applies `break_transitive` with `error` to every installed dependent of
/// `id`, excluding `id` itself (avoids self-recursion for self-dependents).
pub async fn break_all_dependents_transitive<'a, Db: DbHandle>(
    db: &'a mut Db,
    id: &'a PackageId,
    error: DependencyError,
    breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
) -> Result<(), Error> {
    for dependent in crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(id)
        .and_then(|m| m.installed())
        .expect(db)
        .await?
        .current_dependents()
        .keys(db, true)
        .await?
        .into_iter()
        .filter(|dependent| id != dependent)
    {
        break_transitive(db, &dependent, id, error.clone(), breakages).await?;
    }
    Ok(())
}
|
||||
|
||||
/// Records `error` against `dependency` in package `id`'s status, and — if
/// the package was not already at least this broken — cascades a
/// `Transitive` error to all of `id`'s own dependents.
///
/// Returns a boxed future because it is mutually recursive with
/// `break_all_dependents_transitive`.
#[instrument(skip(db))]
pub fn break_transitive<'a, Db: DbHandle>(
    db: &'a mut Db,
    id: &'a PackageId,
    dependency: &'a PackageId,
    error: DependencyError,
    breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
) -> BoxFuture<'a, Result<(), Error>> {
    async move {
        let mut tx = db.begin().await?;
        let model = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|m| m.installed())
            .expect(&mut tx)
            .await?;
        let mut status = model.clone().status().get_mut(&mut tx).await?;

        // Pull out any previously recorded error for this dependency so it
        // can be compared and merged with the new one.
        let old = status.dependency_errors.0.remove(dependency);
        // "Newly broken" = no prior error, or the new error outranks the old
        // one per cmp_priority.
        let newly_broken = if let Some(e) = &old {
            error.cmp_priority(&e) == Ordering::Greater
        } else {
            true
        };
        status.dependency_errors.0.insert(
            dependency.clone(),
            if let Some(old) = old {
                old.merge_with(error.clone())
            } else {
                error.clone()
            },
        );
        if newly_broken {
            breakages.insert(
                id.clone(),
                TaggedDependencyError {
                    dependency: dependency.clone(),
                    error: error.clone(),
                },
            );
            status.save(&mut tx).await?;

            // Commit before recursing so dependents see the updated state.
            tx.save().await?;
            break_all_dependents_transitive(db, id, DependencyError::Transitive, breakages).await?;
        } else {
            // Already broken at least this badly: just persist the merged
            // error; no need to cascade again.
            status.save(&mut tx).await?;

            tx.save().await?;
        }

        Ok(())
    }
    .boxed()
}
|
||||
|
||||
/// Attempts `heal_transitive` for every installed dependent of `id`,
/// excluding `id` itself (avoids self-recursion for self-dependents).
#[instrument(skip(ctx, db))]
pub async fn heal_all_dependents_transitive<'a, Db: DbHandle>(
    ctx: &'a RpcContext,
    db: &'a mut Db,
    id: &'a PackageId,
) -> Result<(), Error> {
    for dependent in crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(id)
        .and_then(|m| m.installed())
        .expect(db)
        .await?
        .current_dependents()
        .keys(db, true)
        .await?
        .into_iter()
        .filter(|dependent| id != dependent)
    {
        heal_transitive(ctx, db, &dependent, id).await?;
    }
    Ok(())
}
|
||||
|
||||
/// Re-evaluates the recorded error for `dependency` on package `id` via
/// `try_heal`. If the error fully heals, it is dropped from the status and
/// healing cascades to `id`'s own dependents; otherwise the (possibly
/// downgraded) error is written back.
///
/// Returns a boxed future because it is mutually recursive with
/// `heal_all_dependents_transitive`.
#[instrument(skip(ctx, db))]
pub fn heal_transitive<'a, Db: DbHandle>(
    ctx: &'a RpcContext,
    db: &'a mut Db,
    id: &'a PackageId,
    dependency: &'a PackageId,
) -> BoxFuture<'a, Result<(), Error>> {
    async move {
        let mut tx = db.begin().await?;
        let model = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|m| m.installed())
            .expect(&mut tx)
            .await?;
        let mut status = model.clone().status().get_mut(&mut tx).await?;

        // Remove the current error; it will be re-inserted only if try_heal
        // says some error remains.
        let old = status.dependency_errors.0.remove(dependency);

        if let Some(old) = old {
            let info = model
                .manifest()
                .dependencies()
                .idx_model(dependency)
                .expect(&mut tx)
                .await?
                .get(&mut tx, true)
                .await?;
            if let Some(new) = old
                .try_heal(ctx, &mut tx, id, dependency, None, &*info)
                .await?
            {
                // Still broken (possibly less severely): record and commit.
                status.dependency_errors.0.insert(dependency.clone(), new);
                status.save(&mut tx).await?;
                tx.save().await?;
            } else {
                // Fully healed: commit, then give dependents a chance to
                // heal their Transitive errors too.
                status.save(&mut tx).await?;
                tx.save().await?;
                heal_all_dependents_transitive(ctx, db, id).await?;
            }
        }

        Ok(())
    }
    .boxed()
}
|
||||
|
||||
/// Re-runs `configure` for every dependent whose config contains a live
/// pointer (tor/lan address) into `pde`'s package, so the pointer is
/// retargeted. Tor-key and config pointers never need retargeting.
pub async fn reconfigure_dependents_with_live_pointers(
    ctx: &RpcContext,
    mut tx: impl DbHandle,
    pde: &InstalledPackageDataEntry,
) -> Result<(), Error> {
    let dependents = &pde.current_dependents;
    let me = &pde.manifest.id;
    for (dependent_id, dependency_info) in dependents {
        if dependency_info.pointers.iter().any(|ptr| match ptr {
            // dependency id matches the package being uninstalled
            PackagePointerSpec::TorAddress(ptr) => &ptr.package_id == me && dependent_id != me,
            PackagePointerSpec::LanAddress(ptr) => &ptr.package_id == me && dependent_id != me,
            // we never need to retarget these
            PackagePointerSpec::TorKey(_) => false,
            PackagePointerSpec::Config(_) => false,
        }) {
            // Reconfigure with no new config payload: re-resolves pointers.
            crate::config::configure(
                ctx,
                &mut tx,
                dependent_id,
                None,
                &None,
                false,
                &mut BTreeMap::new(),
                &mut BTreeMap::new(),
            )
            .await?;
        }
    }
    Ok(())
}
|
||||
@@ -1,170 +0,0 @@
|
||||
use std::collections::BTreeSet;
|
||||
use std::num::ParseIntError;
|
||||
use std::path::Path;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use super::BOOT_RW_PATH;
|
||||
use crate::util::AtomicFile;
|
||||
use crate::Error;
|
||||
|
||||
/// sysfs parameter controlling live usb-storage quirks.
/// (`'static` is implied for `&str` consts — clippy::redundant_static_lifetimes.)
pub const QUIRK_PATH: &str = "/sys/module/usb_storage/parameters/quirks";
|
||||
|
||||
/// Devices exempt from usb-storage quirking: known-good hubs/drives that
/// should never be forced into quirk ("u") mode.
pub const WHITELIST: [(VendorId, ProductId); 5] = [
    (VendorId(0x1d6b), ProductId(0x0002)), // root hub usb2
    (VendorId(0x1d6b), ProductId(0x0003)), // root hub usb3
    (VendorId(0x2109), ProductId(0x3431)),
    (VendorId(0x1058), ProductId(0x262f)), // western digital black HDD
    (VendorId(0x04e8), ProductId(0x4001)), // Samsung T7
];
|
||||
|
||||
/// 16-bit USB vendor id, parsed from and rendered as 4 hex digits.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct VendorId(u16);
impl std::str::FromStr for VendorId {
    type Err = ParseIntError;
    /// Parses a hexadecimal vendor id, ignoring surrounding whitespace
    /// (sysfs values carry a trailing newline).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let hex = s.trim();
        u16::from_str_radix(hex, 16).map(Self)
    }
}
impl std::fmt::Display for VendorId {
    /// Renders zero-padded lowercase hex, matching the sysfs format.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:04x}", self.0)
    }
}
|
||||
|
||||
/// 16-bit USB product id, parsed from and rendered as 4 hex digits.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProductId(u16);
impl std::str::FromStr for ProductId {
    type Err = ParseIntError;
    /// Parses a hexadecimal product id, ignoring surrounding whitespace
    /// (sysfs values carry a trailing newline).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let hex = s.trim();
        u16::from_str_radix(hex, 16).map(Self)
    }
}
impl std::fmt::Display for ProductId {
    /// Renders zero-padded lowercase hex, matching the sysfs format.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:04x}", self.0)
    }
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Quirks(BTreeSet<(VendorId, ProductId)>);
|
||||
impl Quirks {
|
||||
pub fn add(&mut self, vendor: VendorId, product: ProductId) {
|
||||
self.0.insert((vendor, product));
|
||||
}
|
||||
pub fn remove(&mut self, vendor: VendorId, product: ProductId) {
|
||||
self.0.remove(&(vendor, product));
|
||||
}
|
||||
pub fn contains(&self, vendor: VendorId, product: ProductId) -> bool {
|
||||
self.0.contains(&(vendor, product))
|
||||
}
|
||||
}
|
||||
impl std::fmt::Display for Quirks {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let mut comma = false;
|
||||
for (vendor, product) in &self.0 {
|
||||
if comma {
|
||||
write!(f, ",")?;
|
||||
} else {
|
||||
comma = true;
|
||||
}
|
||||
write!(f, "{}:{}:u", vendor, product)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
impl std::str::FromStr for Quirks {
|
||||
type Err = Error;
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let s = s.trim();
|
||||
let mut quirks = BTreeSet::new();
|
||||
for item in s.split(",") {
|
||||
if let [vendor, product, "u"] = item.splitn(3, ":").collect::<Vec<_>>().as_slice() {
|
||||
quirks.insert((vendor.parse()?, product.parse()?));
|
||||
} else {
|
||||
return Err(Error::new(
|
||||
eyre!("Invalid quirk: `{}`", item),
|
||||
crate::ErrorKind::DiskManagement,
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(Quirks(quirks))
|
||||
}
|
||||
}
|
||||
|
||||
/// Scans /sys/bus/usb/devices, quirking every storage-capable device that is
/// neither whitelisted nor already quirked. For each newly quirked device the
/// live quirks parameter is rewritten and the device soft-disconnected so the
/// quirk takes effect on re-enumeration. Returns human-readable names of the
/// devices that were disconnected (for the user to replug/reconnect).
#[instrument]
pub async fn update_quirks(quirks: &mut Quirks) -> Result<Vec<String>, Error> {
    let mut usb_devices = tokio::fs::read_dir("/sys/bus/usb/devices/").await?;
    let mut to_reconnect = Vec::new();
    while let Some(usb_device) = usb_devices.next_entry().await? {
        // Entries without idVendor (interfaces, hubs' sub-nodes) are not
        // devices; skip them.
        if tokio::fs::metadata(usb_device.path().join("idVendor"))
            .await
            .is_err()
        {
            continue;
        }
        let vendor = tokio::fs::read_to_string(usb_device.path().join("idVendor"))
            .await?
            .parse()?;
        let product = tokio::fs::read_to_string(usb_device.path().join("idProduct"))
            .await?
            .parse()?;
        // Whitelisted devices are actively un-quirked.
        if WHITELIST.contains(&(vendor, product)) {
            quirks.remove(vendor, product);
            continue;
        }
        // Already quirked: nothing to do, and no disconnect needed.
        if quirks.contains(vendor, product) {
            continue;
        }
        quirks.add(vendor, product);
        {
            // write quirks to sysfs
            // (written inside the loop, before each disconnect, so the quirk
            // is live when the device re-enumerates)
            let mut quirk_file = tokio::fs::File::create(QUIRK_PATH).await?;
            quirk_file.write_all(quirks.to_string().as_bytes()).await?;
            quirk_file.sync_all().await?;
            drop(quirk_file);
        }

        disconnect_usb(usb_device.path()).await?;
        let (vendor_name, product_name) = tokio::try_join!(
            tokio::fs::read_to_string(usb_device.path().join("manufacturer")),
            tokio::fs::read_to_string(usb_device.path().join("product")),
        )?;
        to_reconnect.push(format!("{} {}", vendor_name, product_name));
    }
    Ok(to_reconnect)
}
|
||||
|
||||
/// Soft-disconnects a USB device by writing `0` to its
/// `bConfigurationValue` attribute (de-configures the device).
#[instrument(skip(usb_device_path))]
pub async fn disconnect_usb(usb_device_path: impl AsRef<Path>) -> Result<(), Error> {
    // NOTE(review): variable names say "authorized" but the attribute written
    // is bConfigurationValue, not `authorized` — the naming looks stale.
    let authorized_path = usb_device_path.as_ref().join("bConfigurationValue");
    let mut authorized_file = tokio::fs::File::create(&authorized_path).await?;
    authorized_file.write_all(b"0").await?;
    authorized_file.sync_all().await?;
    drop(authorized_file);
    Ok(())
}
|
||||
|
||||
/// Reads and parses the live usb-storage quirks parameter from sysfs.
#[instrument]
pub async fn fetch_quirks() -> Result<Quirks, Error> {
    Ok(tokio::fs::read_to_string(QUIRK_PATH).await?.parse()?)
}
|
||||
|
||||
/// Persists quirks into the boot command line so they apply at next boot.
/// A pristine `cmdline.txt.orig` is captured on first run, and `cmdline.txt`
/// is always regenerated from it — so repeated saves never stack
/// `usb-storage.quirks=` prefixes. Written atomically via `AtomicFile`.
#[instrument]
pub async fn save_quirks(quirks: &Quirks) -> Result<(), Error> {
    let orig_path = Path::new(BOOT_RW_PATH).join("cmdline.txt.orig");
    let target_path = Path::new(BOOT_RW_PATH).join("cmdline.txt");
    if tokio::fs::metadata(&orig_path).await.is_err() {
        tokio::fs::copy(&target_path, &orig_path).await?;
    }
    let cmdline = tokio::fs::read_to_string(&orig_path).await?;
    let mut target = AtomicFile::new(&target_path).await?;
    target
        .write_all(format!("usb-storage.quirks={} {}", quirks, cmdline).as_bytes())
        .await?;
    target.save().await?;

    Ok(())
}
|
||||
@@ -1,78 +0,0 @@
|
||||
use digest::Digest;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::process::Command;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::util::Invoke;
|
||||
use crate::{Error, ErrorKind, ResultExt};
|
||||
|
||||
/// On-disk location of the device's product key.
/// (`'static` is implied for `&str` consts — clippy::redundant_static_lifetimes.)
pub const PRODUCT_KEY_PATH: &str = "/embassy-os/product_key.txt";
|
||||
|
||||
#[instrument]
|
||||
pub async fn get_hostname() -> Result<String, Error> {
|
||||
Ok(derive_hostname(&get_id().await?))
|
||||
}
|
||||
|
||||
/// Builds the device hostname from its derived id: `embassy-<id>`.
pub fn derive_hostname(id: &str) -> String {
    let mut hostname = String::with_capacity("embassy-".len() + id.len());
    hostname.push_str("embassy-");
    hostname.push_str(id);
    hostname
}
|
||||
|
||||
/// Returns the hostname the system is actually using right now, via the
/// `hostname` command (may differ from the derived canonical hostname).
#[instrument]
pub async fn get_current_hostname() -> Result<String, Error> {
    let out = Command::new("hostname")
        .invoke(ErrorKind::ParseSysInfo)
        .await?;
    let out_string = String::from_utf8(out)?;
    // Strip the trailing newline emitted by the command.
    Ok(out_string.trim().to_owned())
}
|
||||
|
||||
/// Sets the system hostname via `hostnamectl set-hostname`.
#[instrument]
pub async fn set_hostname(hostname: &str) -> Result<(), Error> {
    let _out = Command::new("hostnamectl")
        .arg("set-hostname")
        .arg(hostname)
        .invoke(ErrorKind::ParseSysInfo)
        .await?;
    Ok(())
}
|
||||
|
||||
/// Reads the product key from disk, trimming surrounding whitespace.
#[instrument]
pub async fn get_product_key() -> Result<String, Error> {
    let out = tokio::fs::read_to_string(PRODUCT_KEY_PATH)
        .await
        // Attach the path to filesystem errors for diagnosability.
        .with_ctx(|_| (crate::ErrorKind::Filesystem, PRODUCT_KEY_PATH))?;
    Ok(out.trim().to_owned())
}
|
||||
|
||||
#[instrument]
|
||||
pub async fn set_product_key(key: &str) -> Result<(), Error> {
|
||||
let mut pkey_file = File::create(PRODUCT_KEY_PATH).await?;
|
||||
pkey_file.write_all(key.as_bytes()).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Derives the short device id from the product key: the first 4 bytes
/// (8 lowercase hex chars) of SHA-256(key).
pub fn derive_id(key: &str) -> String {
    let mut hasher = sha2::Sha256::new();
    hasher.update(key.as_bytes());
    let res = hasher.finalize();
    hex::encode(&res[0..4])
}
|
||||
|
||||
#[instrument]
|
||||
pub async fn get_id() -> Result<String, Error> {
|
||||
let key = get_product_key().await?;
|
||||
Ok(derive_id(&key))
|
||||
}
|
||||
|
||||
// cat /embassy-os/product_key.txt | shasum -a 256 | head -c 8 | awk '{print "embassy-"$1}' | xargs hostnamectl set-hostname && systemctl restart avahi-daemon
|
||||
#[instrument]
|
||||
pub async fn sync_hostname() -> Result<(), Error> {
|
||||
set_hostname(&format!("embassy-{}", get_id().await?)).await?;
|
||||
Command::new("systemctl")
|
||||
.arg("restart")
|
||||
.arg("avahi-daemon")
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,179 +0,0 @@
|
||||
use std::borrow::{Borrow, Cow};
|
||||
use std::fmt::Debug;
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use crate::util::Version;
|
||||
use crate::Error;
|
||||
|
||||
/// Reserved id tagging system-originated entities.
/// NOTE(review): "x_system" contains '_', which `Id::try_from` rejects — this
/// const bypasses validation, presumably to keep it out of the user-id
/// namespace. Confirm that is intentional.
pub const SYSTEM_ID: Id<&'static str> = Id("x_system");
|
||||
|
||||
/// Error returned when a string fails `Id` validation.
#[derive(Debug, thiserror::Error)]
#[error("Invalid ID")]
pub struct InvalidId;
impl From<InvalidId> for Error {
    fn from(err: InvalidId) -> Self {
        // Map onto the crate-wide error type with the package-id error kind.
        Error::new(err, crate::error::ErrorKind::InvalidPackageId)
    }
}
|
||||
|
||||
/// An id wrapper that has NOT yet passed `Id::try_from` validation.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct IdUnchecked<S: AsRef<str>>(pub S);
impl<'de> Deserialize<'de> for IdUnchecked<Cow<'de, str>> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Custom visitor so input that lives as long as the deserializer can
        // be borrowed zero-copy (Cow::Borrowed), while transient input is
        // copied (Cow::Owned).
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = IdUnchecked<Cow<'de, str>>;
            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                write!(formatter, "a valid ID")
            }
            // Transient &str: must copy.
            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(IdUnchecked(Cow::Owned(v.to_owned())))
            }
            // Already-owned String: take ownership directly.
            fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(IdUnchecked(Cow::Owned(v)))
            }
            // Input borrowed from the deserializer: keep the borrow.
            fn visit_borrowed_str<E>(self, v: &'de str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(IdUnchecked(Cow::Borrowed(v)))
            }
        }
        deserializer.deserialize_any(Visitor)
    }
}
impl<'de> Deserialize<'de> for IdUnchecked<String> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Owned variant: delegate to String's own Deserialize.
        Ok(IdUnchecked(String::deserialize(deserializer)?))
    }
}
impl<'de> Deserialize<'de> for IdUnchecked<&'de str> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Borrowed variant: requires the input to outlive 'de.
        Ok(IdUnchecked(<&'de str>::deserialize(deserializer)?))
    }
}
|
||||
|
||||
/// A validated identifier: only ASCII lowercase letters and '-' are allowed.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Id<S: AsRef<str> = String>(S);
impl<S: AsRef<str>> Id<S> {
    /// Validates and wraps `value`.
    /// NOTE(review): digits are rejected, and the empty string passes —
    /// confirm both are intentional.
    pub fn try_from(value: S) -> Result<Self, InvalidId> {
        if value
            .as_ref()
            .chars()
            .all(|c| c.is_ascii_lowercase() || c == '-')
        {
            Ok(Id(value))
        } else {
            Err(InvalidId)
        }
    }
}
|
||||
impl<'a> Id<&'a str> {
    /// Converts a borrowed id into an owned one.
    pub fn owned(&self) -> Id {
        Id(self.0.to_owned())
    }
}
impl From<Id> for String {
    // Unwraps the validated string; validation is not re-checkable after
    // this, so only convert when the Id form is no longer needed.
    fn from(value: Id) -> Self {
        value.0
    }
}
impl<S: AsRef<str>> std::ops::Deref for Id<S> {
    type Target = S;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl<S: AsRef<str>> std::fmt::Display for Id<S> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0.as_ref())
    }
}
impl<S: AsRef<str>> AsRef<str> for Id<S> {
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}
impl<S: AsRef<str>> Borrow<str> for Id<S> {
    // Enables map lookups by &str against keys of type Id.
    fn borrow(&self) -> &str {
        self.0.as_ref()
    }
}
|
||||
impl<'de, S> Deserialize<'de> for Id<S>
where
    S: AsRef<str>,
    IdUnchecked<S>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Deserialize the raw string, then funnel it through try_from so
        // invalid ids are rejected at the serde boundary.
        let unchecked: IdUnchecked<S> = Deserialize::deserialize(deserializer)?;
        Id::try_from(unchecked.0).map_err(serde::de::Error::custom)
    }
}
impl<S: AsRef<str>> Serialize for Id<S> {
    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
    where
        Ser: Serializer,
    {
        // Ids serialize as plain strings.
        serializer.serialize_str(self.as_ref())
    }
}
|
||||
|
||||
/// Validated identifier for a docker image belonging to a package.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct ImageId<S: AsRef<str> = String>(Id<S>);
impl<S: AsRef<str>> std::fmt::Display for ImageId<S> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", &self.0)
    }
}
impl<S: AsRef<str>> ImageId<S> {
    /// Builds the full docker image tag for this image within a package:
    /// `start9/<pkg>/<image>:<version|latest>`.
    pub fn for_package<PkgId: AsRef<crate::s9pk::manifest::PackageId<S0>>, S0: AsRef<str>>(
        &self,
        pkg_id: PkgId,
        pkg_version: Option<&Version>,
    ) -> String {
        format!(
            "start9/{}/{}:{}",
            pkg_id.as_ref(),
            self.0,
            // No version -> the "latest" tag.
            pkg_version.map(|v| { v.as_str() }).unwrap_or("latest")
        )
    }
}
impl FromStr for ImageId {
    type Err = InvalidId;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Same charset rules as Id.
        Ok(ImageId(Id::try_from(s.to_owned())?))
    }
}
impl<'de, S> Deserialize<'de> for ImageId<S>
where
    S: AsRef<str>,
    Id<S>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Validation happens in Id's Deserialize.
        Ok(ImageId(Deserialize::deserialize(deserializer)?))
    }
}
|
||||
@@ -1,137 +0,0 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::process::Command;
|
||||
|
||||
use crate::context::rpc::RpcContextConfig;
|
||||
use crate::db::model::ServerStatus;
|
||||
use crate::install::PKG_DOCKER_DIR;
|
||||
use crate::util::Invoke;
|
||||
use crate::Error;
|
||||
|
||||
/// Marker file: its presence requests a full docker-data rebuild on the next
/// `init` run; it is removed once the rebuild completes.
pub const SYSTEM_REBUILD_PATH: &str = "/embassy-os/system-rebuild";
|
||||
|
||||
pub async fn check_time_is_synchronized() -> Result<bool, Error> {
|
||||
Ok(String::from_utf8(
|
||||
Command::new("timedatectl")
|
||||
.arg("show")
|
||||
.arg("-p")
|
||||
.arg("NTPSynchronized")
|
||||
.invoke(crate::ErrorKind::Unknown)
|
||||
.await?,
|
||||
)?
|
||||
.trim()
|
||||
== "NTPSynchronized=yes")
|
||||
}
|
||||
|
||||
/// One-time system initialization at daemon startup: mounts persistent
/// journald logs and docker data onto the data drive, (re)loads docker
/// images when a rebuild is requested or the docker copy is missing, syncs
/// SSH keys and wifi config, resets transient server status, waits (bounded)
/// for NTP sync, and runs version migrations.
pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error> {
    // Rebuild marker present => force a fresh copy of docker data below.
    let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok();
    let secret_store = cfg.secret_store().await?;
    // --- persistent journald logs ---
    let log_dir = cfg.datadir().join("main").join("logs");
    if tokio::fs::metadata(&log_dir).await.is_err() {
        tokio::fs::create_dir_all(&log_dir).await?;
    }
    crate::disk::mount::util::bind(&log_dir, "/var/log/journal", false).await?;
    // Restart journald so it picks up the bind-mounted journal directory.
    Command::new("systemctl")
        .arg("restart")
        .arg("systemd-journald")
        .invoke(crate::ErrorKind::Journald)
        .await?;
    tracing::info!("Mounted Logs");
    // --- docker data relocated onto the data drive ---
    let tmp_dir = cfg.datadir().join("package-data/tmp");
    if tokio::fs::metadata(&tmp_dir).await.is_err() {
        tokio::fs::create_dir_all(&tmp_dir).await?;
    }
    let tmp_docker = cfg.datadir().join("package-data/tmp/docker");
    let tmp_docker_exists = tokio::fs::metadata(&tmp_docker).await.is_ok();
    if should_rebuild || !tmp_docker_exists {
        // Rebuild from the pristine /var/lib/docker shipped with the OS.
        if tmp_docker_exists {
            tokio::fs::remove_dir_all(&tmp_docker).await?;
        }
        Command::new("cp")
            .arg("-r")
            .arg("/var/lib/docker")
            .arg(&tmp_docker)
            .invoke(crate::ErrorKind::Filesystem)
            .await?;
    }
    // Stop docker, bind-mount the relocated data dir over /var/lib/docker,
    // then bring docker back up.
    Command::new("systemctl")
        .arg("stop")
        .arg("docker")
        .invoke(crate::ErrorKind::Docker)
        .await?;
    crate::disk::mount::util::bind(&tmp_docker, "/var/lib/docker", false).await?;
    Command::new("systemctl")
        .arg("reset-failed")
        .arg("docker")
        .invoke(crate::ErrorKind::Docker)
        .await?;
    Command::new("systemctl")
        .arg("start")
        .arg("docker")
        .invoke(crate::ErrorKind::Docker)
        .await?;
    tracing::info!("Mounted Docker Data");

    // Freshly copied docker data has no images loaded yet.
    if should_rebuild || !tmp_docker_exists {
        tracing::info!("Loading System Docker Images");
        crate::install::load_images("/var/lib/embassy/system-images").await?;
        tracing::info!("Loaded System Docker Images");

        tracing::info!("Loading Package Docker Images");
        crate::install::load_images(cfg.datadir().join(PKG_DOCKER_DIR)).await?;
        tracing::info!("Loaded Package Docker Images");
    }

    crate::ssh::sync_keys_from_db(&secret_store, "/root/.ssh/authorized_keys").await?;
    tracing::info!("Synced SSH Keys");
    let db = cfg.db(&secret_store, product_key).await?;

    let mut handle = db.handle();

    // Regenerate wpa_supplicant.conf from the last known wifi region.
    crate::net::wifi::synchronize_wpa_supplicant_conf(
        &cfg.datadir().join("main"),
        &*crate::db::DatabaseModel::new()
            .server_info()
            .last_wifi_region()
            .get(&mut handle, false)
            .await
            .map_err(|_e| {
                Error::new(
                    color_eyre::eyre::eyre!("Could not find the last wifi region"),
                    crate::ErrorKind::NotFound,
                )
            })?,
    )
    .await?;
    tracing::info!("Synchronized wpa_supplicant.conf");
    // Reset transient status flags that may be stale from before the reboot.
    let mut info = crate::db::DatabaseModel::new()
        .server_info()
        .get_mut(&mut handle)
        .await?;
    info.status_info = ServerStatus {
        backing_up: false,
        updated: false,
        update_progress: None,
    };
    info.save(&mut handle).await?;

    // Wait up to 60s (1s polls) for NTP sync; warn and continue on timeout.
    let mut warn_time_not_synced = true;
    for _ in 0..60 {
        if check_time_is_synchronized().await? {
            warn_time_not_synced = false;
            break;
        }
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
    if warn_time_not_synced {
        tracing::warn!("Timed out waiting for system time to synchronize");
    }

    // Run any pending version migrations against the database.
    crate::version::init(&mut handle).await?;

    // Rebuild completed: clear the marker so the next boot is normal.
    if should_rebuild {
        tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await?;
    }

    Ok(())
}
|
||||
@@ -1,310 +0,0 @@
|
||||
use std::collections::{BTreeMap, HashMap};
|
||||
|
||||
use bollard::image::ListImagesOptions;
|
||||
use color_eyre::eyre::eyre;
|
||||
use patch_db::{DbHandle, LockType, PatchDbHandle};
|
||||
use sqlx::{Executor, Sqlite};
|
||||
use tracing::instrument;
|
||||
|
||||
use super::{PKG_ARCHIVE_DIR, PKG_DOCKER_DIR};
|
||||
use crate::context::RpcContext;
|
||||
use crate::db::model::{CurrentDependencyInfo, InstalledPackageDataEntry, PackageDataEntry};
|
||||
use crate::dependencies::reconfigure_dependents_with_live_pointers;
|
||||
use crate::error::ErrorCollection;
|
||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
||||
use crate::util::{Apply, Version};
|
||||
use crate::Error;
|
||||
|
||||
#[instrument(skip(ctx, db, deps))]
|
||||
pub async fn update_dependency_errors_of_dependents<
|
||||
'a,
|
||||
Db: DbHandle,
|
||||
I: IntoIterator<Item = &'a PackageId>,
|
||||
>(
|
||||
ctx: &RpcContext,
|
||||
db: &mut Db,
|
||||
id: &PackageId,
|
||||
deps: I,
|
||||
) -> Result<(), Error> {
|
||||
for dep in deps {
|
||||
if let Some(man) = &*crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&dep)
|
||||
.and_then(|m| m.installed())
|
||||
.map::<_, Manifest>(|m| m.manifest())
|
||||
.get(db, true)
|
||||
.await?
|
||||
{
|
||||
if let Err(e) = if let Some(info) = man.dependencies.0.get(id) {
|
||||
info.satisfied(ctx, db, id, None, dep).await?
|
||||
} else {
|
||||
Ok(())
|
||||
} {
|
||||
let mut errs = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&dep)
|
||||
.expect(db)
|
||||
.await?
|
||||
.installed()
|
||||
.expect(db)
|
||||
.await?
|
||||
.status()
|
||||
.dependency_errors()
|
||||
.get_mut(db)
|
||||
.await?;
|
||||
errs.0.insert(id.clone(), e);
|
||||
errs.save(db).await?;
|
||||
} else {
|
||||
let mut errs = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&dep)
|
||||
.expect(db)
|
||||
.await?
|
||||
.installed()
|
||||
.expect(db)
|
||||
.await?
|
||||
.status()
|
||||
.dependency_errors()
|
||||
.get_mut(db)
|
||||
.await?;
|
||||
errs.0.remove(id);
|
||||
errs.save(db).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx))]
|
||||
pub async fn cleanup(ctx: &RpcContext, id: &PackageId, version: &Version) -> Result<(), Error> {
|
||||
let mut errors = ErrorCollection::new();
|
||||
ctx.managers.remove(&(id.clone(), version.clone())).await;
|
||||
// docker images start9/$APP_ID/*:$VERSION -q | xargs docker rmi
|
||||
let images = ctx
|
||||
.docker
|
||||
.list_images(Some(ListImagesOptions {
|
||||
all: false,
|
||||
filters: {
|
||||
let mut f = HashMap::new();
|
||||
f.insert(
|
||||
"reference".to_owned(),
|
||||
vec![format!("start9/{}/*:{}", id, version)],
|
||||
);
|
||||
f
|
||||
},
|
||||
digests: false,
|
||||
}))
|
||||
.await
|
||||
.apply(|res| errors.handle(res));
|
||||
errors.extend(
|
||||
futures::future::join_all(images.into_iter().flatten().map(|image| async {
|
||||
let image = image; // move into future
|
||||
ctx.docker.remove_image(&image.id, None, None).await
|
||||
}))
|
||||
.await,
|
||||
);
|
||||
let pkg_archive_dir = ctx
|
||||
.datadir
|
||||
.join(PKG_ARCHIVE_DIR)
|
||||
.join(id)
|
||||
.join(version.as_str());
|
||||
if tokio::fs::metadata(&pkg_archive_dir).await.is_ok() {
|
||||
tokio::fs::remove_dir_all(&pkg_archive_dir)
|
||||
.await
|
||||
.apply(|res| errors.handle(res));
|
||||
}
|
||||
let docker_path = ctx
|
||||
.datadir
|
||||
.join(PKG_DOCKER_DIR)
|
||||
.join(id)
|
||||
.join(version.as_str());
|
||||
if tokio::fs::metadata(&docker_path).await.is_ok() {
|
||||
tokio::fs::remove_dir_all(&docker_path)
|
||||
.await
|
||||
.apply(|res| errors.handle(res));
|
||||
}
|
||||
|
||||
errors.into_result()
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, db))]
|
||||
pub async fn cleanup_failed<Db: DbHandle>(
|
||||
ctx: &RpcContext,
|
||||
db: &mut Db,
|
||||
id: &PackageId,
|
||||
) -> Result<(), Error> {
|
||||
crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.lock(db, LockType::Write)
|
||||
.await?;
|
||||
let pde = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(id)
|
||||
.expect(db)
|
||||
.await?
|
||||
.get(db, true)
|
||||
.await?
|
||||
.into_owned();
|
||||
if let Some(manifest) = match &pde {
|
||||
PackageDataEntry::Installing { manifest, .. }
|
||||
| PackageDataEntry::Restoring { manifest, .. } => Some(manifest),
|
||||
PackageDataEntry::Updating {
|
||||
manifest,
|
||||
installed:
|
||||
InstalledPackageDataEntry {
|
||||
manifest: installed_manifest,
|
||||
..
|
||||
},
|
||||
..
|
||||
} => {
|
||||
if &manifest.version != &installed_manifest.version {
|
||||
Some(manifest)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
tracing::warn!("{}: Nothing to clean up!", id);
|
||||
None
|
||||
}
|
||||
} {
|
||||
cleanup(ctx, id, &manifest.version).await?;
|
||||
}
|
||||
|
||||
match pde {
|
||||
PackageDataEntry::Installing { .. } | PackageDataEntry::Restoring { .. } => {
|
||||
crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.remove(db, id)
|
||||
.await?;
|
||||
}
|
||||
PackageDataEntry::Updating {
|
||||
installed,
|
||||
static_files,
|
||||
..
|
||||
} => {
|
||||
crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(id)
|
||||
.put(
|
||||
db,
|
||||
&PackageDataEntry::Installed {
|
||||
manifest: installed.manifest.clone(),
|
||||
installed,
|
||||
static_files,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(db, current_dependencies))]
|
||||
pub async fn remove_from_current_dependents_lists<
|
||||
'a,
|
||||
Db: DbHandle,
|
||||
I: IntoIterator<Item = &'a PackageId>,
|
||||
>(
|
||||
db: &mut Db,
|
||||
id: &'a PackageId,
|
||||
current_dependencies: I,
|
||||
) -> Result<(), Error> {
|
||||
for dep in current_dependencies.into_iter().chain(std::iter::once(id)) {
|
||||
if let Some(current_dependents) = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(dep)
|
||||
.and_then(|m| m.installed())
|
||||
.map::<_, BTreeMap<PackageId, CurrentDependencyInfo>>(|m| m.current_dependents())
|
||||
.check(db)
|
||||
.await?
|
||||
{
|
||||
if current_dependents
|
||||
.clone()
|
||||
.idx_model(id)
|
||||
.exists(db, true)
|
||||
.await?
|
||||
{
|
||||
current_dependents.remove(db, id).await?
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, secrets, db))]
|
||||
pub async fn uninstall<Ex>(
|
||||
ctx: &RpcContext,
|
||||
db: &mut PatchDbHandle,
|
||||
secrets: &mut Ex,
|
||||
id: &PackageId,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let mut tx = db.begin().await?;
|
||||
crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.lock(&mut tx, LockType::Write)
|
||||
.await?;
|
||||
let entry = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(id)
|
||||
.and_then(|pde| pde.removing())
|
||||
.get(&mut tx, true)
|
||||
.await?
|
||||
.into_owned()
|
||||
.ok_or_else(|| {
|
||||
Error::new(
|
||||
eyre!("Package not in removing state: {}", id),
|
||||
crate::ErrorKind::NotFound,
|
||||
)
|
||||
})?;
|
||||
cleanup(ctx, &entry.manifest.id, &entry.manifest.version).await?;
|
||||
|
||||
crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.remove(&mut tx, id)
|
||||
.await?;
|
||||
|
||||
// once we have removed the package entry, we can change all the dependent pointers to null
|
||||
reconfigure_dependents_with_live_pointers(ctx, &mut tx, &entry).await?;
|
||||
|
||||
remove_from_current_dependents_lists(
|
||||
&mut tx,
|
||||
&entry.manifest.id,
|
||||
entry.current_dependencies.keys(),
|
||||
)
|
||||
.await?;
|
||||
update_dependency_errors_of_dependents(
|
||||
ctx,
|
||||
&mut tx,
|
||||
&entry.manifest.id,
|
||||
entry.current_dependents.keys(),
|
||||
)
|
||||
.await?;
|
||||
let volumes = ctx
|
||||
.datadir
|
||||
.join(crate::volume::PKG_VOLUME_DIR)
|
||||
.join(&entry.manifest.id);
|
||||
if tokio::fs::metadata(&volumes).await.is_ok() {
|
||||
tokio::fs::remove_dir_all(&volumes).await?;
|
||||
}
|
||||
tx.commit(None).await?;
|
||||
remove_tor_keys(secrets, &entry.manifest.id).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(secrets))]
|
||||
pub async fn remove_tor_keys<Ex>(secrets: &mut Ex, id: &PackageId) -> Result<(), Error>
|
||||
where
|
||||
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let id_str = id.as_str();
|
||||
sqlx::query!("DELETE FROM tor WHERE package = ?", id_str)
|
||||
.execute(secrets)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,74 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use patch_db::{DbHandle, LockType};
|
||||
use rpc_toolkit::command;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::dependencies::{break_transitive, BreakageRes, DependencyError};
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::serde::display_serializable;
|
||||
use crate::util::Version;
|
||||
use crate::Error;
|
||||
|
||||
#[command(subcommands(dry))]
|
||||
pub async fn update() -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[command(display(display_serializable))]
|
||||
pub async fn dry(
|
||||
#[context] ctx: RpcContext,
|
||||
#[arg] id: PackageId,
|
||||
#[arg] version: Version,
|
||||
) -> Result<BreakageRes, Error> {
|
||||
let mut db = ctx.db.handle();
|
||||
let mut tx = db.begin().await?;
|
||||
let mut breakages = BTreeMap::new();
|
||||
crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.lock(&mut tx, LockType::Read)
|
||||
.await?;
|
||||
for dependent in crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&id)
|
||||
.and_then(|m| m.installed())
|
||||
.expect(&mut tx)
|
||||
.await?
|
||||
.current_dependents()
|
||||
.keys(&mut tx, true)
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|dependent| &id != dependent)
|
||||
{
|
||||
let version_req = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&dependent)
|
||||
.and_then(|m| m.installed())
|
||||
.expect(&mut tx)
|
||||
.await?
|
||||
.manifest()
|
||||
.dependencies()
|
||||
.idx_model(&id)
|
||||
.expect(&mut tx)
|
||||
.await?
|
||||
.get(&mut tx, true)
|
||||
.await?
|
||||
.into_owned()
|
||||
.version;
|
||||
if !version.satisfies(&version_req) {
|
||||
break_transitive(
|
||||
&mut tx,
|
||||
&dependent,
|
||||
&id,
|
||||
DependencyError::IncorrectVersion {
|
||||
expected: version_req,
|
||||
received: version.clone(),
|
||||
},
|
||||
&mut breakages,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
tx.abort().await?;
|
||||
Ok(BreakageRes(breakages))
|
||||
}
|
||||
@@ -1,246 +0,0 @@
|
||||
use std::process::Stdio;
|
||||
use std::time::{Duration, UNIX_EPOCH};
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use clap::ArgMatches;
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::TryStreamExt;
|
||||
use rpc_toolkit::command;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||
use tokio::process::Command;
|
||||
use tokio_stream::wrappers::LinesStream;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::action::docker::DockerAction;
|
||||
use crate::error::ResultExt;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::serde::Reversible;
|
||||
use crate::Error;
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct LogResponse {
|
||||
entries: Reversible<LogEntry>,
|
||||
start_cursor: Option<String>,
|
||||
end_cursor: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
|
||||
pub struct LogEntry {
|
||||
timestamp: DateTime<Utc>,
|
||||
message: String,
|
||||
}
|
||||
impl std::fmt::Display for LogEntry {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(f, "{} {}", self.timestamp, self.message)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
struct JournalctlEntry {
|
||||
#[serde(rename = "__REALTIME_TIMESTAMP")]
|
||||
timestamp: String,
|
||||
#[serde(rename = "MESSAGE")]
|
||||
#[serde(deserialize_with = "deserialize_string_or_utf8_array")]
|
||||
message: String,
|
||||
#[serde(rename = "__CURSOR")]
|
||||
cursor: String,
|
||||
}
|
||||
impl JournalctlEntry {
|
||||
fn log_entry(self) -> Result<(String, LogEntry), Error> {
|
||||
Ok((
|
||||
self.cursor,
|
||||
LogEntry {
|
||||
timestamp: DateTime::<Utc>::from(
|
||||
UNIX_EPOCH + Duration::from_micros(self.timestamp.parse::<u64>()?),
|
||||
),
|
||||
message: self.message,
|
||||
},
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn deserialize_string_or_utf8_array<'de, D: serde::de::Deserializer<'de>>(
|
||||
deserializer: D,
|
||||
) -> std::result::Result<String, D::Error> {
|
||||
struct Visitor;
|
||||
impl<'de> serde::de::Visitor<'de> for Visitor {
|
||||
type Value = String;
|
||||
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(formatter, "a parsable string")
|
||||
}
|
||||
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
Ok(v.to_owned())
|
||||
}
|
||||
fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
Ok(v)
|
||||
}
|
||||
fn visit_unit<E>(self) -> Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
Ok(String::new())
|
||||
}
|
||||
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
|
||||
where
|
||||
A: serde::de::SeqAccess<'de>,
|
||||
{
|
||||
String::from_utf8(
|
||||
std::iter::repeat_with(|| seq.next_element::<u8>().transpose())
|
||||
.take_while(|a| a.is_some())
|
||||
.filter_map(|a| a)
|
||||
.collect::<Result<Vec<u8>, _>>()?,
|
||||
)
|
||||
.map_err(serde::de::Error::custom)
|
||||
}
|
||||
}
|
||||
deserializer.deserialize_any(Visitor)
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum LogSource {
|
||||
Kernel,
|
||||
Service(&'static str),
|
||||
Container(PackageId),
|
||||
}
|
||||
|
||||
pub fn display_logs(all: LogResponse, _: &ArgMatches<'_>) {
|
||||
for entry in all.entries.iter() {
|
||||
println!("{}", entry);
|
||||
}
|
||||
}
|
||||
|
||||
#[command(display(display_logs))]
|
||||
pub async fn logs(
|
||||
#[arg] id: PackageId,
|
||||
#[arg] limit: Option<usize>,
|
||||
#[arg] cursor: Option<String>,
|
||||
#[arg] before_flag: Option<bool>,
|
||||
) -> Result<LogResponse, Error> {
|
||||
Ok(fetch_logs(
|
||||
LogSource::Container(id),
|
||||
limit,
|
||||
cursor,
|
||||
before_flag.unwrap_or(false),
|
||||
)
|
||||
.await?)
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub async fn fetch_logs(
|
||||
id: LogSource,
|
||||
limit: Option<usize>,
|
||||
cursor: Option<String>,
|
||||
before_flag: bool,
|
||||
) -> Result<LogResponse, Error> {
|
||||
let mut cmd = Command::new("journalctl");
|
||||
|
||||
let limit = limit.unwrap_or(50);
|
||||
|
||||
cmd.arg("--output=json");
|
||||
cmd.arg("--output-fields=MESSAGE");
|
||||
cmd.arg(format!("-n{}", limit));
|
||||
match id {
|
||||
LogSource::Kernel => {
|
||||
cmd.arg("-k");
|
||||
}
|
||||
LogSource::Service(id) => {
|
||||
cmd.arg("-u");
|
||||
cmd.arg(id);
|
||||
}
|
||||
LogSource::Container(id) => {
|
||||
cmd.arg(format!(
|
||||
"CONTAINER_NAME={}",
|
||||
DockerAction::container_name(&id, None)
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let cursor_formatted = format!("--after-cursor={}", cursor.clone().unwrap_or("".to_owned()));
|
||||
let mut get_prev_logs_and_reverse = false;
|
||||
if cursor.is_some() {
|
||||
cmd.arg(&cursor_formatted);
|
||||
if before_flag {
|
||||
get_prev_logs_and_reverse = true;
|
||||
}
|
||||
}
|
||||
if get_prev_logs_and_reverse {
|
||||
cmd.arg("--reverse");
|
||||
}
|
||||
|
||||
let mut child = cmd.stdout(Stdio::piped()).spawn()?;
|
||||
let out = BufReader::new(
|
||||
child
|
||||
.stdout
|
||||
.take()
|
||||
.ok_or_else(|| Error::new(eyre!("No stdout available"), crate::ErrorKind::Journald))?,
|
||||
);
|
||||
|
||||
let journalctl_entries = LinesStream::new(out.lines());
|
||||
|
||||
let mut deserialized_entries = journalctl_entries
|
||||
.map_err(|e| Error::new(e, crate::ErrorKind::Journald))
|
||||
.and_then(|s| {
|
||||
futures::future::ready(
|
||||
serde_json::from_str::<JournalctlEntry>(&s)
|
||||
.with_kind(crate::ErrorKind::Deserialization),
|
||||
)
|
||||
});
|
||||
|
||||
let mut entries = Vec::with_capacity(limit);
|
||||
let mut start_cursor = None;
|
||||
|
||||
if let Some(first) = deserialized_entries.try_next().await? {
|
||||
let (cursor, entry) = first.log_entry()?;
|
||||
start_cursor = Some(cursor);
|
||||
entries.push(entry);
|
||||
}
|
||||
|
||||
let (mut end_cursor, entries) = deserialized_entries
|
||||
.try_fold(
|
||||
(start_cursor.clone(), entries),
|
||||
|(_, mut acc), entry| async move {
|
||||
let (cursor, entry) = entry.log_entry()?;
|
||||
acc.push(entry);
|
||||
Ok((Some(cursor), acc))
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
let mut entries = Reversible::new(entries);
|
||||
// reverse again so output is always in increasing chronological order
|
||||
if get_prev_logs_and_reverse {
|
||||
entries.reverse();
|
||||
std::mem::swap(&mut start_cursor, &mut end_cursor);
|
||||
}
|
||||
Ok(LogResponse {
|
||||
entries,
|
||||
start_cursor,
|
||||
end_cursor,
|
||||
})
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
pub async fn test_logs() {
|
||||
let response = fetch_logs(
|
||||
// change `tor.service` to an actual journald unit on your machine
|
||||
// LogSource::Service("tor.service"),
|
||||
// first run `docker run --name=hello-world.embassy --log-driver=journald hello-world`
|
||||
LogSource::Container("hello-world".parse().unwrap()),
|
||||
// Some(5),
|
||||
None,
|
||||
None,
|
||||
// Some("s=1b8c418e28534400856c27b211dd94fd;i=5a7;b=97571c13a1284f87bc0639b5cff5acbe;m=740e916;t=5ca073eea3445;x=f45bc233ca328348".to_owned()),
|
||||
false,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let serialized = serde_json::to_string_pretty(&response).unwrap();
|
||||
println!("{}", serialized);
|
||||
}
|
||||
@@ -1,124 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use patch_db::{DbHandle, LockType};
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::dependencies::{break_transitive, DependencyError};
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::status::health_check::{HealthCheckId, HealthCheckResult};
|
||||
use crate::status::MainStatus;
|
||||
use crate::Error;
|
||||
|
||||
#[instrument(skip(ctx, db))]
|
||||
pub async fn check<Db: DbHandle>(
|
||||
ctx: &RpcContext,
|
||||
db: &mut Db,
|
||||
id: &PackageId,
|
||||
should_commit: &AtomicBool,
|
||||
) -> Result<(), Error> {
|
||||
let mut tx = db.begin().await?;
|
||||
|
||||
let mut checkpoint = tx.begin().await?;
|
||||
|
||||
let installed_model = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(id)
|
||||
.expect(&mut checkpoint)
|
||||
.await?
|
||||
.installed()
|
||||
.expect(&mut checkpoint)
|
||||
.await?;
|
||||
|
||||
let manifest = installed_model
|
||||
.clone()
|
||||
.manifest()
|
||||
.get(&mut checkpoint, true)
|
||||
.await?
|
||||
.into_owned();
|
||||
|
||||
let started = installed_model
|
||||
.clone()
|
||||
.status()
|
||||
.main()
|
||||
.started()
|
||||
.get(&mut checkpoint, true)
|
||||
.await?
|
||||
.into_owned();
|
||||
|
||||
checkpoint.save().await?;
|
||||
|
||||
let health_results = if let Some(started) = started {
|
||||
manifest
|
||||
.health_checks
|
||||
.check_all(ctx, started, id, &manifest.version, &manifest.volumes)
|
||||
.await?
|
||||
} else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
if !should_commit.load(Ordering::SeqCst) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut checkpoint = tx.begin().await?;
|
||||
|
||||
crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.lock(&mut checkpoint, LockType::Write)
|
||||
.await?;
|
||||
|
||||
let mut status = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(id)
|
||||
.expect(&mut checkpoint)
|
||||
.await?
|
||||
.installed()
|
||||
.expect(&mut checkpoint)
|
||||
.await?
|
||||
.status()
|
||||
.main()
|
||||
.get_mut(&mut checkpoint)
|
||||
.await?;
|
||||
|
||||
match &mut *status {
|
||||
MainStatus::Running { health, .. } => {
|
||||
*health = health_results.clone();
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
|
||||
status.save(&mut checkpoint).await?;
|
||||
|
||||
let current_dependents = installed_model
|
||||
.current_dependents()
|
||||
.get(&mut checkpoint, true)
|
||||
.await?;
|
||||
|
||||
checkpoint.save().await?;
|
||||
|
||||
for (dependent, info) in &*current_dependents {
|
||||
let failures: BTreeMap<HealthCheckId, HealthCheckResult> = health_results
|
||||
.iter()
|
||||
.filter(|(_, hc_res)| !matches!(hc_res, HealthCheckResult::Success { .. }))
|
||||
.filter(|(hc_id, _)| info.health_checks.contains(hc_id))
|
||||
.map(|(k, v)| (k.clone(), v.clone()))
|
||||
.collect();
|
||||
|
||||
if !failures.is_empty() {
|
||||
break_transitive(
|
||||
&mut tx,
|
||||
&dependent,
|
||||
id,
|
||||
DependencyError::HealthChecksFailed { failures },
|
||||
&mut BTreeMap::new(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
tx.save().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,624 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::convert::TryInto;
|
||||
use std::future::Future;
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::task::Poll;
|
||||
use std::time::Duration;
|
||||
|
||||
use bollard::container::{KillContainerOptions, StopContainerOptions};
|
||||
use chrono::Utc;
|
||||
use color_eyre::eyre::eyre;
|
||||
use nix::sys::signal::Signal;
|
||||
use num_enum::TryFromPrimitive;
|
||||
use patch_db::DbHandle;
|
||||
use sqlx::{Executor, Sqlite};
|
||||
use tokio::sync::watch::error::RecvError;
|
||||
use tokio::sync::watch::{channel, Receiver, Sender};
|
||||
use tokio::sync::{Notify, RwLock};
|
||||
use torut::onion::TorSecretKeyV3;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::action::docker::DockerAction;
|
||||
use crate::action::{ActionImplementation, NoOutput};
|
||||
use crate::context::RpcContext;
|
||||
use crate::manager::sync::synchronizer;
|
||||
use crate::net::interface::InterfaceId;
|
||||
use crate::net::GeneratedCertificateMountPoint;
|
||||
use crate::notifications::NotificationLevel;
|
||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
||||
use crate::status::MainStatus;
|
||||
use crate::util::{Container, NonDetachingJoinHandle, Version};
|
||||
use crate::Error;
|
||||
|
||||
pub mod health;
|
||||
mod sync;
|
||||
|
||||
pub const HEALTH_CHECK_COOLDOWN_SECONDS: u64 = 60;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ManagerMap(RwLock<BTreeMap<(PackageId, Version), Arc<Manager>>>);
|
||||
impl ManagerMap {
|
||||
#[instrument(skip(self, ctx, db, secrets))]
|
||||
pub async fn init<Db: DbHandle, Ex>(
|
||||
&self,
|
||||
ctx: &RpcContext,
|
||||
db: &mut Db,
|
||||
secrets: &mut Ex,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let mut res = BTreeMap::new();
|
||||
for package in crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.keys(db, true)
|
||||
.await?
|
||||
{
|
||||
let man: Manifest = if let Some(manifest) = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&package)
|
||||
.and_then(|pkg| pkg.installed())
|
||||
.map(|m| m.manifest())
|
||||
.get(db, true)
|
||||
.await?
|
||||
.to_owned()
|
||||
{
|
||||
manifest
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
let tor_keys = man.interfaces.tor_keys(secrets, &package).await?;
|
||||
res.insert(
|
||||
(package, man.version.clone()),
|
||||
Arc::new(Manager::create(ctx.clone(), man, tor_keys).await?),
|
||||
);
|
||||
}
|
||||
*self.0.write().await = res;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self, ctx))]
|
||||
pub async fn add(
|
||||
&self,
|
||||
ctx: RpcContext,
|
||||
manifest: Manifest,
|
||||
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
|
||||
) -> Result<(), Error> {
|
||||
let mut lock = self.0.write().await;
|
||||
let id = (manifest.id.clone(), manifest.version.clone());
|
||||
if let Some(man) = lock.remove(&id) {
|
||||
if !man.thread.is_empty().await {
|
||||
man.exit().await?;
|
||||
}
|
||||
}
|
||||
lock.insert(
|
||||
id,
|
||||
Arc::new(Manager::create(ctx, manifest, tor_keys).await?),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn remove(&self, id: &(PackageId, Version)) {
|
||||
if let Some(man) = self.0.write().await.remove(id) {
|
||||
if let Err(e) = man.exit().await {
|
||||
tracing::error!("Error shutting down manager: {}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn empty(&self) -> Result<(), Error> {
|
||||
let res =
|
||||
futures::future::join_all(std::mem::take(&mut *self.0.write().await).into_iter().map(
|
||||
|((id, version), man)| async move {
|
||||
tracing::debug!("Manager for {}@{} shutting down", id, version);
|
||||
man.exit().await?;
|
||||
tracing::debug!("Manager for {}@{} is shutdown", id, version);
|
||||
if let Err(e) = Arc::try_unwrap(man) {
|
||||
tracing::trace!(
|
||||
"Manager for {}@{} still has {} other open references",
|
||||
id,
|
||||
version,
|
||||
Arc::strong_count(&e) - 1
|
||||
);
|
||||
}
|
||||
Ok::<_, Error>(())
|
||||
},
|
||||
))
|
||||
.await;
|
||||
res.into_iter().fold(Ok(()), |res, x| match (res, x) {
|
||||
(Ok(()), x) => x,
|
||||
(Err(e), Ok(())) => Err(e),
|
||||
(Err(e1), Err(e2)) => Err(Error::new(eyre!("{}, {}", e1.source, e2.source), e1.kind)),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn get(&self, id: &(PackageId, Version)) -> Option<Arc<Manager>> {
|
||||
self.0.read().await.get(id).cloned()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Manager {
|
||||
shared: Arc<ManagerSharedState>,
|
||||
thread: Container<NonDetachingJoinHandle<()>>,
|
||||
}
|
||||
|
||||
#[derive(TryFromPrimitive)]
|
||||
#[repr(usize)]
|
||||
pub enum Status {
|
||||
Starting = 0,
|
||||
Running = 1,
|
||||
Stopped = 2,
|
||||
Paused = 3,
|
||||
Shutdown = 4,
|
||||
}
|
||||
|
||||
pub struct ManagerSharedState {
|
||||
ctx: RpcContext,
|
||||
status: AtomicUsize,
|
||||
on_stop: Sender<OnStop>,
|
||||
manifest: Manifest,
|
||||
container_name: String,
|
||||
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
|
||||
synchronized: Notify,
|
||||
synchronize_now: Notify,
|
||||
commit_health_check_results: AtomicBool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum OnStop {
|
||||
Restart,
|
||||
Sleep,
|
||||
Exit,
|
||||
}
|
||||
|
||||
#[instrument(skip(state))]
|
||||
async fn run_main(
|
||||
state: &Arc<ManagerSharedState>,
|
||||
) -> Result<Result<NoOutput, (i32, String)>, Error> {
|
||||
let rt_state = state.clone();
|
||||
let interfaces = state
|
||||
.manifest
|
||||
.interfaces
|
||||
.0
|
||||
.iter()
|
||||
.map(|(id, info)| {
|
||||
Ok((
|
||||
id.clone(),
|
||||
info,
|
||||
state
|
||||
.tor_keys
|
||||
.get(id)
|
||||
.ok_or_else(|| {
|
||||
Error::new(eyre!("interface {} missing key", id), crate::ErrorKind::Tor)
|
||||
})?
|
||||
.clone(),
|
||||
))
|
||||
})
|
||||
.collect::<Result<Vec<_>, Error>>()?;
|
||||
let generated_certificate = state
|
||||
.ctx
|
||||
.net_controller
|
||||
.generate_certificate_mountpoint(&state.manifest.id, &interfaces)
|
||||
.await?;
|
||||
let mut runtime =
|
||||
tokio::spawn(async move { start_up_image(rt_state, generated_certificate).await });
|
||||
let ip;
|
||||
loop {
|
||||
match state
|
||||
.ctx
|
||||
.docker
|
||||
.inspect_container(&state.container_name, None)
|
||||
.await
|
||||
{
|
||||
Ok(res) => {
|
||||
if let Some(ip_addr) = res
|
||||
.network_settings
|
||||
.and_then(|ns| ns.networks)
|
||||
.and_then(|mut n| n.remove("start9"))
|
||||
.and_then(|es| es.ip_address)
|
||||
.filter(|ip| !ip.is_empty())
|
||||
.map(|ip| ip.parse())
|
||||
.transpose()?
|
||||
{
|
||||
ip = ip_addr;
|
||||
break;
|
||||
}
|
||||
}
|
||||
Err(bollard::errors::Error::DockerResponseNotFoundError { .. }) => (),
|
||||
Err(e) => Err(e)?,
|
||||
}
|
||||
match futures::poll!(&mut runtime) {
|
||||
Poll::Ready(res) => {
|
||||
return res
|
||||
.map_err(|_| {
|
||||
Error::new(eyre!("Manager runtime panicked!"), crate::ErrorKind::Docker)
|
||||
})
|
||||
.and_then(|a| a)
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
state
|
||||
.ctx
|
||||
.net_controller
|
||||
.add(&state.manifest.id, ip, interfaces, generated_certificate)
|
||||
.await?;
|
||||
|
||||
state
|
||||
.commit_health_check_results
|
||||
.store(true, Ordering::SeqCst);
|
||||
let health = async {
|
||||
tokio::time::sleep(Duration::from_secs(10)).await; // only sleep for 1 second before first health check
|
||||
loop {
|
||||
let mut db = state.ctx.db.handle();
|
||||
if let Err(e) = health::check(
|
||||
&state.ctx,
|
||||
&mut db,
|
||||
&state.manifest.id,
|
||||
&state.commit_health_check_results,
|
||||
)
|
||||
.await
|
||||
{
|
||||
tracing::error!(
|
||||
"Failed to run health check for {}: {}",
|
||||
&state.manifest.id,
|
||||
e
|
||||
);
|
||||
tracing::debug!("{:?}", e);
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(HEALTH_CHECK_COOLDOWN_SECONDS)).await;
|
||||
}
|
||||
};
|
||||
let _ = state
|
||||
.status
|
||||
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
|
||||
if x == Status::Starting as usize {
|
||||
Some(Status::Running as usize)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
let res = tokio::select! {
|
||||
a = runtime => a.map_err(|_| Error::new(eyre!("Manager runtime panicked!"), crate::ErrorKind::Docker)).and_then(|a| a),
|
||||
_ = health => Err(Error::new(eyre!("Health check daemon exited!"), crate::ErrorKind::Unknown)),
|
||||
};
|
||||
state
|
||||
.ctx
|
||||
.net_controller
|
||||
.remove(
|
||||
&state.manifest.id,
|
||||
state.manifest.interfaces.0.keys().cloned(),
|
||||
)
|
||||
.await?;
|
||||
res
|
||||
}
|
||||
|
||||
/// We want to start up the manifest, but in this case we want to know that we have generated the certificates.
|
||||
/// Note for _generated_certificate: Needed to know that before we start the state we have generated the certificate
|
||||
async fn start_up_image(
|
||||
rt_state: Arc<ManagerSharedState>,
|
||||
_generated_certificate: GeneratedCertificateMountPoint,
|
||||
) -> Result<Result<NoOutput, (i32, String)>, Error> {
|
||||
rt_state
|
||||
.manifest
|
||||
.main
|
||||
.execute::<(), NoOutput>(
|
||||
&rt_state.ctx,
|
||||
&rt_state.manifest.id,
|
||||
&rt_state.manifest.version,
|
||||
None,
|
||||
&rt_state.manifest.volumes,
|
||||
None,
|
||||
false,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
impl Manager {
    /// Spawns the background machinery for one installed package: a manager
    /// thread (the start/stop/restart loop) raced against a status
    /// synchronizer; whichever finishes first tears the other down.
    #[instrument(skip(ctx))]
    async fn create(
        ctx: RpcContext,
        manifest: Manifest,
        tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
    ) -> Result<Self, Error> {
        // Initial state OnStop::Sleep: stay stopped until told otherwise.
        let (on_stop, recv) = channel(OnStop::Sleep);
        let shared = Arc::new(ManagerSharedState {
            ctx,
            status: AtomicUsize::new(Status::Stopped as usize),
            on_stop,
            container_name: DockerAction::container_name(&manifest.id, None),
            manifest,
            tor_keys,
            synchronized: Notify::new(),
            synchronize_now: Notify::new(),
            // Health checks commit results until a stop/exit is in progress.
            commit_health_check_results: AtomicBool::new(true),
        });
        // Trigger an immediate synchronization pass instead of waiting for
        // the synchronizer's first timer tick.
        shared.synchronize_now.notify_one();
        let thread_shared = shared.clone();
        let thread = tokio::spawn(async move {
            tokio::select! {
                _ = manager_thread_loop(recv, &thread_shared) => (),
                _ = synchronizer(&*thread_shared) => (),
            }
        });
        Ok(Manager {
            shared,
            thread: Container::new(Some(thread.into())),
        })
    }

    /// Sends `signal` to the service's container.
    ///
    /// Docker Conflict/NotFound responses are treated as success: they mean
    /// the container is already gone or not running, so there is nothing to
    /// signal.
    pub async fn signal(&self, signal: &Signal) -> Result<(), Error> {
        // stop health checks from committing their results
        self.shared
            .commit_health_check_results
            .store(false, Ordering::SeqCst);

        // send signal to container
        self.shared
            .ctx
            .docker
            .kill_container(
                &self.shared.container_name,
                Some(KillContainerOptions {
                    signal: signal.to_string(),
                }),
            )
            .await
            .or_else(|e| {
                if matches!(
                    e,
                    bollard::errors::Error::DockerResponseConflictError { .. }
                        | bollard::errors::Error::DockerResponseNotFoundError { .. }
                ) {
                    Ok(())
                } else {
                    Err(e)
                }
            })?;
        Ok(())
    }

    /// Permanently shuts the manager down: stops the container (honoring the
    /// manifest's sigterm timeout, defaulting to 30s), marks the status
    /// Shutdown, and joins the manager thread.
    #[instrument(skip(self))]
    async fn exit(&self) -> Result<(), Error> {
        self.shared
            .commit_health_check_results
            .store(false, Ordering::SeqCst);
        // Send error means the manager thread is already gone — ignore.
        let _ = self.shared.on_stop.send(OnStop::Exit);
        match self
            .shared
            .ctx
            .docker
            .stop_container(
                &self.shared.container_name,
                Some(StopContainerOptions {
                    // Seconds docker waits after SIGTERM before killing.
                    t: match &self.shared.manifest.main {
                        ActionImplementation::Docker(a) => a,
                    }
                    .sigterm_timeout
                    .map(|a| *a)
                    .unwrap_or(Duration::from_secs(30))
                    .as_secs_f64() as i64,
                }),
            )
            .await
        {
            Err(bollard::errors::Error::DockerResponseNotFoundError { .. })
            | Err(bollard::errors::Error::DockerResponseConflictError { .. })
            | Err(bollard::errors::Error::DockerResponseNotModifiedError { .. }) => (), // Already stopped
            a => a?,
        };
        // Shutdown also tells the synchronizer loop to terminate.
        self.shared.status.store(
            Status::Shutdown as usize,
            std::sync::atomic::Ordering::SeqCst,
        );
        if let Some(thread) = self.thread.take().await {
            thread.await.map_err(|e| {
                Error::new(
                    eyre!("Manager thread panicked: {}", e),
                    crate::ErrorKind::Docker,
                )
            })?;
        }
        Ok(())
    }

    /// this will depend on locks to main status. if you hold any locks when calling this function that conflict, this will deadlock
    pub async fn synchronize(&self) {
        // Wake the synchronizer immediately, then wait until it signals that
        // a full reconciliation pass has completed.
        self.shared.synchronize_now.notify_waiters();
        self.shared.synchronized.notified().await
    }
}
|
||||
|
||||
/// Core lifecycle loop for a service: reacts to `OnStop` commands from the
/// watch channel, runs the container via `run_main`, and on a crash issues a
/// user notification before retrying after a 15-second backoff.
async fn manager_thread_loop(mut recv: Receiver<OnStop>, thread_shared: &Arc<ManagerSharedState>) {
    loop {
        // Reads the current command without holding the channel borrow across
        // an await point. For Sleep it also returns a future that resolves
        // when the command next changes.
        fn handle_stop_action<'a>(
            recv: &'a mut Receiver<OnStop>,
        ) -> (
            OnStop,
            Option<impl Future<Output = Result<(), RecvError>> + 'a>,
        ) {
            let val = *recv.borrow_and_update();
            match val {
                OnStop::Sleep => (OnStop::Sleep, Some(recv.changed())),
                a => (a, None),
            }
        }
        let (stop_action, fut) = handle_stop_action(&mut recv);
        match stop_action {
            OnStop::Sleep => {
                if let Some(fut) = fut {
                    thread_shared.status.store(
                        Status::Stopped as usize,
                        std::sync::atomic::Ordering::SeqCst,
                    );
                    // Park until the command changes.
                    // NOTE(review): unwrap panics if the sender is dropped
                    // before Exit is sent — presumably the Manager always
                    // sends Exit first; confirm.
                    fut.await.unwrap();
                    continue;
                }
            }
            OnStop::Exit => {
                thread_shared.status.store(
                    Status::Stopped as usize,
                    std::sync::atomic::Ordering::SeqCst,
                );
                break;
            }
            OnStop::Restart => {
                thread_shared.status.store(
                    Status::Running as usize,
                    std::sync::atomic::Ordering::SeqCst,
                );
            }
        }
        match run_main(&thread_shared).await {
            Ok(Ok(NoOutput)) => (), // restart
            // Container exited with an error code: maybe notify, then retry.
            Ok(Err(e)) => {
                let mut db = thread_shared.ctx.db.handle();
                // Fetch when the service was recorded as started, to suppress
                // notifications for crashes during the first minute.
                let started = crate::db::DatabaseModel::new()
                    .package_data()
                    .idx_model(&thread_shared.manifest.id)
                    .and_then(|pde| pde.installed())
                    .map::<_, MainStatus>(|i| i.status().main())
                    .get(&mut db, false)
                    .await;
                match started.as_deref() {
                    // Notify only if the service ran for at least 60s (always
                    // on the "unstable" feature) and no exit is in progress.
                    Ok(Some(MainStatus::Running { started, .. }))
                        if cfg!(feature = "unstable")
                            || (Utc::now().signed_duration_since(*started)
                                > chrono::Duration::from_std(Duration::from_secs(60)).unwrap()
                                && !matches!(&*thread_shared.on_stop.borrow(), &OnStop::Exit)) =>
                    {
                        let res = thread_shared.ctx.notification_manager
                            .notify(
                                &mut db,
                                Some(thread_shared.manifest.id.clone()),
                                NotificationLevel::Warning,
                                String::from("Service Crashed"),
                                format!("The service {} has crashed with the following exit code: {}\nDetails: {}", thread_shared.manifest.id.clone(), e.0, e.1),
                                (),
                                Some(3600) // 1 hour
                            )
                            .await;
                        match res {
                            Err(e) => {
                                // Notification failure is non-fatal; log it.
                                tracing::error!("Failed to issue notification: {}", e);
                                tracing::debug!("{:?}", e);
                            }
                            Ok(()) => {}
                        }
                    }
                    _ => tracing::error!("service just started. not issuing crash notification"),
                }
                tracing::error!("service crashed: {}: {}", e.0, e.1);
                // Backoff so a crash-looping service doesn't spin hot.
                tokio::time::sleep(Duration::from_secs(15)).await;
            }
            // Container failed to launch at all.
            Err(e) => {
                tracing::error!("failed to start service: {}", e);
                tracing::debug!("{:?}", e);
            }
        }
    }
}
|
||||
|
||||
/// Stops the service: silences health-check commits, tells the manager
/// thread to sleep, resumes the container first if it is currently paused
/// (presumably because a paused container cannot be stopped directly — TODO
/// confirm against docker semantics), then stops the container and records
/// the manager status as Stopped.
#[instrument(skip(shared))]
async fn stop(shared: &ManagerSharedState) -> Result<(), Error> {
    shared
        .commit_health_check_results
        .store(false, Ordering::SeqCst);
    // A closed channel means the manager thread has already exited.
    shared.on_stop.send(OnStop::Sleep).map_err(|_| {
        Error::new(
            eyre!("Manager has already been shutdown"),
            crate::ErrorKind::Docker,
        )
    })?;
    if matches!(
        shared.status.load(Ordering::SeqCst).try_into().unwrap(),
        Status::Paused
    ) {
        resume(shared).await?;
    }
    match shared
        .ctx
        .docker
        .stop_container(
            &shared.container_name,
            Some(StopContainerOptions {
                // Seconds docker waits after SIGTERM before killing;
                // defaults to 30s when the manifest does not specify one.
                t: match &shared.manifest.main {
                    ActionImplementation::Docker(a) => a,
                }
                .sigterm_timeout
                .map(|a| *a)
                .unwrap_or(Duration::from_secs(30))
                .as_secs_f64() as i64,
            }),
        )
        .await
    {
        Err(bollard::errors::Error::DockerResponseNotFoundError { .. })
        | Err(bollard::errors::Error::DockerResponseConflictError { .. })
        | Err(bollard::errors::Error::DockerResponseNotModifiedError { .. }) => (), // Already stopped
        a => a?,
    };
    shared.status.store(
        Status::Stopped as usize,
        std::sync::atomic::Ordering::SeqCst,
    );
    Ok(())
}
|
||||
|
||||
#[instrument(skip(shared))]
|
||||
async fn start(shared: &ManagerSharedState) -> Result<(), Error> {
|
||||
shared.on_stop.send(OnStop::Restart).map_err(|_| {
|
||||
Error::new(
|
||||
eyre!("Manager has already been shutdown"),
|
||||
crate::ErrorKind::Docker,
|
||||
)
|
||||
})?;
|
||||
let _ = shared
|
||||
.status
|
||||
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
|
||||
if x != Status::Running as usize {
|
||||
Some(Status::Starting as usize)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(shared))]
|
||||
async fn pause(shared: &ManagerSharedState) -> Result<(), Error> {
|
||||
if let Err(e) = shared
|
||||
.ctx
|
||||
.docker
|
||||
.pause_container(&shared.container_name)
|
||||
.await
|
||||
{
|
||||
tracing::error!("failed to pause container. stopping instead. {}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
return stop(shared).await;
|
||||
}
|
||||
shared
|
||||
.status
|
||||
.store(Status::Paused as usize, std::sync::atomic::Ordering::SeqCst);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(shared))]
|
||||
async fn resume(shared: &ManagerSharedState) -> Result<(), Error> {
|
||||
shared
|
||||
.ctx
|
||||
.docker
|
||||
.unpause_container(&shared.container_name)
|
||||
.await?;
|
||||
shared.status.store(
|
||||
Status::Running as usize,
|
||||
std::sync::atomic::Ordering::SeqCst,
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::convert::TryInto;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::Duration;
|
||||
|
||||
use chrono::Utc;
|
||||
|
||||
use super::{pause, resume, start, stop, ManagerSharedState, Status};
|
||||
use crate::status::MainStatus;
|
||||
use crate::Error;
|
||||
|
||||
/// Allocates a db handle. DO NOT CALL with a db handle already in scope
///
/// Performs one reconciliation pass between the manager's in-memory `Status`
/// and the package's persisted `MainStatus`: issues start/stop/pause/resume
/// as needed, possibly rewrites the persisted status, saves it, and returns
/// the manager status that was observed.
async fn synchronize_once(shared: &ManagerSharedState) -> Result<Status, Error> {
    let mut db = shared.ctx.db.handle();
    // Acquire the package's MainStatus for mutation.
    let mut status = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&shared.manifest.id)
        .expect(&mut db)
        .await?
        .installed()
        .expect(&mut db)
        .await?
        .status()
        .main()
        .get_mut(&mut db)
        .await?;
    let manager_status = shared.status.load(Ordering::SeqCst).try_into().unwrap();
    match manager_status {
        // Container is stopped: honor what the db says the user wants.
        Status::Stopped => match &mut *status {
            MainStatus::Stopped => (),
            MainStatus::Stopping => {
                // Stop has completed; persist the final state.
                *status = MainStatus::Stopped;
            }
            MainStatus::Starting => {
                start(shared).await?;
            }
            MainStatus::Running { started, .. } => {
                // Supposed to be running but isn't: relaunch and reset the
                // recorded start time.
                *started = Utc::now();
                start(shared).await?;
            }
            MainStatus::BackingUp { .. } => (),
        },
        // Container is launching.
        Status::Starting => match *status {
            MainStatus::Stopped | MainStatus::Stopping => {
                // User asked for it to be down: abort the launch.
                stop(shared).await?;
            }
            MainStatus::Starting | MainStatus::Running { .. } => (),
            MainStatus::BackingUp { .. } => {
                // Quiesce the service while a backup is in progress.
                pause(shared).await?;
            }
        },
        // Container is up.
        Status::Running => match *status {
            MainStatus::Stopped | MainStatus::Stopping => {
                stop(shared).await?;
            }
            MainStatus::Starting => {
                // Launch finished: record it as running as of now.
                *status = MainStatus::Running {
                    started: Utc::now(),
                    health: BTreeMap::new(),
                };
            }
            MainStatus::Running { .. } => (),
            MainStatus::BackingUp { .. } => {
                pause(shared).await?;
            }
        },
        // Container is paused.
        Status::Paused => match *status {
            MainStatus::Stopped | MainStatus::Stopping => {
                stop(shared).await?;
            }
            MainStatus::Starting | MainStatus::Running { .. } => {
                resume(shared).await?;
            }
            // Stay paused for the duration of the backup.
            MainStatus::BackingUp { .. } => (),
        },
        // Manager is shutting down: leave everything as-is.
        Status::Shutdown => (),
    }
    status.save(&mut db).await?;
    Ok(manager_status)
}
|
||||
|
||||
pub async fn synchronizer(shared: &ManagerSharedState) {
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = tokio::time::sleep(Duration::from_secs(5)) => (),
|
||||
_ = shared.synchronize_now.notified() => (),
|
||||
}
|
||||
let status = match synchronize_once(shared).await {
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
"Synchronizer for {}@{} failed: {}",
|
||||
shared.manifest.id,
|
||||
shared.manifest.version,
|
||||
e
|
||||
);
|
||||
tracing::debug!("{:?}", e);
|
||||
continue;
|
||||
}
|
||||
Ok(status) => status,
|
||||
};
|
||||
tracing::trace!("{} status synchronized", shared.manifest.id);
|
||||
shared.synchronized.notify_waiters();
|
||||
match status {
|
||||
Status::Shutdown => {
|
||||
break;
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,274 +0,0 @@
|
||||
use std::future::Future;
|
||||
use std::sync::Arc;
|
||||
|
||||
use aes::cipher::{CipherKey, NewCipher, Nonce, StreamCipher};
|
||||
use aes::Aes256Ctr;
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::{FutureExt, Stream};
|
||||
use hmac::Hmac;
|
||||
use http::{HeaderMap, HeaderValue};
|
||||
use rpc_toolkit::hyper::http::Error as HttpError;
|
||||
use rpc_toolkit::hyper::{self, Body, Request, Response, StatusCode};
|
||||
use rpc_toolkit::rpc_server_helpers::{
|
||||
to_response, DynMiddleware, DynMiddlewareStage2, DynMiddlewareStage3, DynMiddlewareStage4,
|
||||
};
|
||||
use rpc_toolkit::yajrc::RpcMethod;
|
||||
use rpc_toolkit::Metadata;
|
||||
use sha2::Sha256;
|
||||
|
||||
use crate::util::Apply;
|
||||
use crate::Error;
|
||||
|
||||
pub fn pbkdf2(password: impl AsRef<[u8]>, salt: impl AsRef<[u8]>) -> CipherKey<Aes256Ctr> {
|
||||
let mut aeskey = CipherKey::<Aes256Ctr>::default();
|
||||
pbkdf2::pbkdf2::<Hmac<Sha256>>(
|
||||
password.as_ref(),
|
||||
salt.as_ref(),
|
||||
1000,
|
||||
aeskey.as_mut_slice(),
|
||||
);
|
||||
aeskey
|
||||
}
|
||||
|
||||
pub fn encrypt_slice(input: impl AsRef<[u8]>, password: impl AsRef<[u8]>) -> Vec<u8> {
|
||||
let prefix: [u8; 32] = rand::random();
|
||||
let aeskey = pbkdf2(password.as_ref(), &prefix[16..]);
|
||||
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
|
||||
let mut aes = Aes256Ctr::new(&aeskey, &ctr);
|
||||
let mut res = Vec::with_capacity(32 + input.as_ref().len());
|
||||
res.extend_from_slice(&prefix[..]);
|
||||
res.extend_from_slice(input.as_ref());
|
||||
aes.apply_keystream(&mut res[32..]);
|
||||
res
|
||||
}
|
||||
|
||||
pub fn decrypt_slice(input: impl AsRef<[u8]>, password: impl AsRef<[u8]>) -> Vec<u8> {
|
||||
if input.as_ref().len() < 32 {
|
||||
return Vec::new();
|
||||
}
|
||||
let (prefix, rest) = input.as_ref().split_at(32);
|
||||
let aeskey = pbkdf2(password.as_ref(), &prefix[16..]);
|
||||
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
|
||||
let mut aes = Aes256Ctr::new(&aeskey, &ctr);
|
||||
let mut res = rest.to_vec();
|
||||
aes.apply_keystream(&mut res);
|
||||
res
|
||||
}
|
||||
|
||||
/// Streaming counterpart of `decrypt_slice`: wraps an HTTP `Body` and
/// decrypts chunks on the fly as they arrive.
#[pin_project::pin_project]
pub struct DecryptStream {
    // Password used to derive the AES key once the 32-byte prefix arrives.
    key: Arc<String>,
    #[pin]
    body: Body,
    // Accumulates the first 16 bytes of the stream (the CTR nonce).
    ctr: Vec<u8>,
    // Accumulates the next 16 bytes (the PBKDF2 salt).
    salt: Vec<u8>,
    // Initialized once ctr and salt are complete; None while still buffering.
    aes: Option<Aes256Ctr>,
}
|
||||
impl DecryptStream {
|
||||
pub fn new(key: Arc<String>, body: Body) -> Self {
|
||||
DecryptStream {
|
||||
key,
|
||||
body,
|
||||
ctr: Vec::new(),
|
||||
salt: Vec::new(),
|
||||
aes: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
impl Stream for DecryptStream {
    type Item = hyper::Result<hyper::body::Bytes>;
    /// Forwards the inner body's chunks, buffering the first 32 bytes as
    /// nonce + salt; once both are complete, derives the key and decrypts
    /// every remaining byte. While the prefix is still incomplete, yields
    /// empty chunks instead of withholding a poll result.
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = self.project();
        match this.body.poll_next(cx) {
            std::task::Poll::Pending => std::task::Poll::Pending,
            std::task::Poll::Ready(Some(Ok(bytes))) => std::task::Poll::Ready(Some(Ok({
                let mut buf = &*bytes;
                if let Some(aes) = this.aes.as_mut() {
                    // Cipher already initialized: decrypt the whole chunk.
                    let mut res = buf.to_vec();
                    aes.apply_keystream(&mut res);
                    res.into()
                } else {
                    // Still collecting the prefix; a chunk may contain any
                    // split of nonce/salt/ciphertext, so consume piecewise.
                    if this.ctr.len() < 16 && buf.len() > 0 {
                        let to_read = std::cmp::min(16 - this.ctr.len(), buf.len());
                        this.ctr.extend_from_slice(&buf[0..to_read]);
                        buf = &buf[to_read..];
                    }
                    if this.salt.len() < 16 && buf.len() > 0 {
                        let to_read = std::cmp::min(16 - this.salt.len(), buf.len());
                        this.salt.extend_from_slice(&buf[0..to_read]);
                        buf = &buf[to_read..];
                    }
                    if this.ctr.len() == 16 && this.salt.len() == 16 {
                        // Prefix complete: set up the cipher and decrypt the
                        // remainder of this chunk.
                        let aeskey = pbkdf2(this.key.as_bytes(), &this.salt);
                        let ctr = Nonce::<Aes256Ctr>::from_slice(&this.ctr);
                        let mut aes = Aes256Ctr::new(&aeskey, &ctr);
                        let mut res = buf.to_vec();
                        aes.apply_keystream(&mut res);
                        *this.aes = Some(aes);
                        res.into()
                    } else {
                        // Prefix still incomplete; emit an empty chunk.
                        hyper::body::Bytes::new()
                    }
                }
            }))),
            // Errors and end-of-stream pass through unchanged.
            std::task::Poll::Ready(a) => std::task::Poll::Ready(a),
        }
    }
}
|
||||
|
||||
/// Streaming counterpart of `encrypt_slice`: wraps an HTTP `Body` and
/// encrypts chunks on the fly, emitting the 32-byte nonce/salt prefix first.
#[pin_project::pin_project]
pub struct EncryptStream {
    #[pin]
    body: Body,
    // Cipher initialized eagerly in `new` from the generated prefix.
    aes: Aes256Ctr,
    // Taken (set to None) once the prefix has been emitted as the first chunk.
    prefix: Option<[u8; 32]>,
}
|
||||
impl EncryptStream {
|
||||
pub fn new(key: &str, body: Body) -> Self {
|
||||
let prefix: [u8; 32] = rand::random();
|
||||
let aeskey = pbkdf2(key.as_bytes(), &prefix[16..]);
|
||||
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
|
||||
let aes = Aes256Ctr::new(&aeskey, &ctr);
|
||||
EncryptStream {
|
||||
body,
|
||||
aes,
|
||||
prefix: Some(prefix),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl Stream for EncryptStream {
    type Item = hyper::Result<hyper::body::Bytes>;
    /// Emits the 32-byte nonce/salt prefix as the very first chunk, then
    /// forwards the inner body's chunks encrypted with the keystream.
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = self.project();
        if let Some(prefix) = this.prefix.take() {
            // First poll: yield the cleartext prefix without touching the body.
            std::task::Poll::Ready(Some(Ok(prefix.to_vec().into())))
        } else {
            match this.body.poll_next(cx) {
                std::task::Poll::Pending => std::task::Poll::Pending,
                std::task::Poll::Ready(Some(Ok(bytes))) => std::task::Poll::Ready(Some(Ok({
                    let mut res = bytes.to_vec();
                    this.aes.apply_keystream(&mut res);
                    res.into()
                }))),
                // Errors and end-of-stream pass through unchanged.
                std::task::Poll::Ready(a) => std::task::Poll::Ready(a),
            }
        }
    }
}
|
||||
|
||||
fn encrypted(headers: &HeaderMap) -> bool {
|
||||
headers
|
||||
.get("Content-Encoding")
|
||||
.and_then(|h| {
|
||||
h.to_str()
|
||||
.ok()?
|
||||
.split(",")
|
||||
.any(|s| s == "aesctr256")
|
||||
.apply(Some)
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// RPC middleware enforcing transport encryption with the `aesctr256` scheme.
///
/// If the request declares `Content-Encoding: aesctr256`, its body is wrapped
/// in a `DecryptStream` using a key from `keysource`, and the eventual
/// response body is wrapped in an `EncryptStream` under the same key (with
/// matching `Content-Encoding` and adjusted `Content-Length` headers).
/// Unencrypted requests are rejected unless the method's metadata marks it as
/// not `authenticated`.
pub fn encrypt<
    F: Fn() -> Fut + Send + Sync + Clone + 'static,
    Fut: Future<Output = Result<Arc<String>, Error>> + Send + Sync + 'static,
    M: Metadata,
>(
    keysource: F,
) -> DynMiddleware<M> {
    Box::new(
        move |req: &mut Request<Body>,
              metadata: M|
              -> BoxFuture<Result<Result<DynMiddlewareStage2, Response<Body>>, HttpError>> {
            let keysource = keysource.clone();
            async move {
                let encrypted = encrypted(req.headers());
                let key = if encrypted {
                    let key = match keysource().await {
                        Ok(s) => s,
                        Err(e) => {
                            // Key lookup failed: short-circuit with an error
                            // response instead of proceeding to the handler.
                            let (res_parts, _) = Response::new(()).into_parts();
                            return Ok(Err(to_response(
                                req.headers(),
                                res_parts,
                                Err(e.into()),
                                |_| StatusCode::OK,
                            )?));
                        }
                    };
                    // Replace the request body with a decrypting wrapper.
                    let body = std::mem::take(req.body_mut());
                    *req.body_mut() = Body::wrap_stream(DecryptStream::new(key.clone(), body));
                    Some(key)
                } else {
                    None
                };
                // Stage 2: runs once the RPC method is known, so the
                // "authenticated" metadata flag can be consulted.
                let res: DynMiddlewareStage2 = Box::new(move |req, rpc_req| {
                    async move {
                        if !encrypted
                            && metadata
                                .get(&rpc_req.method.as_str(), "authenticated")
                                .unwrap_or(true)
                        {
                            // Plaintext call to a method that requires
                            // encryption: reject.
                            let (res_parts, _) = Response::new(()).into_parts();
                            Ok(Err(to_response(
                                &req.headers,
                                res_parts,
                                Err(Error::new(
                                    eyre!("Must be encrypted"),
                                    crate::ErrorKind::Authorization,
                                )
                                .into()),
                                |_| StatusCode::OK,
                            )?))
                        } else {
                            // Stages 3 and 4 are pass-throughs until the
                            // response exists; stage 4 encrypts it.
                            let res: DynMiddlewareStage3 = Box::new(move |_, _| {
                                async move {
                                    let res: DynMiddlewareStage4 = Box::new(move |res| {
                                        async move {
                                            if let Some(key) = key {
                                                res.headers_mut().insert(
                                                    "Content-Encoding",
                                                    HeaderValue::from_static("aesctr256"),
                                                );
                                                // The 32-byte nonce/salt prefix
                                                // emitted by EncryptStream grows
                                                // the body; fix Content-Length.
                                                if let Some(len_header) =
                                                    res.headers_mut().get_mut("Content-Length")
                                                {
                                                    if let Some(len) = len_header
                                                        .to_str()
                                                        .ok()
                                                        .and_then(|l| l.parse::<u64>().ok())
                                                    {
                                                        *len_header = HeaderValue::from(len + 32);
                                                    }
                                                }
                                                let body = std::mem::take(res.body_mut());
                                                *res.body_mut() = Body::wrap_stream(
                                                    EncryptStream::new(key.as_ref(), body),
                                                );
                                            }
                                            Ok(())
                                        }
                                        .boxed()
                                    });
                                    Ok(Ok(res))
                                }
                                .boxed()
                            });
                            Ok(Ok(res))
                        }
                    }
                    .boxed()
                });
                Ok(Ok(res))
            }
            .boxed()
        },
    )
}
|
||||
@@ -1,319 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::net::IpAddr;
|
||||
|
||||
use avahi_sys::{
|
||||
self, avahi_client_errno, avahi_entry_group_add_service, avahi_entry_group_commit,
|
||||
avahi_entry_group_free, avahi_entry_group_reset, avahi_free, avahi_strerror, AvahiClient,
|
||||
AvahiEntryGroup,
|
||||
};
|
||||
use color_eyre::eyre::eyre;
|
||||
use libc::c_void;
|
||||
use tokio::process::Command;
|
||||
use tokio::sync::Mutex;
|
||||
use torut::onion::TorSecretKeyV3;
|
||||
|
||||
use super::interface::InterfaceId;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::Invoke;
|
||||
use crate::Error;
|
||||
|
||||
pub async fn resolve_mdns(hostname: &str) -> Result<IpAddr, Error> {
|
||||
Ok(String::from_utf8(
|
||||
Command::new("avahi-resolve-host-name")
|
||||
.arg("-4")
|
||||
.arg(hostname)
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?,
|
||||
)?
|
||||
.split_once("\t")
|
||||
.ok_or_else(|| {
|
||||
Error::new(
|
||||
eyre!("Failed to resolve hostname: {}", hostname),
|
||||
crate::ErrorKind::Network,
|
||||
)
|
||||
})?
|
||||
.1
|
||||
.trim()
|
||||
.parse()?)
|
||||
}
|
||||
|
||||
/// Thread-safe handle over the avahi mDNS state; all access is serialized
/// through the inner mutex.
pub struct MdnsController(Mutex<MdnsControllerInner>);
|
||||
impl MdnsController {
    /// Initializes the avahi client/entry group and publishes the base
    /// host entries (see `MdnsControllerInner::init`).
    pub fn init() -> Self {
        MdnsController(Mutex::new(MdnsControllerInner::init()))
    }
    /// Registers `.local` aliases for the given package interfaces and
    /// republishes the avahi entry group.
    pub async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>(
        &self,
        pkg_id: &PackageId,
        interfaces: I,
    ) {
        self.0.lock().await.add(pkg_id, interfaces)
    }
    /// Unregisters the given package interfaces and republishes the avahi
    /// entry group.
    pub async fn remove<I: IntoIterator<Item = InterfaceId>>(
        &self,
        pkg_id: &PackageId,
        interfaces: I,
    ) {
        self.0.lock().await.remove(pkg_id, interfaces)
    }
}
|
||||
|
||||
/// Internal, non-thread-safe mDNS state; only ever touched behind
/// `MdnsController`'s mutex.
pub struct MdnsControllerInner {
    // DNS wire-format encoding of the host FQDN (length-prefixed labels with
    // trailing NUL), built in init(); used as CNAME RDATA for aliases.
    hostname: Vec<u8>,
    // C string of the FQDN as returned by avahi_client_get_host_name_fqdn.
    hostname_raw: *const libc::c_char,
    entry_group: *mut AvahiEntryGroup,
    // Tor key per (package, interface); each becomes a published .local CNAME.
    services: BTreeMap<(PackageId, InterfaceId), TorSecretKeyV3>,
    // Pinned backing storage for the error out-parameter handed to
    // avahi_client_new, kept alive for the client's lifetime.
    _client_error: std::pin::Pin<Box<i32>>,
}
// SAFETY: the raw avahi pointers make this neither Send nor Sync by default;
// all access is serialized behind the Mutex in MdnsController.
// NOTE(review): soundness also depends on avahi's own threading rules for
// these handles — confirm against the avahi API documentation.
unsafe impl Send for MdnsControllerInner {}
unsafe impl Sync for MdnsControllerInner {}
|
||||
|
||||
impl MdnsControllerInner {
    /// Adds the base `_http._tcp` service plus one `.local` CNAME alias per
    /// registered service to the (already reset/empty) entry group. Panics on
    /// any avahi error — used only during init/sync where recovery is not
    /// attempted.
    fn load_services(&mut self) {
        unsafe {
            tracing::debug!("Loading services for mDNS");
            let mut res;
            let http_tcp_cstr = std::ffi::CString::new("_http._tcp")
                .expect("Could not cast _http._tcp to c string");
            res = avahi_entry_group_add_service(
                self.entry_group,
                avahi_sys::AVAHI_IF_UNSPEC,
                avahi_sys::AVAHI_PROTO_UNSPEC,
                avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST,
                self.hostname_raw,
                http_tcp_cstr.as_ptr(),
                std::ptr::null(),
                std::ptr::null(),
                443,
                // below is a secret final argument that the type signature of this function does not tell you that it
                // needs. This is because the C lib function takes a variable number of final arguments indicating the
                // desired TXT records to add to this service entry. The way it decides when to stop taking arguments
                // from the stack and dereferencing them is when it finds a null pointer...because fuck you, that's why.
                // The consequence of this is that forgetting this last argument will cause segfaults or other undefined
                // behavior. Welcome back to the stone age motherfucker.
                std::ptr::null::<libc::c_char>(),
            );
            if res < avahi_sys::AVAHI_OK {
                // avahi_strerror allocates; free it after logging.
                let e_str = avahi_strerror(res);
                tracing::error!(
                    "Could not add service to Avahi entry group: {:?}",
                    std::ffi::CStr::from_ptr(e_str)
                );
                avahi_free(e_str as *mut c_void);
                panic!("Failed to load Avahi services");
            }
            tracing::info!(
                "Published {:?}",
                std::ffi::CStr::from_ptr(self.hostname_raw)
            );
            for key in self.services.values() {
                // Alias is the service's onion address base name + ".local".
                let lan_address = key
                    .public()
                    .get_onion_address()
                    .get_address_without_dot_onion()
                    + ".local";
                tracing::debug!("Adding mdns CNAME entry for {}", &lan_address);
                let lan_address_ptr = std::ffi::CString::new(lan_address)
                    .expect("Could not cast lan address to c string");
                res = avahi_sys::avahi_entry_group_add_record(
                    self.entry_group,
                    avahi_sys::AVAHI_IF_UNSPEC,
                    avahi_sys::AVAHI_PROTO_UNSPEC,
                    avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST
                        | avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_ALLOW_MULTIPLE,
                    lan_address_ptr.as_ptr(),
                    avahi_sys::AVAHI_DNS_CLASS_IN as u16,
                    avahi_sys::AVAHI_DNS_TYPE_CNAME as u16,
                    avahi_sys::AVAHI_DEFAULT_TTL,
                    // RDATA: the host FQDN in DNS wire format (see init()).
                    self.hostname.as_ptr().cast(),
                    self.hostname.len(),
                );
                if res < avahi_sys::AVAHI_OK {
                    let e_str = avahi_strerror(res);
                    tracing::error!(
                        "Could not add CNAME record to Avahi entry group: {:?}",
                        std::ffi::CStr::from_ptr(e_str)
                    );
                    avahi_free(e_str as *mut c_void);
                    panic!("Failed to load Avahi services");
                }
                tracing::info!("Published {:?}", lan_address_ptr);
            }
        }
    }
    /// Creates the avahi client and entry group, builds the wire-format
    /// hostname buffer, publishes the base services, and commits the group.
    /// Panics on any avahi failure.
    fn init() -> Self {
        unsafe {
            tracing::debug!("Initializing mDNS controller");
            let simple_poll = avahi_sys::avahi_simple_poll_new();
            let poll = avahi_sys::avahi_simple_poll_get(simple_poll);
            // Error out-parameter must outlive the client; pinned and stored
            // on the struct as _client_error.
            let mut box_err = Box::pin(0 as i32);
            let err_c: *mut i32 = box_err.as_mut().get_mut();
            let avahi_client = avahi_sys::avahi_client_new(
                poll,
                avahi_sys::AvahiClientFlags::AVAHI_CLIENT_NO_FAIL,
                Some(client_callback),
                std::ptr::null_mut(),
                err_c,
            );
            if avahi_client == std::ptr::null_mut::<AvahiClient>() {
                let e_str = avahi_strerror(*box_err);
                tracing::error!(
                    "Could not create avahi client: {:?}",
                    std::ffi::CStr::from_ptr(e_str)
                );
                avahi_free(e_str as *mut c_void);
                panic!("Failed to create Avahi Client");
            }
            let group = avahi_sys::avahi_entry_group_new(
                avahi_client,
                Some(entry_group_callback),
                std::ptr::null_mut(),
            );
            if group == std::ptr::null_mut() {
                let e_str = avahi_strerror(avahi_client_errno(avahi_client));
                tracing::error!(
                    "Could not create avahi entry group: {:?}",
                    std::ffi::CStr::from_ptr(e_str)
                );
                avahi_free(e_str as *mut c_void);
                panic!("Failed to create Avahi Entry Group");
            }
            // Build the DNS wire-format name: a leading length byte (patched
            // below), the FQDN bytes, with the final '.' label separators
            // rewritten as label-length prefixes.
            let mut hostname_buf = vec![0];
            let hostname_raw = avahi_sys::avahi_client_get_host_name_fqdn(avahi_client);
            hostname_buf
                .extend_from_slice(std::ffi::CStr::from_ptr(hostname_raw).to_bytes_with_nul());
            let buflen = hostname_buf.len();
            debug_assert!(hostname_buf.ends_with(b".local\0"));
            debug_assert!(!hostname_buf[..(buflen - 7)].contains(&b'.'));
            // assume fixed length prefix on hostname due to local address
            hostname_buf[0] = (buflen - 8) as u8; // set the prefix length to len - 8 (leading byte, .local, nul) for the main address
            hostname_buf[buflen - 7] = 5; // set the prefix length to 5 for "local"

            let mut res = MdnsControllerInner {
                hostname: hostname_buf,
                hostname_raw,
                entry_group: group,
                services: BTreeMap::new(),
                _client_error: box_err,
            };
            res.load_services();
            let commit_err = avahi_entry_group_commit(res.entry_group);
            if commit_err < avahi_sys::AVAHI_OK {
                let e_str = avahi_strerror(commit_err);
                // NOTE(review): this message says "reset" but the failing
                // call is commit — likely copy-pasted from sync().
                tracing::error!(
                    "Could not reset Avahi entry group: {:?}",
                    std::ffi::CStr::from_ptr(e_str)
                );
                avahi_free(e_str as *mut c_void);
                panic!("Failed to load Avahi services: reset");
            }
            res
        }
    }
    /// Republishes everything: resets the entry group, reloads all services
    /// from `self.services`, and commits. Panics on any avahi failure.
    fn sync(&mut self) {
        unsafe {
            let mut res;
            res = avahi_entry_group_reset(self.entry_group);
            if res < avahi_sys::AVAHI_OK {
                let e_str = avahi_strerror(res);
                tracing::error!(
                    "Could not reset Avahi entry group: {:?}",
                    std::ffi::CStr::from_ptr(e_str)
                );
                avahi_free(e_str as *mut c_void);
                panic!("Failed to load Avahi services: reset");
            }
            self.load_services();
            res = avahi_entry_group_commit(self.entry_group);
            if res < avahi_sys::AVAHI_OK {
                let e_str = avahi_strerror(res);
                tracing::error!(
                    "Could not commit Avahi entry group: {:?}",
                    std::ffi::CStr::from_ptr(e_str)
                );
                avahi_free(e_str as *mut c_void);
                panic!("Failed to load Avahi services: commit");
            }
        }
    }
    /// Records aliases for the package's interfaces and republishes.
    fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>(
        &mut self,
        pkg_id: &PackageId,
        interfaces: I,
    ) {
        self.services.extend(
            interfaces
                .into_iter()
                .map(|(interface_id, key)| ((pkg_id.clone(), interface_id), key)),
        );
        self.sync();
    }
    /// Drops aliases for the package's interfaces and republishes.
    fn remove<I: IntoIterator<Item = InterfaceId>>(&mut self, pkg_id: &PackageId, interfaces: I) {
        for interface_id in interfaces {
            self.services.remove(&(pkg_id.clone(), interface_id));
        }
        self.sync();
    }
}
|
||||
impl Drop for MdnsControllerInner {
    fn drop(&mut self) {
        unsafe {
            // NOTE(review): avahi documents the pointer returned by
            // avahi_client_get_host_name_fqdn as owned by the client and not
            // to be freed by the caller — freeing it here may be an invalid
            // free. Confirm against the avahi API docs before relying on this.
            avahi_free(self.hostname_raw as *mut c_void);
            avahi_entry_group_free(self.entry_group);
            // NOTE(review): the AvahiClient and simple poll created in init()
            // are never freed — presumably acceptable for a process-lifetime
            // singleton, but worth confirming.
        }
    }
}
|
||||
|
||||
/// C callback passed to avahi_entry_group_new; only logs state transitions.
/// Must stay panic-free: it is invoked from avahi's C event loop.
unsafe extern "C" fn entry_group_callback(
    _group: *mut avahi_sys::AvahiEntryGroup,
    state: avahi_sys::AvahiEntryGroupState,
    _userdata: *mut core::ffi::c_void,
) {
    match state {
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_FAILURE => {
            tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_FAILURE");
        }
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_COLLISION => {
            tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_COLLISION");
        }
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_UNCOMMITED => {
            tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_UNCOMMITED");
        }
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_ESTABLISHED => {
            tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_ESTABLISHED");
        }
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_REGISTERING => {
            tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_REGISTERING");
        }
        other => {
            // Unknown/future state value; log it numerically.
            tracing::warn!("AvahiCallback: EntryGroupState = {}", other);
        }
    }
}
|
||||
|
||||
/// C callback passed to avahi_client_new; only logs state transitions.
/// Must stay panic-free: it is invoked from avahi's C event loop.
unsafe extern "C" fn client_callback(
    _group: *mut avahi_sys::AvahiClient,
    state: avahi_sys::AvahiClientState,
    _userdata: *mut core::ffi::c_void,
) {
    match state {
        avahi_sys::AvahiClientState_AVAHI_CLIENT_FAILURE => {
            tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_FAILURE");
        }
        avahi_sys::AvahiClientState_AVAHI_CLIENT_S_RUNNING => {
            tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_S_RUNNING");
        }
        avahi_sys::AvahiClientState_AVAHI_CLIENT_CONNECTING => {
            tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_CONNECTING");
        }
        avahi_sys::AvahiClientState_AVAHI_CLIENT_S_COLLISION => {
            tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_S_COLLISION");
        }
        avahi_sys::AvahiClientState_AVAHI_CLIENT_S_REGISTERING => {
            tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_S_REGISTERING");
        }
        other => {
            // Unknown/future state value; log it numerically.
            tracing::warn!("AvahiCallback: ClientState = {}", other);
        }
    }
}
|
||||
@@ -1,184 +0,0 @@
|
||||
use std::net::{Ipv4Addr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use openssl::pkey::{PKey, Private};
|
||||
use openssl::x509::X509;
|
||||
use rpc_toolkit::command;
|
||||
use sqlx::SqlitePool;
|
||||
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
|
||||
use tracing::instrument;
|
||||
|
||||
use self::interface::{Interface, InterfaceId};
|
||||
#[cfg(feature = "avahi")]
|
||||
use self::mdns::MdnsController;
|
||||
use self::nginx::NginxController;
|
||||
use self::ssl::SslManager;
|
||||
use self::tor::TorController;
|
||||
use crate::net::interface::TorConfig;
|
||||
use crate::net::nginx::InterfaceMetadata;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::Error;
|
||||
|
||||
pub mod interface;
|
||||
#[cfg(feature = "avahi")]
|
||||
pub mod mdns;
|
||||
pub mod nginx;
|
||||
pub mod ssl;
|
||||
pub mod tor;
|
||||
pub mod wifi;
|
||||
|
||||
const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
|
||||
|
||||
/// Parent CLI/RPC command for networking; dispatches to subcommands
/// (currently only `tor`). The parent itself is a no-op.
#[command(subcommands(tor::tor))]
pub fn net() -> Result<(), Error> {
    Ok(())
}
|
||||
|
||||
/// Indicates that the net controller has created the
/// SSL keys
///
/// Zero-sized proof token: the only visible constructor is inside
/// `NetController::generate_certificate_mountpoint`, so holding one implies
/// the per-package certificate files have been written to disk.
#[derive(Clone, Copy)]
pub struct GeneratedCertificateMountPoint(());
|
||||
|
||||
/// Aggregates the networking subsystems: tor hidden services, optional
/// mDNS/avahi advertisement, nginx reverse-proxying, and SSL issuance.
pub struct NetController {
    // Hidden-service registration for packages and the embassy itself.
    pub tor: TorController,
    #[cfg(feature = "avahi")]
    // LAN service advertisement; compiled in only with the `avahi` feature.
    pub mdns: MdnsController,
    // Reverse proxy configuration under /etc/nginx.
    pub nginx: NginxController,
    // Root/intermediate CA handling and leaf certificate issuance.
    pub ssl: SslManager,
}
|
||||
impl NetController {
    /// Initialize all networking subsystems.
    ///
    /// `import_root_ca`: when `Some`, the given root key/cert replaces any
    /// stored CA material; otherwise an existing (or freshly generated) root
    /// is loaded from `db`.
    #[instrument(skip(db))]
    pub async fn init(
        embassyd_addr: SocketAddr,
        embassyd_tor_key: TorSecretKeyV3,
        tor_control: SocketAddr,
        db: SqlitePool,
        import_root_ca: Option<(PKey<Private>, X509)>,
    ) -> Result<Self, Error> {
        let ssl = match import_root_ca {
            None => SslManager::init(db).await,
            Some(a) => SslManager::import_root_ca(db, a.0, a.1).await,
        }?;
        Ok(Self {
            tor: TorController::init(embassyd_addr, embassyd_tor_key, tor_control).await?,
            #[cfg(feature = "avahi")]
            mdns: MdnsController::init(),
            nginx: NginxController::init(PathBuf::from("/etc/nginx"), &ssl).await?,
            ssl,
        })
    }

    /// Directory where a package's exported certificates live
    /// (`/var/lib/embassy/ssl/<pkg_id>`).
    pub fn ssl_directory_for(&self, pkg_id: &PackageId) -> PathBuf {
        PathBuf::from(format!("{}/{}", PACKAGE_CERT_PATH, pkg_id))
    }

    /// Register a package's interfaces with tor, mdns (if enabled), and
    /// nginx, concurrently via `tokio::join!`.
    ///
    /// Requires a `GeneratedCertificateMountPoint` token as evidence that
    /// the package's cert files already exist (nginx configs reference them).
    /// Note: the mdns branch's result is discarded (`_` in the join tuple);
    /// only tor and nginx failures propagate.
    #[instrument(skip(self, interfaces, _generated_certificate))]
    pub async fn add<'a, I>(
        &self,
        pkg_id: &PackageId,
        ip: Ipv4Addr,
        interfaces: I,
        _generated_certificate: GeneratedCertificateMountPoint,
    ) -> Result<(), Error>
    where
        I: IntoIterator<Item = (InterfaceId, &'a Interface, TorSecretKeyV3)> + Clone,
        for<'b> &'b I: IntoIterator<Item = &'b (InterfaceId, &'a Interface, TorSecretKeyV3)>,
    {
        // Only interfaces with a tor config get hidden services.
        let interfaces_tor = interfaces
            .clone()
            .into_iter()
            .filter_map(|i| match i.1.tor_config.clone() {
                None => None,
                Some(cfg) => Some((i.0, cfg, i.2)),
            })
            .collect::<Vec<(InterfaceId, TorConfig, TorSecretKeyV3)>>();
        let (tor_res, _, nginx_res) = tokio::join!(
            self.tor.add(pkg_id, ip, interfaces_tor),
            {
                // With avahi: advertise each interface's key on the LAN;
                // without: substitute an immediately-ready no-op future.
                #[cfg(feature = "avahi")]
                let mdns_fut = self.mdns.add(
                    pkg_id,
                    interfaces
                        .clone()
                        .into_iter()
                        .map(|(interface_id, _, key)| (interface_id, key)),
                );
                #[cfg(not(feature = "avahi"))]
                let mdns_fut = futures::future::ready(());
                mdns_fut
            },
            {
                // Only interfaces with a LAN config are proxied by nginx.
                // dns_base is the tor v3 address with ".onion" stripped.
                let interfaces = interfaces
                    .into_iter()
                    .filter_map(|(id, interface, tor_key)| match &interface.lan_config {
                        None => None,
                        Some(cfg) => Some((
                            id,
                            InterfaceMetadata {
                                dns_base: OnionAddressV3::from(&tor_key.public())
                                    .get_address_without_dot_onion(),
                                lan_config: cfg.clone(),
                                protocols: interface.protocols.clone(),
                            },
                        )),
                    });
                self.nginx.add(&self.ssl, pkg_id.clone(), ip, interfaces)
            }
        );
        tor_res?;
        nginx_res?;

        Ok(())
    }

    /// Tear down a package's tor services, mdns records (if enabled), and
    /// nginx configs concurrently. As in `add`, the mdns result is discarded.
    #[instrument(skip(self, interfaces))]
    pub async fn remove<I: IntoIterator<Item = InterfaceId> + Clone>(
        &self,
        pkg_id: &PackageId,
        interfaces: I,
    ) -> Result<(), Error> {
        let (tor_res, _, nginx_res) = tokio::join!(
            self.tor.remove(pkg_id, interfaces.clone()),
            {
                #[cfg(feature = "avahi")]
                let mdns_fut = self.mdns.remove(pkg_id, interfaces);
                #[cfg(not(feature = "avahi"))]
                let mdns_fut = futures::future::ready(());
                mdns_fut
            },
            self.nginx.remove(pkg_id)
        );
        tor_res?;
        nginx_res?;
        Ok(())
    }

    /// For each interface, obtain (or renew) a leaf certificate for its
    /// onion-derived hostname and write `<id>.key.pem` / `<id>.cert.pem`
    /// under the package's cert directory. Returns the proof token that
    /// `add` requires.
    pub async fn generate_certificate_mountpoint<'a, I>(
        &self,
        pkg_id: &PackageId,
        interfaces: &I,
    ) -> Result<GeneratedCertificateMountPoint, Error>
    where
        I: IntoIterator<Item = (InterfaceId, &'a Interface, TorSecretKeyV3)> + Clone,
        for<'b> &'b I: IntoIterator<Item = &'b (InterfaceId, &'a Interface, TorSecretKeyV3)>,
    {
        tracing::info!("Generating SSL Certificate mountpoints for {}", pkg_id);
        let package_path = PathBuf::from(PACKAGE_CERT_PATH).join(pkg_id);
        tokio::fs::create_dir_all(&package_path).await?;
        for (id, _, key) in interfaces {
            let dns_base = OnionAddressV3::from(&key.public()).get_address_without_dot_onion();
            let ssl_path_key = package_path.join(format!("{}.key.pem", id));
            let ssl_path_cert = package_path.join(format!("{}.cert.pem", id));
            // NB: `key` here shadows the tor key with the freshly issued TLS key.
            let (key, chain) = self.ssl.certificate_for(&dns_base, pkg_id).await?;
            tokio::try_join!(
                crate::net::ssl::export_key(&key, &ssl_path_key),
                crate::net::ssl::export_cert(&chain, &ssl_path_cert)
            )?;
        }
        Ok(GeneratedCertificateMountPoint(()))
    }

    /// Export the persisted root CA key/cert (delegates to `SslManager`).
    pub async fn export_root_ca(&self) -> Result<(PKey<Private>, X509), Error> {
        self.ssl.export_root_ca().await
    }
}
|
||||
@@ -1,18 +0,0 @@
|
||||
server {{
|
||||
listen {listen_args};
|
||||
listen [::]:{listen_args_ipv6};
|
||||
server_name .{hostname}.local;
|
||||
{ssl_certificate_line}
|
||||
{ssl_certificate_key_line}
|
||||
location / {{
|
||||
proxy_pass http://{app_ip}:{internal_port}/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
client_max_body_size 0;
|
||||
proxy_request_buffering off;
|
||||
proxy_buffering off;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
}}
|
||||
}}
|
||||
@@ -1,233 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use futures::FutureExt;
|
||||
use indexmap::IndexSet;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
use super::interface::{InterfaceId, LanPortConfig};
|
||||
use super::ssl::SslManager;
|
||||
use crate::hostname::get_hostname;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::serde::Port;
|
||||
use crate::util::Invoke;
|
||||
use crate::{Error, ErrorKind, ResultExt};
|
||||
|
||||
/// Public handle for nginx management. Owns the nginx root path and
/// serializes every config mutation through the inner `Mutex`.
pub struct NginxController {
    pub nginx_root: PathBuf,
    inner: Mutex<NginxControllerInner>,
}
impl NginxController {
    /// Build the controller; the inner init also writes the embassy's own
    /// ("main") SSL key/cert under `<nginx_root>/ssl/`.
    pub async fn init(nginx_root: PathBuf, ssl_manager: &SslManager) -> Result<Self, Error> {
        Ok(NginxController {
            inner: Mutex::new(NginxControllerInner::init(&nginx_root, ssl_manager).await?),
            nginx_root,
        })
    }
    /// Add proxy configs for a package's interfaces (locks, then delegates
    /// to `NginxControllerInner::add`).
    pub async fn add<I: IntoIterator<Item = (InterfaceId, InterfaceMetadata)>>(
        &self,
        ssl_manager: &SslManager,
        package: PackageId,
        ipv4: Ipv4Addr,
        interfaces: I,
    ) -> Result<(), Error> {
        self.inner
            .lock()
            .await
            .add(&self.nginx_root, ssl_manager, package, ipv4, interfaces)
            .await
    }
    /// Remove all of a package's proxy configs and reload nginx.
    pub async fn remove(&self, package: &PackageId) -> Result<(), Error> {
        self.inner
            .lock()
            .await
            .remove(&self.nginx_root, package)
            .await
    }
}
|
||||
|
||||
/// Mutex-protected interior of `NginxController`: remembers which interfaces
/// were configured for each package so they can be torn down on `remove`.
pub struct NginxControllerInner {
    interfaces: BTreeMap<PackageId, PackageNetInfo>,
}
impl NginxControllerInner {
    /// Create empty state and write the embassy's main SSL key/cert pair
    /// (`ssl/embassy_main.{key,cert}.pem`) under `nginx_root`.
    #[instrument]
    async fn init(nginx_root: &Path, ssl_manager: &SslManager) -> Result<Self, Error> {
        let inner = NginxControllerInner {
            interfaces: BTreeMap::new(),
        };
        // write main ssl key/cert to fs location
        let (key, cert) = ssl_manager
            .certificate_for(&get_hostname().await?, &"embassy".parse().unwrap())
            .await?;
        let ssl_path_key = nginx_root.join(format!("ssl/embassy_main.key.pem"));
        let ssl_path_cert = nginx_root.join(format!("ssl/embassy_main.cert.pem"));
        tokio::try_join!(
            crate::net::ssl::export_key(&key, &ssl_path_key),
            crate::net::ssl::export_cert(&cert, &ssl_path_cert),
        )?;
        Ok(inner)
    }
    /// For every HTTP(S) interface of `package` with at least one exposed
    /// port: write per-port key/cert files (when SSL), render the nginx
    /// server-block template into `sites-available`, symlink it into
    /// `sites-enabled`, record the interface, and finally reload nginx.
    #[instrument(skip(self, interfaces))]
    async fn add<I: IntoIterator<Item = (InterfaceId, InterfaceMetadata)>>(
        &mut self,
        nginx_root: &Path,
        ssl_manager: &SslManager,
        package: PackageId,
        ipv4: Ipv4Addr,
        interfaces: I,
    ) -> Result<(), Error> {
        let interface_map = interfaces
            .into_iter()
            .filter(|(_, meta)| {
                // don't add nginx stuff for anything we can't connect to over some flavor of http
                (meta.protocols.contains("http") || meta.protocols.contains("https"))
                // also don't add nginx unless it has at least one exposed port
                && meta.lan_config.len() > 0
            })
            .collect::<BTreeMap<InterfaceId, InterfaceMetadata>>();

        for (id, meta) in interface_map.iter() {
            for (port, lan_port_config) in meta.lan_config.iter() {
                // get ssl certificate chain
                let (listen_args, ssl_certificate_line, ssl_certificate_key_line) =
                    if lan_port_config.ssl {
                        // these have already been written by the net controller
                        let package_path = nginx_root.join(format!("ssl/{}", package));
                        if tokio::fs::metadata(&package_path).await.is_err() {
                            tokio::fs::create_dir_all(&package_path)
                                .await
                                .with_ctx(|_| {
                                    (ErrorKind::Filesystem, package_path.display().to_string())
                                })?;
                        }
                        let ssl_path_key = package_path.join(format!("{}.key.pem", id));
                        let ssl_path_cert = package_path.join(format!("{}.cert.pem", id));
                        let (key, chain) = ssl_manager
                            .certificate_for(&meta.dns_base, &package)
                            .await?;
                        tokio::try_join!(
                            crate::net::ssl::export_key(&key, &ssl_path_key),
                            crate::net::ssl::export_cert(&chain, &ssl_path_cert)
                        )?;
                        (
                            format!("{} ssl", port.0),
                            format!("ssl_certificate {};", ssl_path_cert.to_str().unwrap()),
                            format!("ssl_certificate_key {};", ssl_path_key.to_str().unwrap()),
                        )
                    } else {
                        // plain HTTP: no ssl directives in the rendered config
                        (format!("{}", port.0), String::from(""), String::from(""))
                    };
                // write nginx configs
                let nginx_conf_path = nginx_root.join(format!(
                    "sites-available/{}_{}_{}.conf",
                    package, id, port.0
                ));
                tokio::fs::write(
                    &nginx_conf_path,
                    format!(
                        include_str!("nginx.conf.template"),
                        listen_args = listen_args,
                        listen_args_ipv6 = listen_args,
                        hostname = meta.dns_base,
                        ssl_certificate_line = ssl_certificate_line,
                        ssl_certificate_key_line = ssl_certificate_key_line,
                        app_ip = ipv4,
                        internal_port = lan_port_config.internal,
                    ),
                )
                .await
                .with_ctx(|_| (ErrorKind::Filesystem, nginx_conf_path.display().to_string()))?;
                let sites_enabled_link_path =
                    nginx_root.join(format!("sites-enabled/{}_{}_{}.conf", package, id, port.0));
                // replace any stale symlink before re-linking
                if tokio::fs::metadata(&sites_enabled_link_path).await.is_ok() {
                    tokio::fs::remove_file(&sites_enabled_link_path).await?;
                }
                tokio::fs::symlink(&nginx_conf_path, &sites_enabled_link_path)
                    .await
                    .with_ctx(|_| (ErrorKind::Filesystem, nginx_conf_path.display().to_string()))?;
            }
        }
        // Merge with (or create) the bookkeeping entry for this package.
        match self.interfaces.get_mut(&package) {
            None => {
                let info = PackageNetInfo {
                    interfaces: interface_map,
                };
                self.interfaces.insert(package, info);
            }
            Some(p) => {
                p.interfaces.extend(interface_map);
            }
        };

        self.hup().await?;
        Ok(())
    }

    /// Delete the package's ssl directory and its nginx config/symlink for
    /// every recorded interface port, then reload nginx. Errors from the
    /// deletions propagate via `?` (the `let _` only discards the Ok tuple).
    #[instrument(skip(self))]
    async fn remove(&mut self, nginx_root: &Path, package: &PackageId) -> Result<(), Error> {
        let removed = self.interfaces.remove(package);
        if let Some(net_info) = removed {
            for (id, meta) in net_info.interfaces {
                for (port, _lan_port_config) in meta.lan_config.iter() {
                    // remove ssl certificates and nginx configs
                    let package_path = nginx_root.join(format!("ssl/{}", package));
                    let enabled_path = nginx_root
                        .join(format!("sites-enabled/{}_{}_{}.conf", package, id, port.0));
                    let available_path = nginx_root.join(format!(
                        "sites-available/{}_{}_{}.conf",
                        package, id, port.0
                    ));
                    let _ = tokio::try_join!(
                        async {
                            // The ssl dir is shared across ports; only remove if present.
                            if tokio::fs::metadata(&package_path).await.is_ok() {
                                tokio::fs::remove_dir_all(&package_path)
                                    .map(|res| {
                                        res.with_ctx(|_| {
                                            (
                                                ErrorKind::Filesystem,
                                                package_path.display().to_string(),
                                            )
                                        })
                                    })
                                    .await?;
                                Ok(())
                            } else {
                                Ok(())
                            }
                        },
                        tokio::fs::remove_file(&enabled_path).map(|res| res.with_ctx(|_| (
                            ErrorKind::Filesystem,
                            enabled_path.display().to_string()
                        ))),
                        tokio::fs::remove_file(&available_path).map(|res| res.with_ctx(|_| (
                            ErrorKind::Filesystem,
                            available_path.display().to_string()
                        ))),
                    )?;
                }
            }
        }
        self.hup().await?;
        Ok(())
    }

    /// Ask systemd to reload nginx so added/removed configs take effect.
    #[instrument(skip(self))]
    async fn hup(&self) -> Result<(), Error> {
        let _ = tokio::process::Command::new("systemctl")
            .arg("reload")
            .arg("nginx")
            .invoke(ErrorKind::Nginx)
            .await?;
        Ok(())
    }
}
|
||||
/// Per-package record of which interfaces currently have nginx configs.
struct PackageNetInfo {
    interfaces: BTreeMap<InterfaceId, InterfaceMetadata>,
}
/// Everything nginx needs to proxy one package interface.
pub struct InterfaceMetadata {
    // Hostname base (the tor v3 address without ".onion"); rendered into
    // the server_name and used to look up certificates.
    pub dns_base: String,
    // External port -> LAN port configuration (ssl flag, internal port).
    pub lan_config: BTreeMap<Port, LanPortConfig>,
    // Protocols spoken on this interface (e.g. "http", "https").
    pub protocols: IndexSet<String>,
}
|
||||
@@ -1,544 +0,0 @@
|
||||
use std::cmp::Ordering;
|
||||
use std::path::Path;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::FutureExt;
|
||||
use openssl::asn1::{Asn1Integer, Asn1Time};
|
||||
use openssl::bn::{BigNum, MsbOption};
|
||||
use openssl::ec::{EcGroup, EcKey};
|
||||
use openssl::hash::MessageDigest;
|
||||
use openssl::nid::Nid;
|
||||
use openssl::pkey::{PKey, Private};
|
||||
use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
|
||||
use openssl::*;
|
||||
use sqlx::SqlitePool;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::{Error, ErrorKind, ResultExt};
|
||||
|
||||
static CERTIFICATE_VERSION: i32 = 2; // X.509 "version 3" is encoded on the wire as the integer 2: the version field is zero-based (RFC 5280 §4.1.2.1).
/// Filesystem location where the root CA cert is published for download;
/// rewritten on every `SslManager::init`.
pub const ROOT_CA_STATIC_PATH: &str = "/var/lib/embassy/ssl/root-ca.crt";
|
||||
|
||||
/// Issues and caches TLS certificates backed by the SQLite secret store.
/// Keeps the chain material in memory: the root certificate, plus the
/// intermediate key/cert actually used to sign leaf certificates.
#[derive(Debug)]
pub struct SslManager {
    store: SslStore,
    root_cert: X509,
    int_key: PKey<Private>,
    int_cert: X509,
}
|
||||
|
||||
/// Persistence layer for keys/certificates in the `certificates` table.
/// Row id 0 is reserved for the root CA and id 1 for the intermediate CA;
/// leaf certificates are keyed by `lookup_string` instead.
#[derive(Debug)]
struct SslStore {
    secret_store: SqlitePool,
}
impl SslStore {
    fn new(db: SqlitePool) -> Result<Self, Error> {
        Ok(SslStore { secret_store: db })
    }
    /// Persist the root CA key/cert at reserved row id 0 (PEM-encoded).
    /// Fails if a root row already exists (plain INSERT, no upsert).
    #[instrument(skip(self))]
    async fn save_root_certificate(&self, key: &PKey<Private>, cert: &X509) -> Result<(), Error> {
        let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
        let cert_str = String::from_utf8(cert.to_pem()?)?;
        let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, ?, ?, NULL, datetime('now'), datetime('now'))", key_str, cert_str).execute(&self.secret_store).await?;
        Ok(())
    }
    /// Load the root CA (row id 0); `None` if it was never generated.
    #[instrument(skip(self))]
    async fn load_root_certificate(&self) -> Result<Option<(PKey<Private>, X509)>, Error> {
        let m_row =
            sqlx::query!("SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;")
                .fetch_optional(&self.secret_store)
                .await?;
        match m_row {
            None => Ok(None),
            Some(row) => {
                let priv_key = PKey::private_key_from_pem(&row.priv_key_pem.into_bytes())?;
                let certificate = X509::from_pem(&row.certificate_pem.into_bytes())?;
                Ok(Some((priv_key, certificate)))
            }
        }
    }
    /// Persist the intermediate CA key/cert at reserved row id 1.
    #[instrument(skip(self))]
    async fn save_intermediate_certificate(
        &self,
        key: &PKey<Private>,
        cert: &X509,
    ) -> Result<(), Error> {
        let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
        let cert_str = String::from_utf8(cert.to_pem()?)?;
        let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, ?, ?, NULL, datetime('now'), datetime('now'))", key_str, cert_str).execute(&self.secret_store).await?;
        Ok(())
    }
    /// Load the intermediate CA (row id 1); `None` if never generated.
    async fn load_intermediate_certificate(&self) -> Result<Option<(PKey<Private>, X509)>, Error> {
        let m_row =
            sqlx::query!("SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;")
                .fetch_optional(&self.secret_store)
                .await?;
        match m_row {
            None => Ok(None),
            Some(row) => {
                let priv_key = PKey::private_key_from_pem(&row.priv_key_pem.into_bytes())?;
                let certificate = X509::from_pem(&row.certificate_pem.into_bytes())?;
                Ok(Some((priv_key, certificate)))
            }
        }
    }
    /// Replace stored CA material with an externally supplied root.
    /// Deletes both root AND intermediate rows first — the caller is
    /// expected to regenerate/save a new intermediate afterwards.
    #[instrument(skip(self))]
    async fn import_root_certificate(
        &self,
        root_key: &PKey<Private>,
        root_cert: &X509,
    ) -> Result<(), Error> {
        // remove records for both root and intermediate CA
        sqlx::query!("DELETE FROM certificates WHERE id = 0 OR id = 1;")
            .execute(&self.secret_store)
            .await?;
        self.save_root_certificate(root_key, root_cert).await?;
        Ok(())
    }
    /// Persist a leaf certificate, keyed by `lookup_string` (auto id).
    #[instrument(skip(self))]
    async fn save_certificate(
        &self,
        key: &PKey<Private>,
        cert: &X509,
        lookup_string: &str,
    ) -> Result<(), Error> {
        let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
        let cert_str = String::from_utf8(cert.to_pem()?)?;
        let _n = sqlx::query!("INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (?, ?, ?, datetime('now'), datetime('now'))", key_str, cert_str, lookup_string).execute(&self.secret_store).await?;
        Ok(())
    }
    /// Load a leaf certificate by its `lookup_string`, if present.
    async fn load_certificate(
        &self,
        lookup_string: &str,
    ) -> Result<Option<(PKey<Private>, X509)>, Error> {
        let m_row = sqlx::query!(
            "SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = ?",
            lookup_string
        )
        .fetch_optional(&self.secret_store)
        .await?;
        match m_row {
            None => Ok(None),
            Some(row) => {
                let priv_key = PKey::private_key_from_pem(&row.priv_key_pem.into_bytes())?;
                let certificate = X509::from_pem(&row.certificate_pem.into_bytes())?;
                Ok(Some((priv_key, certificate)))
            }
        }
    }
    /// Overwrite an existing leaf certificate (used for renewal).
    /// Errors if no row matches `lookup_string`.
    #[instrument(skip(self))]
    async fn update_certificate(
        &self,
        key: &PKey<Private>,
        cert: &X509,
        lookup_string: &str,
    ) -> Result<(), Error> {
        let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
        let cert_str = String::from_utf8(cert.to_pem()?)?;
        let n = sqlx::query!("UPDATE certificates SET priv_key_pem = ?, certificate_pem = ?, updated_at = datetime('now') WHERE lookup_string = ?", key_str, cert_str, lookup_string).execute(&self.secret_store).await?;
        if n.rows_affected() == 0 {
            return Err(Error::new(
                eyre!(
                    "Attempted to update non-existent certificate: {}",
                    lookup_string
                ),
                ErrorKind::OpenSsl,
            ));
        }
        Ok(())
    }
}
|
||||
|
||||
// All keys (CA and leaf) are EC keys on NIST P-256 (a.k.a. prime256v1).
const EC_CURVE_NAME: nid::Nid = nid::Nid::X9_62_PRIME256V1;
lazy_static::lazy_static! {
    static ref EC_GROUP: EcGroup = EcGroup::from_curve_name(EC_CURVE_NAME).unwrap();
    static ref SSL_MUTEX: Mutex<()> = Mutex::new(()); // TODO: make thread safe
}
|
||||
|
||||
impl SslManager {
    /// Load the root and intermediate CAs from the store, generating and
    /// persisting them on first run, and publish the root certificate to
    /// `ROOT_CA_STATIC_PATH` for download.
    #[instrument(skip(db))]
    pub async fn init(db: SqlitePool) -> Result<Self, Error> {
        let store = SslStore::new(db)?;
        let (root_key, root_cert) = match store.load_root_certificate().await? {
            None => {
                let root_key = generate_key()?;
                let root_cert = make_root_cert(&root_key)?;
                store.save_root_certificate(&root_key, &root_cert).await?;
                Ok::<_, Error>((root_key, root_cert))
            }
            Some((key, cert)) => Ok((key, cert)),
        }?;
        // generate static file for download, this will get blown up on embassy restart so it's good to write it on
        // every ssl manager init
        tokio::fs::create_dir_all(
            Path::new(ROOT_CA_STATIC_PATH)
                .parent()
                .unwrap_or(Path::new("/")),
        )
        .await?;
        tokio::fs::write(ROOT_CA_STATIC_PATH, root_cert.to_pem()?).await?;
        let (int_key, int_cert) = match store.load_intermediate_certificate().await? {
            None => {
                let int_key = generate_key()?;
                let int_cert = make_int_cert((&root_key, &root_cert), &int_key)?;
                store
                    .save_intermediate_certificate(&int_key, &int_cert)
                    .await?;
                Ok::<_, Error>((int_key, int_cert))
            }
            Some((key, cert)) => Ok((key, cert)),
        }?;
        Ok(SslManager {
            store,
            root_cert,
            int_key,
            int_cert,
        })
    }

    // TODO: currently the burden of proof is on the caller to ensure that all of the arguments to this function are
    // consistent. The following properties are assumed and not verified:
    // 1. `root_cert` is self-signed and contains the public key that matches the private key `root_key`
    // 2. certificate is not past its expiration date
    // Warning: If this function ever fails, you must either call it again or regenerate your certificates from scratch
    // since it is possible for it to fail after successfully saving the root certificate but before successfully saving
    // the intermediate certificate
    /// Replace the stored CA material with an externally supplied root,
    /// then generate and persist a fresh intermediate signed by it.
    #[instrument(skip(db))]
    pub async fn import_root_ca(
        db: SqlitePool,
        root_key: PKey<Private>,
        root_cert: X509,
    ) -> Result<Self, Error> {
        let store = SslStore::new(db)?;
        store.import_root_certificate(&root_key, &root_cert).await?;
        let int_key = generate_key()?;
        let int_cert = make_int_cert((&root_key, &root_cert), &int_key)?;
        store
            .save_intermediate_certificate(&int_key, &int_cert)
            .await?;
        Ok(SslManager {
            store,
            root_cert,
            int_key,
            int_cert,
        })
    }

    /// Load the root CA key/cert from the store; errors if none exists.
    #[instrument(skip(self))]
    pub async fn export_root_ca(&self) -> Result<(PKey<Private>, X509), Error> {
        match self.store.load_root_certificate().await? {
            None => Err(Error::new(
                eyre!("Failed to export root certificate: root certificate has not been generated"),
                ErrorKind::OpenSsl,
            )),
            Some(a) => Ok(a),
        }
    }

    /// Return a leaf key plus the full chain (leaf, intermediate, root) for
    /// `dns_base`. Issues a new certificate on first request, and renews
    /// (in place, same lookup key) when the stored one expires within 30 days.
    #[instrument(skip(self))]
    pub async fn certificate_for(
        &self,
        dns_base: &str,
        package_id: &PackageId,
    ) -> Result<(PKey<Private>, Vec<X509>), Error> {
        let (key, cert) = match self.store.load_certificate(dns_base).await? {
            None => {
                let key = generate_key()?;
                let cert = make_leaf_cert(
                    (&self.int_key, &self.int_cert),
                    (&key, dns_base, package_id),
                )?;
                self.store.save_certificate(&key, &cert, dns_base).await?;
                Ok::<_, Error>((key, cert))
            }
            Some((key, cert)) => {
                // Renew when less than 30 days of validity remain.
                let window_end = Asn1Time::days_from_now(30)?;
                let expiration = cert.not_after();
                if expiration.compare(&window_end)? == Ordering::Less {
                    let key = generate_key()?;
                    let cert = make_leaf_cert(
                        (&self.int_key, &self.int_cert),
                        (&key, dns_base, package_id),
                    )?;
                    self.store.update_certificate(&key, &cert, dns_base).await?;
                    Ok((key, cert))
                } else {
                    Ok((key, cert))
                }
            }
        }?;
        Ok((
            key,
            vec![cert, self.int_cert.clone(), self.root_cert.clone()],
        ))
    }
}
|
||||
|
||||
pub async fn export_key(key: &PKey<Private>, target: &Path) -> Result<(), Error> {
|
||||
tokio::fs::write(target, key.private_key_to_pem_pkcs8()?)
|
||||
.map(|res| res.with_ctx(|_| (ErrorKind::Filesystem, target.display().to_string())))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
pub async fn export_cert(chain: &Vec<X509>, target: &Path) -> Result<(), Error> {
|
||||
tokio::fs::write(
|
||||
target,
|
||||
chain
|
||||
.into_iter()
|
||||
.flat_map(|c| c.to_pem().unwrap())
|
||||
.collect::<Vec<u8>>(),
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
#[instrument]
|
||||
fn rand_serial() -> Result<Asn1Integer, Error> {
|
||||
let mut bn = BigNum::new()?;
|
||||
bn.rand(64, MsbOption::MAYBE_ZERO, false)?;
|
||||
let asn1 = Asn1Integer::from_bn(&bn)?;
|
||||
Ok(asn1)
|
||||
}
|
||||
#[instrument]
|
||||
fn generate_key() -> Result<PKey<Private>, Error> {
|
||||
let new_key = EcKey::generate(EC_GROUP.as_ref())?;
|
||||
let key = PKey::from_ec_key(new_key)?;
|
||||
Ok(key)
|
||||
}
|
||||
/// Build a self-signed root CA certificate (10-year validity) for the
/// given key. Subject and issuer are identical ("Embassy Local Root CA").
#[instrument]
fn make_root_cert(root_key: &PKey<Private>) -> Result<X509, Error> {
    let mut builder = X509Builder::new()?;
    builder.set_version(CERTIFICATE_VERSION)?;

    // Valid from now...
    let embargo = Asn1Time::days_from_now(0)?;
    builder.set_not_before(&embargo)?;

    // ...for 10 years.
    let expiration = Asn1Time::days_from_now(3650)?;
    builder.set_not_after(&expiration)?;

    builder.set_serial_number(&*rand_serial()?)?;

    let mut subject_name_builder = X509NameBuilder::new()?;
    subject_name_builder.append_entry_by_text("CN", "Embassy Local Root CA")?;
    subject_name_builder.append_entry_by_text("O", "Start9")?;
    subject_name_builder.append_entry_by_text("OU", "Embassy")?;
    let subject_name = subject_name_builder.build();
    builder.set_subject_name(&subject_name)?;

    // Self-signed: issuer == subject.
    builder.set_issuer_name(&subject_name)?;

    builder.set_pubkey(&root_key)?;

    // Extensions
    let cfg = conf::Conf::new(conf::ConfMethod::default())?;
    let ctx = builder.x509v3_context(None, Some(&cfg));
    // subjectKeyIdentifier = hash
    let subject_key_identifier =
        X509Extension::new_nid(Some(&cfg), Some(&ctx), Nid::SUBJECT_KEY_IDENTIFIER, "hash")?;
    // basicConstraints = critical, CA:true (no pathlen limit on the root)
    let basic_constraints = X509Extension::new_nid(
        Some(&cfg),
        Some(&ctx),
        Nid::BASIC_CONSTRAINTS,
        "critical,CA:true",
    )?;
    // keyUsage = critical, digitalSignature, cRLSign, keyCertSign
    let key_usage = X509Extension::new_nid(
        Some(&cfg),
        Some(&ctx),
        Nid::KEY_USAGE,
        "critical,digitalSignature,cRLSign,keyCertSign",
    )?;
    builder.append_extension(subject_key_identifier)?;
    builder.append_extension(basic_constraints)?;
    builder.append_extension(key_usage)?;
    builder.sign(&root_key, MessageDigest::sha256())?;
    let cert = builder.build();
    Ok(cert)
}
|
||||
/// Build an intermediate CA certificate (10-year validity) for `applicant`,
/// signed by the root CA `signer` (key, cert). `pathlen:0` forbids the
/// intermediate from issuing further CA certificates.
#[instrument]
fn make_int_cert(
    signer: (&PKey<Private>, &X509),
    applicant: &PKey<Private>,
) -> Result<X509, Error> {
    let mut builder = X509Builder::new()?;
    builder.set_version(CERTIFICATE_VERSION)?;

    // Valid from now...
    let embargo = Asn1Time::days_from_now(0)?;
    builder.set_not_before(&embargo)?;

    // ...for 10 years.
    let expiration = Asn1Time::days_from_now(3650)?;
    builder.set_not_after(&expiration)?;

    builder.set_serial_number(&*rand_serial()?)?;

    let mut subject_name_builder = X509NameBuilder::new()?;
    subject_name_builder.append_entry_by_text("CN", "Embassy Local Intermediate CA")?;
    subject_name_builder.append_entry_by_text("O", "Start9")?;
    subject_name_builder.append_entry_by_text("OU", "Embassy")?;
    let subject_name = subject_name_builder.build();
    builder.set_subject_name(&subject_name)?;

    // Issued by the root CA.
    builder.set_issuer_name(signer.1.subject_name())?;

    builder.set_pubkey(&applicant)?;

    let cfg = conf::Conf::new(conf::ConfMethod::default())?;
    let ctx = builder.x509v3_context(Some(&signer.1), Some(&cfg));
    // subjectKeyIdentifier = hash
    let subject_key_identifier =
        X509Extension::new_nid(Some(&cfg), Some(&ctx), Nid::SUBJECT_KEY_IDENTIFIER, "hash")?;
    // authorityKeyIdentifier = keyid:always,issuer
    let authority_key_identifier = X509Extension::new_nid(
        Some(&cfg),
        Some(&ctx),
        Nid::AUTHORITY_KEY_IDENTIFIER,
        "keyid:always,issuer",
    )?;
    // basicConstraints = critical, CA:true, pathlen:0
    let basic_constraints = X509Extension::new_nid(
        Some(&cfg),
        Some(&ctx),
        Nid::BASIC_CONSTRAINTS,
        "critical,CA:true,pathlen:0",
    )?;
    // keyUsage = critical, digitalSignature, cRLSign, keyCertSign
    let key_usage = X509Extension::new_nid(
        Some(&cfg),
        Some(&ctx),
        Nid::KEY_USAGE,
        "critical,digitalSignature,cRLSign,keyCertSign",
    )?;
    builder.append_extension(subject_key_identifier)?;
    builder.append_extension(authority_key_identifier)?;
    builder.append_extension(basic_constraints)?;
    builder.append_extension(key_usage)?;
    builder.sign(&signer.0, MessageDigest::sha256())?;
    let cert = builder.build();
    Ok(cert)
}
|
||||
|
||||
/// Build a leaf (server) certificate signed by the intermediate `signer`.
/// `applicant` is (leaf key, dns base name, package id); SANs cover the
/// `.local`, `.onion`, and `.embassy` names plus their wildcards.
#[instrument]
fn make_leaf_cert(
    signer: (&PKey<Private>, &X509),
    applicant: (&PKey<Private>, &str, &PackageId),
) -> Result<X509, Error> {
    let mut builder = X509Builder::new()?;
    builder.set_version(CERTIFICATE_VERSION)?;

    let embargo = Asn1Time::days_from_now(0)?;
    builder.set_not_before(&embargo)?;

    // Google Apple and Mozilla reject certificate horizons longer than 397 days
    // https://techbeacon.com/security/google-apple-mozilla-enforce-1-year-max-security-certifications
    let expiration = Asn1Time::days_from_now(397)?;
    builder.set_not_after(&expiration)?;

    builder.set_serial_number(&*rand_serial()?)?;

    let mut subject_name_builder = X509NameBuilder::new()?;
    subject_name_builder.append_entry_by_text("CN", &format!("{}.local", &applicant.1))?;
    subject_name_builder.append_entry_by_text("O", "Start9")?;
    subject_name_builder.append_entry_by_text("OU", "Embassy")?;
    let subject_name = subject_name_builder.build();
    builder.set_subject_name(&subject_name)?;

    // Issued by the intermediate CA.
    builder.set_issuer_name(signer.1.subject_name())?;

    builder.set_pubkey(&applicant.0)?;

    // Extensions
    let cfg = conf::Conf::new(conf::ConfMethod::default())?;
    let ctx = builder.x509v3_context(Some(&signer.1), Some(&cfg));
    // subjectKeyIdentifier = hash
    let subject_key_identifier =
        X509Extension::new_nid(Some(&cfg), Some(&ctx), Nid::SUBJECT_KEY_IDENTIFIER, "hash")?;
    // authorityKeyIdentifier = keyid, issuer:always
    let authority_key_identifier = X509Extension::new_nid(
        Some(&cfg),
        Some(&ctx),
        Nid::AUTHORITY_KEY_IDENTIFIER,
        "keyid,issuer:always",
    )?;
    // basicConstraints = CA:FALSE — leaf certificates cannot sign others
    let basic_constraints =
        X509Extension::new_nid(Some(&cfg), Some(&ctx), Nid::BASIC_CONSTRAINTS, "CA:FALSE")?;
    let key_usage = X509Extension::new_nid(
        Some(&cfg),
        Some(&ctx),
        Nid::KEY_USAGE,
        "critical,digitalSignature,keyEncipherment",
    )?;

    // SANs: dns_base (applicant.1) for .local/.onion, package id (applicant.2)
    // for .embassy, each with a wildcard variant.
    let subject_alt_name = X509Extension::new_nid(
        Some(&cfg),
        Some(&ctx),
        Nid::SUBJECT_ALT_NAME,
        &format!(
            "DNS:{}.local,DNS:*.{}.local,DNS:{}.onion,DNS:*.{}.onion,DNS:{}.embassy,DNS:*.{}.embassy",
            &applicant.1, &applicant.1, &applicant.1, &applicant.1, &applicant.2, &applicant.2,
        ),
    )?;
    builder.append_extension(subject_key_identifier)?;
    builder.append_extension(authority_key_identifier)?;
    builder.append_extension(subject_alt_name)?;
    builder.append_extension(basic_constraints)?;
    builder.append_extension(key_usage)?;

    builder.sign(&signer.0, MessageDigest::sha256())?;

    let cert = builder.build();
    Ok(cert)
}
|
||||
|
||||
/// Verifies that CA material (root cert, intermediate key and cert) created by
/// `SslManager::init` is persisted: a second init against the same database
/// must yield byte-identical PEM output.
#[tokio::test]
async fn ca_details_persist() -> Result<(), Error> {
    let pool = sqlx::Pool::<sqlx::Sqlite>::connect("sqlite::memory:").await?;
    sqlx::query_file!("migrations/20210629193146_Init.sql")
        .execute(&pool)
        .await?;
    let mgr = SslManager::init(pool.clone()).await?;
    let root_cert0 = mgr.root_cert;
    let int_key0 = mgr.int_key;
    let int_cert0 = mgr.int_cert;
    // Re-initialize against the same (already-migrated) in-memory database.
    let mgr = SslManager::init(pool).await?;
    let root_cert1 = mgr.root_cert;
    let int_key1 = mgr.int_key;
    let int_cert1 = mgr.int_cert;

    assert_eq!(root_cert0.to_pem()?, root_cert1.to_pem()?);
    assert_eq!(
        int_key0.private_key_to_pem_pkcs8()?,
        int_key1.private_key_to_pem_pkcs8()?
    );
    assert_eq!(int_cert0.to_pem()?, int_cert1.to_pem()?);
    Ok(())
}
|
||||
|
||||
/// Verifies that a leaf key and its certificate chain issued by
/// `certificate_for` are persisted: requesting the same (hostname, package)
/// twice must return identical key and chain PEM bytes.
#[tokio::test]
async fn certificate_details_persist() -> Result<(), Error> {
    let pool = sqlx::Pool::<sqlx::Sqlite>::connect("sqlite::memory:").await?;
    sqlx::query_file!("migrations/20210629193146_Init.sql")
        .execute(&pool)
        .await?;
    let mgr = SslManager::init(pool.clone()).await?;
    let package_id = "bitcoind".parse().unwrap();
    let (key0, cert_chain0) = mgr.certificate_for("start9", &package_id).await?;
    let (key1, cert_chain1) = mgr.certificate_for("start9", &package_id).await?;

    assert_eq!(
        key0.private_key_to_pem_pkcs8()?,
        key1.private_key_to_pem_pkcs8()?
    );
    // Compare the whole chain, certificate by certificate, as PEM bytes.
    assert_eq!(
        cert_chain0
            .iter()
            .map(|cert| cert.to_pem().unwrap())
            .collect::<Vec<Vec<u8>>>(),
        cert_chain1
            .iter()
            .map(|cert| cert.to_pem().unwrap())
            .collect::<Vec<Vec<u8>>>()
    );
    Ok(())
}
|
||||
@@ -1,449 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::net::{Ipv4Addr, SocketAddr};
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::ArgMatches;
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::FutureExt;
|
||||
use reqwest::Client;
|
||||
use rpc_toolkit::command;
|
||||
use serde_json::json;
|
||||
use sqlx::{Executor, Sqlite};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::sync::Mutex;
|
||||
use torut::control::{AsyncEvent, AuthenticatedConn, ConnError};
|
||||
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
|
||||
use tracing::instrument;
|
||||
|
||||
use super::interface::{InterfaceId, TorConfig};
|
||||
use crate::context::RpcContext;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::serde::{display_serializable, IoFormat};
|
||||
use crate::{Error, ErrorKind, ResultExt as _};
|
||||
|
||||
/// Utility "test" that prints a freshly generated Tor v3 secret key as a SQL
/// blob literal (`x'…'`) — handy for seeding database fixtures by hand.
#[test]
fn random_key() {
    println!("x'{}'", hex::encode(TorSecretKeyV3::generate().as_bytes()));
}
|
||||
|
||||
/// Parent `tor` CLI/RPC command; all functionality lives in its subcommands.
#[command(subcommands(list_services))]
pub fn tor() -> Result<(), Error> {
    Ok(())
}
|
||||
|
||||
fn display_services(services: Vec<OnionAddressV3>, matches: &ArgMatches<'_>) {
|
||||
use prettytable::*;
|
||||
|
||||
if matches.is_present("format") {
|
||||
return display_serializable(services, matches);
|
||||
}
|
||||
|
||||
let mut table = Table::new();
|
||||
for service in services {
|
||||
let row = row![&service.to_string()];
|
||||
table.add_row(row);
|
||||
}
|
||||
table.print_tty(false);
|
||||
}
|
||||
|
||||
/// `tor list-services` RPC/CLI command: returns every onion service currently
/// registered with the Tor controller.
#[command(rename = "list-services", display(display_services))]
pub async fn list_services(
    #[context] ctx: RpcContext,
    // Parsed only so the CLI display helper can see `--format`; unused here.
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<Vec<OnionAddressV3>, Error> {
    ctx.net_controller.tor.list_services().await
}
|
||||
|
||||
/// Loads the OS's Tor v3 secret key from the `account` table of the secret
/// store.
///
/// Fails with `ErrorKind::Database` if the stored blob is shorter than the
/// 64 bytes a v3 secret key requires.
/// NOTE(review): blobs longer than 64 bytes are silently truncated by the
/// `get(0..64)` below — confirm that is intended.
#[instrument(skip(secrets))]
pub async fn os_key<Ex>(secrets: &mut Ex) -> Result<TorSecretKeyV3, Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
{
    let key = sqlx::query!("SELECT tor_key FROM account")
        .fetch_one(secrets)
        .await?
        .tor_key;

    let mut buf = [0; 64];
    buf.clone_from_slice(
        key.get(0..64).ok_or_else(|| {
            Error::new(eyre!("Invalid Tor Key Length"), crate::ErrorKind::Database)
        })?,
    );
    Ok(buf.into())
}
|
||||
|
||||
/// No-op async event handler for the Tor control connection: every incoming
/// event is acknowledged successfully and otherwise ignored.
fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> {
    Box::pin(async { Ok(()) })
}
|
||||
|
||||
/// Async-safe handle to the Tor controller: all access to the underlying
/// [`TorControllerInner`] is serialized through a `Mutex`.
pub struct TorController(Mutex<TorControllerInner>);
impl TorController {
    /// Connects to the Tor control socket and registers the main embassyd
    /// onion service.
    pub async fn init(
        embassyd_addr: SocketAddr,
        embassyd_tor_key: TorSecretKeyV3,
        tor_control: SocketAddr,
    ) -> Result<Self, Error> {
        Ok(TorController(Mutex::new(
            TorControllerInner::init(embassyd_addr, embassyd_tor_key, tor_control).await?,
        )))
    }

    /// Registers (or re-registers) hidden services for a package's interfaces.
    pub async fn add<I: IntoIterator<Item = (InterfaceId, TorConfig, TorSecretKeyV3)> + Clone>(
        &self,
        pkg_id: &PackageId,
        ip: Ipv4Addr,
        interfaces: I,
    ) -> Result<(), Error> {
        self.0.lock().await.add(pkg_id, ip, interfaces).await
    }

    /// Removes the hidden services for the given package interfaces.
    pub async fn remove<I: IntoIterator<Item = InterfaceId> + Clone>(
        &self,
        pkg_id: &PackageId,
        interfaces: I,
    ) -> Result<(), Error> {
        self.0.lock().await.remove(pkg_id, interfaces).await
    }

    /// Restarts the Tor daemon and re-registers all services. Returns whether
    /// a restart actually happened (`false` = refused, daemon too young).
    pub async fn replace(&self) -> Result<bool, Error> {
        self.0.lock().await.replace().await
    }

    /// Returns a clone of embassyd's Tor secret key.
    pub async fn embassyd_tor_key(&self) -> TorSecretKeyV3 {
        self.0.lock().await.embassyd_tor_key.clone()
    }

    /// Returns embassyd's onion address.
    pub async fn embassyd_onion(&self) -> OnionAddressV3 {
        self.0.lock().await.embassyd_onion()
    }

    /// Lists every onion service registered on the current control connection.
    pub async fn list_services(&self) -> Result<Vec<OnionAddressV3>, Error> {
        self.0.lock().await.list_services().await
    }
}
|
||||
|
||||
/// Authenticated Tor control-port connection with a plain (non-capturing)
/// function pointer as the async event handler type.
type AuthenticatedConnection = AuthenticatedConn<
    TcpStream,
    fn(AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>>,
>;

/// Target IP plus Tor port-mapping configuration for one hidden service.
// NOTE(review): not referenced anywhere visible in this module — confirm
// whether it is still needed.
#[derive(Clone, Debug, PartialEq, Eq)]
struct HiddenServiceConfig {
    ip: Ipv4Addr,
    cfg: TorConfig,
}
|
||||
|
||||
/// State behind [`TorController`]'s mutex: the control-port connection plus
/// the bookkeeping needed to re-register every service after a Tor restart.
pub struct TorControllerInner {
    embassyd_addr: SocketAddr,
    embassyd_tor_key: TorSecretKeyV3,
    control_addr: SocketAddr,
    // `None` only transiently while the daemon is being restarted in `replace`.
    connection: Option<AuthenticatedConnection>,
    // Everything needed to re-create each service: key, port config, target IP.
    services: BTreeMap<(PackageId, InterfaceId), (TorSecretKeyV3, TorConfig, Ipv4Addr)>,
}
|
||||
impl TorControllerInner {
    /// Registers hidden services for each interface of a package.
    ///
    /// A service already registered under the same (package, interface) with a
    /// different key is removed and re-added; with the same key it is skipped.
    /// NOTE(review): a changed `TorConfig` or IP with an unchanged key is also
    /// skipped by the `Some(_) => continue` arm — confirm that is intended.
    #[instrument(skip(self, interfaces))]
    async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorConfig, TorSecretKeyV3)>>(
        &mut self,
        pkg_id: &PackageId,
        ip: Ipv4Addr,
        interfaces: I,
    ) -> Result<(), Error> {
        for (interface_id, tor_cfg, key) in interfaces {
            let id = (pkg_id.clone(), interface_id);
            match self.services.get(&id) {
                Some(k) if k.0 != key => {
                    self.remove(pkg_id, std::iter::once(id.1.clone())).await?;
                }
                Some(_) => continue,
                None => (),
            }
            self.connection
                .as_mut()
                .ok_or_else(|| {
                    Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Unknown)
                })?
                .add_onion_v3(
                    &key,
                    false,
                    false,
                    false,
                    None,
                    // Map each external onion port to (ip, internal port).
                    &mut tor_cfg
                        .port_mapping
                        .iter()
                        .map(|(external, internal)| {
                            (external.0, SocketAddr::from((ip, internal.0)))
                        })
                        .collect::<Vec<_>>()
                        .iter(),
                )
                .await?;
            // Remember the service so `replace` can re-register it later.
            self.services.insert(id, (key, tor_cfg, ip));
        }
        Ok(())
    }

    /// Deregisters the hidden services for the given package interfaces.
    /// Unknown interfaces are ignored.
    #[instrument(skip(self, interfaces))]
    async fn remove<I: IntoIterator<Item = InterfaceId>>(
        &mut self,
        pkg_id: &PackageId,
        interfaces: I,
    ) -> Result<(), Error> {
        for interface_id in interfaces {
            if let Some((key, _cfg, _ip)) = self.services.remove(&(pkg_id.clone(), interface_id)) {
                self.connection
                    .as_mut()
                    .ok_or_else(|| {
                        Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Tor)
                    })?
                    .del_onion(
                        &key.public()
                            .get_onion_address()
                            .get_address_without_dot_onion(),
                    )
                    .await?;
            }
        }
        Ok(())
    }

    /// Connects and cookie-authenticates to the Tor control port, installs the
    /// no-op event handler, and registers embassyd's own onion service.
    #[instrument]
    async fn init(
        embassyd_addr: SocketAddr,
        embassyd_tor_key: TorSecretKeyV3,
        tor_control: SocketAddr,
    ) -> Result<Self, Error> {
        let mut conn = torut::control::UnauthenticatedConn::new(
            TcpStream::connect(tor_control).await?, // TODO
        );
        let auth = conn
            .load_protocol_info()
            .await?
            .make_auth_data()?
            .ok_or_else(|| eyre!("Cookie Auth Not Available"))
            .with_kind(crate::ErrorKind::Tor)?;
        conn.authenticate(&auth).await?;
        let mut connection: AuthenticatedConnection = conn.into_authenticated().await;
        connection.set_async_event_handler(Some(event_handler));

        let mut controller = TorControllerInner {
            embassyd_addr,
            embassyd_tor_key,
            control_addr: tor_control,
            connection: Some(connection),
            services: BTreeMap::new(),
        };
        controller.add_embassyd_onion().await?;
        Ok(controller)
    }

    /// Publishes embassyd's own onion service, forwarding its public port to
    /// `embassyd_addr`.
    #[instrument(skip(self))]
    async fn add_embassyd_onion(&mut self) -> Result<(), Error> {
        tracing::info!(
            "Registering Main Tor Service: {}",
            self.embassyd_tor_key.public().get_onion_address()
        );
        self.connection
            .as_mut()
            .ok_or_else(|| Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Tor))?
            .add_onion_v3(
                &self.embassyd_tor_key,
                false,
                false,
                false,
                None,
                &mut std::iter::once(&(self.embassyd_addr.port(), self.embassyd_addr)),
            )
            .await?;
        tracing::info!(
            "Registered Main Tor Service: {}",
            self.embassyd_tor_key.public().get_onion_address()
        );
        Ok(())
    }

    /// Restarts the Tor daemon by taking ownership of (then dropping) the
    /// control connection, reconnects, and re-registers every service.
    ///
    /// Returns `Ok(false)` — without restarting — when the daemon has been up
    /// for less than 30 minutes.
    #[instrument(skip(self))]
    async fn replace(&mut self) -> Result<bool, Error> {
        let connection = self.connection.take();
        let uptime = if let Some(mut c) = connection {
            // this should be unreachable because the only time when this should be none is for the duration of tor's
            // restart lower down in this method, which is held behind a Mutex
            let uptime = c.get_info("uptime").await?.parse::<u64>()?;
            // we never want to restart the tor daemon if it hasn't been up for at least a half hour
            if uptime < 1800 {
                self.connection = Some(c); // put it back
                return Ok(false);
            }
            // when connection closes below, tor daemon is restarted
            c.take_ownership().await?;
            // this should close the connection
            drop(c);
            Some(uptime)
        } else {
            None
        };

        // attempt to reconnect to the control socket, not clear how long this should take
        let mut new_connection: AuthenticatedConnection;
        loop {
            match TcpStream::connect(self.control_addr).await {
                Ok(stream) => {
                    let mut new_conn = torut::control::UnauthenticatedConn::new(stream);
                    let auth = new_conn
                        .load_protocol_info()
                        .await?
                        .make_auth_data()?
                        .ok_or_else(|| eyre!("Cookie Auth Not Available"))
                        .with_kind(crate::ErrorKind::Tor)?;
                    new_conn.authenticate(&auth).await?;
                    new_connection = new_conn.into_authenticated().await;
                    let uptime_new = new_connection.get_info("uptime").await?.parse::<u64>()?;
                    // if the new uptime exceeds the one we got at the beginning, it's the same tor daemon, do not proceed
                    match uptime {
                        Some(uptime) if uptime_new > uptime => (),
                        _ => {
                            new_connection.set_async_event_handler(Some(event_handler));
                            break;
                        }
                    }
                }
                Err(e) => {
                    tracing::info!("Failed to reconnect to tor control socket: {}", e);
                }
            }
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
        // replace the connection object here on the new copy of the tor daemon
        self.connection.replace(new_connection);

        // swap empty map for owned old service map
        let old_services = std::mem::replace(&mut self.services, BTreeMap::new());

        // re add all of the services on the new control socket
        for ((package_id, interface_id), (tor_key, tor_cfg, ipv4)) in old_services {
            self.add(
                &package_id,
                ipv4,
                std::iter::once((interface_id, tor_cfg, tor_key)),
            )
            .await?;
        }

        // add embassyd hidden service again
        self.add_embassyd_onion().await?;

        Ok(true)
    }

    /// Returns embassyd's onion address, derived from its secret key.
    fn embassyd_onion(&self) -> OnionAddressV3 {
        self.embassyd_tor_key.public().get_onion_address()
    }

    /// Queries the daemon for all currently registered onion services
    /// (`GETINFO onions/current`), one address per line.
    #[instrument(skip(self))]
    async fn list_services(&mut self) -> Result<Vec<OnionAddressV3>, Error> {
        self.connection
            .as_mut()
            .ok_or_else(|| Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Tor))?
            .get_info("onions/current")
            .await?
            .lines()
            .map(|l| l.trim().parse().with_kind(ErrorKind::Tor))
            .collect()
    }
}
|
||||
|
||||
pub async fn tor_health_check(client: &Client, tor_controller: &TorController) {
|
||||
tracing::debug!("Attempting to self-check tor address");
|
||||
let onion = tor_controller.embassyd_onion().await;
|
||||
let result = client
|
||||
.post(format!("http://{}/rpc/v1", onion))
|
||||
.body(
|
||||
json!({
|
||||
"jsonrpc": "2.0",
|
||||
"method": "echo",
|
||||
"params": { "message": "Follow the orange rabbit" },
|
||||
})
|
||||
.to_string()
|
||||
.into_bytes(),
|
||||
)
|
||||
.send()
|
||||
.await;
|
||||
match result {
|
||||
// if success, do nothing
|
||||
Ok(_) => {
|
||||
tracing::debug!(
|
||||
"Successfully verified main tor address liveness at {}",
|
||||
onion
|
||||
)
|
||||
}
|
||||
// if failure, disconnect tor control port, and restart tor controller
|
||||
Err(e) => {
|
||||
tracing::error!("Unable to reach self over tor: {}", e);
|
||||
loop {
|
||||
match tor_controller.replace().await {
|
||||
Ok(restarted) => {
|
||||
if restarted {
|
||||
tracing::error!("Tor has been recently restarted, refusing to restart");
|
||||
}
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Unable to restart tor: {}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Manual integration test: requires a local Tor daemon with its control port
/// listening on 127.0.0.1:9051 and cookie auth enabled. Registers the same
/// key twice with different external-port mappings.
#[tokio::test]
async fn test() {
    let mut conn = torut::control::UnauthenticatedConn::new(
        TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 9051)))
            .await
            .unwrap(), // TODO
    );
    let auth = conn
        .load_protocol_info()
        .await
        .unwrap()
        .make_auth_data()
        .unwrap()
        .ok_or_else(|| eyre!("Cookie Auth Not Available"))
        .with_kind(crate::ErrorKind::Tor)
        .unwrap();
    conn.authenticate(&auth).await.unwrap();
    let mut connection: AuthenticatedConn<
        TcpStream,
        fn(AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>>,
    > = conn.into_authenticated().await;
    let tor_key = torut::onion::TorSecretKeyV3::generate();
    dbg!(connection.get_conf("SocksPort").await.unwrap());
    // First registration: onion port 443 -> local 8443.
    connection
        .add_onion_v3(
            &tor_key,
            false,
            false,
            false,
            None,
            &mut [(443_u16, SocketAddr::from(([127, 0, 0, 1], 8443)))].iter(),
        )
        .await
        .unwrap();
    // Second registration with the same key, different external port.
    connection
        .add_onion_v3(
            &tor_key,
            false,
            false,
            false,
            None,
            &mut [(8443_u16, SocketAddr::from(([127, 0, 0, 1], 8443)))].iter(),
        )
        .await
        .unwrap();
}
|
||||
@@ -1,29 +0,0 @@
|
||||
# Diagnostic UI: plain-HTTP catch-all server used when EmbassyOS boots into
# diagnostic mode. Serves the static diagnostic frontend and proxies JSON-RPC.
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    root /var/www/html/diagnostic;

    index index.html index.htm index.nginx-debian.html;

    # Catch-all: respond regardless of the requested hostname.
    server_name _;

    # Stream proxied responses without buffering; keep upstream sockets alive.
    proxy_buffering off;
    proxy_request_buffering off;
    proxy_socket_keepalive on;
    proxy_http_version 1.1;
    proxy_read_timeout 1800;

    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;

    # JSON-RPC backend.
    location /rpc/ {
        proxy_pass http://127.0.0.1:5959/;
    }

    # Static frontend.
    location / {
        try_files $uri $uri/ =404;
    }
}
|
||||
@@ -1,115 +0,0 @@
|
||||
|
||||
# Standard WebSocket upgrade map: forward the client's Upgrade header, falling
# back to the original Connection header when no upgrade was requested.
# (Braces are doubled because this file is a Rust format! template.)
map $http_upgrade $connection_upgrade {{
    default upgrade;
    '' $http_connection;
}}
|
||||
|
||||
# Main UI over LAN: HTTPS server for the .local hostname, terminating TLS with
# the embassy certificate and proxying RPC/WebSocket/REST/static traffic.
server {{
    listen 443 ssl default_server;
    listen [::]:443 ssl default_server;
    ssl_certificate /etc/nginx/ssl/embassy_main.cert.pem;
    ssl_certificate_key /etc/nginx/ssl/embassy_main.key.pem;

    root /var/www/html/main;

    index index.html index.htm index.nginx-debian.html;

    server_name .{lan_hostname};

    # Stream proxied responses without buffering; keep upstream sockets alive.
    proxy_buffering off;
    proxy_request_buffering off;
    proxy_socket_keepalive on;
    proxy_http_version 1.1;
    proxy_read_timeout 1800;

    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    # FIX: "font/tts" and "font/openttype" were invalid MIME types that never
    # matched, so TTF/OpenType fonts were served uncompressed; corrected to
    # "font/ttf" and "font/opentype".
    # NOTE(review): "font/eot" is also nonstandard (usually
    # application/vnd.ms-fontobject) — confirm against the served types.
    gzip_types text/plain text/css text/xml text/javascript application/javascript image/svg+xml font/ttf font/otf font/eot font/opentype application/x-javascript application/xml;


    # JSON-RPC backend.
    location /rpc/ {{
        proxy_pass http://127.0.0.1:5959/;
    }}

    # WebSocket backend (requires the Upgrade/Connection handshake headers).
    location /ws/ {{
        proxy_pass http://127.0.0.1:5960$request_uri;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
    }}

    # REST backend; unlimited body size for uploads.
    location /rest/ {{
        proxy_pass http://127.0.0.1:5960$request_uri;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        client_max_body_size 0;
    }}

    # Public file server.
    location /public/ {{
        proxy_pass http://127.0.0.1:5961/;
    }}

    # Static frontend.
    location / {{
        try_files $uri $uri/ =404;
    }}
}}
|
||||
# LAN hostname over plain HTTP: permanently redirect to HTTPS.
server {{
    listen 80;
    listen [::]:80;
    server_name .{lan_hostname};
    return 301 https://$host$request_uri;
}}
|
||||
# Main UI over Tor: plain-HTTP default server for the onion hostname (the
# onion service itself provides transport security).
server {{
    listen 80 default_server;
    listen [::]:80 default_server;
    # NOTE(review): ssl_certificate directives in a non-TLS server are inert —
    # likely copied from the 443 block; confirm whether they can be dropped.
    ssl_certificate /etc/nginx/ssl/embassy_main.cert.pem;
    ssl_certificate_key /etc/nginx/ssl/embassy_main.key.pem;

    root /var/www/html/main;

    index index.html index.htm index.nginx-debian.html;

    server_name .{tor_hostname};

    # Stream proxied responses without buffering; keep upstream sockets alive.
    proxy_buffering off;
    proxy_request_buffering off;
    proxy_socket_keepalive on;
    proxy_http_version 1.1;
    proxy_read_timeout 1800;

    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;

    # JSON-RPC backend.
    location /rpc/ {{
        proxy_pass http://127.0.0.1:5959/;
    }}

    # WebSocket backend (requires the Upgrade/Connection handshake headers).
    location /ws/ {{
        proxy_pass http://127.0.0.1:5960$request_uri;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
    }}

    # REST backend; unlimited body size for uploads.
    location /rest/ {{
        proxy_pass http://127.0.0.1:5960$request_uri;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        client_max_body_size 0;
    }}

    # Public file server.
    location /public/ {{
        proxy_pass http://127.0.0.1:5961/;
    }}

    # Static frontend.
    location / {{
        try_files $uri $uri/ =404;
    }}
}}
|
||||
# Tor clients that mistakenly use HTTPS on 443: terminate TLS and redirect to
# plain HTTP (the onion service already provides transport security).
# FIX: the IPv6 listen was missing the `ssl` parameter (IPv6 clients would get
# raw HTTP on port 443), and an `ssl` server must define a certificate or
# nginx rejects the configuration — reuse the main embassy certificate.
server {{
    listen 443 ssl;
    listen [::]:443 ssl;
    ssl_certificate /etc/nginx/ssl/embassy_main.cert.pem;
    ssl_certificate_key /etc/nginx/ssl/embassy_main.key.pem;
    server_name .{tor_hostname};
    return 301 http://$host$request_uri;
}}
|
||||
@@ -1,29 +0,0 @@
|
||||
# Setup UI: plain-HTTP catch-all server for first-boot setup. Serves the
# static setup frontend and proxies JSON-RPC.
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    root /var/www/html/setup;

    index index.html index.htm index.nginx-debian.html;

    # Catch-all: respond regardless of the requested hostname.
    server_name _;

    # Stream proxied responses without buffering; keep upstream sockets alive.
    proxy_buffering off;
    proxy_request_buffering off;
    proxy_socket_keepalive on;
    proxy_http_version 1.1;
    proxy_read_timeout 1800;

    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;

    # JSON-RPC backend.
    location /rpc/ {
        proxy_pass http://127.0.0.1:5959/;
    }

    # Static frontend.
    location / {
        try_files $uri $uri/ =404;
    }
}
|
||||
@@ -1,164 +0,0 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use imbl::OrdMap;
|
||||
use rpc_toolkit::command;
|
||||
use serde_json::Value;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::context::SdkContext;
|
||||
use crate::s9pk::builder::S9pkPacker;
|
||||
use crate::s9pk::manifest::Manifest;
|
||||
use crate::s9pk::reader::S9pkReader;
|
||||
use crate::util::display_none;
|
||||
use crate::util::serde::IoFormat;
|
||||
use crate::volume::Volume;
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
pub mod builder;
|
||||
pub mod header;
|
||||
pub mod manifest;
|
||||
pub mod reader;
|
||||
|
||||
/// Domain-separation context string used when signing/verifying s9pk packages.
// `'static` is implied on const items; the explicit lifetime was redundant
// (clippy::redundant_static_lifetimes).
pub const SIG_CONTEXT: &[u8] = b"s9pk";
|
||||
|
||||
/// `sdk pack` CLI command: packages the project at `path` (default: the
/// current directory) into `<id>.s9pk`, signed with the developer key from
/// the SDK context.
///
/// Reads the manifest from `manifest.toml`/`.yaml`/`.json` (first match wins),
/// warns about unrecognized manifest keys, then writes the license, icon,
/// instructions, docker images, and asset volumes into the package.
#[command(cli_only, display(display_none), blocking)]
#[instrument(skip(ctx))]
pub fn pack(#[context] ctx: SdkContext, #[arg] path: Option<PathBuf>) -> Result<(), Error> {
    use std::fs::File;
    use std::io::Read;

    let path = if let Some(path) = path {
        path
    } else {
        std::env::current_dir()?
    };
    // Accept the manifest in any of the three supported formats.
    let manifest_value: Value = if path.join("manifest.toml").exists() {
        IoFormat::Toml.from_reader(File::open(path.join("manifest.toml"))?)?
    } else if path.join("manifest.yaml").exists() {
        IoFormat::Yaml.from_reader(File::open(path.join("manifest.yaml"))?)?
    } else if path.join("manifest.json").exists() {
        IoFormat::Json.from_reader(File::open(path.join("manifest.json"))?)?
    } else {
        return Err(Error::new(
            eyre!("manifest not found"),
            crate::ErrorKind::Pack,
        ));
    };
    let manifest: Manifest = serde_json::from_value(manifest_value.clone())
        .with_kind(crate::ErrorKind::Deserialization)?;
    // Warn (don't fail) on keys present in the raw manifest but absent from
    // the typed one.
    let extra_keys =
        enumerate_extra_keys(&serde_json::to_value(&manifest).unwrap(), &manifest_value);
    for k in extra_keys {
        tracing::warn!("Unrecognized Manifest Key: {}", k);
    }

    let outfile_path = path.join(format!("{}.s9pk", manifest.id));
    let mut outfile = File::create(outfile_path)?;
    S9pkPacker::builder()
        .manifest(&manifest)
        .writer(&mut outfile)
        .license(
            File::open(path.join(manifest.assets.license_path())).with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    manifest.assets.license_path().display().to_string(),
                )
            })?,
        )
        .icon(
            File::open(path.join(manifest.assets.icon_path())).with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    manifest.assets.icon_path().display().to_string(),
                )
            })?,
        )
        .instructions(
            File::open(path.join(manifest.assets.instructions_path())).with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    manifest.assets.instructions_path().display().to_string(),
                )
            })?,
        )
        .docker_images(
            File::open(path.join(manifest.assets.docker_images_path())).with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    manifest.assets.docker_images_path().display().to_string(),
                )
            })?,
        )
        .assets({
            let mut assets = tar::Builder::new(Vec::new()); // TODO: Ideally stream this? best not to buffer in memory

            // Only volumes declared as `Assets` are bundled.
            for (asset_volume, _) in manifest
                .volumes
                .iter()
                .filter(|(_, v)| matches!(v, &&Volume::Assets {}))
            {
                assets.append_dir_all(
                    asset_volume,
                    path.join(manifest.assets.assets_path()).join(asset_volume),
                )?;
            }

            std::io::Cursor::new(assets.into_inner()?)
        })
        .build()
        .pack(&ctx.developer_key()?)?;
    // Flush to disk before returning success.
    outfile.sync_all()?;

    Ok(())
}
|
||||
|
||||
/// `verify s9pk` CLI command: opens a package file and validates its
/// signature and contents.
#[command(rename = "s9pk", cli_only, display(display_none))]
pub async fn verify(#[arg] path: PathBuf) -> Result<(), Error> {
    let mut s9pk = S9pkReader::open(path, true).await?;
    s9pk.validate().await?;

    Ok(())
}
|
||||
|
||||
fn enumerate_extra_keys(reference: &Value, candidate: &Value) -> Vec<String> {
|
||||
match (reference, candidate) {
|
||||
(Value::Object(m_r), Value::Object(m_c)) => {
|
||||
let om_r: OrdMap<String, Value> = m_r.clone().into_iter().collect();
|
||||
let om_c: OrdMap<String, Value> = m_c.clone().into_iter().collect();
|
||||
let common = om_r.clone().intersection(om_c.clone());
|
||||
let top_extra = common.clone().symmetric_difference(om_c.clone());
|
||||
let mut all_extra = top_extra
|
||||
.keys()
|
||||
.map(|s| format!(".{}", s))
|
||||
.collect::<Vec<String>>();
|
||||
for (k, v) in common {
|
||||
all_extra.extend(
|
||||
enumerate_extra_keys(&v, om_c.get(&k).unwrap())
|
||||
.into_iter()
|
||||
.map(|s| format!(".{}{}", k, s)),
|
||||
)
|
||||
}
|
||||
all_extra
|
||||
}
|
||||
(_, Value::Object(m1)) => m1.clone().keys().map(|s| format!(".{}", s)).collect(),
|
||||
_ => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Smoke test for `enumerate_extra_keys`: exercises a nested extra key
/// (`.test2.test3`) and a top-level extra key (`.test4`), printing rather
/// than asserting on the result.
#[test]
fn test_enumerate_extra_keys() {
    use serde_json::json;
    let extras = enumerate_extra_keys(
        &json!({
            "test": 1,
            "test2": null,
        }),
        &json!({
            "test": 1,
            "test2": { "test3": null },
            "test4": null
        }),
    );
    println!("{:?}", extras)
}
|
||||
@@ -1,807 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::os::unix::prelude::MetadataExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use digest::generic_array::GenericArray;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::{FutureExt, TryFutureExt, TryStreamExt};
|
||||
use nix::unistd::{Gid, Uid};
|
||||
use openssl::x509::X509;
|
||||
use patch_db::LockType;
|
||||
use rpc_toolkit::command;
|
||||
use rpc_toolkit::yajrc::RpcError;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use sqlx::{Executor, Sqlite};
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::backup::restore::recover_full_embassy;
|
||||
use crate::backup::target::BackupTargetFS;
|
||||
use crate::context::rpc::RpcContextConfig;
|
||||
use crate::context::setup::SetupResult;
|
||||
use crate::context::SetupContext;
|
||||
use crate::db::model::RecoveredPackageInfo;
|
||||
use crate::disk::fsck::RepairStrategy;
|
||||
use crate::disk::main::DEFAULT_PASSWORD;
|
||||
use crate::disk::mount::filesystem::block_dev::BlockDev;
|
||||
use crate::disk::mount::filesystem::cifs::Cifs;
|
||||
use crate::disk::mount::filesystem::ReadOnly;
|
||||
use crate::disk::mount::guard::TmpMountGuard;
|
||||
use crate::disk::util::{pvscan, recovery_info, DiskListResponse, EmbassyOsRecoveryInfo};
|
||||
use crate::hostname::PRODUCT_KEY_PATH;
|
||||
use crate::id::Id;
|
||||
use crate::init::init;
|
||||
use crate::install::PKG_PUBLIC_DIR;
|
||||
use crate::net::ssl::SslManager;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::sound::BEETHOVEN;
|
||||
use crate::util::io::{dir_size, from_yaml_async_reader};
|
||||
use crate::util::Version;
|
||||
use crate::volume::{data_dir, VolumeId};
|
||||
use crate::{ensure_code, Error, ErrorKind, ResultExt};
|
||||
|
||||
/// Fetches the stored account password hash from the `account` table of the
/// secret store.
#[instrument(skip(secrets))]
pub async fn password_hash<Ex>(secrets: &mut Ex) -> Result<String, Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
{
    let password = sqlx::query!("SELECT password FROM account")
        .fetch_one(secrets)
        .await?
        .password;

    Ok(password)
}
|
||||
|
||||
/// Parent `setup` command; all functionality lives in its subcommands.
#[command(subcommands(status, disk, attach, execute, recovery, cifs, complete))]
pub fn setup() -> Result<(), Error> {
    Ok(())
}
|
||||
|
||||
/// Response payload for `setup status`.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct StatusRes {
    // Whether a product key file is present on the boot partition.
    product_key: bool,
    // Whether a recovery/migration is currently in progress.
    migrating: bool,
}
|
||||
|
||||
/// `setup status` RPC: reports whether a product key file exists and whether
/// a recovery migration is running. Unauthenticated so the setup UI can poll.
#[command(rpc_only, metadata(authenticated = false))]
pub async fn status(#[context] ctx: SetupContext) -> Result<StatusRes, Error> {
    Ok(StatusRes {
        // Presence check only; the key contents are not read here.
        product_key: tokio::fs::metadata("/embassy-os/product_key.txt")
            .await
            .is_ok(),
        migrating: ctx.recovery_status.read().await.is_some(),
    })
}
|
||||
|
||||
/// Parent `setup disk` command.
#[command(subcommands(list_disks))]
pub fn disk() -> Result<(), Error> {
    Ok(())
}

/// `setup disk list` RPC: enumerates attached disks (with any recovery info
/// found on them). Unauthenticated so the setup UI can call it.
#[command(rename = "list", rpc_only, metadata(authenticated = false))]
pub async fn list_disks() -> Result<DiskListResponse, Error> {
    crate::disk::list(None).await
}
|
||||
|
||||
/// `setup attach` RPC: imports an existing EmbassyOS data drive identified by
/// `guid`, verifies its product key matches this device, initializes the OS
/// from it, and returns the resulting addresses and root CA.
#[command(rpc_only)]
pub async fn attach(
    #[context] ctx: SetupContext,
    #[arg] guid: Arc<String>,
) -> Result<SetupResult, Error> {
    crate::disk::main::import(
        &*guid,
        &ctx.datadir,
        RepairStrategy::Preen,
        DEFAULT_PASSWORD,
    )
    .await?;
    // If the drive carries a product key, it must match ours; otherwise
    // detach the drive again and refuse.
    let product_key_path = Path::new("/embassy-data/main/product_key.txt");
    if tokio::fs::metadata(product_key_path).await.is_ok() {
        let pkey = tokio::fs::read_to_string(product_key_path).await?;
        if pkey.trim() != &*ctx.product_key().await? {
            crate::disk::main::export(&*guid, &ctx.datadir).await?;
            return Err(Error::new(
                eyre!("The EmbassyOS product key does not match the supplied drive"),
                ErrorKind::ProductKeyMismatch,
            ));
        }
    }
    init(
        &RpcContextConfig::load(ctx.config_path.as_ref()).await?,
        &*ctx.product_key().await?,
    )
    .await?;
    // Collect the information the setup UI needs to hand off to the user.
    let secrets = ctx.secret_store().await?;
    let tor_key = crate::net::tor::os_key(&mut secrets.acquire().await?).await?;
    let (_, root_ca) = SslManager::init(secrets).await?.export_root_ca().await?;
    let setup_result = SetupResult {
        tor_address: format!("http://{}", tor_key.public().get_onion_address()),
        lan_address: format!(
            "https://embassy-{}.local",
            crate::hostname::derive_id(&*ctx.product_key().await?)
        ),
        root_ca: String::from_utf8(root_ca.to_pem()?)?,
    };
    // Remember the result so `setup complete` / later queries can reuse it.
    *ctx.setup_result.write().await = Some((guid, setup_result.clone()));
    Ok(setup_result)
}
|
||||
|
||||
/// Parent command for `setup.recovery`; see `v2` and `recovery_status`.
#[command(subcommands(v2, recovery_status))]
pub fn recovery() -> Result<(), Error> {
    Ok(())
}
|
||||
|
||||
/// Parent command for `setup.recovery.v2`; see `set`.
#[command(subcommands(set))]
pub fn v2() -> Result<(), Error> {
    Ok(())
}
|
||||
|
||||
/// Select a v0.2.x drive to migrate from: mount it read-only, read the legacy
/// product key, cache it on the context, and remember the drive so
/// `setup.execute` uses it as its recovery source.
#[command(rpc_only, metadata(authenticated = false))]
pub async fn set(#[context] ctx: SetupContext, #[arg] logicalname: PathBuf) -> Result<(), Error> {
    let guard = TmpMountGuard::mount(&BlockDev::new(&logicalname), ReadOnly).await?;
    // NOTE(review): if this read fails, `guard.unmount()` below is skipped —
    // presumably TmpMountGuard cleans up on drop; confirm.
    let product_key = tokio::fs::read_to_string(guard.as_ref().join("root/agent/product_key"))
        .await?
        .trim()
        .to_owned();
    guard.unmount().await?;
    *ctx.cached_product_key.write().await = Some(Arc::new(product_key));
    *ctx.selected_v2_drive.write().await = Some(logicalname);
    Ok(())
}
|
||||
|
||||
/// Progress snapshot for an in-flight data migration, exposed via
/// `setup.recovery.status`.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct RecoveryStatus {
    // bytes copied so far
    pub bytes_transferred: u64,
    // total bytes expected (computed up front from the source volumes)
    pub total_bytes: u64,
    // set once the migration future finishes successfully
    pub complete: bool,
}
|
||||
|
||||
/// `setup.recovery.status` — current migration progress, or None if no
/// recovery is running. A stored `Err` from the migration is surfaced to the
/// caller via `transpose()`.
#[command(rename = "status", rpc_only, metadata(authenticated = false))]
pub async fn recovery_status(
    #[context] ctx: SetupContext,
) -> Result<Option<RecoveryStatus>, RpcError> {
    ctx.recovery_status.read().await.clone().transpose()
}
|
||||
|
||||
/// Parent command for `setup.cifs`; see `verify_cifs`.
#[command(subcommands(verify_cifs))]
pub fn cifs() -> Result<(), Error> {
    Ok(())
}
|
||||
|
||||
/// `setup.cifs.verify` — mount the given CIFS share read-only, probe it for an
/// EmbassyOS backup, and return that backup's info. Errors with NotFound when
/// the share mounts but holds no backup.
#[command(rename = "verify", rpc_only)]
pub async fn verify_cifs(
    #[arg] hostname: String,
    #[arg] path: PathBuf,
    #[arg] username: String,
    #[arg] password: Option<String>,
) -> Result<EmbassyOsRecoveryInfo, Error> {
    let guard = TmpMountGuard::mount(
        &Cifs {
            hostname,
            path,
            username,
            password,
        },
        ReadOnly,
    )
    .await?;
    // NOTE(review): if recovery_info fails, the explicit unmount is skipped —
    // presumably the guard unmounts on drop; confirm.
    let embassy_os = recovery_info(&guard).await?;
    guard.unmount().await?;
    embassy_os.ok_or_else(|| Error::new(eyre!("No Backup Found"), crate::ErrorKind::NotFound))
}
|
||||
|
||||
/// `setup.execute` — run first-boot setup on `embassy_logicalname`, optionally
/// restoring from a recovery source. A drive previously chosen via
/// `setup.recovery.v2.set` overrides any `recovery-source` argument.
/// Delegates the real work to `execute_inner` and wraps its result into a
/// `SetupResult` for the UI.
#[command(rpc_only)]
pub async fn execute(
    #[context] ctx: SetupContext,
    #[arg(rename = "embassy-logicalname")] embassy_logicalname: PathBuf,
    #[arg(rename = "embassy-password")] embassy_password: String,
    #[arg(rename = "recovery-source")] mut recovery_source: Option<BackupTargetFS>,
    #[arg(rename = "recovery-password")] recovery_password: Option<String>,
) -> Result<SetupResult, Error> {
    if let Some(v2_drive) = &*ctx.selected_v2_drive.read().await {
        recovery_source = Some(BackupTargetFS::Disk(BlockDev::new(v2_drive.clone())))
    }
    match execute_inner(
        ctx.clone(),
        embassy_logicalname,
        embassy_password,
        recovery_source,
        recovery_password,
    )
    .await
    {
        Ok((tor_addr, root_ca)) => {
            tracing::info!("Setup Successful! Tor Address: {}", tor_addr);
            Ok(SetupResult {
                tor_address: format!("http://{}", tor_addr),
                lan_address: format!(
                    "https://embassy-{}.local",
                    crate::hostname::derive_id(&ctx.product_key().await?)
                ),
                root_ca: String::from_utf8(root_ca.to_pem()?)?,
            })
        }
        Err(e) => {
            tracing::error!("Error Setting Up Embassy: {}", e);
            tracing::debug!("{:?}", e);
            Err(e)
        }
    }
}
|
||||
|
||||
/// `setup.complete` — finalize setup after `execute`/`attach` succeeded:
/// sync the product key to disk, write server id and LAN address into the db,
/// persist the disk guid, then signal the setup server to shut down so the
/// main daemon can take over. Fails with InvalidRequest if no setup result
/// has been recorded yet.
#[instrument(skip(ctx))]
#[command(rpc_only)]
pub async fn complete(#[context] ctx: SetupContext) -> Result<SetupResult, Error> {
    let (guid, setup_result) = if let Some((guid, setup_result)) = &*ctx.setup_result.read().await {
        (guid.clone(), setup_result.clone())
    } else {
        return Err(Error::new(
            eyre!("setup.execute has not completed successfully"),
            crate::ErrorKind::InvalidRequest,
        ));
    };
    // Ensure the hostname-derived product key file matches the cached key.
    if tokio::fs::metadata(PRODUCT_KEY_PATH).await.is_err() {
        crate::hostname::set_product_key(&*ctx.product_key().await?).await?;
    } else {
        let key_on_disk = crate::hostname::get_product_key().await?;
        let key_in_cache = ctx.product_key().await?;
        if *key_in_cache != key_on_disk {
            crate::hostname::set_product_key(&*ctx.product_key().await?).await?;
        }
    }
    tokio::fs::write(
        Path::new("/embassy-data/main/product_key.txt"),
        &*ctx.product_key().await?,
    )
    .await?;
    let secrets = ctx.secret_store().await?;
    let mut db = ctx.db(&secrets).await?.handle();
    let hostname = crate::hostname::get_hostname().await?;
    let si = crate::db::DatabaseModel::new().server_info();
    si.clone()
        .id()
        .put(&mut db, &crate::hostname::get_id().await?)
        .await?;
    si.lan_address()
        .put(
            &mut db,
            &format!("https://{}.local", &hostname).parse().unwrap(),
        )
        .await?;
    // Persist the data-drive guid so subsequent boots can re-import it.
    let mut guid_file = File::create("/embassy-os/disk.guid").await?;
    guid_file.write_all(guid.as_bytes()).await?;
    guid_file.sync_all().await?;
    ctx.shutdown.send(()).expect("failed to shutdown");
    Ok(setup_result)
}
|
||||
|
||||
/// Core of `setup.execute`: create + import a fresh encrypted volume group on
/// the target disk, then either (a) kick off a recovery — returning
/// immediately while the copy continues in a spawned task that records its
/// outcome in `ctx.recovery_status`/`ctx.setup_result` — or (b) perform a
/// fresh setup synchronously. Returns the tor address and root CA either way.
#[instrument(skip(ctx, embassy_password, recovery_password))]
pub async fn execute_inner(
    ctx: SetupContext,
    embassy_logicalname: PathBuf,
    embassy_password: String,
    recovery_source: Option<BackupTargetFS>,
    recovery_password: Option<String>,
) -> Result<(OnionAddressV3, X509), Error> {
    // Only one recovery may run at a time.
    if ctx.recovery_status.read().await.is_some() {
        return Err(Error::new(
            eyre!("Cannot execute setup while in recovery!"),
            crate::ErrorKind::InvalidRequest,
        ));
    }
    let guid = Arc::new(
        crate::disk::main::create(
            &[embassy_logicalname],
            &pvscan().await?,
            &ctx.datadir,
            DEFAULT_PASSWORD,
        )
        .await?,
    );
    crate::disk::main::import(
        &*guid,
        &ctx.datadir,
        RepairStrategy::Preen,
        DEFAULT_PASSWORD,
    )
    .await?;

    let res = if let Some(recovery_source) = recovery_source {
        let (tor_addr, root_ca, recover_fut) = recover(
            ctx.clone(),
            guid.clone(),
            embassy_password,
            recovery_source,
            recovery_password,
        )
        .await?;
        init(
            &RpcContextConfig::load(ctx.config_path.as_ref()).await?,
            &ctx.product_key().await?,
        )
        .await?;
        let res = (tor_addr, root_ca.clone());
        // Detach: the bulk copy continues in the background; the RPC returns
        // now and progress is polled via `setup.recovery.status`.
        tokio::spawn(async move {
            if let Err(e) = recover_fut
                .and_then(|_| async {
                    *ctx.setup_result.write().await = Some((
                        guid,
                        SetupResult {
                            tor_address: format!("http://{}", tor_addr),
                            lan_address: format!(
                                "https://embassy-{}.local",
                                crate::hostname::derive_id(&ctx.product_key().await?)
                            ),
                            root_ca: String::from_utf8(root_ca.to_pem()?)?,
                        },
                    ));
                    if let Some(Ok(recovery_status)) = &mut *ctx.recovery_status.write().await {
                        recovery_status.complete = true;
                    }
                    Ok(())
                })
                .await
            {
                BEETHOVEN.play().await.unwrap_or_default(); // ignore error in playing the song
                tracing::error!("Error recovering drive!: {}", e);
                tracing::debug!("{:?}", e);
                // Store the failure so recovery_status reports it to the UI.
                *ctx.recovery_status.write().await = Some(Err(e.into()));
            } else {
                tracing::info!("Recovery Complete!");
            }
        });
        res
    } else {
        let (tor_addr, root_ca) = fresh_setup(&ctx, &embassy_password).await?;
        init(
            &RpcContextConfig::load(ctx.config_path.as_ref()).await?,
            &ctx.product_key().await?,
        )
        .await?;
        *ctx.setup_result.write().await = Some((
            guid,
            SetupResult {
                tor_address: format!("http://{}", tor_addr),
                lan_address: format!(
                    "https://embassy-{}.local",
                    crate::hostname::derive_id(&ctx.product_key().await?)
                ),
                root_ca: String::from_utf8(root_ca.to_pem()?)?,
            },
        ));
        (tor_addr, root_ca)
    };

    Ok(res)
}
|
||||
|
||||
/// First-time setup with no recovery source: hash the admin password
/// (argon2, random 16-byte salt), generate a fresh tor v3 key, store both in
/// the `account` row of the secret store, and initialize the root CA.
/// Returns the onion address and root CA certificate.
async fn fresh_setup(
    ctx: &SetupContext,
    embassy_password: &str,
) -> Result<(OnionAddressV3, X509), Error> {
    let password = argon2::hash_encoded(
        embassy_password.as_bytes(),
        &rand::random::<[u8; 16]>()[..],
        &argon2::Config::default(),
    )
    .with_kind(crate::ErrorKind::PasswordHashGeneration)?;
    let tor_key = TorSecretKeyV3::generate();
    let key_vec = tor_key.as_bytes().to_vec();
    let sqlite_pool = ctx.secret_store().await?;
    // REPLACE: idempotent if setup is re-run; the account row has a fixed id 0.
    sqlx::query!(
        "REPLACE INTO account (id, password, tor_key) VALUES (?, ?, ?)",
        0,
        password,
        key_vec,
    )
    .execute(&mut sqlite_pool.acquire().await?)
    .await?;
    let (_, root_ca) = SslManager::init(sqlite_pool.clone())
        .await?
        .export_root_ca()
        .await?;
    sqlite_pool.close().await;
    Ok((tor_key.public().get_onion_address(), root_ca))
}
|
||||
|
||||
/// Mount the recovery source and dispatch on the backed-up OS version:
/// 0.2.x → `recover_v2` migration, 0.3.x → `recover_full_embassy`, anything
/// else is rejected. A source with no readable recovery info is assumed to be
/// 0.2.0. Returns (tor address, root CA, future that performs the bulk copy).
#[instrument(skip(ctx, embassy_password, recovery_password))]
async fn recover(
    ctx: SetupContext,
    guid: Arc<String>,
    embassy_password: String,
    recovery_source: BackupTargetFS,
    recovery_password: Option<String>,
) -> Result<(OnionAddressV3, X509, BoxFuture<'static, Result<(), Error>>), Error> {
    let recovery_source = TmpMountGuard::mount(&recovery_source, ReadOnly).await?;
    let recovery_version = recovery_info(&recovery_source)
        .await?
        .as_ref()
        .map(|i| i.version.clone())
        // no info file => legacy 0.2.0 layout
        .unwrap_or_else(|| emver::Version::new(0, 2, 0, 0).into());
    let res = if recovery_version.major() == 0 && recovery_version.minor() == 2 {
        recover_v2(ctx.clone(), &embassy_password, recovery_source).await?
    } else if recovery_version.major() == 0 && recovery_version.minor() == 3 {
        recover_full_embassy(
            ctx.clone(),
            guid.clone(),
            embassy_password,
            recovery_source,
            recovery_password,
        )
        .await?
    } else {
        return Err(Error::new(
            eyre!("Unsupported version of EmbassyOS: {}", recovery_version),
            crate::ErrorKind::VersionIncompatible,
        ));
    };

    Ok(res)
}
|
||||
|
||||
async fn shasum(
|
||||
path: impl AsRef<Path>,
|
||||
) -> Result<GenericArray<u8, <Sha256 as Digest>::OutputSize>, Error> {
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
let mut rdr = tokio::fs::File::open(path).await?;
|
||||
let mut hasher = Sha256::new();
|
||||
let mut buf = [0; 1024];
|
||||
let mut read;
|
||||
while {
|
||||
read = rdr.read(&mut buf).await?;
|
||||
read != 0
|
||||
} {
|
||||
hasher.update(&buf[0..read]);
|
||||
}
|
||||
Ok(hasher.finalize())
|
||||
}
|
||||
|
||||
/// Copy `src` to `dst`, then re-read both files and compare SHA-256 digests
/// to detect silent corruption (the migration targets flaky media).
/// Fails with a Filesystem error if the copy fails or the hashes differ.
async fn validated_copy(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> Result<(), Error> {
    let src_path = src.as_ref();
    let dst_path = dst.as_ref();
    tokio::fs::copy(src_path, dst_path).await.with_ctx(|_| {
        (
            crate::ErrorKind::Filesystem,
            format!("cp {} -> {}", src_path.display(), dst_path.display()),
        )
    })?;
    // Hash both sides concurrently.
    let (src_hash, dst_hash) = tokio::try_join!(shasum(src_path), shasum(dst_path))?;
    if src_hash != dst_hash {
        Err(Error::new(
            eyre!(
                "source hash does not match destination hash for {}",
                dst_path.display()
            ),
            crate::ErrorKind::Filesystem,
        ))
    } else {
        Ok(())
    }
}
|
||||
|
||||
/// Recursively copy `src` into `dst`, preserving permissions and ownership,
/// adding each copied file's size to `ctr` (drives the progress UI).
/// Regular files go through `validated_copy` with up to 10 retries; symlinks
/// are recreated without touching their permissions. Returns a boxed future
/// because the function recurses into subdirectories.
fn dir_copy<'a, P0: AsRef<Path> + 'a + Send + Sync, P1: AsRef<Path> + 'a + Send + Sync>(
    src: P0,
    dst: P1,
    ctr: &'a AtomicU64,
) -> BoxFuture<'a, Result<(), Error>> {
    async move {
        let m = tokio::fs::metadata(&src).await?;
        let dst_path = dst.as_ref();
        tokio::fs::create_dir_all(&dst_path).await.with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                format!("mkdir {}", dst_path.display()),
            )
        })?;
        tokio::fs::set_permissions(&dst_path, m.permissions())
            .await
            .with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    format!("chmod {}", dst_path.display()),
                )
            })?;
        // chown is a blocking syscall; run it off the async worker thread.
        let tmp_dst_path = dst_path.to_owned();
        tokio::task::spawn_blocking(move || {
            nix::unistd::chown(
                &tmp_dst_path,
                Some(Uid::from_raw(m.uid())),
                Some(Gid::from_raw(m.gid())),
            )
        })
        .await
        .with_kind(crate::ErrorKind::Unknown)?
        .with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                format!("chown {}", dst_path.display()),
            )
        })?;
        tokio_stream::wrappers::ReadDirStream::new(tokio::fs::read_dir(src.as_ref()).await?)
            .map_err(|e| Error::new(e, crate::ErrorKind::Filesystem))
            .try_for_each(|e| async move {
                let m = e.metadata().await?;
                let src_path = e.path();
                let dst_path = dst_path.join(e.file_name());
                if m.is_file() {
                    let len = m.len();
                    // Retry the validated copy a few times before giving up.
                    let mut cp_res = Ok(());
                    for _ in 0..10 {
                        cp_res = validated_copy(&src_path, &dst_path).await;
                        if cp_res.is_ok() {
                            break;
                        }
                    }
                    cp_res?;
                    let tmp_dst_path = dst_path.clone();
                    tokio::task::spawn_blocking(move || {
                        nix::unistd::chown(
                            &tmp_dst_path,
                            Some(Uid::from_raw(m.uid())),
                            Some(Gid::from_raw(m.gid())),
                        )
                    })
                    .await
                    .with_kind(crate::ErrorKind::Unknown)?
                    .with_ctx(|_| {
                        (
                            crate::ErrorKind::Filesystem,
                            format!("chown {}", dst_path.display()),
                        )
                    })?;
                    ctr.fetch_add(len, Ordering::Relaxed);
                } else if m.is_dir() {
                    dir_copy(src_path, dst_path, ctr).await?;
                } else if m.file_type().is_symlink() {
                    tokio::fs::symlink(
                        tokio::fs::read_link(&src_path).await.with_ctx(|_| {
                            (
                                crate::ErrorKind::Filesystem,
                                format!("readlink {}", src_path.display()),
                            )
                        })?,
                        &dst_path,
                    )
                    .await
                    .with_ctx(|_| {
                        (
                            crate::ErrorKind::Filesystem,
                            format!("cp -P {} -> {}", src_path.display(), dst_path.display()),
                        )
                    })?;
                    // Do not set permissions (see https://unix.stackexchange.com/questions/87200/change-permissions-for-a-symbolic-link)
                }
                Ok(())
            })
            .await?;
        Ok(())
    }
    .boxed()
}
|
||||
|
||||
/// Migrate a mounted v0.2.x system: synchronously import the root CA and
/// main tor/account credentials into the secret store, then return a future
/// that copies every app's volume, tor key, and icon into the 0.3.x layout
/// while publishing progress through `ctx.recovery_status`.
#[instrument(skip(ctx))]
async fn recover_v2(
    ctx: SetupContext,
    embassy_password: &str,
    recovery_source: TmpMountGuard,
) -> Result<(OnionAddressV3, X509, BoxFuture<'static, Result<(), Error>>), Error> {
    let secret_store = ctx.secret_store().await?;

    // migrate the root CA
    let root_ca_key_path = recovery_source
        .as_ref()
        .join("root")
        .join("agent")
        .join("ca")
        .join("private")
        .join("embassy-root-ca.key.pem");
    let root_ca_cert_path = recovery_source
        .as_ref()
        .join("root")
        .join("agent")
        .join("ca")
        .join("certs")
        .join("embassy-root-ca.cert.pem");
    let (root_ca_key_bytes, root_ca_cert_bytes) = tokio::try_join!(
        tokio::fs::read(root_ca_key_path),
        tokio::fs::read(root_ca_cert_path)
    )?;
    let root_ca_key = openssl::pkey::PKey::private_key_from_pem(&root_ca_key_bytes)?;
    let root_ca_cert = openssl::x509::X509::from_pem(&root_ca_cert_bytes)?;
    crate::net::ssl::SslManager::import_root_ca(
        secret_store.clone(),
        root_ca_key,
        root_ca_cert.clone(),
    )
    .await?;

    // migrate the tor address
    let tor_key_path = recovery_source
        .as_ref()
        .join("var")
        .join("lib")
        .join("tor")
        .join("agent")
        .join("hs_ed25519_secret_key");
    let tor_key_bytes = tokio::fs::read(tor_key_path).await?;
    // The on-disk file has a 32-byte header; the key proper is the last 64 bytes.
    let mut tor_key_array_tmp = [0u8; 64];
    tor_key_array_tmp.clone_from_slice(&tor_key_bytes[32..]);
    let tor_key: TorSecretKeyV3 = tor_key_array_tmp.into();
    let key_vec = tor_key.as_bytes().to_vec();
    let password = argon2::hash_encoded(
        embassy_password.as_bytes(),
        &rand::random::<[u8; 16]>()[..],
        &argon2::Config::default(),
    )
    .with_kind(crate::ErrorKind::PasswordHashGeneration)?;
    let sqlite_pool = ctx.secret_store().await?;
    sqlx::query!(
        "REPLACE INTO account (id, password, tor_key) VALUES (?, ?, ?)",
        0,
        password,
        key_vec
    )
    .execute(&mut sqlite_pool.acquire().await?)
    .await?;

    // rest of migration as future
    let fut = async move {
        let db = ctx.db(&secret_store).await?;
        let mut handle = db.handle();
        // lock everything to avoid issues with renamed packages (bitwarden)
        crate::db::DatabaseModel::new()
            .lock(&mut handle, LockType::Write)
            .await?;

        let apps_yaml_path = recovery_source
            .as_ref()
            .join("root")
            .join("appmgr")
            .join("apps.yaml");
        // Minimal view of the legacy app registry.
        #[derive(Deserialize)]
        struct LegacyAppInfo {
            title: String,
            version: Version,
        }
        let packages: BTreeMap<PackageId, LegacyAppInfo> =
            from_yaml_async_reader(File::open(&apps_yaml_path).await.with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    apps_yaml_path.display().to_string(),
                )
            })?)
            .await?;

        // Pre-compute the total size for accurate progress reporting.
        let volume_path = recovery_source.as_ref().join("root/volumes");
        let mut total_bytes = 0;
        for (pkg_id, _) in &packages {
            let volume_src_path = volume_path.join(&pkg_id);
            total_bytes += dir_size(&volume_src_path).await.with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    volume_src_path.display().to_string(),
                )
            })?;
        }
        *ctx.recovery_status.write().await = Some(Ok(RecoveryStatus {
            bytes_transferred: 0,
            total_bytes,
            complete: false,
        }));
        let bytes_transferred = AtomicU64::new(0);
        let volume_id = VolumeId::Custom(Id::try_from("main".to_owned())?);
        for (pkg_id, info) in packages {
            let (src_id, dst_id) = rename_pkg_id(pkg_id);
            let volume_src_path = volume_path.join(&src_id);
            let volume_dst_path = data_dir(&ctx.datadir, &dst_id, &volume_id);
            // Copy the volume while a sibling task publishes progress once a
            // second; the progress loop is cancelled when the copy finishes.
            tokio::select!(
                res = dir_copy(
                    &volume_src_path,
                    &volume_dst_path,
                    &bytes_transferred
                ) => res?,
                _ = async {
                    loop {
                        tokio::time::sleep(Duration::from_secs(1)).await;
                        *ctx.recovery_status.write().await = Some(Ok(RecoveryStatus {
                            bytes_transferred: bytes_transferred.load(Ordering::Relaxed),
                            total_bytes,
                            complete: false
                        }));
                    }
                } => (),
            );
            // Migrate the app's tor hidden-service key (strip 32-byte header).
            let tor_src_path = recovery_source
                .as_ref()
                .join("var/lib/tor")
                .join(format!("app-{}", src_id))
                .join("hs_ed25519_secret_key");
            let key_vec = tokio::fs::read(&tor_src_path).await.with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    tor_src_path.display().to_string(),
                )
            })?;
            ensure_code!(
                key_vec.len() == 96,
                crate::ErrorKind::Tor,
                "{} not 96 bytes",
                tor_src_path.display()
            );
            let key_vec = key_vec[32..].to_vec();
            sqlx::query!(
                "REPLACE INTO tor (package, interface, key) VALUES (?, 'main', ?)",
                *dst_id,
                key_vec,
            )
            .execute(&mut secret_store.acquire().await?)
            .await?;
            // Copy the app icon into the public package directory.
            let icon_leaf = AsRef::<Path>::as_ref(&dst_id)
                .join(info.version.as_str())
                .join("icon.png");
            let icon_src_path = recovery_source
                .as_ref()
                .join("root/agent/icons")
                .join(format!("{}.png", src_id));
            let icon_dst_path = ctx.datadir.join(PKG_PUBLIC_DIR).join(&icon_leaf);
            if let Some(parent) = icon_dst_path.parent() {
                tokio::fs::create_dir_all(&parent)
                    .await
                    .with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
            }
            tokio::fs::copy(&icon_src_path, &icon_dst_path)
                .await
                .with_ctx(|_| {
                    (
                        crate::ErrorKind::Filesystem,
                        format!(
                            "cp {} -> {}",
                            icon_src_path.display(),
                            icon_dst_path.display()
                        ),
                    )
                })?;
            let icon_url = Path::new("/public/package-data").join(&icon_leaf);
            // Record the package so the UI can offer to reinstall it.
            crate::db::DatabaseModel::new()
                .recovered_packages()
                .idx_model(&dst_id)
                .put(
                    &mut handle,
                    &RecoveredPackageInfo {
                        title: info.title,
                        icon: icon_url.display().to_string(),
                        version: info.version,
                    },
                )
                .await?;
        }

        secret_store.close().await;
        recovery_source.unmount().await?;
        Ok(())
    };
    Ok((
        tor_key.public().get_onion_address(),
        root_ca_cert,
        fut.boxed(),
    ))
}
|
||||
|
||||
/// Map a 0.2.x package id to its 0.3.x id, as `(source, destination)`.
/// The only rename in practice is `bitwarden` -> `vaultwarden`; every other
/// id maps to itself.
fn rename_pkg_id(src_pkg_id: PackageId) -> (PackageId, PackageId) {
    match &*src_pkg_id {
        "bitwarden" => (src_pkg_id, "vaultwarden".parse().unwrap()),
        _ => (src_pkg_id.clone(), src_pkg_id),
    }
}
|
||||
@@ -1,117 +0,0 @@
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use patch_db::{LockType, PatchDbHandle};
|
||||
use rpc_toolkit::command;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::disk::main::export;
|
||||
use crate::init::SYSTEM_REBUILD_PATH;
|
||||
use crate::sound::SHUTDOWN;
|
||||
use crate::util::{display_none, Invoke};
|
||||
use crate::Error;
|
||||
|
||||
/// Parameters for a full system power-off or reboot, sent over the shutdown
/// channel and executed once the async runtime has wound down.
#[derive(Debug, Clone)]
pub struct Shutdown {
    // data directory whose volume group is exported before power-off
    pub datadir: PathBuf,
    // guid of the data disk to export; None skips the export step
    pub disk_guid: Option<Arc<String>>,
    // true => reboot, false => halt
    pub restart: bool,
    // NOTE(review): presumably held so the db write lock survives until the
    // process exits — confirm against the shutdown path.
    pub db_handle: Option<Arc<PatchDbHandle>>,
}
|
||||
impl Shutdown {
    /// BLOCKING
    ///
    /// Run the full shutdown sequence: stop journald and docker, export the
    /// data volume group, play the shutdown chime, then reboot or halt the
    /// machine via the system `reboot`/`shutdown` binaries. Errors in the
    /// best-effort steps are logged and ignored so shutdown always proceeds.
    pub fn execute(&self) {
        use std::process::Command;

        // A fresh single-threaded runtime: this runs after the main runtime
        // has been torn down.
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap();
        rt.block_on(async {
            use tokio::process::Command;

            if let Err(e) = Command::new("systemctl")
                .arg("stop")
                .arg("systemd-journald")
                .invoke(crate::ErrorKind::Journald)
                .await
            {
                tracing::error!("Error Stopping Journald: {}", e);
                tracing::debug!("{:?}", e);
            }
            if let Err(e) = Command::new("systemctl")
                .arg("stop")
                .arg("docker")
                .invoke(crate::ErrorKind::Docker)
                .await
            {
                tracing::error!("Error Stopping Docker: {}", e);
                tracing::debug!("{:?}", e);
            }
            if let Some(guid) = &self.disk_guid {
                if let Err(e) = export(guid, &self.datadir).await {
                    tracing::error!("Error Exporting Volume Group: {}", e);
                    tracing::debug!("{:?}", e);
                }
            }
            if let Err(e) = SHUTDOWN.play().await {
                tracing::error!("Error Playing Shutdown Song: {}", e);
                tracing::debug!("{:?}", e);
            }
        });
        drop(rt);
        // From here on, use the blocking std Command (runtime is gone).
        if self.restart {
            Command::new("reboot").spawn().unwrap().wait().unwrap();
        } else {
            Command::new("shutdown")
                .arg("-h")
                .arg("now")
                .spawn()
                .unwrap()
                .wait()
                .unwrap();
        }
    }
}
|
||||
|
||||
/// RPC `shutdown` — take the database write lock (blocking further mutations),
/// then signal the main loop to power the device off. The db handle is passed
/// along so the lock is held until the process exits.
#[command(display(display_none))]
pub async fn shutdown(#[context] ctx: RpcContext) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    crate::db::DatabaseModel::new()
        .lock(&mut db, LockType::Write)
        .await?;
    ctx.shutdown
        .send(Some(Shutdown {
            datadir: ctx.datadir.clone(),
            disk_guid: Some(ctx.disk_guid.clone()),
            restart: false,
            db_handle: Some(Arc::new(db)),
        }))
        .map_err(|_| ())
        .expect("receiver dropped");
    Ok(())
}
|
||||
|
||||
/// RPC `restart` — identical to `shutdown` except the device reboots instead
/// of powering off (`restart: true`).
#[command(display(display_none))]
pub async fn restart(#[context] ctx: RpcContext) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    crate::db::DatabaseModel::new()
        .lock(&mut db, LockType::Write)
        .await?;
    ctx.shutdown
        .send(Some(Shutdown {
            datadir: ctx.datadir.clone(),
            disk_guid: Some(ctx.disk_guid.clone()),
            restart: true,
            db_handle: Some(Arc::new(db)),
        }))
        .map_err(|_| ())
        .expect("receiver dropped");
    Ok(())
}
|
||||
|
||||
/// RPC `rebuild` — drop a marker file that triggers a system rebuild on the
/// next boot, then reboot via `restart`.
#[command(display(display_none))]
pub async fn rebuild(#[context] ctx: RpcContext) -> Result<(), Error> {
    tokio::fs::write(SYSTEM_REBUILD_PATH, b"").await?;
    restart(ctx).await
}
|
||||
@@ -1,195 +0,0 @@
|
||||
use std::fs::Metadata;
|
||||
use std::future::Future;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::UNIX_EPOCH;
|
||||
|
||||
use digest::Digest;
|
||||
use http::response::Builder;
|
||||
use hyper::service::{make_service_fn, service_fn};
|
||||
use hyper::{Body, Error as HyperError, Method, Request, Response, Server, StatusCode};
|
||||
use tokio::fs::File;
|
||||
use tokio_util::codec::{BytesCodec, FramedRead};
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::install::PKG_PUBLIC_DIR;
|
||||
use crate::middleware::auth::HasValidSession;
|
||||
use crate::{Error, ErrorKind, ResultExt};
|
||||
|
||||
static NOT_FOUND: &[u8] = b"Not Found";
|
||||
static NOT_AUTHORIZED: &[u8] = b"Not Authorized";
|
||||
|
||||
/// Build the static file server future: binds to `ctx.bind_static`, routes
/// every request through `file_server_router`, converts routing errors into a
/// 500 response, and shuts down gracefully when `shutdown` resolves.
pub fn init(
    ctx: RpcContext,
    shutdown: impl Future<Output = ()> + Send + 'static,
) -> impl Future<Output = Result<(), HyperError>> {
    let addr = ctx.bind_static;

    let make_service = make_service_fn(move |_| {
        // One clone per connection, then one per request.
        let ctx = ctx.clone();
        async move {
            Ok::<_, HyperError>(service_fn(move |req| {
                let ctx = ctx.clone();
                async move {
                    match file_server_router(req, ctx).await {
                        Ok(x) => Ok::<_, HyperError>(x),
                        Err(err) => {
                            tracing::error!("{:?}", err);
                            Ok(server_error())
                        }
                    }
                }
            }))
        }
    });

    Server::bind(&addr)
        .serve(make_service)
        .with_graceful_shutdown(shutdown)
}
|
||||
|
||||
async fn file_server_router(req: Request<Body>, ctx: RpcContext) -> Result<Response<Body>, Error> {
|
||||
let (request_parts, _body) = req.into_parts();
|
||||
let valid_session = HasValidSession::from_request_parts(&request_parts, &ctx).await;
|
||||
match (
|
||||
valid_session,
|
||||
request_parts.method,
|
||||
request_parts
|
||||
.uri
|
||||
.path()
|
||||
.strip_prefix("/")
|
||||
.unwrap_or(request_parts.uri.path())
|
||||
.split_once("/"),
|
||||
) {
|
||||
(Err(error), _, _) => {
|
||||
tracing::warn!("unauthorized for {} @{:?}", error, request_parts.uri.path());
|
||||
tracing::debug!("{:?}", error);
|
||||
return Ok(Response::builder()
|
||||
.status(StatusCode::UNAUTHORIZED)
|
||||
.body(NOT_AUTHORIZED.into())
|
||||
.unwrap());
|
||||
}
|
||||
(Ok(valid_session), Method::GET, Some(("package-data", path))) => {
|
||||
file_send(
|
||||
valid_session,
|
||||
&ctx,
|
||||
ctx.datadir.join(PKG_PUBLIC_DIR).join(path),
|
||||
)
|
||||
.await
|
||||
}
|
||||
(Ok(valid_session), Method::GET, Some(("eos", "local.crt"))) => {
|
||||
file_send(
|
||||
valid_session,
|
||||
&ctx,
|
||||
PathBuf::from(crate::net::ssl::ROOT_CA_STATIC_PATH),
|
||||
)
|
||||
.await
|
||||
}
|
||||
_ => Ok(not_found()),
|
||||
}
|
||||
}
|
||||
|
||||
/// HTTP status code 404
|
||||
fn not_found() -> Response<Body> {
|
||||
Response::builder()
|
||||
.status(StatusCode::NOT_FOUND)
|
||||
.body(NOT_FOUND.into())
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// HTTP status code 500
|
||||
fn server_error() -> Response<Body> {
|
||||
Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.body("".into())
|
||||
.unwrap()
|
||||
}
|
||||
/// Stream the file at `path` as an HTTP response with ETag, Content-Type and
/// Content-Length headers. Missing or empty files yield a 404. The session
/// argument is unused here; requiring it forces callers to authenticate first.
async fn file_send(
    _valid_session: HasValidSession,
    _ctx: &RpcContext,
    path: impl AsRef<Path>,
) -> Result<Response<Body>, Error> {
    // Serve a file by asynchronously reading it by chunks using tokio-util crate.

    let path = path.as_ref();

    if let Ok(file) = File::open(path).await {
        let metadata = file.metadata().await.with_kind(ErrorKind::Filesystem)?;
        // Treat empty/non-regular files as not found.
        let _is_non_empty = match IsNonEmptyFile::new(&metadata, path) {
            Some(a) => a,
            None => return Ok(not_found()),
        };

        let mut builder = Response::builder().status(StatusCode::OK);
        builder = with_e_tag(path, &metadata, builder)?;
        builder = with_content_type(path, builder);
        builder = with_content_length(&metadata, builder);
        let stream = FramedRead::new(file, BytesCodec::new());
        let body = Body::wrap_stream(stream);
        return Ok(builder.body(body).with_kind(ErrorKind::Network)?);
    }
    tracing::debug!("File not found: {:?}", path);

    Ok(not_found())
}
|
||||
|
||||
struct IsNonEmptyFile(());
|
||||
impl IsNonEmptyFile {
|
||||
fn new(metadata: &Metadata, path: &Path) -> Option<Self> {
|
||||
let length = metadata.len();
|
||||
if !metadata.is_file() || length == 0 {
|
||||
tracing::debug!("File is empty: {:?}", path);
|
||||
return None;
|
||||
}
|
||||
Some(Self(()))
|
||||
}
|
||||
}
|
||||
|
||||
fn with_e_tag(path: &Path, metadata: &Metadata, builder: Builder) -> Result<Builder, Error> {
|
||||
let modified = metadata.modified().with_kind(ErrorKind::Filesystem)?;
|
||||
let mut hasher = sha2::Sha256::new();
|
||||
hasher.update(format!("{:?}", path).as_bytes());
|
||||
hasher.update(
|
||||
format!(
|
||||
"{}",
|
||||
modified
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
)
|
||||
.as_bytes(),
|
||||
);
|
||||
let res = hasher.finalize();
|
||||
Ok(builder.header(
|
||||
"ETag",
|
||||
base32::encode(base32::Alphabet::RFC4648 { padding: false }, res.as_slice()).to_lowercase(),
|
||||
))
|
||||
}
|
||||
///https://en.wikipedia.org/wiki/Media_type
|
||||
fn with_content_type(path: &Path, builder: Builder) -> Builder {
|
||||
let content_type = match path.extension() {
|
||||
Some(os_str) => match os_str.to_str() {
|
||||
Some("apng") => "image/apng",
|
||||
Some("avif") => "image/avif",
|
||||
Some("flif") => "image/flif",
|
||||
Some("gif") => "image/gif",
|
||||
Some("jpg") | Some("jpeg") | Some("jfif") | Some("pjpeg") | Some("pjp") => "image/jpeg",
|
||||
Some("jxl") => "image/jxl",
|
||||
Some("png") => "image/png",
|
||||
Some("svg") => "image/svg+xml",
|
||||
Some("webp") => "image/webp",
|
||||
Some("mng") | Some("x-mng") => "image/x-mng",
|
||||
Some("css") => "text/css",
|
||||
Some("csv") => "text/csv",
|
||||
Some("html") => "text/html",
|
||||
Some("php") => "text/php",
|
||||
Some("plain") | Some("md") | Some("txt") => "text/plain",
|
||||
Some("xml") => "text/xml",
|
||||
None | Some(_) => "text/plain",
|
||||
},
|
||||
None => "text/plain",
|
||||
};
|
||||
builder.header("Content-Type", content_type)
|
||||
}
|
||||
/// Attach a Content-Length header equal to the file's size on disk.
fn with_content_length(metadata: &Metadata, builder: Builder) -> Builder {
    let len = metadata.len();
    builder.header("Content-Length", len)
}
|
||||
@@ -1,67 +0,0 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use patch_db::{HasModel, Model};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use self::health_check::HealthCheckId;
|
||||
use crate::dependencies::DependencyErrors;
|
||||
use crate::status::health_check::HealthCheckResult;
|
||||
|
||||
pub mod health_check;
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct Status {
|
||||
pub configured: bool,
|
||||
#[model]
|
||||
pub main: MainStatus,
|
||||
#[model]
|
||||
pub dependency_errors: DependencyErrors,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, HasModel)]
|
||||
#[serde(tag = "status")]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum MainStatus {
|
||||
Stopped,
|
||||
Stopping,
|
||||
Starting,
|
||||
Running {
|
||||
started: DateTime<Utc>,
|
||||
health: BTreeMap<HealthCheckId, HealthCheckResult>,
|
||||
},
|
||||
BackingUp {
|
||||
started: Option<DateTime<Utc>>,
|
||||
health: BTreeMap<HealthCheckId, HealthCheckResult>,
|
||||
},
|
||||
}
|
||||
impl MainStatus {
|
||||
pub fn running(&self) -> bool {
|
||||
match self {
|
||||
MainStatus::Starting
|
||||
| MainStatus::Running { .. }
|
||||
| MainStatus::BackingUp {
|
||||
started: Some(_), ..
|
||||
} => true,
|
||||
MainStatus::Stopped
|
||||
| MainStatus::Stopping
|
||||
| MainStatus::BackingUp { started: None, .. } => false,
|
||||
}
|
||||
}
|
||||
pub fn stop(&mut self) {
|
||||
match self {
|
||||
MainStatus::Starting | MainStatus::Running { .. } => {
|
||||
*self = MainStatus::Stopping;
|
||||
}
|
||||
MainStatus::BackingUp { started, .. } => {
|
||||
*started = None;
|
||||
}
|
||||
MainStatus::Stopped | MainStatus::Stopping => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl MainStatusModel {
|
||||
pub fn started(self) -> Model<Option<DateTime<Utc>>> {
|
||||
self.0.child("started")
|
||||
}
|
||||
}
|
||||
@@ -1,486 +0,0 @@
|
||||
use std::future::Future;
|
||||
use std::path::Path;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::ArgMatches;
|
||||
use color_eyre::eyre::{eyre, Result};
|
||||
use digest::Digest;
|
||||
use emver::Version;
|
||||
use futures::Stream;
|
||||
use lazy_static::lazy_static;
|
||||
use patch_db::{DbHandle, LockType, Revision};
|
||||
use regex::Regex;
|
||||
use reqwest::Url;
|
||||
use rpc_toolkit::command;
|
||||
use sha2::Sha256;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::pin;
|
||||
use tokio::process::Command;
|
||||
use tokio::time::Instant;
|
||||
use tokio_stream::StreamExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::db::model::UpdateProgress;
|
||||
use crate::db::util::WithRevision;
|
||||
use crate::disk::mount::filesystem::block_dev::BlockDev;
|
||||
use crate::disk::mount::filesystem::{FileSystem, ReadWrite};
|
||||
use crate::disk::mount::guard::TmpMountGuard;
|
||||
use crate::disk::BOOT_RW_PATH;
|
||||
use crate::notifications::NotificationLevel;
|
||||
use crate::sound::{BEP, UPDATE_FAILED_1, UPDATE_FAILED_2, UPDATE_FAILED_3, UPDATE_FAILED_4};
|
||||
use crate::update::latest_information::LatestInformation;
|
||||
use crate::util::Invoke;
|
||||
use crate::version::{Current, VersionT};
|
||||
use crate::{Error, ErrorKind, ResultExt};
|
||||
|
||||
mod latest_information;
|
||||
|
||||
lazy_static! {
|
||||
static ref UPDATED: AtomicBool = AtomicBool::new(false);
|
||||
}
|
||||
|
||||
/// An user/ daemon would call this to update the system to the latest version and do the updates available,
|
||||
/// and this will return something if there is an update, and in that case there will need to be a restart.
|
||||
#[command(rename = "update", display(display_update_result))]
|
||||
#[instrument(skip(ctx))]
|
||||
pub async fn update_system(
|
||||
#[context] ctx: RpcContext,
|
||||
#[arg(rename = "marketplace-url")] marketplace_url: Url,
|
||||
) -> Result<WithRevision<UpdateResult>, Error> {
|
||||
let noop = WithRevision {
|
||||
response: UpdateResult::NoUpdates,
|
||||
revision: None,
|
||||
};
|
||||
if UPDATED.load(Ordering::SeqCst) {
|
||||
return Ok(noop);
|
||||
}
|
||||
match maybe_do_update(ctx, marketplace_url).await? {
|
||||
None => Ok(noop),
|
||||
Some(r) => Ok(WithRevision {
|
||||
response: UpdateResult::Updating,
|
||||
revision: Some(r),
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// What is the status of the updates?
|
||||
#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum UpdateResult {
|
||||
NoUpdates,
|
||||
Updating,
|
||||
}
|
||||
|
||||
fn display_update_result(status: WithRevision<UpdateResult>, _: &ArgMatches<'_>) {
|
||||
match status.response {
|
||||
UpdateResult::Updating => {
|
||||
println!("Updating...");
|
||||
}
|
||||
UpdateResult::NoUpdates => {
|
||||
println!("No updates available");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const HEADER_KEY: &str = "x-eos-hash";
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum WritableDrives {
|
||||
Green,
|
||||
Blue,
|
||||
}
|
||||
impl WritableDrives {
|
||||
pub fn label(&self) -> &'static str {
|
||||
match self {
|
||||
Self::Green => "green",
|
||||
Self::Blue => "blue",
|
||||
}
|
||||
}
|
||||
pub fn block_dev(&self) -> &'static Path {
|
||||
Path::new(match self {
|
||||
Self::Green => "/dev/mmcblk0p3",
|
||||
Self::Blue => "/dev/mmcblk0p4",
|
||||
})
|
||||
}
|
||||
pub fn part_uuid(&self) -> &'static str {
|
||||
match self {
|
||||
Self::Green => "cb15ae4d-03",
|
||||
Self::Blue => "cb15ae4d-04",
|
||||
}
|
||||
}
|
||||
pub fn as_fs(&self) -> impl FileSystem {
|
||||
BlockDev::new(self.block_dev())
|
||||
}
|
||||
}
|
||||
|
||||
/// This will be where we are going to be putting the new update
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct NewLabel(pub WritableDrives);
|
||||
|
||||
/// This is our current label where the os is running
|
||||
pub struct CurrentLabel(pub WritableDrives);
|
||||
|
||||
lazy_static! {
|
||||
static ref PARSE_COLOR: Regex = Regex::new("LABEL=(\\w+)[ \t]+/").unwrap();
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx))]
|
||||
async fn maybe_do_update(
|
||||
ctx: RpcContext,
|
||||
marketplace_url: Url,
|
||||
) -> Result<Option<Arc<Revision>>, Error> {
|
||||
let mut db = ctx.db.handle();
|
||||
let latest_version = reqwest::get(format!(
|
||||
"{}/eos/v0/latest?eos-version={}&arch={}",
|
||||
marketplace_url,
|
||||
Current::new().semver(),
|
||||
platforms::TARGET_ARCH,
|
||||
))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?
|
||||
.json::<LatestInformation>()
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?
|
||||
.version;
|
||||
crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.lock(&mut db, LockType::Write)
|
||||
.await?;
|
||||
let current_version = crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.version()
|
||||
.get_mut(&mut db)
|
||||
.await?;
|
||||
if &latest_version <= ¤t_version {
|
||||
return Ok(None);
|
||||
}
|
||||
let mut tx = db.begin().await?;
|
||||
let mut status = crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.status_info()
|
||||
.get_mut(&mut tx)
|
||||
.await?;
|
||||
if status.update_progress.is_some() {
|
||||
return Err(Error::new(
|
||||
eyre!("Server is already updating!"),
|
||||
crate::ErrorKind::InvalidRequest,
|
||||
));
|
||||
}
|
||||
if status.updated {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let (new_label, _current_label) = query_mounted_label().await?;
|
||||
let (size, download) = download_file(
|
||||
ctx.db.handle(),
|
||||
&EosUrl {
|
||||
base: marketplace_url,
|
||||
version: latest_version.clone(),
|
||||
},
|
||||
new_label,
|
||||
)
|
||||
.await?;
|
||||
status.update_progress = Some(UpdateProgress {
|
||||
size,
|
||||
downloaded: 0,
|
||||
});
|
||||
status.save(&mut tx).await?;
|
||||
let rev = tx.commit(None).await?;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut db = ctx.db.handle();
|
||||
let res = do_update(download, new_label).await;
|
||||
let mut status = crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.status_info()
|
||||
.get_mut(&mut db)
|
||||
.await
|
||||
.expect("could not access status");
|
||||
status.update_progress = None;
|
||||
match res {
|
||||
Ok(()) => {
|
||||
status.updated = true;
|
||||
status.save(&mut db).await.expect("could not save status");
|
||||
BEP.play().await.expect("could not bep");
|
||||
BEP.play().await.expect("could not bep");
|
||||
BEP.play().await.expect("could not bep");
|
||||
}
|
||||
Err(e) => {
|
||||
status.save(&mut db).await.expect("could not save status");
|
||||
ctx.notification_manager
|
||||
.notify(
|
||||
&mut db,
|
||||
None,
|
||||
NotificationLevel::Error,
|
||||
"EmbassyOS Update Failed".to_owned(),
|
||||
format!("Update was not successful because of {}", e),
|
||||
(),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.expect("");
|
||||
// TODO: refactor sound lib to make compound tempos easier to deal with
|
||||
UPDATE_FAILED_1
|
||||
.play()
|
||||
.await
|
||||
.expect("could not play song: update failed 1");
|
||||
UPDATE_FAILED_2
|
||||
.play()
|
||||
.await
|
||||
.expect("could not play song: update failed 2");
|
||||
UPDATE_FAILED_3
|
||||
.play()
|
||||
.await
|
||||
.expect("could not play song: update failed 3");
|
||||
UPDATE_FAILED_4
|
||||
.play()
|
||||
.await
|
||||
.expect("could not play song: update failed 4");
|
||||
}
|
||||
}
|
||||
});
|
||||
Ok(rev)
|
||||
}
|
||||
|
||||
#[instrument(skip(download))]
|
||||
async fn do_update(
|
||||
download: impl Future<Output = Result<(), Error>>,
|
||||
new_label: NewLabel,
|
||||
) -> Result<(), Error> {
|
||||
download.await?;
|
||||
copy_machine_id(new_label).await?;
|
||||
copy_ssh_host_keys(new_label).await?;
|
||||
swap_boot_label(new_label).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
pub async fn query_mounted_label() -> Result<(NewLabel, CurrentLabel), Error> {
|
||||
let output = tokio::fs::read_to_string("/etc/fstab")
|
||||
.await
|
||||
.with_ctx(|_| (crate::ErrorKind::Filesystem, "/etc/fstab"))?;
|
||||
|
||||
match &PARSE_COLOR.captures(&output).ok_or_else(|| {
|
||||
Error::new(
|
||||
eyre!("Can't find pattern in {}", output),
|
||||
crate::ErrorKind::Filesystem,
|
||||
)
|
||||
})?[1]
|
||||
{
|
||||
x if x == WritableDrives::Green.label() => Ok((
|
||||
NewLabel(WritableDrives::Blue),
|
||||
CurrentLabel(WritableDrives::Green),
|
||||
)),
|
||||
x if x == WritableDrives::Blue.label() => Ok((
|
||||
NewLabel(WritableDrives::Green),
|
||||
CurrentLabel(WritableDrives::Blue),
|
||||
)),
|
||||
e => {
|
||||
return Err(Error::new(
|
||||
eyre!("Could not find a mounted resource for {}", e),
|
||||
crate::ErrorKind::Filesystem,
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct EosUrl {
|
||||
base: Url,
|
||||
version: Version,
|
||||
}
|
||||
impl std::fmt::Display for EosUrl {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}/eos/v0/eos.img?spec=={}&eos-version={}&arch={}",
|
||||
self.base,
|
||||
self.version,
|
||||
Current::new().semver(),
|
||||
platforms::TARGET_ARCH,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(db))]
|
||||
async fn download_file<'a, Db: DbHandle + 'a>(
|
||||
mut db: Db,
|
||||
eos_url: &EosUrl,
|
||||
new_label: NewLabel,
|
||||
) -> Result<(Option<u64>, impl Future<Output = Result<(), Error>> + 'a), Error> {
|
||||
let download_request = reqwest::get(eos_url.to_string())
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
let size = download_request
|
||||
.headers()
|
||||
.get("content-length")
|
||||
.and_then(|a| a.to_str().ok())
|
||||
.map(|l| l.parse())
|
||||
.transpose()?;
|
||||
Ok((size, async move {
|
||||
let hash_from_header: String = download_request
|
||||
.headers()
|
||||
.get(HEADER_KEY)
|
||||
.ok_or_else(|| Error::new(eyre!("No {} in headers", HEADER_KEY), ErrorKind::Network))?
|
||||
.to_str()
|
||||
.with_kind(ErrorKind::InvalidRequest)?
|
||||
.to_owned();
|
||||
let stream_download = download_request.bytes_stream();
|
||||
let file_sum = write_stream_to_label(&mut db, size, stream_download, new_label).await?;
|
||||
check_download(&hash_from_header, file_sum).await?;
|
||||
Ok(())
|
||||
}))
|
||||
}
|
||||
|
||||
#[instrument(skip(db, stream_download))]
|
||||
async fn write_stream_to_label<Db: DbHandle>(
|
||||
db: &mut Db,
|
||||
size: Option<u64>,
|
||||
stream_download: impl Stream<Item = Result<rpc_toolkit::hyper::body::Bytes, reqwest::Error>>,
|
||||
file: NewLabel,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
let block_dev = file.0.block_dev();
|
||||
let mut file = tokio::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.open(&block_dev)
|
||||
.await
|
||||
.with_kind(ErrorKind::Filesystem)?;
|
||||
let mut hasher = Sha256::new();
|
||||
pin!(stream_download);
|
||||
let mut downloaded = 0;
|
||||
let mut last_progress_update = Instant::now();
|
||||
while let Some(Ok(item)) = stream_download.next().await {
|
||||
file.write_all(&item)
|
||||
.await
|
||||
.with_kind(ErrorKind::Filesystem)?;
|
||||
downloaded += item.len() as u64;
|
||||
if last_progress_update.elapsed() > Duration::from_secs(1) {
|
||||
last_progress_update = Instant::now();
|
||||
crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.status_info()
|
||||
.update_progress()
|
||||
.put(db, &UpdateProgress { size, downloaded })
|
||||
.await?;
|
||||
}
|
||||
hasher.update(item);
|
||||
}
|
||||
file.flush().await.with_kind(ErrorKind::Filesystem)?;
|
||||
file.shutdown().await.with_kind(ErrorKind::Filesystem)?;
|
||||
drop(file);
|
||||
Ok(hasher.finalize().to_vec())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn check_download(hash_from_header: &str, file_digest: Vec<u8>) -> Result<(), Error> {
|
||||
if hex::decode(hash_from_header).with_kind(ErrorKind::Network)? != file_digest {
|
||||
return Err(Error::new(
|
||||
eyre!("Hash sum does not match source"),
|
||||
ErrorKind::Network,
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn copy_machine_id(new_label: NewLabel) -> Result<(), Error> {
|
||||
let new_guard = TmpMountGuard::mount(&new_label.0.as_fs(), ReadWrite).await?;
|
||||
tokio::fs::copy("/etc/machine-id", new_guard.as_ref().join("etc/machine-id")).await?;
|
||||
new_guard.unmount().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn copy_ssh_host_keys(new_label: NewLabel) -> Result<(), Error> {
|
||||
let new_guard = TmpMountGuard::mount(&new_label.0.as_fs(), ReadWrite).await?;
|
||||
tokio::fs::copy(
|
||||
"/etc/ssh/ssh_host_rsa_key",
|
||||
new_guard.as_ref().join("etc/ssh/ssh_host_rsa_key"),
|
||||
)
|
||||
.await?;
|
||||
tokio::fs::copy(
|
||||
"/etc/ssh/ssh_host_rsa_key.pub",
|
||||
new_guard.as_ref().join("etc/ssh/ssh_host_rsa_key.pub"),
|
||||
)
|
||||
.await?;
|
||||
tokio::fs::copy(
|
||||
"/etc/ssh/ssh_host_ecdsa_key",
|
||||
new_guard.as_ref().join("etc/ssh/ssh_host_ecdsa_key"),
|
||||
)
|
||||
.await?;
|
||||
tokio::fs::copy(
|
||||
"/etc/ssh/ssh_host_ecdsa_key.pub",
|
||||
new_guard.as_ref().join("etc/ssh/ssh_host_ecdsa_key.pub"),
|
||||
)
|
||||
.await?;
|
||||
tokio::fs::copy(
|
||||
"/etc/ssh/ssh_host_ed25519_key",
|
||||
new_guard.as_ref().join("etc/ssh/ssh_host_ed25519_key"),
|
||||
)
|
||||
.await?;
|
||||
tokio::fs::copy(
|
||||
"/etc/ssh/ssh_host_ed25519_key.pub",
|
||||
new_guard.as_ref().join("etc/ssh/ssh_host_ed25519_key.pub"),
|
||||
)
|
||||
.await?;
|
||||
new_guard.unmount().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn swap_boot_label(new_label: NewLabel) -> Result<(), Error> {
|
||||
let block_dev = new_label.0.block_dev();
|
||||
Command::new("e2label")
|
||||
.arg(block_dev)
|
||||
.arg(new_label.0.label())
|
||||
.invoke(crate::ErrorKind::BlockDevice)
|
||||
.await?;
|
||||
let mounted = TmpMountGuard::mount(&new_label.0.as_fs(), ReadWrite).await?;
|
||||
Command::new("sed")
|
||||
.arg("-i")
|
||||
.arg(&format!(
|
||||
"s/LABEL=\\(blue\\|green\\)/LABEL={}/g",
|
||||
new_label.0.label()
|
||||
))
|
||||
.arg(mounted.as_ref().join("etc/fstab"))
|
||||
.invoke(crate::ErrorKind::Filesystem)
|
||||
.await?;
|
||||
mounted.unmount().await?;
|
||||
Command::new("sed")
|
||||
.arg("-i")
|
||||
.arg(&format!(
|
||||
"s/PARTUUID=cb15ae4d-\\(03\\|04\\)/PARTUUID={}/g",
|
||||
new_label.0.part_uuid()
|
||||
))
|
||||
.arg(Path::new(BOOT_RW_PATH).join("cmdline.txt.orig"))
|
||||
.invoke(crate::ErrorKind::Filesystem)
|
||||
.await?;
|
||||
Command::new("sed")
|
||||
.arg("-i")
|
||||
.arg(&format!(
|
||||
"s/PARTUUID=cb15ae4d-\\(03\\|04\\)/PARTUUID={}/g",
|
||||
new_label.0.part_uuid()
|
||||
))
|
||||
.arg(Path::new(BOOT_RW_PATH).join("cmdline.txt"))
|
||||
.invoke(crate::ErrorKind::Filesystem)
|
||||
.await?;
|
||||
|
||||
UPDATED.store(true, Ordering::SeqCst);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Captured from doing an fstab with an embassy box and the cat from the /etc/fstab
|
||||
#[test]
|
||||
fn test_capture() {
|
||||
let output = r#"
|
||||
LABEL=blue / ext4 discard,errors=remount-ro 0 1
|
||||
LABEL=system-boot /media/boot-rw vfat defaults 0 1
|
||||
/media/boot-rw /boot none defaults,bind,ro 0 0
|
||||
LABEL=EMBASSY /embassy-os vfat defaults 0 1
|
||||
# a swapfile is not a swap partition, no line here
|
||||
# use dphys-swapfile swap[on|off] for that
|
||||
"#;
|
||||
assert_eq!(&PARSE_COLOR.captures(&output).unwrap()[1], "blue");
|
||||
}
|
||||
@@ -1,235 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
use futures::future::BoxFuture;
|
||||
use futures::{FutureExt, TryStreamExt};
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
|
||||
|
||||
use crate::ResultExt;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AsyncCompat<T>(pub T);
|
||||
impl<T> futures::io::AsyncRead for AsyncCompat<T>
|
||||
where
|
||||
T: tokio::io::AsyncRead,
|
||||
{
|
||||
fn poll_read(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
buf: &mut [u8],
|
||||
) -> std::task::Poll<std::io::Result<usize>> {
|
||||
let mut read_buf = ReadBuf::new(buf);
|
||||
tokio::io::AsyncRead::poll_read(
|
||||
unsafe { self.map_unchecked_mut(|a| &mut a.0) },
|
||||
cx,
|
||||
&mut read_buf,
|
||||
)
|
||||
.map(|res| res.map(|_| read_buf.filled().len()))
|
||||
}
|
||||
}
|
||||
impl<T> tokio::io::AsyncRead for AsyncCompat<T>
|
||||
where
|
||||
T: futures::io::AsyncRead,
|
||||
{
|
||||
fn poll_read(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
buf: &mut ReadBuf,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
futures::io::AsyncRead::poll_read(
|
||||
unsafe { self.map_unchecked_mut(|a| &mut a.0) },
|
||||
cx,
|
||||
buf.initialize_unfilled(),
|
||||
)
|
||||
.map(|res| res.map(|len| buf.set_filled(len)))
|
||||
}
|
||||
}
|
||||
impl<T> futures::io::AsyncWrite for AsyncCompat<T>
|
||||
where
|
||||
T: tokio::io::AsyncWrite,
|
||||
{
|
||||
fn poll_write(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> std::task::Poll<std::io::Result<usize>> {
|
||||
tokio::io::AsyncWrite::poll_write(unsafe { self.map_unchecked_mut(|a| &mut a.0) }, cx, buf)
|
||||
}
|
||||
fn poll_flush(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
tokio::io::AsyncWrite::poll_flush(unsafe { self.map_unchecked_mut(|a| &mut a.0) }, cx)
|
||||
}
|
||||
fn poll_close(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
tokio::io::AsyncWrite::poll_shutdown(unsafe { self.map_unchecked_mut(|a| &mut a.0) }, cx)
|
||||
}
|
||||
}
|
||||
impl<T> tokio::io::AsyncWrite for AsyncCompat<T>
|
||||
where
|
||||
T: futures::io::AsyncWrite,
|
||||
{
|
||||
fn poll_write(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> std::task::Poll<std::io::Result<usize>> {
|
||||
futures::io::AsyncWrite::poll_write(
|
||||
unsafe { self.map_unchecked_mut(|a| &mut a.0) },
|
||||
cx,
|
||||
buf,
|
||||
)
|
||||
}
|
||||
fn poll_flush(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
futures::io::AsyncWrite::poll_flush(unsafe { self.map_unchecked_mut(|a| &mut a.0) }, cx)
|
||||
}
|
||||
fn poll_shutdown(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
futures::io::AsyncWrite::poll_close(unsafe { self.map_unchecked_mut(|a| &mut a.0) }, cx)
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn from_yaml_async_reader<T, R>(mut reader: R) -> Result<T, crate::Error>
|
||||
where
|
||||
T: for<'de> serde::Deserialize<'de>,
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
let mut buffer = Vec::new();
|
||||
reader.read_to_end(&mut buffer).await?;
|
||||
serde_yaml::from_slice(&buffer)
|
||||
.map_err(color_eyre::eyre::Error::from)
|
||||
.with_kind(crate::ErrorKind::Deserialization)
|
||||
}
|
||||
|
||||
pub async fn to_yaml_async_writer<T, W>(mut writer: W, value: &T) -> Result<(), crate::Error>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
W: AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buffer = serde_yaml::to_vec(value).with_kind(crate::ErrorKind::Serialization)?;
|
||||
buffer.extend_from_slice(b"\n");
|
||||
writer.write_all(&buffer).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn from_toml_async_reader<T, R>(mut reader: R) -> Result<T, crate::Error>
|
||||
where
|
||||
T: for<'de> serde::Deserialize<'de>,
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
let mut buffer = Vec::new();
|
||||
reader.read_to_end(&mut buffer).await?;
|
||||
serde_toml::from_slice(&buffer)
|
||||
.map_err(color_eyre::eyre::Error::from)
|
||||
.with_kind(crate::ErrorKind::Deserialization)
|
||||
}
|
||||
|
||||
pub async fn to_toml_async_writer<T, W>(mut writer: W, value: &T) -> Result<(), crate::Error>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
W: AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buffer = serde_toml::to_vec(value).with_kind(crate::ErrorKind::Serialization)?;
|
||||
buffer.extend_from_slice(b"\n");
|
||||
writer.write_all(&buffer).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn from_cbor_async_reader<T, R>(mut reader: R) -> Result<T, crate::Error>
|
||||
where
|
||||
T: for<'de> serde::Deserialize<'de>,
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
let mut buffer = Vec::new();
|
||||
reader.read_to_end(&mut buffer).await?;
|
||||
serde_cbor::de::from_reader(buffer.as_slice())
|
||||
.map_err(color_eyre::eyre::Error::from)
|
||||
.with_kind(crate::ErrorKind::Deserialization)
|
||||
}
|
||||
|
||||
pub async fn from_json_async_reader<T, R>(mut reader: R) -> Result<T, crate::Error>
|
||||
where
|
||||
T: for<'de> serde::Deserialize<'de>,
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
let mut buffer = Vec::new();
|
||||
reader.read_to_end(&mut buffer).await?;
|
||||
serde_json::from_slice(&buffer)
|
||||
.map_err(color_eyre::eyre::Error::from)
|
||||
.with_kind(crate::ErrorKind::Deserialization)
|
||||
}
|
||||
|
||||
pub async fn to_json_async_writer<T, W>(mut writer: W, value: &T) -> Result<(), crate::Error>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
W: AsyncWrite + Unpin,
|
||||
{
|
||||
let buffer = serde_json::to_string(value).with_kind(crate::ErrorKind::Serialization)?;
|
||||
writer.write_all(&buffer.as_bytes()).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn to_json_pretty_async_writer<T, W>(mut writer: W, value: &T) -> Result<(), crate::Error>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
W: AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buffer =
|
||||
serde_json::to_string_pretty(value).with_kind(crate::ErrorKind::Serialization)?;
|
||||
buffer.push_str("\n");
|
||||
writer.write_all(&buffer.as_bytes()).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn copy_and_shutdown<R: AsyncRead + Unpin, W: AsyncWrite + Unpin>(
|
||||
r: &mut R,
|
||||
mut w: W,
|
||||
) -> Result<(), std::io::Error> {
|
||||
tokio::io::copy(r, &mut w).await?;
|
||||
w.flush().await?;
|
||||
w.shutdown().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn dir_size<'a, P: AsRef<Path> + 'a + Send + Sync>(
|
||||
path: P,
|
||||
) -> BoxFuture<'a, Result<u64, std::io::Error>> {
|
||||
async move {
|
||||
tokio_stream::wrappers::ReadDirStream::new(tokio::fs::read_dir(path.as_ref()).await?)
|
||||
.try_fold(0, |acc, e| async move {
|
||||
let m = e.metadata().await?;
|
||||
Ok(acc
|
||||
+ if m.is_file() {
|
||||
m.len()
|
||||
} else if m.is_dir() {
|
||||
dir_size(e.path()).await?
|
||||
} else {
|
||||
0
|
||||
})
|
||||
})
|
||||
.await
|
||||
}
|
||||
.boxed()
|
||||
}
|
||||
|
||||
pub fn response_to_reader(response: reqwest::Response) -> impl AsyncRead + Unpin {
|
||||
tokio_util::io::StreamReader::new(response.bytes_stream().map_err(|e| {
|
||||
std::io::Error::new(
|
||||
if e.is_connect() {
|
||||
std::io::ErrorKind::ConnectionRefused
|
||||
} else if e.is_timeout() {
|
||||
std::io::ErrorKind::TimedOut
|
||||
} else {
|
||||
std::io::ErrorKind::Other
|
||||
},
|
||||
e,
|
||||
)
|
||||
}))
|
||||
}
|
||||
@@ -1,141 +0,0 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use color_eyre::eyre::eyre;
|
||||
use patch_db::json_ptr::JsonPointer;
|
||||
use patch_db::{DbHandle, LockType};
|
||||
use rpc_toolkit::command;
|
||||
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
mod v0_3_0;
|
||||
mod v0_3_0_1;
|
||||
mod v0_3_0_2;
|
||||
|
||||
pub type Current = v0_3_0_2::Version;
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize)]
|
||||
#[serde(untagged)]
|
||||
enum Version {
|
||||
V0_3_0(Wrapper<v0_3_0::Version>),
|
||||
V0_3_0_1(Wrapper<v0_3_0_1::Version>),
|
||||
V0_3_0_2(Wrapper<v0_3_0_2::Version>),
|
||||
Other(emver::Version),
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait VersionT
|
||||
where
|
||||
Self: Sized + Send + Sync,
|
||||
{
|
||||
type Previous: VersionT;
|
||||
fn new() -> Self;
|
||||
fn semver(&self) -> emver::Version;
|
||||
fn compat(&self) -> &'static emver::VersionRange;
|
||||
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>;
|
||||
async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>;
|
||||
async fn commit<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
|
||||
crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.eos_version_compat()
|
||||
.put(db, &self.compat())
|
||||
.await?;
|
||||
crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.version()
|
||||
.put(db, &self.semver().into())
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
async fn migrate_to<V: VersionT, Db: DbHandle>(
|
||||
&self,
|
||||
version: &V,
|
||||
db: &mut Db,
|
||||
) -> Result<(), Error> {
|
||||
match self.semver().cmp(&version.semver()) {
|
||||
Ordering::Greater => self.rollback_to_unchecked(version, db).await,
|
||||
Ordering::Less => version.migrate_from_unchecked(self, db).await,
|
||||
Ordering::Equal => Ok(()),
|
||||
}
|
||||
}
|
||||
async fn migrate_from_unchecked<V: VersionT, Db: DbHandle>(
|
||||
&self,
|
||||
version: &V,
|
||||
db: &mut Db,
|
||||
) -> Result<(), Error> {
|
||||
let previous = Self::Previous::new();
|
||||
if version.semver() != previous.semver() {
|
||||
previous.migrate_from_unchecked(version, db).await?;
|
||||
}
|
||||
tracing::info!("{} -> {}", previous.semver(), self.semver(),);
|
||||
self.up(db).await?;
|
||||
self.commit(db).await?;
|
||||
Ok(())
|
||||
}
|
||||
async fn rollback_to_unchecked<V: VersionT, Db: DbHandle>(
|
||||
&self,
|
||||
version: &V,
|
||||
db: &mut Db,
|
||||
) -> Result<(), Error> {
|
||||
let previous = Self::Previous::new();
|
||||
tracing::info!("{} -> {}", self.semver(), previous.semver(),);
|
||||
self.down(db).await?;
|
||||
previous.commit(db).await?;
|
||||
if version.semver() != previous.semver() {
|
||||
previous.rollback_to_unchecked(version, db).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
struct Wrapper<T>(T);
|
||||
impl<T> serde::Serialize for Wrapper<T>
|
||||
where
|
||||
T: VersionT,
|
||||
{
|
||||
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
|
||||
self.0.semver().serialize(serializer)
|
||||
}
|
||||
}
|
||||
impl<'de, T> serde::Deserialize<'de> for Wrapper<T>
|
||||
where
|
||||
T: VersionT,
|
||||
{
|
||||
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
|
||||
let v = crate::util::Version::deserialize(deserializer)?;
|
||||
let version = T::new();
|
||||
if &*v == &version.semver() {
|
||||
Ok(Wrapper(version))
|
||||
} else {
|
||||
Err(serde::de::Error::custom("Mismatched Version"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn init<Db: DbHandle>(db: &mut Db) -> Result<(), Error> {
|
||||
let ptr: JsonPointer = "/server-info/version"
|
||||
.parse()
|
||||
.with_kind(crate::ErrorKind::Database)?;
|
||||
db.lock(ptr.clone(), LockType::Write).await?;
|
||||
let version: Version = db.get(&ptr).await?;
|
||||
match version {
|
||||
Version::V0_3_0(v) => v.0.migrate_to(&Current::new(), db).await?,
|
||||
Version::V0_3_0_1(v) => v.0.migrate_to(&Current::new(), db).await?,
|
||||
Version::V0_3_0_2(v) => v.0.migrate_to(&Current::new(), db).await?,
|
||||
Version::Other(_) => {
|
||||
return Err(Error::new(
|
||||
eyre!("Cannot downgrade"),
|
||||
crate::ErrorKind::InvalidRequest,
|
||||
))
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub const COMMIT_HASH: &'static str =
|
||||
git_version::git_version!(args = ["--always", "--abbrev=40", "--dirty=-modified"]);
|
||||
|
||||
#[command(rename = "git-info", local, metadata(authenticated = false))]
|
||||
pub fn git_info() -> Result<&'static str, Error> {
|
||||
Ok(COMMIT_HASH)
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
use emver::VersionRange;
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
use super::*;
|
||||
|
||||
const V0_3_0: emver::Version = emver::Version::new(0, 3, 0, 0);
|
||||
lazy_static! {
|
||||
pub static ref V0_3_0_COMPAT: VersionRange = VersionRange::Conj(
|
||||
Box::new(VersionRange::Anchor(
|
||||
emver::GTE,
|
||||
emver::Version::new(0, 3, 0, 0),
|
||||
)),
|
||||
Box::new(VersionRange::Anchor(emver::LTE, Current::new().semver())),
|
||||
);
|
||||
}
|
||||
|
||||
pub struct Version;
|
||||
#[async_trait]
|
||||
impl VersionT for Version {
|
||||
type Previous = v0_3_0::Version;
|
||||
fn new() -> Self {
|
||||
Version
|
||||
}
|
||||
fn semver(&self) -> emver::Version {
|
||||
V0_3_0
|
||||
}
|
||||
fn compat(&self) -> &'static VersionRange {
|
||||
&*V0_3_0_COMPAT
|
||||
}
|
||||
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
use emver::VersionRange;
|
||||
use tokio::process::Command;
|
||||
|
||||
use super::*;
|
||||
use crate::disk::quirks::{fetch_quirks, save_quirks, update_quirks};
|
||||
use crate::disk::BOOT_RW_PATH;
|
||||
use crate::update::query_mounted_label;
|
||||
use crate::util::Invoke;
|
||||
|
||||
const V0_3_0_1: emver::Version = emver::Version::new(0, 3, 0, 1);
|
||||
|
||||
pub struct Version;
|
||||
#[async_trait]
|
||||
impl VersionT for Version {
|
||||
type Previous = v0_3_0::Version;
|
||||
fn new() -> Self {
|
||||
Version
|
||||
}
|
||||
fn semver(&self) -> emver::Version {
|
||||
V0_3_0_1
|
||||
}
|
||||
fn compat(&self) -> &'static VersionRange {
|
||||
&*v0_3_0::V0_3_0_COMPAT
|
||||
}
|
||||
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
|
||||
let (_, current) = query_mounted_label().await?;
|
||||
Command::new("sed")
|
||||
.arg("-i")
|
||||
.arg(&format!(
|
||||
"s/PARTUUID=cb15ae4d-\\(03\\|04\\)/PARTUUID={}/g",
|
||||
current.0.part_uuid()
|
||||
))
|
||||
.arg(Path::new(BOOT_RW_PATH).join("cmdline.txt.orig"))
|
||||
.invoke(crate::ErrorKind::Filesystem)
|
||||
.await?;
|
||||
let mut q = fetch_quirks().await?;
|
||||
update_quirks(&mut q).await?;
|
||||
save_quirks(&q).await?;
|
||||
Ok(())
|
||||
}
|
||||
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
use emver::VersionRange;
|
||||
use tokio::process::Command;
|
||||
|
||||
use super::*;
|
||||
use crate::disk::quirks::{fetch_quirks, save_quirks, update_quirks};
|
||||
use crate::disk::BOOT_RW_PATH;
|
||||
use crate::update::query_mounted_label;
|
||||
use crate::util::Invoke;
|
||||
|
||||
const V0_3_0_2: emver::Version = emver::Version::new(0, 3, 0, 2);
|
||||
|
||||
pub struct Version;
|
||||
#[async_trait]
|
||||
impl VersionT for Version {
|
||||
type Previous = v0_3_0_1::Version;
|
||||
fn new() -> Self {
|
||||
Version
|
||||
}
|
||||
fn semver(&self) -> emver::Version {
|
||||
V0_3_0_2
|
||||
}
|
||||
fn compat(&self) -> &'static VersionRange {
|
||||
&*v0_3_0::V0_3_0_COMPAT
|
||||
}
|
||||
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
19
basename.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
PLATFORM="$(if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)"
|
||||
VERSION="$(cat ./VERSION.txt)"
|
||||
GIT_HASH="$(cat ./GIT_HASH.txt)"
|
||||
if [[ "$GIT_HASH" =~ ^@ ]]; then
|
||||
GIT_HASH=unknown
|
||||
else
|
||||
GIT_HASH="$(echo -n "$GIT_HASH" | head -c 7)"
|
||||
fi
|
||||
STARTOS_ENV="$(cat ./ENVIRONMENT.txt)"
|
||||
VERSION_FULL="${VERSION}-${GIT_HASH}"
|
||||
if [ -n "$STARTOS_ENV" ]; then
|
||||
VERSION_FULL="$VERSION_FULL~${STARTOS_ENV}"
|
||||
fi
|
||||
|
||||
echo -n "startos-${VERSION_FULL}_${PLATFORM}"
|
||||
25
build-cargo-dep.sh
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
shopt -s expand_aliases
|
||||
|
||||
if [ "$0" != "./build-cargo-dep.sh" ]; then
|
||||
>&2 echo "Must be run from start-os directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
USE_TTY=
|
||||
if tty -s; then
|
||||
USE_TTY="-it"
|
||||
fi
|
||||
|
||||
if [ -z "$ARCH" ]; then
|
||||
ARCH=$(uname -m)
|
||||
fi
|
||||
|
||||
mkdir -p cargo-deps
|
||||
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$(pwd)"/cargo-deps:/home/rust/src -P start9/rust-arm-cross:aarch64'
|
||||
|
||||
rust-arm64-builder cargo install "$1" --target-dir /home/rust/src --target=$ARCH-unknown-linux-gnu
|
||||
sudo chown -R $USER cargo-deps
|
||||
sudo chown -R $USER ~/.cargo
|
||||
2
build/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
lib/depends
|
||||
lib/conflicts
|
||||
@@ -1,18 +0,0 @@
|
||||
#!/bin/sh
|
||||
printf "\n"
|
||||
printf "Welcome to\n"
|
||||
cat << "ASCII"
|
||||
,---. | ,---.,---.
|
||||
|--- ,-.-.|---.,---.,---.,---., .| |`---.
|
||||
| | | || |,---|`---.`---.| || | |
|
||||
`---'` ' '`---'`---^`---'`---'`---|`---'`---'
|
||||
`---'
|
||||
ASCII
|
||||
printf " %s (%s %s)\n" "$(uname -o)" "$(uname -r)" "$(uname -m)"
|
||||
printf " $(embassy-cli --version | sed 's/Embassy CLI /EmbassyOS v/g') - $(embassy-cli git-info)\n"
|
||||
|
||||
printf "\n"
|
||||
printf " * Documentation: https://start9.com\n"
|
||||
printf " * Management: https://%s.local\n" "$(hostname)"
|
||||
printf " * Support: https://t.me/start9_labs\n"
|
||||
printf "\n"
|
||||
111
build/README.md
@@ -1,72 +1,107 @@
|
||||
# Building Embassy OS
|
||||
# Building StartOS
|
||||
|
||||
⚠️ The commands given assume a Debian or Ubuntu-based environment. *Building in a VM is NOT yet supported* ⚠️
|
||||
⚠️ The commands given assume a Debian or Ubuntu-based environment. _Building in
|
||||
a VM is NOT yet supported_ ⚠️
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Install dependencies
|
||||
|
||||
- Avahi
|
||||
- `sudo apt install -y avahi-daemon`
|
||||
- Installed by default on most Debian systems - https://avahi.org
|
||||
- `sudo apt install -y avahi-daemon`
|
||||
- Installed by default on most Debian systems - https://avahi.org
|
||||
- Build Essentials (needed to run `make`)
|
||||
- `sudo apt install -y build-essential`
|
||||
- Docker
|
||||
- `curl -fsSL https://get.docker.com | sh`
|
||||
- https://docs.docker.com/get-docker
|
||||
- Add your user to the docker group: `sudo usermod -a -G docker $USER`
|
||||
- Reload user environment `exec sudo su -l $USER`
|
||||
- `curl -fsSL https://get.docker.com | sh`
|
||||
- https://docs.docker.com/get-docker
|
||||
- Add your user to the docker group: `sudo usermod -a -G docker $USER`
|
||||
- Reload user environment `exec sudo su -l $USER`
|
||||
- Prepare Docker environment
|
||||
- Setup buildx (https://docs.docker.com/buildx/working-with-buildx/)
|
||||
- Create a builder: `docker buildx create --use`
|
||||
- Add multi-arch build ability: `docker run --rm --privileged linuxkit/binfmt:v0.8`
|
||||
- Setup buildx (https://docs.docker.com/buildx/working-with-buildx/)
|
||||
- Create a builder: `docker buildx create --use`
|
||||
- Add multi-arch build ability:
|
||||
`docker run --rm --privileged linuxkit/binfmt:v0.8`
|
||||
- Node Version 12+
|
||||
- `sudo snap install node`
|
||||
- https://nodejs.org/en/docs
|
||||
- snap: `sudo snap install node`
|
||||
- [nvm](https://github.com/nvm-sh/nvm#installing-and-updating):
|
||||
`nvm install --lts`
|
||||
- https://nodejs.org/en/docs
|
||||
- NPM Version 7+
|
||||
- `sudo apt install -y npm`
|
||||
- https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
|
||||
- apt: `sudo apt install -y npm`
|
||||
- [nvm](https://github.com/nvm-sh/nvm#installing-and-updating):
|
||||
`nvm install --lts`
|
||||
- https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
|
||||
- jq
|
||||
- `sudo apt install -y jq`
|
||||
- https://stedolan.github.io/jq
|
||||
- `sudo apt install -y jq`
|
||||
- https://stedolan.github.io/jq
|
||||
- yq
|
||||
- snap: `sudo snap install yq`
|
||||
- binaries: https://github.com/mikefarah/yq/releases/
|
||||
- https://mikefarah.gitbook.io/yq
|
||||
|
||||
2. Clone the repo, move into it, and bring in required submodules
|
||||
2. Clone the latest repo with required submodules
|
||||
> :information_source: You chan check latest available version
|
||||
> [here](https://github.com/Start9Labs/start-os/releases)
|
||||
```
|
||||
git clone --recursive https://github.com/Start9Labs/start-os.git --branch latest
|
||||
```
|
||||
|
||||
```
|
||||
git clone --recursive https://github.com/Start9Labs/embassy-os.git
|
||||
cd embassy-os
|
||||
git submodule update --init --recursive
|
||||
```
|
||||
|
||||
## Build
|
||||
## Build Raspberry Pi Image
|
||||
|
||||
```
|
||||
make
|
||||
cd start-os
|
||||
make embassyos-raspi.img ARCH=aarch64
|
||||
```
|
||||
|
||||
## Flash
|
||||
|
||||
Flash the resulting `eos.img` to your SD Card (16GB required, any larger is neither necessary, nor advantageous)
|
||||
Flash the resulting `embassyos-raspi.img` to your SD Card
|
||||
|
||||
We recommend [Balena Etcher](https://www.balena.io/etcher/)
|
||||
|
||||
## Setup
|
||||
|
||||
Visit http://embassy.local from any web browser - We recommend [Firefox](https://www.mozilla.org/firefox/browsers)
|
||||
Visit http://start.local from any web browser - We recommend
|
||||
[Firefox](https://www.mozilla.org/firefox/browsers)
|
||||
|
||||
Enter your product key. This is generated during the build process and can be found in `product_key.txt`, located in the root directory.
|
||||
Enter your product key. This is generated during the build process and can be
|
||||
found in `product_key.txt`, located in the root directory.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
1. I just flashed my SD card, fired up my Embassy, bootup sounds and all, but my browser is saying "Unable to connect" with embassy.local.
|
||||
1. I just flashed my SD card, fired up StartOS, bootup sounds and all, but my
|
||||
browser is saying "Unable to connect" with start.local.
|
||||
|
||||
- Try doing a hard refresh on your browser, or opening the url in a private/incognito window. If you've ran an instance
|
||||
of Embassy before, sometimes you can have a stale cache that will block you from navigating to the page.
|
||||
- Try doing a hard refresh on your browser, or opening the url in a
|
||||
private/incognito window. If you've ran an instance of StartOS before,
|
||||
sometimes you can have a stale cache that will block you from navigating to
|
||||
the page.
|
||||
|
||||
2. Flashing the image isn't working with balenaEtcher. I'm getting `Cannot read property 'message' of null` when I try.
|
||||
- The latest versions of Balena may not flash properly. This version here: https://github.com/balena-io/etcher/releases/tag/v1.5.122 should work properly.
|
||||
2. Flashing the image isn't working with balenaEtcher. I'm getting
|
||||
`Cannot read property 'message' of null` when I try.
|
||||
|
||||
3. Startup isn't working properly and I'm curious as to why. How can I view logs regarding startup for debugging?
|
||||
- During the Build step, instead of running just `make` run `ENVIRONMENT=dev make`. Flash like normal, and insert into your Embassy. Boot up your Embassy, and on another computer
|
||||
on the same network, ssh into the Embassy with the username/password `ubuntu`. After logging in and changing the password, run `journalctl -u initialization.service -ef` to view the start up logs.
|
||||
- The latest versions of Balena may not flash properly. This version here:
|
||||
https://github.com/balena-io/etcher/releases/tag/v1.5.122 should work
|
||||
properly.
|
||||
|
||||
3. Startup isn't working properly and I'm curious as to why. How can I view logs
|
||||
regarding startup for debugging?
|
||||
|
||||
- Find the IP of your device
|
||||
- Run `nc <ip> 8080` and it will print the logs
|
||||
|
||||
4. I need to ssh into my server to fix something, but I cannot get to the
|
||||
console to add ssh keys normally.
|
||||
|
||||
- During the Build step, instead of running just
|
||||
`make embassyos-raspi.img ARCH=aarch64` run
|
||||
`ENVIRONMENT=dev make embassyos-raspi.img ARCH=aarch64`. Flash like normal,
|
||||
and insert into your server. Boot up StartOS, then on another computer on
|
||||
the same network, ssh into the the server with the username `start9` password
|
||||
`embassy`.
|
||||
|
||||
4. I need to reset my password, how can I do that?
|
||||
- At the time of writing, there is no way to do that in 0.3.0 cleanly. You'll need to reflash your device unfortunately.
|
||||
|
||||
- You will need to reflash your device. Select "Use Existing Drive" once you are
|
||||
in setup, and it will prompt you to set a new password.
|
||||
|
||||
76
build/RELEASE.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# Release Process
|
||||
|
||||
## `embassyos_0.3.x-1_amd64.deb`
|
||||
|
||||
- Description: debian package for x86_64 - intended to be installed on pureos
|
||||
- Destination: GitHub Release Tag
|
||||
- Requires: N/A
|
||||
- Build steps:
|
||||
- Clone `https://github.com/Start9Labs/embassy-os-deb` at `master`
|
||||
- Run `make TAG=master` from that folder
|
||||
- Artifact: `./embassyos_0.3.x-1_amd64.deb`
|
||||
|
||||
## `eos-<version>-<git hash>-<date>_amd64.iso`
|
||||
|
||||
- Description: live usb image for x86_64
|
||||
- Destination: GitHub Release Tag
|
||||
- Requires: `embassyos_0.3.x-1_amd64.deb`
|
||||
- Build steps:
|
||||
- Clone `https://github.com/Start9Labs/eos-image-recipes` at `master`
|
||||
- Copy `embassyos_0.3.x-1_amd64.deb` to
|
||||
`overlays/vendor/root/embassyos_0.3.x-1_amd64.deb`
|
||||
- Run `./run-local-build.sh byzantium` from that folder
|
||||
- Artifact: `./results/eos-<version>-<git hash>-<date>_amd64.iso`
|
||||
|
||||
## `eos.x86_64.squashfs`
|
||||
|
||||
- Description: compressed embassyOS x86_64 filesystem image
|
||||
- Destination: GitHub Release Tag, Registry @
|
||||
`resources/eos/<version>/eos.x86_64.squashfs`
|
||||
- Requires: `eos-<version>-<git hash>-<date>_amd64.iso`
|
||||
- Build steps:
|
||||
- From `https://github.com/Start9Labs/eos-image-recipes` at `master`
|
||||
- `./extract-squashfs.sh results/eos-<version>-<git hash>-<date>_amd64.iso`
|
||||
- Artifact: `./results/eos.x86_64.squashfs`
|
||||
|
||||
## `eos.raspberrypi.squashfs`
|
||||
|
||||
- Description: compressed embassyOS raspberrypi filesystem image
|
||||
- Destination: GitHub Release Tag, Registry @
|
||||
`resources/eos/<version>/eos.raspberrypi.squashfs`
|
||||
- Requires: N/A
|
||||
- Build steps:
|
||||
- Clone `https://github.com/Start9Labs/embassy-os` at `master`
|
||||
- `make embassyos-raspi.img`
|
||||
- flash `embassyos-raspi.img` to raspberry pi
|
||||
- boot raspberry pi with ethernet
|
||||
- wait for chime
|
||||
- you can watch logs using `nc <ip> 8080`
|
||||
- unplug raspberry pi, put sd card back in build machine
|
||||
- `./build/raspberry-pi/rip-image.sh`
|
||||
- Artifact: `./eos.raspberrypi.squashfs`
|
||||
|
||||
## `lite-upgrade.img`
|
||||
|
||||
- Description: update image for users coming from 0.3.2.1 and before
|
||||
- Destination: Registry @ `resources/eos/<version>/eos.img`
|
||||
- Requires: `eos.raspberrypi.squashfs`
|
||||
- Build steps:
|
||||
- From `https://github.com/Start9Labs/embassy-os` at `master`
|
||||
- `make lite-upgrade.img`
|
||||
- Artifact `./lite-upgrade.img`
|
||||
|
||||
## `eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
|
||||
|
||||
- Description: pre-initialized raspberrypi image
|
||||
- Destination: GitHub Release Tag (as tar.gz)
|
||||
- Requires: `eos.raspberrypi.squashfs`
|
||||
- Build steps:
|
||||
- From `https://github.com/Start9Labs/embassy-os` at `master`
|
||||
- `make eos_raspberrypi.img`
|
||||
- `tar --format=posix -cS -f- eos-<version>-<git hash>-<date>_raspberrypi.img | gzip > eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
|
||||
- Artifact `./eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
|
||||
|
||||
## `embassy-sdk`
|
||||
|
||||
- Build and deploy to all registries
|
||||
5
build/dpkg-deps/conflicts
Normal file
@@ -0,0 +1,5 @@
|
||||
dhcpcd5
|
||||
firewalld
|
||||
nginx
|
||||
nginx-common
|
||||
openresolv
|
||||
54
build/dpkg-deps/depends
Normal file
@@ -0,0 +1,54 @@
|
||||
avahi-daemon
|
||||
avahi-utils
|
||||
bash-completion
|
||||
beep
|
||||
bmon
|
||||
btrfs-progs
|
||||
ca-certificates
|
||||
cifs-utils
|
||||
cryptsetup
|
||||
curl
|
||||
dmidecode
|
||||
dosfstools
|
||||
e2fsprogs
|
||||
ecryptfs-utils
|
||||
exfatprogs
|
||||
flashrom
|
||||
grub-common
|
||||
htop
|
||||
httpdirfs
|
||||
iotop
|
||||
iw
|
||||
jq
|
||||
libavahi-client3
|
||||
libyajl2
|
||||
linux-cpupower
|
||||
lm-sensors
|
||||
lshw
|
||||
lvm2
|
||||
magic-wormhole
|
||||
man-db
|
||||
ncdu
|
||||
net-tools
|
||||
network-manager
|
||||
nvme-cli
|
||||
nyx
|
||||
openssh-server
|
||||
podman
|
||||
postgresql
|
||||
psmisc
|
||||
qemu-guest-agent
|
||||
rsync
|
||||
samba-common-bin
|
||||
smartmontools
|
||||
sqlite3
|
||||
squashfs-tools
|
||||
sudo
|
||||
systemd
|
||||
systemd-resolved
|
||||
systemd-sysv
|
||||
systemd-timesyncd
|
||||
tor
|
||||
util-linux
|
||||
vim
|
||||
wireless-tools
|
||||
5
build/dpkg-deps/docker.depends
Normal file
@@ -0,0 +1,5 @@
|
||||
+ containerd.io
|
||||
+ docker-ce
|
||||
+ docker-ce-cli
|
||||
+ docker-compose-plugin
|
||||
- podman
|
||||
43
build/dpkg-deps/generate.sh
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
IFS="-" read -ra FEATURES <<< "$ENVIRONMENT"
|
||||
|
||||
feature_file_checker='
|
||||
/^#/ { next }
|
||||
/^\+ [a-z0-9]+$/ { next }
|
||||
/^- [a-z0-9]+$/ { next }
|
||||
{ exit 1 }
|
||||
'
|
||||
|
||||
for type in conflicts depends; do
|
||||
pkgs=()
|
||||
for feature in ${FEATURES[@]}; do
|
||||
file="$feature.$type"
|
||||
if [ -f $file ]; then
|
||||
# TODO check for syntax errrors
|
||||
cat $file | awk "$feature_file_checker"
|
||||
for pkg in $(cat $file | awk '/^\+/ {print $2}'); do
|
||||
pkgs+=($pkg)
|
||||
done
|
||||
fi
|
||||
done
|
||||
for pkg in $(cat $type); do
|
||||
SKIP=
|
||||
for feature in ${FEATURES[@]}; do
|
||||
file="$feature.$type"
|
||||
if [ -f $file ]; then
|
||||
if grep "^- $pkg$" $file; then
|
||||
SKIP=1
|
||||
fi
|
||||
fi
|
||||
done
|
||||
if [ -z $SKIP ]; then
|
||||
pkgs+=($pkg)
|
||||
fi
|
||||
done
|
||||
(IFS=$'\n'; echo "${pkgs[*]}") | sort -u > ../lib/$type
|
||||
done
|
||||
2
build/dpkg-deps/unstable.depends
Normal file
@@ -0,0 +1,2 @@
|
||||
+ gdb
|
||||
+ heaptrack
|
||||
@@ -1,4 +0,0 @@
|
||||
LABEL=green / ext4 discard,errors=remount-ro 0 1
|
||||
LABEL=system-boot /media/boot-rw vfat defaults 0 1
|
||||
/media/boot-rw /boot none defaults,bind,ro 0 0
|
||||
LABEL=EMBASSY /embassy-os vfat defaults 0 1
|
||||
@@ -1,13 +0,0 @@
|
||||
[Unit]
|
||||
Description=Boot process for system initialization.
|
||||
After=network-online.target systemd-time-wait-sync.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
Restart=on-failure
|
||||
RestartSec=5s
|
||||
ExecStart=/usr/local/bin/initialization.sh
|
||||
RemainAfterExit=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||