Compare commits
1444 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
dea6bea38c | ||
|
|
49d4da03ca | ||
|
|
3765465618 | ||
|
|
61f820d09e | ||
|
|
db7f3341ac | ||
|
|
4decf9335c | ||
|
|
339e5f799a | ||
|
|
89d3e0cf35 | ||
|
|
638ed27599 | ||
|
|
da75b8498e | ||
|
|
8ef4ecf5ac | ||
|
|
25049ce9f1 | ||
|
|
0260c1532d | ||
|
|
2a54625f43 | ||
|
|
4e638fb58e | ||
|
|
73274ef6e0 | ||
|
|
e1915bf497 | ||
|
|
8204074bdf | ||
|
|
2ee403e7de | ||
|
|
1974dfd66f | ||
|
|
2e03a95e47 | ||
|
|
b6262c8e13 | ||
|
|
ba740a9ee2 | ||
|
|
8f809dab21 | ||
|
|
c0b2cbe1c8 | ||
|
|
f2142f0bb3 | ||
|
|
86ca23c093 | ||
|
|
463b6ca4ef | ||
|
|
58e0b166cb | ||
|
|
2a678bb017 | ||
|
|
5664456b77 | ||
|
|
3685b7e57e | ||
|
|
989d5f73b1 | ||
|
|
4f84073cb5 | ||
|
|
c190295c34 | ||
|
|
60875644a1 | ||
|
|
113b09ad01 | ||
|
|
2605d0e671 | ||
|
|
d232b91d31 | ||
|
|
c65db31fd9 | ||
|
|
99871805bd | ||
|
|
e8ef39adad | ||
|
|
466b9217b5 | ||
|
|
c9a7f519b9 | ||
|
|
96ae532879 | ||
|
|
eda08d5b0f | ||
|
|
7c12b58bb5 | ||
|
|
5446c89bc0 | ||
|
|
2d0251e585 | ||
|
|
f41710c892 | ||
|
|
df3f79f282 | ||
|
|
f8df692865 | ||
|
|
0c6d3b188d | ||
|
|
e7a38863ab | ||
|
|
720e0fcdab | ||
|
|
bf8ff84522 | ||
|
|
5a9510238e | ||
|
|
7b3c74179b | ||
|
|
cd70fa4c32 | ||
|
|
83133ced6a | ||
|
|
6c5179a179 | ||
|
|
e33ab39b85 | ||
|
|
9567bcec1b | ||
|
|
550b16dc0b | ||
|
|
5d8331b7f7 | ||
|
|
e35b643e51 | ||
|
|
bc6a92677b | ||
|
|
f52072e6ec | ||
|
|
9c43c43a46 | ||
|
|
0430e0f930 | ||
|
|
b945243d1a | ||
|
|
d8484a8b26 | ||
|
|
3c27499795 | ||
|
|
7c772e873d | ||
|
|
db2fab245e | ||
|
|
a9c9917f1a | ||
|
|
23e2e9e9cc | ||
|
|
2369e92460 | ||
|
|
a53b15f2a3 | ||
|
|
72eb8b1eb6 | ||
|
|
4db54f3b83 | ||
|
|
24eb27f005 | ||
|
|
009d76ea35 | ||
|
|
6e8a425eb1 | ||
|
|
66188d791b | ||
|
|
015ff02d71 | ||
|
|
10bfaf5415 | ||
|
|
e3e0b85e0c | ||
|
|
ad0632892e | ||
|
|
f26791ba39 | ||
|
|
2fbaaebf44 | ||
|
|
edb916338c | ||
|
|
f7e947d37d | ||
|
|
a9e3d1ed75 | ||
|
|
ce97827c42 | ||
|
|
3efec07338 | ||
|
|
68f401bfa3 | ||
|
|
1ea525feaa | ||
|
|
57c4a7527e | ||
|
|
5aa9c045e1 | ||
|
|
6f1900f3bb | ||
|
|
bc62de795e | ||
|
|
c62ca4b183 | ||
|
|
876e5bc683 | ||
|
|
b99f3b73cd | ||
|
|
7eecf29449 | ||
|
|
1d331d7810 | ||
|
|
68414678d8 | ||
|
|
2f6b9dac26 | ||
|
|
d1812d875b | ||
|
|
723dea100f | ||
|
|
c4419ed31f | ||
|
|
754ab86e51 | ||
|
|
04dab532cd | ||
|
|
add01ebc68 | ||
|
|
1cc9a1a30b | ||
|
|
92a1de7500 | ||
|
|
a6fedcff80 | ||
|
|
55eb999305 | ||
|
|
377b7b12ce | ||
|
|
ba2906a42e | ||
|
|
ee27f14be0 | ||
|
|
46c8be63a7 | ||
|
|
7ba66c419a | ||
|
|
340775a593 | ||
|
|
35d2ec8a44 | ||
|
|
2983b9950f | ||
|
|
dbf08a6cf8 | ||
|
|
28f31be36f | ||
|
|
3ec4db0225 | ||
|
|
f5688e077a | ||
|
|
2464d255d5 | ||
|
|
586d950b8c | ||
|
|
e7469388cc | ||
|
|
ab6ca8e16a | ||
|
|
02413a4fac | ||
|
|
05b8dd9ad8 | ||
|
|
29c9419a6e | ||
|
|
90e61989a4 | ||
|
|
b1f9f90fec | ||
|
|
b40849f672 | ||
|
|
44560c8da8 | ||
|
|
46fd01c264 | ||
|
|
100695c262 | ||
|
|
54b5a4ae55 | ||
|
|
ffb252962b | ||
|
|
ae31270e63 | ||
|
|
9b2b54d585 | ||
|
|
e1ccc583a3 | ||
|
|
7750e33f82 | ||
|
|
d2c4741f0b | ||
|
|
c79c4f6bde | ||
|
|
3849d0d1a9 | ||
|
|
8bd71ccd5e | ||
|
|
b731f7fb64 | ||
|
|
cd554f77f3 | ||
|
|
8c977c51ca | ||
|
|
a3252f9671 | ||
|
|
9bc945f76f | ||
|
|
f6b4dfffb6 | ||
|
|
68955c29cb | ||
|
|
97e4d036dc | ||
|
|
0f49f54c29 | ||
|
|
828e13adbb | ||
|
|
e6f0067728 | ||
|
|
5c473eb9cc | ||
|
|
2adf34fbaf | ||
|
|
05dd760388 | ||
|
|
2cf4864078 | ||
|
|
df4c92672f | ||
|
|
5b173315f9 | ||
|
|
c85ea7d8fa | ||
|
|
113154702f | ||
|
|
33ae46f76a | ||
|
|
27272680a2 | ||
|
|
b1621f6b34 | ||
|
|
2c65033c0a | ||
|
|
dcfbaa9243 | ||
|
|
accef65ede | ||
|
|
50755d8ba3 | ||
|
|
47b6509f70 | ||
|
|
89f3fdc05f | ||
|
|
03f8b73627 | ||
|
|
2e6e9635c3 | ||
|
|
6a312e3fdd | ||
|
|
79dbbdf6b4 | ||
|
|
0e8961efe3 | ||
|
|
fc2be42418 | ||
|
|
ab4336cfd7 | ||
|
|
20d3b5288c | ||
|
|
63a29d3a4a | ||
|
|
31856d9895 | ||
|
|
f51dcf23d6 | ||
|
|
6ecaeb4fde | ||
|
|
1883c9666e | ||
|
|
4b4cf76641 | ||
|
|
0016b4bd72 | ||
|
|
495bbecc01 | ||
|
|
e6af7e9885 | ||
|
|
182b8c2283 | ||
|
|
5318cccc5f | ||
|
|
99739575d4 | ||
|
|
b8ff331ccc | ||
|
|
9e63f3f7c6 | ||
|
|
6f9069a4fb | ||
|
|
a18ab7f1e9 | ||
|
|
05162ca350 | ||
|
|
be0371fb11 | ||
|
|
fa3329abf2 | ||
|
|
e830fade06 | ||
|
|
ac392dcb96 | ||
|
|
e662b2f393 | ||
|
|
00a5fdf491 | ||
|
|
63bc71da13 | ||
|
|
7fff9579c0 | ||
|
|
737beb11f6 | ||
|
|
f55af7da4c | ||
|
|
80461a78b0 | ||
|
|
40d194672b | ||
|
|
d63341ea06 | ||
|
|
df8c8dc93b | ||
|
|
dd3a140cb1 | ||
|
|
1b006599cf | ||
|
|
44aa3cc9b5 | ||
|
|
b88b24e231 | ||
|
|
890c31ba74 | ||
|
|
6dc9a11a89 | ||
|
|
ce2842d365 | ||
|
|
7d1096dbd8 | ||
|
|
95722802dc | ||
|
|
3047dae703 | ||
|
|
95cad7bdd9 | ||
|
|
4e22f13007 | ||
|
|
04611b0ae2 | ||
|
|
a00f1ab549 | ||
|
|
446b37793b | ||
|
|
b2b98643d8 | ||
|
|
b83eeeb131 | ||
|
|
e8d727c07a | ||
|
|
bb8109f67d | ||
|
|
e28fa26c43 | ||
|
|
639fc3793a | ||
|
|
2aaae5265a | ||
|
|
baa4c1fd25 | ||
|
|
479797361e | ||
|
|
0a9f1d2a27 | ||
|
|
5e103770fd | ||
|
|
e012a29b5e | ||
|
|
5d759f810c | ||
|
|
eb1f3a0ced | ||
|
|
29e8210782 | ||
|
|
45ca9405d3 | ||
|
|
e6f02bf8f7 | ||
|
|
57e75e3614 | ||
|
|
89ab67e067 | ||
|
|
e9d851e4d3 | ||
|
|
c675d0feee | ||
|
|
1859c0505e | ||
|
|
f15251096c | ||
|
|
115c599fd8 | ||
|
|
3121c08ee8 | ||
|
|
ef28b01286 | ||
|
|
a5bac39196 | ||
|
|
9f640b24b3 | ||
|
|
f48750c22c | ||
|
|
7a96e94491 | ||
|
|
22a32af750 | ||
|
|
dd423f2e7b | ||
|
|
12dec676db | ||
|
|
75e7556bfa | ||
|
|
504f1a8e97 | ||
|
|
e4a2af6ae7 | ||
|
|
fefa88fc2a | ||
|
|
ed8a7ee8a5 | ||
|
|
1771797453 | ||
|
|
46179f5c83 | ||
|
|
db6fc661a6 | ||
|
|
beb3a9f60a | ||
|
|
c088ab7a79 | ||
|
|
aab2b8fdbc | ||
|
|
b1e7a717af | ||
|
|
25e38bfc98 | ||
|
|
279c7324c4 | ||
|
|
1c90303914 | ||
|
|
6ab6502742 | ||
|
|
b79c029f21 | ||
|
|
020268fe67 | ||
|
|
176b1c9d20 | ||
|
|
5ab2efa0c0 | ||
|
|
88320488a7 | ||
|
|
2091abeea2 | ||
|
|
480f5c1a9a | ||
|
|
8e0db2705f | ||
|
|
1be9cdae67 | ||
|
|
e1a91a7e53 | ||
|
|
b952e3183f | ||
|
|
26ae0bf207 | ||
|
|
42cfd69463 | ||
|
|
7694b68e06 | ||
|
|
28e39c57bd | ||
|
|
2fa0a57d2b | ||
|
|
c9f3e1bdab | ||
|
|
2ba56b8c59 | ||
|
|
fb074c8c32 | ||
|
|
9fc082d1e6 | ||
|
|
dfda2f7d5d | ||
|
|
0c04802560 | ||
|
|
5146689158 | ||
|
|
e7fa94c3d3 | ||
|
|
a77ebd3b55 | ||
|
|
00114287e5 | ||
|
|
db0695126f | ||
|
|
eec5cf6b65 | ||
|
|
a9569d0ed9 | ||
|
|
88d9388be2 | ||
|
|
93c72ecea5 | ||
|
|
b5b0ac50bd | ||
|
|
4d2afdb1a9 | ||
|
|
39a177bd70 | ||
|
|
34fb6ac837 | ||
|
|
f868a454d9 | ||
|
|
24c6cd235b | ||
|
|
47855dc78b | ||
|
|
751ceab04e | ||
|
|
dbbc42c5fd | ||
|
|
27416efb6d | ||
|
|
21dd08544b | ||
|
|
ae88f7d181 | ||
|
|
9981ee7601 | ||
|
|
66b018a355 | ||
|
|
b6c48d0f98 | ||
|
|
097d77f7b3 | ||
|
|
ed1bc6c215 | ||
|
|
c552fdfc0f | ||
|
|
4006dba9f1 | ||
|
|
7a0586684b | ||
|
|
8f34d1c555 | ||
|
|
571db5c0ee | ||
|
|
9059855f2b | ||
|
|
e423678995 | ||
|
|
ece5577f26 | ||
|
|
f373abdd14 | ||
|
|
4defec194f | ||
|
|
5270a6781f | ||
|
|
fa93e195cb | ||
|
|
72898d897c | ||
|
|
c6ee65b654 | ||
|
|
4d7694de24 | ||
|
|
a083f25b6c | ||
|
|
befa9eb16d | ||
|
|
a278c630bb | ||
|
|
6a8d8babce | ||
|
|
76eb0f1775 | ||
|
|
0abe08f243 | ||
|
|
f692ebbbb9 | ||
|
|
c174b65465 | ||
|
|
015131f198 | ||
|
|
a730543c76 | ||
|
|
c704626a39 | ||
|
|
7ef25a3816 | ||
|
|
b43ad93c54 | ||
|
|
7850681ce1 | ||
|
|
846189b15b | ||
|
|
46a893a8b6 | ||
|
|
657aac0d68 | ||
|
|
30885cee01 | ||
|
|
9237984782 | ||
|
|
c289629a28 | ||
|
|
806196f572 | ||
|
|
0e598660b4 | ||
|
|
058bfe0737 | ||
|
|
81932c8cff | ||
|
|
bd7adafee0 | ||
|
|
faf0c2b816 | ||
|
|
419d4986f6 | ||
|
|
9f1a9a7d9c | ||
|
|
a3e7e7c6c9 | ||
|
|
94a5075b6d | ||
|
|
7c32404b69 | ||
|
|
d0c2dc53fe | ||
|
|
0e8530172c | ||
|
|
4427aeac54 | ||
|
|
93640bb08e | ||
|
|
512ed71fc3 | ||
|
|
0cfc43c444 | ||
|
|
ecd0edc29e | ||
|
|
6168a006f4 | ||
|
|
82ba5dad1b | ||
|
|
972ee8e42e | ||
|
|
7cd3f285ad | ||
|
|
89e327383e | ||
|
|
290a15bbd9 | ||
|
|
1dd21f1f76 | ||
|
|
46b3f83ce2 | ||
|
|
5c153c9e21 | ||
|
|
bca75a3ea4 | ||
|
|
0bc6f972b2 | ||
|
|
36cc9cc1ec | ||
|
|
20f6a5e797 | ||
|
|
ccbb68aa0c | ||
|
|
08003c59b6 | ||
|
|
dafa638558 | ||
|
|
75e5250509 | ||
|
|
0ed6eb7029 | ||
|
|
63e26b6050 | ||
|
|
949f1c648a | ||
|
|
3e7578d670 | ||
|
|
6f07ec2597 | ||
|
|
e65c0a0d1d | ||
|
|
be217b5354 | ||
|
|
bfe3029d31 | ||
|
|
6abdc39fe5 | ||
|
|
bf55367f4d | ||
|
|
9480758310 | ||
|
|
25b33fb031 | ||
|
|
10ede0d21c | ||
|
|
698bdd619f | ||
|
|
5cef6874f6 | ||
|
|
6d42ae2629 | ||
|
|
a3b94816f9 | ||
|
|
e0b47feb8b | ||
|
|
8aecec0b9a | ||
|
|
078bf41029 | ||
|
|
2754302fb7 | ||
|
|
dfb7658c3e | ||
|
|
a743785faf | ||
|
|
e4782dee68 | ||
|
|
64315df85f | ||
|
|
2a1fd16849 | ||
|
|
21e31d540e | ||
|
|
370c38ec76 | ||
|
|
854044229c | ||
|
|
69baa44a3a | ||
|
|
419e3f7f2b | ||
|
|
a9373d9779 | ||
|
|
1a0536d212 | ||
|
|
099b77cf9b | ||
|
|
c3d17bf847 | ||
|
|
e04b93a51a | ||
|
|
b36b62c68e | ||
|
|
ab465a755e | ||
|
|
c6f19db1ec | ||
|
|
019142efc9 | ||
|
|
a535fc17c3 | ||
|
|
0fbb18b315 | ||
|
|
3eb0093d2a | ||
|
|
d159dde2ca | ||
|
|
729a510c5b | ||
|
|
196561fed2 | ||
|
|
8f0bdcd172 | ||
|
|
fffc7f4098 | ||
|
|
c7a2e7ada1 | ||
|
|
95611e9c4b | ||
|
|
62fc6afd8a | ||
|
|
0f5cec0a60 | ||
|
|
d235ebaac9 | ||
|
|
6def083b4f | ||
|
|
87322744d4 | ||
|
|
f2a02b392e | ||
|
|
e6cedc257e | ||
|
|
1b5cf2d272 | ||
|
|
f76e822381 | ||
|
|
a2b1968d6e | ||
|
|
398eb13a7f | ||
|
|
956c8a8e03 | ||
|
|
6aba166c82 | ||
|
|
fd7c7ea6b7 | ||
|
|
d85e621bb3 | ||
|
|
822dd5e100 | ||
|
|
25801f374c | ||
|
|
8fd2d0b35c | ||
|
|
c16d8a1da1 | ||
|
|
ab1fdf69c8 | ||
|
|
dd196c0e11 | ||
|
|
0e506f5716 | ||
|
|
0a98ccff0c | ||
|
|
0c188f6d10 | ||
|
|
8009dd691b | ||
|
|
13d0e9914b | ||
|
|
9da49be44d | ||
|
|
00f7fa507b | ||
|
|
2c255b6dfe | ||
|
|
6e2cf8bb3f | ||
|
|
68ed1c80ce | ||
|
|
e0d23f4436 | ||
|
|
509f8a5353 | ||
|
|
b0c0cd7fda | ||
|
|
133dfd5063 | ||
|
|
e6abf4e33b | ||
|
|
07104b18f5 | ||
|
|
f39b85abf2 | ||
|
|
c6c97491ac | ||
|
|
355452cdb3 | ||
|
|
da3720c7a9 | ||
|
|
e92d4ff147 | ||
|
|
bb514d6216 | ||
|
|
3f380fa0da | ||
|
|
5aefb707fa | ||
|
|
4afd3c2322 | ||
|
|
b8eb8a90a5 | ||
|
|
4d6cb091cc | ||
|
|
fc8b1193de | ||
|
|
2c12af5af8 | ||
|
|
bd4d89fc21 | ||
|
|
9487529992 | ||
|
|
fa347fd49d | ||
|
|
8f7072d7e9 | ||
|
|
412c5d68cc | ||
|
|
e06b068033 | ||
|
|
6234391229 | ||
|
|
2568bfde5e | ||
|
|
fd7c2fbe93 | ||
|
|
206c185a3b | ||
|
|
7689cbbe0d | ||
|
|
c832b5d29e | ||
|
|
b57a9351b3 | ||
|
|
f0ae9e21ae | ||
|
|
9510c92288 | ||
|
|
755f3f05d8 | ||
|
|
5d8114b475 | ||
|
|
0ccbb52c1f | ||
|
|
85b39ecf99 | ||
|
|
230838c22b | ||
|
|
a7bfcdcb01 | ||
|
|
47ff630c55 | ||
|
|
70dc53bda7 | ||
|
|
0b8a142de0 | ||
|
|
7e1b433c17 | ||
|
|
800b0763e4 | ||
|
|
30aabe255b | ||
|
|
9b14d714ca | ||
|
|
8a38666105 | ||
|
|
ec878defab | ||
|
|
1786b70e14 | ||
|
|
7f525fa7dc | ||
|
|
e08d93b2aa | ||
|
|
df777c63fe | ||
|
|
3a5ee4a296 | ||
|
|
7b8a0114f5 | ||
|
|
003d110948 | ||
|
|
e9c9a67365 | ||
|
|
8b89e03999 | ||
|
|
9eff920989 | ||
|
|
711c82472c | ||
|
|
156bf02d21 | ||
|
|
932b53d92d | ||
|
|
2693b9a42d | ||
|
|
e9166c4a7d | ||
|
|
2bc64920dd | ||
|
|
aee5500833 | ||
|
|
6b336b7b2f | ||
|
|
f07992c091 | ||
|
|
3c0e77241d | ||
|
|
87461c7f72 | ||
|
|
a67f2b4976 | ||
|
|
8594781780 | ||
|
|
313e415ee9 | ||
|
|
c13d8f3699 | ||
|
|
e41f8f1d0f | ||
|
|
b2c8907635 | ||
|
|
05f4df1a30 | ||
|
|
35fe06a892 | ||
|
|
75ff541aec | ||
|
|
cd933ce6e4 | ||
|
|
0b93988450 | ||
|
|
056cab23e0 | ||
|
|
6bc8027644 | ||
|
|
3b9298ed2b | ||
|
|
12a323f691 | ||
|
|
9c4c211233 | ||
|
|
74ba68ff2c | ||
|
|
7273b37c16 | ||
|
|
0d4ebffc0e | ||
|
|
352b2fb4e7 | ||
|
|
6e6ef57303 | ||
|
|
cc1f14e5e9 | ||
|
|
1c419d5c65 | ||
|
|
71b83245b4 | ||
|
|
2b88555028 | ||
|
|
f021ad9b0a | ||
|
|
8884f64b4e | ||
|
|
dd790dceb5 | ||
|
|
b80e41503f | ||
|
|
8dfc5052e9 | ||
|
|
7f28fc17ca | ||
|
|
2c308ccd35 | ||
|
|
4d6dd44e10 | ||
|
|
b6992e32a5 | ||
|
|
ac080edb02 | ||
|
|
231859303d | ||
|
|
1acdd67fd9 | ||
|
|
bec63a9471 | ||
|
|
44e856e8dc | ||
|
|
3bab7678b7 | ||
|
|
61f68d9e1b | ||
|
|
94f1562ec5 | ||
|
|
46412acd13 | ||
|
|
e7426ea365 | ||
|
|
665eef68b9 | ||
|
|
7c63d4012f | ||
|
|
92be4e774e | ||
|
|
2395502e60 | ||
|
|
9f3902b48d | ||
|
|
6e76bcb77e | ||
|
|
e05a95dc2d | ||
|
|
86d61d698a | ||
|
|
8ce6535a7e | ||
|
|
65ca038eee | ||
|
|
f41f5ebebd | ||
|
|
9cf62f03fa | ||
|
|
f770d5072e | ||
|
|
5698b830ed | ||
|
|
bcc76dd60a | ||
|
|
70d4a0c022 | ||
|
|
8cfd994170 | ||
|
|
22d8d08355 | ||
|
|
641e829e3f | ||
|
|
f9edff8bf4 | ||
|
|
33e6be1ca6 | ||
|
|
e25c50a467 | ||
|
|
f8441ab42e | ||
|
|
4589d4b3f5 | ||
|
|
9cf720e040 | ||
|
|
cf793f7f49 | ||
|
|
2b3fddfe89 | ||
|
|
e148f143ea | ||
|
|
d202cb731d | ||
|
|
299d9998ad | ||
|
|
fba1484e2e | ||
|
|
4ab7300376 | ||
|
|
18cc5e0ee8 | ||
|
|
af0cda5dbf | ||
|
|
a730a3719b | ||
|
|
3b669193f6 | ||
|
|
c782bab296 | ||
|
|
b14646ebd9 | ||
|
|
7441de5fd9 | ||
|
|
f5360cb8d4 | ||
|
|
22cd2e3337 | ||
|
|
7e9d453a2c | ||
|
|
a4338b0d03 | ||
|
|
a35baca580 | ||
|
|
66b0108c51 | ||
|
|
2021431e2f | ||
|
|
ab836c6922 | ||
|
|
405b3be496 | ||
|
|
4a27128a1c | ||
|
|
c74bdc97ca | ||
|
|
ddd5e4c76d | ||
|
|
5e6a7e134f | ||
|
|
41bc519855 | ||
|
|
53d82618d9 | ||
|
|
57f548c6c0 | ||
|
|
8d83f64aba | ||
|
|
9162697117 | ||
|
|
47b19e3211 | ||
|
|
590f6d4c19 | ||
|
|
53108e816f | ||
|
|
3ac71e2f7f | ||
|
|
f4fadd366e | ||
|
|
cc38dab76f | ||
|
|
c8be701f0e | ||
|
|
417befb2be | ||
|
|
a0ce7f38e7 | ||
|
|
962e3d8e56 | ||
|
|
3a3df96996 | ||
|
|
2ffa632796 | ||
|
|
3c6c0b253d | ||
|
|
5f40fd6038 | ||
|
|
8e2dc8b3ee | ||
|
|
a02b531e47 | ||
|
|
a4cb2708cc | ||
|
|
973284607d | ||
|
|
28fd2f0314 | ||
|
|
9715873007 | ||
|
|
18a20407f6 | ||
|
|
1a396cfc7b | ||
|
|
e604c914d1 | ||
|
|
a310c160a5 | ||
|
|
45d50b12fd | ||
|
|
e87182264a | ||
|
|
a089d544a5 | ||
|
|
b6fe0be1b2 | ||
|
|
ba325b1581 | ||
|
|
1f47abf195 | ||
|
|
750f35bc36 | ||
|
|
c99d9d95c5 | ||
|
|
4d402b2600 | ||
|
|
64fb002168 | ||
|
|
1308b5bcf3 | ||
|
|
dc3dc4a1f0 | ||
|
|
99bb55af73 | ||
|
|
4a285225db | ||
|
|
d986bd2a6c | ||
|
|
8665342edf | ||
|
|
2e7c3bf789 | ||
|
|
31ea0fe3fe | ||
|
|
e0c9f8a5aa | ||
|
|
a17ec4221b | ||
|
|
328beaba35 | ||
|
|
efbbaa5741 | ||
|
|
14be2fa344 | ||
|
|
f3ccad192c | ||
|
|
5e580f9372 | ||
|
|
8410929e86 | ||
|
|
093a5d4ddf | ||
|
|
88028412bd | ||
|
|
11c93231aa | ||
|
|
5366b4c873 | ||
|
|
171e0ed312 | ||
|
|
a5b1b4e103 | ||
|
|
f50ddb436f | ||
|
|
0b4b091580 | ||
|
|
2f6d7ac128 | ||
|
|
6b990e1cee | ||
|
|
ddeed65994 | ||
|
|
d87748fda1 | ||
|
|
50f0ead113 | ||
|
|
4e3075aaba | ||
|
|
87d6684ca7 | ||
|
|
3bd7596873 | ||
|
|
39964bf077 | ||
|
|
089199e7c2 | ||
|
|
7b41b295b7 | ||
|
|
d7bc7a2d38 | ||
|
|
eae75c13bb | ||
|
|
fab13db4b4 | ||
|
|
69d5f521a5 | ||
|
|
c0a55142b5 | ||
|
|
513fb3428a | ||
|
|
9a0ae549f6 | ||
|
|
4410d7f195 | ||
|
|
92aa70182d | ||
|
|
90f5864f1e | ||
|
|
d44de670cd | ||
|
|
cb63025078 | ||
|
|
685e865b42 | ||
|
|
e47f126bd5 | ||
|
|
ea6f70e3c5 | ||
|
|
0469aab433 | ||
|
|
ad13b5eb4e | ||
|
|
7324a4973f | ||
|
|
8bc93d23b2 | ||
|
|
39de098461 | ||
|
|
531f232418 | ||
|
|
c708b685e1 | ||
|
|
65009e2f69 | ||
|
|
cbde91744f | ||
|
|
4c8a92bb0c | ||
|
|
5f047d22f4 | ||
|
|
efdc558cba | ||
|
|
04bd1cfa41 | ||
|
|
11a2e96d06 | ||
|
|
095c5e4f95 | ||
|
|
aa2a2e12cc | ||
|
|
8f231424d1 | ||
|
|
069db28fb6 | ||
|
|
2e747d3ece | ||
|
|
d03aadb367 | ||
|
|
749cde13c4 | ||
|
|
0b43aab855 | ||
|
|
147e24204b | ||
|
|
6580153f29 | ||
|
|
fbc94cfbfc | ||
|
|
e631b145b9 | ||
|
|
8cf0ae0994 | ||
|
|
a551bc5375 | ||
|
|
417053a6a2 | ||
|
|
a1495dd33d | ||
|
|
13c50e428f | ||
|
|
8403ccd3da | ||
|
|
c988bca958 | ||
|
|
e92bd61545 | ||
|
|
e84e8edb29 | ||
|
|
5f3db8e567 | ||
|
|
8215e0221a | ||
|
|
a4ef7205ca | ||
|
|
43ecd8b362 | ||
|
|
4b44d6fb83 | ||
|
|
ba8df96e41 | ||
|
|
722a30812f | ||
|
|
0e2fc07881 | ||
|
|
0ae3e83ce4 | ||
|
|
f4b573379d | ||
|
|
862ca375ee | ||
|
|
06bed20a2a | ||
|
|
5c578c0328 | ||
|
|
530de6741b | ||
|
|
5f7ff460fb | ||
|
|
3b3e1e37b9 | ||
|
|
5f40d9400c | ||
|
|
fcdc642acb | ||
|
|
46f594ab71 | ||
|
|
e8684cbb9d | ||
|
|
a36ab71600 | ||
|
|
35c1ff9014 | ||
|
|
e4ce05f94d | ||
|
|
9a9eb57676 | ||
|
|
86567e7fa5 | ||
|
|
38a624fecf | ||
|
|
fd96859883 | ||
|
|
b7b022cc7b | ||
|
|
94d22ed1aa | ||
|
|
3f4caed922 | ||
|
|
521014cd1f | ||
|
|
09303ab2fb | ||
|
|
df1ac8e1e2 | ||
|
|
7a55c91349 | ||
|
|
c491dfdd3a | ||
|
|
b5da076e2c | ||
|
|
18cd6c81a3 | ||
|
|
d9cc21f761 | ||
|
|
40b19c5e67 | ||
|
|
871f78b570 | ||
|
|
753fbc0c5c | ||
|
|
748277aa0e | ||
|
|
bf40a9ef6d | ||
|
|
733000eaa2 | ||
|
|
06207145af | ||
|
|
6a399a7250 | ||
|
|
7ba22f1a09 | ||
|
|
f54f950f81 | ||
|
|
4625711606 | ||
|
|
5735ea2b3c | ||
|
|
b597d0366a | ||
|
|
9c6dcc4a43 | ||
|
|
27c5464cb6 | ||
|
|
1dad7965d2 | ||
|
|
c14ca1d7fd | ||
|
|
2b9e7432b8 | ||
|
|
547747ff74 | ||
|
|
e5b137b331 | ||
|
|
9e554bdecd | ||
|
|
765b542264 | ||
|
|
182a095420 | ||
|
|
0865cffddf | ||
|
|
5a312b9900 | ||
|
|
af2b2f33c2 | ||
|
|
9aa08dfb9b | ||
|
|
b28c673133 | ||
|
|
9a545f176d | ||
|
|
65728eb6ab | ||
|
|
531e037974 | ||
|
|
a96467cb3e | ||
|
|
6e92a7d93d | ||
|
|
740e63da2b | ||
|
|
a69cae22dd | ||
|
|
8ea3c3c29e | ||
|
|
b195e3435f | ||
|
|
63ab739b3d | ||
|
|
34b4577c0b | ||
|
|
58bb788034 | ||
|
|
9e633b37e7 | ||
|
|
bb6a4842bd | ||
|
|
246727995d | ||
|
|
202695096a | ||
|
|
afbab293a8 | ||
|
|
78faf888af | ||
|
|
5164c21923 | ||
|
|
edcd1a3c5b | ||
|
|
532ab9128f | ||
|
|
a3072aacc2 | ||
|
|
8034e5bbcb | ||
|
|
df7a30bd14 | ||
|
|
27296d8880 | ||
|
|
8549b9bc37 | ||
|
|
7632373097 | ||
|
|
23b0674ac0 | ||
|
|
01f0484a0e | ||
|
|
3ca9035fdb | ||
|
|
caaf9d26db | ||
|
|
eb521b2332 | ||
|
|
68c29ab99e | ||
|
|
f12b7f4319 | ||
|
|
7db331320a | ||
|
|
97ad8a85c3 | ||
|
|
6f588196cb | ||
|
|
20241c27ee | ||
|
|
05d6aea37f | ||
|
|
7e0e7860cd | ||
|
|
a0afd7b8ed | ||
|
|
500369ab2b | ||
|
|
dc26d5c0c8 | ||
|
|
0def02f604 | ||
|
|
0ffa9167da | ||
|
|
a110e8f241 | ||
|
|
491f363392 | ||
|
|
33a67bf7b4 | ||
|
|
1e6f583431 | ||
|
|
5e3412d735 | ||
|
|
e6e4cd63f3 | ||
|
|
d9dfacaaf4 | ||
|
|
d43767b945 | ||
|
|
f5da5f4ef0 | ||
|
|
9a202cc124 | ||
|
|
c305deab52 | ||
|
|
0daaf3b1ec | ||
|
|
cb36754c46 | ||
|
|
8e21504bdb | ||
|
|
7e18aafe20 | ||
|
|
fcf1be52ac | ||
|
|
394bc9ceb8 | ||
|
|
e3786592b2 | ||
|
|
d6eaf8d3d9 | ||
|
|
b1c23336e3 | ||
|
|
44c5073dea | ||
|
|
b7593fac44 | ||
|
|
7a31d09356 | ||
|
|
af116794c4 | ||
|
|
88c85e1d8a | ||
|
|
f7b079b1b4 | ||
|
|
9322b3d07e | ||
|
|
72ffedead7 | ||
|
|
cf3a501562 | ||
|
|
7becdc3034 | ||
|
|
f0d599781d | ||
|
|
3386105048 | ||
|
|
3b8fb70db1 | ||
|
|
c3ae146580 | ||
|
|
0d079f0d89 | ||
|
|
55f5329817 | ||
|
|
79d92c30f8 | ||
|
|
73229501c2 | ||
|
|
32ca91a7c9 | ||
|
|
9f5a90ee9c | ||
|
|
a5307fd8cc | ||
|
|
180589144a | ||
|
|
d9c1867bd7 | ||
|
|
da37d649ec | ||
|
|
9e03ac084e | ||
|
|
082c51109d | ||
|
|
8f44c75dc3 | ||
|
|
4204b4af90 | ||
|
|
234f0d75e8 | ||
|
|
564186a1f9 | ||
|
|
ccdb477dbb | ||
|
|
5f92f9e965 | ||
|
|
c2db4390bb | ||
|
|
11c21b5259 | ||
|
|
3cd9e17e3f | ||
|
|
1982ce796f | ||
|
|
941650f668 | ||
|
|
9c0c6c1bd6 | ||
|
|
825e18a551 | ||
|
|
9ff0128fb1 | ||
|
|
bd0ddafcd0 | ||
|
|
36c3617204 | ||
|
|
90a9db3a91 | ||
|
|
59d6795d9e | ||
|
|
2c07cf50fa | ||
|
|
cc0e525dc5 | ||
|
|
73bd973109 | ||
|
|
19f5e92a74 | ||
|
|
a7e501d874 | ||
|
|
3202c38061 | ||
|
|
4676f0595c | ||
|
|
e35a8c942b | ||
|
|
31811eb91e | ||
|
|
b9316a4112 | ||
|
|
b7abd878ac | ||
|
|
38c2c47789 | ||
|
|
1d3d70e8d6 | ||
|
|
c03778ec8b | ||
|
|
29b0850a94 | ||
|
|
712fde46eb | ||
|
|
c2e79ca5a7 | ||
|
|
c3a52b3989 | ||
|
|
7213d82f1b | ||
|
|
5bcad69cf7 | ||
|
|
c9a487fa4d | ||
|
|
3804a46f3b | ||
|
|
52c0bb5302 | ||
|
|
8aa19e6420 | ||
|
|
4d1c7a3884 | ||
|
|
25f2c057b7 | ||
|
|
010be05920 | ||
|
|
4c465850a2 | ||
|
|
8313dfaeb9 | ||
|
|
873f2b2814 | ||
|
|
e53c90f8f0 | ||
|
|
9499ea8ca9 | ||
|
|
f6c09109ba | ||
|
|
273b5768c4 | ||
|
|
ee13cf7dd9 | ||
|
|
fecbae761e | ||
|
|
e0ee89bdd9 | ||
|
|
833c1f22a3 | ||
|
|
6fed6c8d30 | ||
|
|
94cdaf5314 | ||
|
|
f83ae27352 | ||
|
|
6badf047c3 | ||
|
|
47de9ad15f | ||
|
|
09b91cc663 | ||
|
|
ded16549f7 | ||
|
|
c89e47577b | ||
|
|
bb50beb7ab | ||
|
|
e4cd4d64d7 | ||
|
|
5675fc51a0 | ||
|
|
c7438c4aff | ||
|
|
4a6a3da36c | ||
|
|
a657c332b1 | ||
|
|
cc9cd3fc14 | ||
|
|
234258a077 | ||
|
|
13cda80ee6 | ||
|
|
f6e142baf5 | ||
|
|
ddf1f9bcd5 | ||
|
|
aa950669f6 | ||
|
|
dacd5d3e6b | ||
|
|
e76ccba2f7 | ||
|
|
3933819d53 | ||
|
|
99019c2b1f | ||
|
|
4bf5eb398b | ||
|
|
dbfbac62c0 | ||
|
|
7685293da4 | ||
|
|
ee9c328606 | ||
|
|
cb7790ccba | ||
|
|
6556fcc531 | ||
|
|
178391e7b2 | ||
|
|
18922a1c6d | ||
|
|
5e9e26fa67 | ||
|
|
f5430f9151 | ||
|
|
4dfdf2f92f | ||
|
|
e4d283cc99 | ||
|
|
8ee64d22b3 | ||
|
|
10e3e80042 | ||
|
|
f77a208e2c | ||
|
|
9366dbb96e | ||
|
|
550b17552b | ||
|
|
bec307d0e9 | ||
|
|
93c751f6eb | ||
|
|
bada88157e | ||
|
|
13f3137701 | ||
|
|
d3316ff6ff | ||
|
|
1b384e61b4 | ||
|
|
addea20cab | ||
|
|
fac23f2f57 | ||
|
|
bffe1ccb3d | ||
|
|
e577434fe6 | ||
|
|
5d1d9827e4 | ||
|
|
dd28ad20ef | ||
|
|
ef416ef60b | ||
|
|
95b3b55971 | ||
|
|
b3f32ae03e | ||
|
|
c7472174e5 | ||
|
|
2ad749354d | ||
|
|
4ed9d2ea22 | ||
|
|
280eb47de7 | ||
|
|
324a12b0ff | ||
|
|
a2543ccddc | ||
|
|
22666412c3 | ||
|
|
dd58044cdf | ||
|
|
10312d89d7 | ||
|
|
b4c0d877cb | ||
|
|
e95d56a5d0 | ||
|
|
90424e8329 | ||
|
|
1bfeb42a06 | ||
|
|
a936f92954 | ||
|
|
0bc514ec17 | ||
|
|
a2cf4001af | ||
|
|
cb4e12a68c | ||
|
|
a7f5124dfe | ||
|
|
ccbf71c5e7 | ||
|
|
04bf5f58d9 | ||
|
|
ab3f5956d4 | ||
|
|
c1fe8e583f | ||
|
|
fd166c4433 | ||
|
|
f29c7ba4f2 | ||
|
|
88869e9710 | ||
|
|
f8404ab043 | ||
|
|
9fa5d1ff9e | ||
|
|
483f353fd0 | ||
|
|
a11bf5b5c7 | ||
|
|
d4113ff753 | ||
|
|
1969f036fa | ||
|
|
8c90e01016 | ||
|
|
756c5c9b99 | ||
|
|
ee54b355af | ||
|
|
26cbbc0c56 | ||
|
|
f4f719d52a | ||
|
|
f2071d8b7e | ||
|
|
df88a55784 | ||
|
|
3ccbc626ff | ||
|
|
71a15cf222 | ||
|
|
26ddf769b1 | ||
|
|
3137387c0c | ||
|
|
fc142cfde8 | ||
|
|
b0503fa507 | ||
|
|
b86a97c9c0 | ||
|
|
eb6cd23772 | ||
|
|
efae1e7e6c | ||
|
|
19d55b840e | ||
|
|
cc0c1d05ab | ||
|
|
f088f65d5a | ||
|
|
5441b5a06b | ||
|
|
efc56c0a88 | ||
|
|
321fca2c0a | ||
|
|
bbd66e9cb0 | ||
|
|
eb0277146c | ||
|
|
10ee32ec48 | ||
|
|
bdb4be89ff | ||
|
|
61445e0b56 | ||
|
|
f15a010e0e | ||
|
|
58747004fe | ||
|
|
e7ff1eb66b | ||
|
|
4a00bd4797 | ||
|
|
2e6fc7e4a0 | ||
|
|
4a8f323be7 | ||
|
|
c7d82102ed | ||
|
|
068b861edc | ||
|
|
3c908c6a09 | ||
|
|
ba3805786c | ||
|
|
70afb197f1 | ||
|
|
d966e35054 | ||
|
|
1675570291 | ||
|
|
9b88de656e | ||
|
|
3d39b5653d | ||
|
|
eb5f7f64ad | ||
|
|
9fc0164c4d | ||
|
|
65eb520cca | ||
|
|
f7f07932b4 | ||
|
|
de52494039 | ||
|
|
4d87ee2bb6 | ||
|
|
d0ba0936ca | ||
|
|
b08556861f | ||
|
|
c96628ad49 | ||
|
|
a615882b3f | ||
|
|
2bcc8e0d30 | ||
|
|
de519edf78 | ||
|
|
caf47943c3 | ||
|
|
427ab12724 | ||
|
|
eba16c0cc3 | ||
|
|
a485de6359 | ||
|
|
1a985f7e82 | ||
|
|
7867411095 | ||
|
|
2f6ebd16c1 | ||
|
|
878b235614 | ||
|
|
75f9c6b0fb | ||
|
|
7c1e2bf96f | ||
|
|
181b44e117 | ||
|
|
f7793976fb | ||
|
|
8ffcd9b60a | ||
|
|
52d3c4d62d | ||
|
|
0fb3e75253 | ||
|
|
2c40e403c4 | ||
|
|
d1c519ed0d | ||
|
|
27470ef934 | ||
|
|
8a1da87702 | ||
|
|
c8d89f805b | ||
|
|
c9fceafc16 | ||
|
|
bbb9980941 | ||
|
|
da55d6f7cd | ||
|
|
eeacdc1359 | ||
|
|
ee1e92e1cb | ||
|
|
705802e584 | ||
|
|
b2e509f055 | ||
|
|
cca70764d4 | ||
|
|
3ac94710fb | ||
|
|
ca73a47785 | ||
|
|
1ef67fc8e9 | ||
|
|
8f3c2f4f3d | ||
|
|
e42b98ec17 | ||
|
|
efb318a979 | ||
|
|
3c0a82293c | ||
|
|
e867f31c31 | ||
|
|
aeb6da111b | ||
|
|
2736fa5202 | ||
|
|
4d3df867da | ||
|
|
62f78e4312 | ||
|
|
d223ac4675 | ||
|
|
c16404bb2d | ||
|
|
cf70933e21 | ||
|
|
46222e9352 | ||
|
|
212e94756b | ||
|
|
b42abbd4a2 | ||
|
|
730a55e721 | ||
|
|
06cf83b901 | ||
|
|
673e5af030 | ||
|
|
a0bc16c255 | ||
|
|
76b5234f7b | ||
|
|
928de47d1d | ||
|
|
274db6f606 | ||
|
|
89ca0ca927 | ||
|
|
8047008fa5 | ||
|
|
f914110626 | ||
|
|
5656fd0b96 | ||
|
|
c3d8c72302 | ||
|
|
1eefff9025 | ||
|
|
1dc7c7b0a4 | ||
|
|
011bac7b4f | ||
|
|
dc2d6e60d8 | ||
|
|
7809b6e50f | ||
|
|
f7f0370bf5 | ||
|
|
6300fc5364 | ||
|
|
16270cbd1a | ||
|
|
3b226dd2c0 | ||
|
|
4ac61d18ff | ||
|
|
fd7abdb8a4 | ||
|
|
92cd85b204 | ||
|
|
4bb7998208 | ||
|
|
91b22311af | ||
|
|
ddd00d4c25 | ||
|
|
428997f26a | ||
|
|
c9d35d8096 | ||
|
|
761b3bd591 | ||
|
|
a440e6f115 | ||
|
|
837b1a9a73 | ||
|
|
bed37184d1 | ||
|
|
785ed480bb | ||
|
|
d8c39c42a1 | ||
|
|
4b06138d35 | ||
|
|
bd5668d15d | ||
|
|
1d6c61cc5b | ||
|
|
ed22e53cb6 | ||
|
|
d18a34785c | ||
|
|
79fb8de7b7 | ||
|
|
07f5f3f1bb | ||
|
|
8fffa40502 | ||
|
|
6680b32579 | ||
|
|
af618f42bd | ||
|
|
aafcce871e | ||
|
|
71d1418559 | ||
|
|
e0678cc869 | ||
|
|
74ddf7114c | ||
|
|
837d4c1597 | ||
|
|
ccb85737f7 | ||
|
|
f9a4699e84 | ||
|
|
bab3aea8ff | ||
|
|
c52cf1fc3f | ||
|
|
3fe43a5b57 | ||
|
|
1a8b6d2fe7 | ||
|
|
570a4b7915 | ||
|
|
63859b81ad | ||
|
|
d8d13f8bf6 | ||
|
|
c3ce44e202 | ||
|
|
3372cdc0df | ||
|
|
82fc945d73 | ||
|
|
040bd52705 | ||
|
|
415cfcb72f | ||
|
|
2b0efb32c1 | ||
|
|
a3a4fdd7fc | ||
|
|
78f6bbf7fe | ||
|
|
43606d26e4 | ||
|
|
b77c409257 | ||
|
|
96f77a6275 | ||
|
|
2336e36314 | ||
|
|
9146c31abf | ||
|
|
bd4c431eb4 | ||
|
|
b620e5319a | ||
|
|
f12df8ded4 | ||
|
|
0ecd920ad9 | ||
|
|
b40be8c494 | ||
|
|
f7c5e64fbc | ||
|
|
6eea2526f6 | ||
|
|
be9db47276 | ||
|
|
35cb81518c | ||
|
|
4042b8f026 | ||
|
|
a3d1b2d671 | ||
|
|
eec8c41e20 | ||
|
|
4f9fe7245b | ||
|
|
6e1ae69691 | ||
|
|
65a1fcfda5 | ||
|
|
373e11495d | ||
|
|
8b6eac3c1c | ||
|
|
43bae7fb01 | ||
|
|
18ee1e2685 | ||
|
|
5b91b5f436 | ||
|
|
54749dfd1e | ||
|
|
f86212dfe1 | ||
|
|
9ed2e2b0ca | ||
|
|
a29cd622c3 | ||
|
|
6cea0139d1 | ||
|
|
45a6a930c9 | ||
|
|
22b273b145 | ||
|
|
ca71c88744 | ||
|
|
20b93e9fba | ||
|
|
05b29a7e9a | ||
|
|
913ef5c817 | ||
|
|
60534597e0 | ||
|
|
a7173b6bc9 | ||
|
|
6deb51428a | ||
|
|
2f00a642be | ||
|
|
4e47960440 | ||
|
|
67b54ac1eb | ||
|
|
0e82b6981f | ||
|
|
d6bf52c11f | ||
|
|
c1ac66f6e5 | ||
|
|
b9e4a66fdc | ||
|
|
9c363be16f | ||
|
|
affab384cf | ||
|
|
0fc546962e | ||
|
|
d215d96b9b | ||
|
|
327e873ef6 | ||
|
|
a2f65de1ce | ||
|
|
bc23129759 | ||
|
|
3e7b184ab4 | ||
|
|
fe0b0d1157 | ||
|
|
55b1c021ec | ||
|
|
21cf4cd2ce | ||
|
|
defc98ab0e | ||
|
|
74af03408f | ||
|
|
1d151d8fa6 | ||
|
|
e5aeced045 | ||
|
|
17d39143ac | ||
|
|
26c37ba824 | ||
|
|
d380cc31fa | ||
|
|
aa2fedee9d | ||
|
|
14fa0e478a | ||
|
|
ac878d46a5 | ||
|
|
6da0a473be | ||
|
|
2642ec85e5 | ||
|
|
26d2152a36 | ||
|
|
1cfd404321 | ||
|
|
207020b7a0 | ||
|
|
6ad9a5952e | ||
|
|
0511680fc5 | ||
|
|
ad14503e9f | ||
|
|
9221f25e35 | ||
|
|
95eec90a62 | ||
|
|
927cb51b5d | ||
|
|
9f4025fdfb | ||
|
|
b57336f6cf | ||
|
|
6e1c2fd7fd | ||
|
|
50e3b7cd5a | ||
|
|
8beda5b0ae | ||
|
|
9998ed177b | ||
|
|
e2db3d84d8 | ||
|
|
141a390105 | ||
|
|
78ad5d5879 | ||
|
|
2ddd38796d | ||
|
|
35b220d7a5 | ||
|
|
8093faee19 | ||
|
|
10a7bd2eff | ||
|
|
2f8a25ae26 | ||
|
|
19bf80dfaf | ||
|
|
fbfaac9859 | ||
|
|
0c3d0dd525 | ||
|
|
1388632562 | ||
|
|
771ecaf3e5 | ||
|
|
2000a8f3ed | ||
|
|
719cd5512c | ||
|
|
afb4536247 | ||
|
|
71b19e6582 | ||
|
|
f37cfda365 | ||
|
|
f63a841cb5 | ||
|
|
d469e802ad | ||
|
|
1702c07481 | ||
|
|
31c5aebe90 | ||
|
|
8cf84a6cf2 | ||
|
|
18336e4d0a | ||
|
|
abf297d095 | ||
|
|
061a350cc6 | ||
|
|
c85491cc71 | ||
|
|
8b794c2299 | ||
|
|
11b11375fd | ||
|
|
c728f1a694 | ||
|
|
28f9fa35e5 | ||
|
|
f8ea2ebf62 | ||
|
|
7575e8c1de | ||
|
|
395db5f1cf | ||
|
|
ee1acda7aa | ||
|
|
1150f4c438 | ||
|
|
f04b90d9c6 | ||
|
|
53463077df | ||
|
|
e326c5be4a | ||
|
|
e199dbc37b | ||
|
|
2e8bfcc74d | ||
|
|
ca53793e32 | ||
|
|
a5f31fbf4e | ||
|
|
40d47c9f44 | ||
|
|
67743b37bb | ||
|
|
36911d7ed6 | ||
|
|
5564154da2 | ||
|
|
27f9869b38 | ||
|
|
f274747af3 | ||
|
|
05832b8b4b | ||
|
|
b9ce2bf2dc | ||
|
|
5442459b2d | ||
|
|
f0466aaa56 | ||
|
|
50111e37da | ||
|
|
76682ebef0 | ||
|
|
705653465a | ||
|
|
8cd2fac9b9 | ||
|
|
b2d7f4f606 | ||
|
|
2dd31fa93f | ||
|
|
df20d4f100 | ||
|
|
3ddeb5fa94 | ||
|
|
70baed88f4 | ||
|
|
5ba0d594a2 | ||
|
|
6505c4054f | ||
|
|
e1c30a918b | ||
|
|
f812e208fa | ||
|
|
9e7526c191 | ||
|
|
07194e52cd | ||
|
|
2f8d825970 | ||
|
|
c44eb3a2c3 | ||
|
|
8207770369 | ||
|
|
365952bbe9 | ||
|
|
5404ebce1c | ||
|
|
13411f1830 | ||
|
|
43090c9873 | ||
|
|
34000fb9f0 | ||
|
|
c2f9c6a38d | ||
|
|
a5c97d4c24 | ||
|
|
9514b97ca0 | ||
|
|
22e84cc922 | ||
|
|
13b97296f5 | ||
|
|
d5f7e15dfb | ||
|
|
7bf7b1e71e | ||
|
|
7b17498722 | ||
|
|
3473633e43 | ||
|
|
f455b8a007 | ||
|
|
daabba12d3 | ||
|
|
61864d082f | ||
|
|
a7cd1e0ce6 | ||
|
|
0dd6d3a500 | ||
|
|
bdb906bf26 | ||
|
|
61da050fe8 | ||
|
|
83fe391796 | ||
|
|
37657fa6ad | ||
|
|
908a945b95 | ||
|
|
36c720227f | ||
|
|
c22c80d3b0 | ||
|
|
15af827cbc | ||
|
|
4a54c7ca87 | ||
|
|
7b8a0eadf3 | ||
|
|
9a01a0df8e | ||
|
|
ea2d77f536 | ||
|
|
e29003539b | ||
|
|
97bdb2dd64 | ||
|
|
40d446ba32 | ||
|
|
5fa743755d | ||
|
|
0f027fefb8 | ||
|
|
56acb3f281 | ||
|
|
5268185604 | ||
|
|
635c3627c9 | ||
|
|
009f7ddf84 | ||
|
|
4526618c32 | ||
|
|
6dfd46197d | ||
|
|
778471d3cc | ||
|
|
bbcf2990f6 | ||
|
|
ac30ab223b | ||
|
|
50e7b479b5 | ||
|
|
1367428499 | ||
|
|
e5de91cbe5 |
6
.claude/settings.json
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{
|
||||||
|
"attribution": {
|
||||||
|
"commit": "",
|
||||||
|
"pr": ""
|
||||||
|
}
|
||||||
|
}
|
||||||
36
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@@ -1,36 +1,34 @@
|
|||||||
name: 🐛 Bug Report
|
name: 🐛 Bug Report
|
||||||
description: Create a report to help us improve EmbassyOS
|
description: Create a report to help us improve StartOS
|
||||||
title: '[bug]: '
|
title: "[bug]: "
|
||||||
labels: [Bug, Needs Triage]
|
labels: [Bug, Needs Triage]
|
||||||
assignees:
|
assignees:
|
||||||
- dr-bonez
|
- MattDHill
|
||||||
body:
|
body:
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Prerequisites
|
label: Prerequisites
|
||||||
description: Please confirm you have completed the following.
|
description: Please confirm you have completed the following.
|
||||||
options:
|
options:
|
||||||
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already report this problem, without success.
|
- label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already report this problem.
|
||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: EmbassyOS Version
|
label: Server Hardware
|
||||||
description: What version of EmbassyOS are you running?
|
description: On what hardware are you running StartOS? Please be as detailed as possible!
|
||||||
placeholder: e.g. 0.3.0
|
placeholder: Pi (8GB) w/ 32GB microSD & Samsung T7 SSD
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: input
|
||||||
|
attributes:
|
||||||
|
label: StartOS Version
|
||||||
|
description: What version of StartOS are you running?
|
||||||
|
placeholder: e.g. 0.3.4.3
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
attributes:
|
attributes:
|
||||||
label: Device
|
label: Client OS
|
||||||
description: What device are you using to connect to Embassy?
|
|
||||||
options:
|
|
||||||
- Phone/tablet
|
|
||||||
- Laptop/Desktop
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: dropdown
|
|
||||||
attributes:
|
|
||||||
label: Device OS
|
|
||||||
description: What operating system is your device running?
|
description: What operating system is your device running?
|
||||||
options:
|
options:
|
||||||
- MacOS
|
- MacOS
|
||||||
@@ -45,14 +43,14 @@ body:
|
|||||||
required: true
|
required: true
|
||||||
- type: input
|
- type: input
|
||||||
attributes:
|
attributes:
|
||||||
label: Device OS Version
|
label: Client OS Version
|
||||||
description: What version is your device OS?
|
description: What version is your device OS?
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
attributes:
|
attributes:
|
||||||
label: Browser
|
label: Browser
|
||||||
description: What browser are you using to connect to Embassy?
|
description: What browser are you using to connect to your server?
|
||||||
options:
|
options:
|
||||||
- Firefox
|
- Firefox
|
||||||
- Brave
|
- Brave
|
||||||
|
|||||||
10
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
@@ -1,16 +1,16 @@
|
|||||||
name: 💡 Feature Request
|
name: 💡 Feature Request
|
||||||
description: Suggest an idea for EmbassyOS
|
description: Suggest an idea for StartOS
|
||||||
title: '[feat]: '
|
title: "[feat]: "
|
||||||
labels: [Enhancement]
|
labels: [Enhancement]
|
||||||
assignees:
|
assignees:
|
||||||
- dr-bonez
|
- MattDHill
|
||||||
body:
|
body:
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
attributes:
|
attributes:
|
||||||
label: Prerequisites
|
label: Prerequisites
|
||||||
description: Please confirm you have completed the following.
|
description: Please confirm you have completed the following.
|
||||||
options:
|
options:
|
||||||
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already suggest this feature, without success.
|
- label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already suggest this feature.
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
@@ -27,7 +27,7 @@ body:
|
|||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: Describe Preferred Solution
|
label: Describe Preferred Solution
|
||||||
description: How you want this feature added to EmbassyOS?
|
description: How you want this feature added to StartOS?
|
||||||
- type: textarea
|
- type: textarea
|
||||||
attributes:
|
attributes:
|
||||||
label: Describe Alternatives
|
label: Describe Alternatives
|
||||||
|
|||||||
81
.github/actions/setup-build/action.yml
vendored
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
name: Setup Build Environment
|
||||||
|
description: Common build environment setup steps
|
||||||
|
|
||||||
|
inputs:
|
||||||
|
nodejs-version:
|
||||||
|
description: Node.js version
|
||||||
|
required: true
|
||||||
|
setup-python:
|
||||||
|
description: Set up Python
|
||||||
|
required: false
|
||||||
|
default: "false"
|
||||||
|
setup-docker:
|
||||||
|
description: Set up Docker QEMU and Buildx
|
||||||
|
required: false
|
||||||
|
default: "true"
|
||||||
|
setup-sccache:
|
||||||
|
description: Configure sccache for GitHub Actions
|
||||||
|
required: false
|
||||||
|
default: "true"
|
||||||
|
free-space:
|
||||||
|
description: Remove unnecessary packages to free disk space
|
||||||
|
required: false
|
||||||
|
default: "true"
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: composite
|
||||||
|
steps:
|
||||||
|
- name: Free disk space
|
||||||
|
if: inputs.free-space == 'true'
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
sudo apt-get remove --purge -y azure-cli || true
|
||||||
|
sudo apt-get remove --purge -y firefox || true
|
||||||
|
sudo apt-get remove --purge -y ghc-* || true
|
||||||
|
sudo apt-get remove --purge -y google-cloud-sdk || true
|
||||||
|
sudo apt-get remove --purge -y google-chrome-stable || true
|
||||||
|
sudo apt-get remove --purge -y powershell || true
|
||||||
|
sudo apt-get remove --purge -y php* || true
|
||||||
|
sudo apt-get remove --purge -y ruby* || true
|
||||||
|
sudo apt-get remove --purge -y mono-* || true
|
||||||
|
sudo apt-get autoremove -y
|
||||||
|
sudo apt-get clean
|
||||||
|
sudo rm -rf /usr/lib/jvm
|
||||||
|
sudo rm -rf /usr/local/.ghcup
|
||||||
|
sudo rm -rf /usr/local/lib/android
|
||||||
|
sudo rm -rf /usr/share/dotnet
|
||||||
|
sudo rm -rf /usr/share/swift
|
||||||
|
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||||
|
|
||||||
|
# BuildJet runners lack /opt/hostedtoolcache, which setup-python and setup-qemu expect
|
||||||
|
- name: Ensure hostedtoolcache exists
|
||||||
|
shell: bash
|
||||||
|
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
if: inputs.setup-python == 'true'
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: "3.x"
|
||||||
|
|
||||||
|
- uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: ${{ inputs.nodejs-version }}
|
||||||
|
cache: npm
|
||||||
|
cache-dependency-path: "**/package-lock.json"
|
||||||
|
|
||||||
|
- name: Set up Docker QEMU
|
||||||
|
if: inputs.setup-docker == 'true'
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
if: inputs.setup-docker == 'true'
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Configure sccache
|
||||||
|
if: inputs.setup-sccache == 'true'
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
|
||||||
|
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
|
||||||
24
.github/workflows/README.md
vendored
@@ -1,24 +0,0 @@
|
|||||||
# This folder contains GitHub Actions workflows for building the project
|
|
||||||
|
|
||||||
## backend-pr
|
|
||||||
Runs: when a pull request targets the master branch and changes the libs/ and/or backend/ folders
|
|
||||||
|
|
||||||
This workflow uses the actions docker/setup-qemu-action@v1 and docker/setup-buildx-action@v1 to prepare the environment for aarch64 cross compilation using docker buildx.
|
|
||||||
A matrix-strategy has been used for building the v8 snapshot instead of the makefile to allow parallel job execution.
|
|
||||||
|
|
||||||
## frontend-pr
|
|
||||||
Runs: when a pull request targets the master branch and changes the frontend/ folder
|
|
||||||
|
|
||||||
This workflow builds the frontends.
|
|
||||||
|
|
||||||
## product
|
|
||||||
Runs: when a change to the master branch is made
|
|
||||||
|
|
||||||
This workflow builds everything, re-using the backend-pr and frontend-pr workflows.
|
|
||||||
The download and extraction order of artifacts is relevant to `make`, as it checks the file timestamps to decide which targets need to be executed.
|
|
||||||
|
|
||||||
Result: eos.img
|
|
||||||
|
|
||||||
## a note on uploading artifacts
|
|
||||||
|
|
||||||
Artifacts are used to share data between jobs. File permissions are not maintained during artifact upload. Where file permissions are relevant, the workaround using tar has been used. See [here](https://github.com/actions/upload-artifact#maintaining-file-permissions-and-case-sensitive-files).
|
|
||||||
104
.github/workflows/backend-pr.yaml
vendored
@@ -1,104 +0,0 @@
|
|||||||
name: Backend PR
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
workflow_dispatch:
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
paths:
|
|
||||||
- 'backend/**'
|
|
||||||
- 'libs/**'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
libs:
|
|
||||||
name: Build libs
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
target: [amd64, arm64]
|
|
||||||
include:
|
|
||||||
- target: amd64
|
|
||||||
snapshot_command: ./build-v8-snapshot.sh
|
|
||||||
artifact_name: js_snapshot
|
|
||||||
artifact_path: libs/js_engine/src/artifacts/JS_SNAPSHOT.bin
|
|
||||||
- target: arm64
|
|
||||||
snapshot_command: ./build-arm-v8-snapshot.sh
|
|
||||||
artifact_name: arm_js_snapshot
|
|
||||||
artifact_path: libs/js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
|
|
||||||
- uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/bin/
|
|
||||||
~/.cargo/registry/index/
|
|
||||||
~/.cargo/registry/cache/
|
|
||||||
~/.cargo/git/db/
|
|
||||||
target/
|
|
||||||
key: ${{ runner.os }}-cargo-libs-${{ matrix.target }}-${{ hashFiles('libs/Cargo.lock') }}
|
|
||||||
|
|
||||||
- name: Build v8 snapshot
|
|
||||||
run: ${{ matrix.snapshot_command }}
|
|
||||||
working-directory: libs
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: ${{ matrix.artifact_name }}
|
|
||||||
path: ${{ matrix.artifact_path }}
|
|
||||||
|
|
||||||
backend:
|
|
||||||
name: Build backend
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: libs
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Download arm_js_snapshot artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: arm_js_snapshot
|
|
||||||
path: libs/js_engine/src/artifacts/
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
|
|
||||||
- uses: actions-rs/toolchain@v1
|
|
||||||
with:
|
|
||||||
toolchain: stable
|
|
||||||
override: true
|
|
||||||
|
|
||||||
- uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/bin/
|
|
||||||
~/.cargo/registry/index/
|
|
||||||
~/.cargo/registry/cache/
|
|
||||||
~/.cargo/git/db/
|
|
||||||
target/
|
|
||||||
key: ${{ runner.os }}-cargo-backend-${{ hashFiles('backend/Cargo.lock') }}
|
|
||||||
|
|
||||||
- name: Build backend
|
|
||||||
run: make backend
|
|
||||||
|
|
||||||
- name: 'Tar files to preserve file permissions'
|
|
||||||
run: tar -cvf backend.tar backend/target/aarch64-unknown-linux-gnu/release/embassy*
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: backend
|
|
||||||
path: backend.tar
|
|
||||||
46
.github/workflows/frontend-pr.yaml
vendored
@@ -1,46 +0,0 @@
|
|||||||
name: Frontend PR
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_call:
|
|
||||||
workflow_dispatch:
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
paths:
|
|
||||||
- 'frontend/**'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
frontend:
|
|
||||||
name: Build frontend
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- uses: actions/setup-node@v3
|
|
||||||
with:
|
|
||||||
node-version: 16
|
|
||||||
|
|
||||||
- name: Get npm cache directory
|
|
||||||
id: npm-cache-dir
|
|
||||||
run: |
|
|
||||||
echo "::set-output name=dir::$(npm config get cache)"
|
|
||||||
- uses: actions/cache@v3
|
|
||||||
id: npm-cache
|
|
||||||
with:
|
|
||||||
path: ${{ steps.npm-cache-dir.outputs.dir }}
|
|
||||||
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-node-
|
|
||||||
|
|
||||||
- name: Build frontends
|
|
||||||
run: make frontends
|
|
||||||
|
|
||||||
- name: 'Tar files to preserve file permissions'
|
|
||||||
run: tar -cvf frontend.tar frontend/dist frontend/config.json
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: frontend
|
|
||||||
path: frontend.tar
|
|
||||||
142
.github/workflows/product.yaml
vendored
@@ -1,142 +0,0 @@
|
|||||||
name: Build Pipeline
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
compat:
|
|
||||||
name: Build compat.tar
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
|
|
||||||
- uses: actions-rs/toolchain@v1
|
|
||||||
with:
|
|
||||||
toolchain: stable
|
|
||||||
override: true
|
|
||||||
|
|
||||||
- uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/.cargo/bin/
|
|
||||||
~/.cargo/registry/index/
|
|
||||||
~/.cargo/registry/cache/
|
|
||||||
~/.cargo/git/db/
|
|
||||||
target/
|
|
||||||
key: ${{ runner.os }}-cargo-compat-${{ hashFiles('**/system-images/compat/Cargo.lock') }}
|
|
||||||
|
|
||||||
- name: Build image
|
|
||||||
run: make system-images/compat/compat.tar
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: compat.tar
|
|
||||||
path: system-images/compat/compat.tar
|
|
||||||
|
|
||||||
utils:
|
|
||||||
name: Build utils.tar
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
|
|
||||||
- name: Build image
|
|
||||||
run: make system-images/utils/utils.tar
|
|
||||||
|
|
||||||
- uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: utils.tar
|
|
||||||
path: system-images/utils/utils.tar
|
|
||||||
|
|
||||||
backend:
|
|
||||||
uses: ./.github/workflows/backend-pr.yaml
|
|
||||||
|
|
||||||
frontend:
|
|
||||||
uses: ./.github/workflows/frontend-pr.yaml
|
|
||||||
|
|
||||||
image:
|
|
||||||
name: Build image
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: [compat,utils,backend,frontend]
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
submodules: recursive
|
|
||||||
|
|
||||||
- name: Download compat.tar artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: compat.tar
|
|
||||||
path: system-images/compat
|
|
||||||
|
|
||||||
- name: Download utils.tar artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: utils.tar
|
|
||||||
path: system-images/utils
|
|
||||||
|
|
||||||
- name: Download js_snapshot artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: js_snapshot
|
|
||||||
path: libs/js_engine/src/artifacts/
|
|
||||||
|
|
||||||
- name: Download arm_js_snapshot artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: arm_js_snapshot
|
|
||||||
path: libs/js_engine/src/artifacts/
|
|
||||||
|
|
||||||
- name: Download backend artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: backend
|
|
||||||
|
|
||||||
- name: 'Extract backend'
|
|
||||||
run:
|
|
||||||
tar -mxvf backend.tar
|
|
||||||
|
|
||||||
- name: Download frontend artifact
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: frontend
|
|
||||||
|
|
||||||
- name: Skip frontend build
|
|
||||||
run: |
|
|
||||||
mkdir frontend/node_modules
|
|
||||||
mkdir frontend/dist
|
|
||||||
mkdir patch-db/client/node_modules
|
|
||||||
mkdir patch-db/client/dist
|
|
||||||
|
|
||||||
- name: 'Extract frontend'
|
|
||||||
run: |
|
|
||||||
tar -mxvf frontend.tar frontend/config.json
|
|
||||||
tar -mxvf frontend.tar frontend/dist
|
|
||||||
|
|
||||||
- name: Cache raspiOS
|
|
||||||
id: cache-raspios
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: raspios.img
|
|
||||||
key: cache-raspios
|
|
||||||
|
|
||||||
- name: Build image
|
|
||||||
run: "make V=1 eos.img --debug"
|
|
||||||
88
.github/workflows/start-cli.yaml
vendored
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
name: start-cli
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
environment:
|
||||||
|
type: choice
|
||||||
|
description: Environment
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- dev
|
||||||
|
- unstable
|
||||||
|
- dev-unstable
|
||||||
|
runner:
|
||||||
|
type: choice
|
||||||
|
description: Runner
|
||||||
|
options:
|
||||||
|
- standard
|
||||||
|
- fast
|
||||||
|
arch:
|
||||||
|
type: choice
|
||||||
|
description: Architecture
|
||||||
|
options:
|
||||||
|
- ALL
|
||||||
|
- x86_64
|
||||||
|
- x86_64-apple
|
||||||
|
- aarch64
|
||||||
|
- aarch64-apple
|
||||||
|
- riscv64
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
compile:
|
||||||
|
name: Build Debian Package
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix:
|
||||||
|
triple: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": ["x86_64-unknown-linux-musl"],
|
||||||
|
"x86_64-apple": ["x86_64-apple-darwin"],
|
||||||
|
"aarch64": ["aarch64-unknown-linux-musl"],
|
||||||
|
"x86_64-apple": ["aarch64-apple-darwin"],
|
||||||
|
"riscv64": ["riscv64gc-unknown-linux-musl"],
|
||||||
|
"ALL": ["x86_64-unknown-linux-musl", "x86_64-apple-darwin", "aarch64-unknown-linux-musl", "aarch64-apple-darwin", "riscv64gc-unknown-linux-musl"]
|
||||||
|
}')[github.event.inputs.arch || 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||||
|
steps:
|
||||||
|
- name: Mount tmpfs
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
run: sudo mount -t tmpfs tmpfs .
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
|
||||||
|
- name: Make
|
||||||
|
run: TARGET=${{ matrix.triple }} make cli
|
||||||
|
env:
|
||||||
|
PLATFORM: ${{ matrix.arch }}
|
||||||
|
SCCACHE_GHA_ENABLED: on
|
||||||
|
SCCACHE_GHA_VERSION: 0
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: start-cli_${{ matrix.triple }}
|
||||||
|
path: core/target/${{ matrix.triple }}/release/start-cli
|
||||||
173
.github/workflows/start-registry.yaml
vendored
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
name: start-registry
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
environment:
|
||||||
|
type: choice
|
||||||
|
description: Environment
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- dev
|
||||||
|
- unstable
|
||||||
|
- dev-unstable
|
||||||
|
runner:
|
||||||
|
type: choice
|
||||||
|
description: Runner
|
||||||
|
options:
|
||||||
|
- standard
|
||||||
|
- fast
|
||||||
|
arch:
|
||||||
|
type: choice
|
||||||
|
description: Architecture
|
||||||
|
options:
|
||||||
|
- ALL
|
||||||
|
- x86_64
|
||||||
|
- aarch64
|
||||||
|
- riscv64
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
compile:
|
||||||
|
name: Build Debian Package
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix:
|
||||||
|
arch: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": ["x86_64"],
|
||||||
|
"aarch64": ["aarch64"],
|
||||||
|
"riscv64": ["riscv64"],
|
||||||
|
"ALL": ["x86_64", "aarch64", "riscv64"]
|
||||||
|
}')[github.event.inputs.arch || 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||||
|
steps:
|
||||||
|
- name: Mount tmpfs
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
run: sudo mount -t tmpfs tmpfs .
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
|
||||||
|
- name: Make
|
||||||
|
run: make registry-deb
|
||||||
|
env:
|
||||||
|
PLATFORM: ${{ matrix.arch }}
|
||||||
|
SCCACHE_GHA_ENABLED: on
|
||||||
|
SCCACHE_GHA_VERSION: 0
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: start-registry_${{ matrix.arch }}.deb
|
||||||
|
path: results/start-registry-*_${{ matrix.arch }}.deb
|
||||||
|
|
||||||
|
create-image:
|
||||||
|
name: Create Docker Image
|
||||||
|
needs: [compile]
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||||
|
steps:
|
||||||
|
- name: Cleaning up unnecessary files
|
||||||
|
run: |
|
||||||
|
sudo apt-get remove --purge -y google-chrome-stable firefox mono-devel
|
||||||
|
sudo apt-get autoremove -y
|
||||||
|
sudo apt-get clean
|
||||||
|
|
||||||
|
- run: |
|
||||||
|
sudo mount -t tmpfs tmpfs .
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
|
||||||
|
- name: Set up docker QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: "Login to GitHub Container Registry"
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{github.actor}}
|
||||||
|
password: ${{secrets.GITHUB_TOKEN}}
|
||||||
|
|
||||||
|
- name: Docker meta
|
||||||
|
id: meta
|
||||||
|
uses: docker/metadata-action@v5
|
||||||
|
with:
|
||||||
|
images: ghcr.io/Start9Labs/startos-registry
|
||||||
|
tags: |
|
||||||
|
type=raw,value=${{ github.ref_name }}
|
||||||
|
|
||||||
|
- name: Download debian package
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
pattern: start-registry_*.deb
|
||||||
|
|
||||||
|
- name: Map matrix.arch to docker platform
|
||||||
|
run: |
|
||||||
|
platforms=""
|
||||||
|
for deb in *.deb; do
|
||||||
|
filename=$(basename "$deb" .deb)
|
||||||
|
arch="${filename#*_}"
|
||||||
|
case "$arch" in
|
||||||
|
x86_64)
|
||||||
|
platform="linux/amd64"
|
||||||
|
;;
|
||||||
|
aarch64)
|
||||||
|
platform="linux/arm64"
|
||||||
|
;;
|
||||||
|
riscv64)
|
||||||
|
platform="linux/riscv64"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unknown architecture: $arch" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
if [ -z "$platforms" ]; then
|
||||||
|
platforms="$platform"
|
||||||
|
else
|
||||||
|
platforms="$platforms,$platform"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
echo "DOCKER_PLATFORM=$platforms" >> "$GITHUB_ENV"
|
||||||
|
|
||||||
|
- run: |
|
||||||
|
cat | docker buildx build --platform "$DOCKER_PLATFORM" --push -t ${{ steps.meta.outputs.tags }} -f - . << 'EOF'
|
||||||
|
FROM debian:trixie
|
||||||
|
|
||||||
|
ADD *.deb .
|
||||||
|
|
||||||
|
RUN apt-get install -y ./*_$(uname -m).deb && rm *.deb
|
||||||
|
|
||||||
|
VOLUME /var/lib/startos
|
||||||
|
|
||||||
|
ENV RUST_LOG=startos=debug
|
||||||
|
|
||||||
|
ENTRYPOINT ["start-registryd"]
|
||||||
|
|
||||||
|
EOF
|
||||||
84
.github/workflows/start-tunnel.yaml
vendored
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
name: start-tunnel
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
environment:
|
||||||
|
type: choice
|
||||||
|
description: Environment
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- dev
|
||||||
|
- unstable
|
||||||
|
- dev-unstable
|
||||||
|
runner:
|
||||||
|
type: choice
|
||||||
|
description: Runner
|
||||||
|
options:
|
||||||
|
- standard
|
||||||
|
- fast
|
||||||
|
arch:
|
||||||
|
type: choice
|
||||||
|
description: Architecture
|
||||||
|
options:
|
||||||
|
- ALL
|
||||||
|
- x86_64
|
||||||
|
- aarch64
|
||||||
|
- riscv64
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
compile:
|
||||||
|
name: Build Debian Package
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix:
|
||||||
|
arch: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": ["x86_64"],
|
||||||
|
"aarch64": ["aarch64"],
|
||||||
|
"riscv64": ["riscv64"],
|
||||||
|
"ALL": ["x86_64", "aarch64", "riscv64"]
|
||||||
|
}')[github.event.inputs.arch || 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||||
|
steps:
|
||||||
|
- name: Mount tmpfs
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
run: sudo mount -t tmpfs tmpfs .
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
|
||||||
|
- name: Make
|
||||||
|
run: make tunnel-deb
|
||||||
|
env:
|
||||||
|
PLATFORM: ${{ matrix.arch }}
|
||||||
|
SCCACHE_GHA_ENABLED: on
|
||||||
|
SCCACHE_GHA_VERSION: 0
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: start-tunnel_${{ matrix.arch }}.deb
|
||||||
|
path: results/start-tunnel-*_${{ matrix.arch }}.deb
|
||||||
256
.github/workflows/startos-iso.yaml
vendored
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
name: Debian-based ISO and SquashFS
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_call:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
environment:
|
||||||
|
type: choice
|
||||||
|
description: Environment
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- dev
|
||||||
|
- unstable
|
||||||
|
- dev-unstable
|
||||||
|
runner:
|
||||||
|
type: choice
|
||||||
|
description: Runner
|
||||||
|
options:
|
||||||
|
- standard
|
||||||
|
- fast
|
||||||
|
platform:
|
||||||
|
type: choice
|
||||||
|
description: Platform
|
||||||
|
options:
|
||||||
|
- ALL
|
||||||
|
- x86_64
|
||||||
|
- x86_64-nonfree
|
||||||
|
- aarch64
|
||||||
|
- aarch64-nonfree
|
||||||
|
# - raspberrypi
|
||||||
|
- riscv64
|
||||||
|
deploy:
|
||||||
|
type: choice
|
||||||
|
description: Deploy
|
||||||
|
options:
|
||||||
|
- NONE
|
||||||
|
- alpha
|
||||||
|
- beta
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
compile:
|
||||||
|
name: Compile Base Binaries
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix:
|
||||||
|
arch: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": ["x86_64"],
|
||||||
|
"x86_64-nonfree": ["x86_64"],
|
||||||
|
"aarch64": ["aarch64"],
|
||||||
|
"aarch64-nonfree": ["aarch64"],
|
||||||
|
"raspberrypi": ["aarch64"],
|
||||||
|
"riscv64": ["riscv64"],
|
||||||
|
"ALL": ["x86_64", "aarch64", "riscv64"]
|
||||||
|
}')[github.event.inputs.platform || 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: >-
|
||||||
|
${{
|
||||||
|
fromJson(
|
||||||
|
format(
|
||||||
|
'["{0}", "{1}"]',
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "ubuntu-latest",
|
||||||
|
"aarch64": "ubuntu-24.04-arm",
|
||||||
|
"riscv64": "ubuntu-latest"
|
||||||
|
}')[matrix.arch],
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "buildjet-32vcpu-ubuntu-2204",
|
||||||
|
"aarch64": "buildjet-32vcpu-ubuntu-2204-arm",
|
||||||
|
"riscv64": "buildjet-32vcpu-ubuntu-2204"
|
||||||
|
}')[matrix.arch]
|
||||||
|
)
|
||||||
|
)[github.event.inputs.runner == 'fast']
|
||||||
|
}}
|
||||||
|
steps:
|
||||||
|
- name: Mount tmpfs
|
||||||
|
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||||
|
run: sudo mount -t tmpfs tmpfs .
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
setup-python: "true"
|
||||||
|
|
||||||
|
- name: Make
|
||||||
|
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
|
||||||
|
env:
|
||||||
|
SCCACHE_GHA_ENABLED: on
|
||||||
|
SCCACHE_GHA_VERSION: 0
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: compiled-${{ matrix.arch }}.tar
|
||||||
|
path: compiled-${{ matrix.arch }}.tar
|
||||||
|
image:
|
||||||
|
name: Build Image
|
||||||
|
needs: [compile]
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
# TODO: re-add "raspberrypi" to the platform list below
|
||||||
|
platform: >-
|
||||||
|
${{
|
||||||
|
fromJson(
|
||||||
|
format(
|
||||||
|
'[
|
||||||
|
["{0}"],
|
||||||
|
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "riscv64"]
|
||||||
|
]',
|
||||||
|
github.event.inputs.platform || 'ALL'
|
||||||
|
)
|
||||||
|
)[(github.event.inputs.platform || 'ALL') == 'ALL']
|
||||||
|
}}
|
||||||
|
runs-on: >-
|
||||||
|
${{
|
||||||
|
fromJson(
|
||||||
|
format(
|
||||||
|
'["{0}", "{1}"]',
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "ubuntu-latest",
|
||||||
|
"x86_64-nonfree": "ubuntu-latest",
|
||||||
|
"aarch64": "ubuntu-24.04-arm",
|
||||||
|
"aarch64-nonfree": "ubuntu-24.04-arm",
|
||||||
|
"raspberrypi": "ubuntu-24.04-arm",
|
||||||
|
"riscv64": "ubuntu-24.04-arm",
|
||||||
|
}')[matrix.platform],
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "buildjet-8vcpu-ubuntu-2204",
|
||||||
|
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
|
||||||
|
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||||
|
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||||
|
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||||
|
"riscv64": "buildjet-8vcpu-ubuntu-2204",
|
||||||
|
}')[matrix.platform]
|
||||||
|
)
|
||||||
|
)[github.event.inputs.runner == 'fast']
|
||||||
|
}}
|
||||||
|
env:
|
||||||
|
ARCH: >-
|
||||||
|
${{
|
||||||
|
fromJson('{
|
||||||
|
"x86_64": "x86_64",
|
||||||
|
"x86_64-nonfree": "x86_64",
|
||||||
|
"aarch64": "aarch64",
|
||||||
|
"aarch64-nonfree": "aarch64",
|
||||||
|
"raspberrypi": "aarch64",
|
||||||
|
"riscv64": "riscv64",
|
||||||
|
}')[matrix.platform]
|
||||||
|
}}
|
||||||
|
steps:
|
||||||
|
- name: Free space
|
||||||
|
run: |
|
||||||
|
sudo apt-get remove --purge -y azure-cli || true
|
||||||
|
sudo apt-get remove --purge -y firefox || true
|
||||||
|
sudo apt-get remove --purge -y ghc-* || true
|
||||||
|
sudo apt-get remove --purge -y google-cloud-sdk || true
|
||||||
|
sudo apt-get remove --purge -y google-chrome-stable || true
|
||||||
|
sudo apt-get remove --purge -y powershell || true
|
||||||
|
sudo apt-get remove --purge -y php* || true
|
||||||
|
sudo apt-get remove --purge -y ruby* || true
|
||||||
|
sudo apt-get remove --purge -y mono-* || true
|
||||||
|
sudo apt-get autoremove -y
|
||||||
|
sudo apt-get clean
|
||||||
|
sudo rm -rf /usr/lib/jvm # All JDKs
|
||||||
|
sudo rm -rf /usr/local/.ghcup # Haskell toolchain
|
||||||
|
sudo rm -rf /usr/local/lib/android # Android SDK/NDK, emulator
|
||||||
|
sudo rm -rf /usr/share/dotnet # .NET SDKs
|
||||||
|
sudo rm -rf /usr/share/swift # Swift toolchain (if present)
|
||||||
|
sudo rm -rf "$AGENT_TOOLSDIRECTORY" # Pre-cached tool cache (Go, Node, etc.)
|
||||||
|
if: ${{ github.event.inputs.runner != 'fast' }}
|
||||||
|
|
||||||
|
# BuildJet runners lack /opt/hostedtoolcache, which setup-qemu expects
|
||||||
|
- name: Ensure hostedtoolcache exists
|
||||||
|
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
|
||||||
|
|
||||||
|
- name: Set up docker QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Download compiled artifacts
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: compiled-${{ env.ARCH }}.tar
|
||||||
|
|
||||||
|
- name: Extract compiled artifacts
|
||||||
|
run: tar -xvf compiled-${{ env.ARCH }}.tar
|
||||||
|
|
||||||
|
- name: Prevent rebuild of compiled artifacts
|
||||||
|
run: |
|
||||||
|
mkdir -p web/node_modules
|
||||||
|
mkdir -p web/dist/raw
|
||||||
|
mkdir -p core/bindings
|
||||||
|
mkdir -p sdk/base/lib/osBindings
|
||||||
|
mkdir -p container-runtime/node_modules
|
||||||
|
mkdir -p container-runtime/dist
|
||||||
|
mkdir -p container-runtime/dist/node_modules
|
||||||
|
mkdir -p sdk/dist
|
||||||
|
mkdir -p sdk/baseDist
|
||||||
|
mkdir -p patch-db/client/node_modules
|
||||||
|
mkdir -p patch-db/client/dist
|
||||||
|
mkdir -p web/.angular
|
||||||
|
mkdir -p web/dist/raw/ui
|
||||||
|
mkdir -p web/dist/raw/setup-wizard
|
||||||
|
mkdir -p web/dist/static/ui
|
||||||
|
mkdir -p web/dist/static/setup-wizard
|
||||||
|
PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar
|
||||||
|
|
||||||
|
- run: git status
|
||||||
|
|
||||||
|
- name: Run iso build
|
||||||
|
run: PLATFORM=${{ matrix.platform }} make iso
|
||||||
|
if: ${{ matrix.platform != 'raspberrypi' }}
|
||||||
|
|
||||||
|
- name: Run img build
|
||||||
|
run: PLATFORM=${{ matrix.platform }} make img
|
||||||
|
if: ${{ matrix.platform == 'raspberrypi' }}
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}.squashfs
|
||||||
|
path: results/*.squashfs
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}.iso
|
||||||
|
path: results/*.iso
|
||||||
|
if: ${{ matrix.platform != 'raspberrypi' }}
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: ${{ matrix.platform }}.img
|
||||||
|
path: results/*.img
|
||||||
|
if: ${{ matrix.platform == 'raspberrypi' }}
|
||||||
38
.github/workflows/test.yaml
vendored
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
name: Automated Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
- next/*
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
env:
|
||||||
|
NODEJS_VERSION: "24.11.0"
|
||||||
|
ENVIRONMENT: dev-unstable
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
name: Run Automated Tests
|
||||||
|
if: github.event.pull_request.draft != true
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- uses: ./.github/actions/setup-build
|
||||||
|
with:
|
||||||
|
nodejs-version: ${{ env.NODEJS_VERSION }}
|
||||||
|
free-space: "false"
|
||||||
|
setup-docker: "false"
|
||||||
|
setup-sccache: "false"
|
||||||
|
|
||||||
|
- name: Build And Run Tests
|
||||||
|
run: make test
|
||||||
22
.gitignore
vendored
@@ -1,14 +1,24 @@
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
.idea
|
.idea
|
||||||
/*.img
|
*.img
|
||||||
/*.img.gz
|
*.img.gz
|
||||||
/*.img.xz
|
*.img.xz
|
||||||
/*-raspios-bullseye-arm64-lite.img
|
*.zip
|
||||||
/*-raspios-bullseye-arm64-lite.zip
|
|
||||||
/product_key.txt
|
/product_key.txt
|
||||||
/*_product_key.txt
|
/*_product_key.txt
|
||||||
.vscode/settings.json
|
.vscode/settings.json
|
||||||
deploy_web.sh
|
deploy_web.sh
|
||||||
deploy_web.sh
|
|
||||||
secrets.db
|
secrets.db
|
||||||
.vscode/
|
.vscode/
|
||||||
|
/build/env/*.txt
|
||||||
|
*.deb
|
||||||
|
/target
|
||||||
|
*.squashfs
|
||||||
|
/results
|
||||||
|
/dpkg-workdir
|
||||||
|
/compiled.tar
|
||||||
|
/compiled-*.tar
|
||||||
|
/build/lib/firmware
|
||||||
|
tmp
|
||||||
|
web/.i18n-checked
|
||||||
|
docs/USER.md
|
||||||
|
|||||||
3
.gitmodules
vendored
@@ -1,6 +1,3 @@
|
|||||||
[submodule "rpc-toolkit"]
|
|
||||||
path = rpc-toolkit
|
|
||||||
url = https://github.com/Start9Labs/rpc-toolkit.git
|
|
||||||
[submodule "patch-db"]
|
[submodule "patch-db"]
|
||||||
path = patch-db
|
path = patch-db
|
||||||
url = https://github.com/Start9Labs/patch-db.git
|
url = https://github.com/Start9Labs/patch-db.git
|
||||||
|
|||||||
83
CLAUDE.md
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
# CLAUDE.md
|
||||||
|
|
||||||
|
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||||
|
|
||||||
|
## Project Overview
|
||||||
|
|
||||||
|
StartOS is an open-source Linux distribution for running personal servers. It manages discovery, installation, network configuration, backups, and health monitoring of self-hosted services.
|
||||||
|
|
||||||
|
**Tech Stack:**
|
||||||
|
- Backend: Rust (async/Tokio, Axum web framework)
|
||||||
|
- Frontend: Angular 20 + TypeScript + TaigaUI
|
||||||
|
- Container runtime: Node.js/TypeScript with LXC
|
||||||
|
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
|
||||||
|
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`)
|
||||||
|
- Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`)
|
||||||
|
|
||||||
|
## Build & Development
|
||||||
|
|
||||||
|
See [CONTRIBUTING.md](CONTRIBUTING.md) for:
|
||||||
|
- Environment setup and requirements
|
||||||
|
- Build commands and make targets
|
||||||
|
- Testing and formatting commands
|
||||||
|
- Environment variables
|
||||||
|
|
||||||
|
**Quick reference:**
|
||||||
|
```bash
|
||||||
|
. ./devmode.sh # Enable dev mode
|
||||||
|
make update-startbox REMOTE=start9@<ip> # Fastest iteration (binary + UI)
|
||||||
|
make test-core # Run Rust tests
|
||||||
|
```
|
||||||
|
|
||||||
|
### Verifying code changes
|
||||||
|
|
||||||
|
When making changes across multiple layers (Rust, SDK, web, container-runtime), verify in this order:
|
||||||
|
|
||||||
|
1. **Rust**: `cargo check -p start-os` — verifies core compiles
|
||||||
|
2. **TS bindings**: `make ts-bindings` — regenerates TypeScript types from Rust `#[ts(export)]` structs
|
||||||
|
- Runs `./core/build/build-ts.sh` to export ts-rs types to `core/bindings/`
|
||||||
|
- Syncs `core/bindings/` → `sdk/base/lib/osBindings/` via rsync
|
||||||
|
- If you manually edit files in `sdk/base/lib/osBindings/`, you must still rebuild the SDK (step 3)
|
||||||
|
3. **SDK bundle**: `cd sdk && make baseDist dist` — compiles SDK source into packages
|
||||||
|
- `baseDist/` is consumed by `/web` (via `@start9labs/start-sdk-base`)
|
||||||
|
- `dist/` is consumed by `/container-runtime` (via `@start9labs/start-sdk`)
|
||||||
|
- Web and container-runtime reference the **built** SDK, not source files
|
||||||
|
4. **Web type check**: `cd web && npm run check` — type-checks all Angular projects
|
||||||
|
5. **Container runtime type check**: `cd container-runtime && npm run check` — type-checks the runtime
|
||||||
|
|
||||||
|
**Important**: Editing `sdk/base/lib/osBindings/*.ts` alone is NOT sufficient — you must rebuild the SDK bundle (step 3) before web/container-runtime can see the changes.
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
Each major component has its own `CLAUDE.md` with detailed guidance.
|
||||||
|
|
||||||
|
- **`core/`** — Rust backend daemon (startbox, start-cli, start-container, registrybox, tunnelbox)
|
||||||
|
- **`web/`** — Angular frontend workspace (admin UI, setup wizard, marketplace, shared library)
|
||||||
|
- **`container-runtime/`** — Node.js runtime managing service containers via JSON-RPC
|
||||||
|
- **`sdk/`** — TypeScript SDK for packaging services (`@start9labs/start-sdk`)
|
||||||
|
- **`patch-db/`** — Git submodule providing diff-based state synchronization
|
||||||
|
|
||||||
|
## Supplementary Documentation
|
||||||
|
|
||||||
|
The `docs/` directory contains cross-cutting documentation for AI assistants:
|
||||||
|
|
||||||
|
- `TODO.md` - Pending tasks for AI agents (check this first, remove items when completed)
|
||||||
|
- `USER.md` - Current user identifier (gitignored, see below)
|
||||||
|
- `exver.md` - Extended versioning format (used across core, sdk, and web)
|
||||||
|
- `VERSION_BUMP.md` - Guide for bumping the StartOS version across the codebase
|
||||||
|
|
||||||
|
Component-specific docs live alongside their code (e.g., `core/rpc-toolkit.md`, `core/i18n-patterns.md`).
|
||||||
|
|
||||||
|
### Session Startup
|
||||||
|
|
||||||
|
On startup:
|
||||||
|
|
||||||
|
1. **Check for `docs/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer.
|
||||||
|
|
||||||
|
2. **Check `docs/TODO.md` for relevant tasks** - Show TODOs that either:
|
||||||
|
- Have no `@username` tag (relevant to everyone)
|
||||||
|
- Are tagged with the current user's identifier
|
||||||
|
|
||||||
|
Skip TODOs tagged with a different user.
|
||||||
|
|
||||||
|
3. **Ask "What would you like to do today?"** - Offer options for each relevant TODO item, plus "Something else" for other requests.
|
||||||
422
CONTRIBUTING.md
@@ -1,208 +1,260 @@
|
|||||||
<!-- omit in toc -->
|
# Contributing to StartOS
|
||||||
# Contributing to Embassy OS
|
|
||||||
|
|
||||||
First off, thanks for taking the time to contribute! ❤️
|
This guide is for contributing to the StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://github.com/Start9Labs/ai-service-packaging). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
|
||||||
|
|
||||||
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
|
## Collaboration
|
||||||
|
|
||||||
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
|
- [Matrix](https://matrix.to/#/#dev-startos:matrix.start9labs.com)
|
||||||
> - Star the project
|
|
||||||
> - Tweet about it
|
|
||||||
> - Refer this project in your project's readme
|
|
||||||
> - Mention the project at local meetups and tell your friends/colleagues
|
|
||||||
> - Buy an [Embassy](https://start9labs.com)
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
## Project Structure
|
||||||
## Table of Contents
|
|
||||||
|
|
||||||
- [I Have a Question](#i-have-a-question)
|
```bash
|
||||||
- [I Want To Contribute](#i-want-to-contribute)
|
/
|
||||||
- [Reporting Bugs](#reporting-bugs)
|
├── assets/ # Screenshots for README
|
||||||
- [Suggesting Enhancements](#suggesting-enhancements)
|
├── build/ # Auxiliary files and scripts for deployed images
|
||||||
- [Project Structure](#project-structure)
|
├── container-runtime/ # Node.js program managing package containers
|
||||||
- [Your First Code Contribution](#your-first-code-contribution)
|
├── core/ # Rust backend: API, daemon (startd), CLI (start-cli)
|
||||||
- [Setting Up Your Development Environment](#setting-up-your-development-environment)
|
├── debian/ # Debian package maintainer scripts
|
||||||
- [Building The Image](#building-the-image)
|
├── image-recipe/ # Scripts for building StartOS images
|
||||||
- [Improving The Documentation](#improving-the-documentation)
|
├── patch-db/ # (submodule) Diff-based data store for frontend sync
|
||||||
- [Styleguides](#styleguides)
|
├── sdk/ # TypeScript SDK for building StartOS packages
|
||||||
- [Formatting](#formatting)
|
└── web/ # Web UIs (Angular)
|
||||||
- [Atomic Commits](#atomic-commits)
|
|
||||||
- [Commit Messages](#commit-messages)
|
|
||||||
- [Pull Requests](#pull-requests)
|
|
||||||
- [Rebasing Changes](#rebasing-changes)
|
|
||||||
- [Join The Discussion](#join-the-discussion)
|
|
||||||
- [Join The Project Team](#join-the-project-team)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## I Have a Question
|
|
||||||
|
|
||||||
> If you want to ask a question, we assume that you have read the available [Documentation](https://docs.start9labs.com).
|
|
||||||
|
|
||||||
Before you ask a question, it is best to search for existing [Issues](https://github.com/Start9Labs/embassy-os/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
|
|
||||||
|
|
||||||
If you then still feel the need to ask a question and need clarification, we recommend the following:
|
|
||||||
|
|
||||||
- Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new).
|
|
||||||
- Provide as much context as you can about what you're running into.
|
|
||||||
- Provide project and platform versions, depending on what seems relevant.
|
|
||||||
|
|
||||||
We will then take care of the issue as soon as possible.
|
|
||||||
|
|
||||||
<!--
|
|
||||||
You might want to create a separate issue tag for questions and include it in this description. People should then tag their issues accordingly.
|
|
||||||
|
|
||||||
Depending on how large the project is, you may want to outsource the questioning, e.g. to Stack Overflow or Gitter. You may add additional contact and information possibilities:
|
|
||||||
- IRC
|
|
||||||
- Slack
|
|
||||||
- Gitter
|
|
||||||
- Stack Overflow tag
|
|
||||||
- Blog
|
|
||||||
- FAQ
|
|
||||||
- Roadmap
|
|
||||||
- E-Mail List
|
|
||||||
- Forum
|
|
||||||
-->
|
|
||||||
|
|
||||||
## I Want To Contribute
|
|
||||||
|
|
||||||
> ### Legal Notice <!-- omit in toc -->
|
|
||||||
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
|
|
||||||
|
|
||||||
### Reporting Bugs
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
|
||||||
#### Before Submitting a Bug Report
|
|
||||||
|
|
||||||
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
|
|
||||||
|
|
||||||
- Make sure that you are using the latest version.
|
|
||||||
- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](https://start9.com/latest/user-manual). If you are looking for support, you might want to check [this section](#i-have-a-question)).
|
|
||||||
- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/Start9Labs/embassy-os/issues?q=label%3Abug).
|
|
||||||
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
|
|
||||||
- Collect information about the bug:
|
|
||||||
- Stack trace (Traceback)
|
|
||||||
- Client OS, Platform and Version (Windows/Linux/macOS/iOS/Android, Firefox/Tor Browser/Consulate)
|
|
||||||
- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
|
|
||||||
- Possibly your input and the output
|
|
||||||
- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
|
||||||
#### How Do I Submit a Good Bug Report?
|
|
||||||
|
|
||||||
> You must never report security related issues, vulnerabilities or bugs to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to <security@start9labs.com>.
|
|
||||||
<!-- You may add a PGP key to allow the messages to be sent encrypted as well. -->
|
|
||||||
|
|
||||||
We use GitHub issues to track bugs and errors. If you run into an issue with the project:
|
|
||||||
|
|
||||||
- Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new/choose) selecting the appropriate type.
|
|
||||||
- Explain the behavior you would expect and the actual behavior.
|
|
||||||
- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
|
|
||||||
- Provide the information you collected in the previous section.
|
|
||||||
|
|
||||||
Once it's filed:
|
|
||||||
|
|
||||||
- The project team will label the issue accordingly.
|
|
||||||
- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `Question`. Bugs with the `Question` tag will not be addressed until they are answered.
|
|
||||||
- If the team is able to reproduce the issue, it will be marked a scoping level tag, as well as possibly other tags (such as `Security`), and the issue will be left to be [implemented by someone](#your-first-code-contribution).
|
|
||||||
|
|
||||||
<!-- You might want to create an issue template for bugs and errors that can be used as a guide and that defines the structure of the information to be included. If you do so, reference it here in the description. -->
|
|
||||||
|
|
||||||
|
|
||||||
### Suggesting Enhancements
|
|
||||||
|
|
||||||
This section guides you through submitting an enhancement suggestion for Embassy OS, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
|
||||||
#### Before Submitting an Enhancement
|
|
||||||
|
|
||||||
- Make sure that you are using the latest version.
|
|
||||||
- Read the [documentation](https://start9.com/latest/user-manual) carefully and find out if the functionality is already covered, maybe by an individual configuration.
|
|
||||||
- Perform a [search](https://github.com/Start9Labs/embassy-os/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
|
|
||||||
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
|
|
||||||
|
|
||||||
<!-- omit in toc -->
|
|
||||||
#### How Do I Submit a Good Enhancement Suggestion?
|
|
||||||
|
|
||||||
Enhancement suggestions are tracked as [GitHub issues](https://github.com/Start9Labs/embassy-os/issues).
|
|
||||||
|
|
||||||
- Use a **clear and descriptive title** for the issue to identify the suggestion.
|
|
||||||
- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
|
|
||||||
- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
|
|
||||||
- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. <!-- this should only be included if the project has a GUI -->
|
|
||||||
- **Explain why this enhancement would be useful** to most Embassy OS users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
|
|
||||||
|
|
||||||
<!-- You might want to create an issue template for enhancement suggestions that can be used as a guide and that defines the structure of the information to be included. If you do so, reference it here in the description. -->
|
|
||||||
|
|
||||||
### Project Structure
|
|
||||||
|
|
||||||
EmbassyOS is composed of the following components. Please visit the README for each component to understand the dependency requirements and installation instructions.
|
|
||||||
- [`ui`](frontend/README.md) (Typescript Ionic Angular) is the code that is deployed to the browser to provide the user interface for EmbassyOS.
|
|
||||||
- [`backend`](backend/README.md) (Rust) is a command line utility, daemon, and software development kit that sets up and manages services and their environments, provides the interface for the ui, manages system state, and provides utilities for packaging services for EmbassyOS.
|
|
||||||
- `patch-db` - A diff based data store that is used to synchronize data between the front and backend.
|
|
||||||
- Notably, `patch-db` has a [client](https://github.com/Start9Labs/patch-db/tree/master/client) with its own dependency and installation requirements.
|
|
||||||
- `rpc-toolkit` - A library for generating an rpc server with cli bindings from Rust functions.
|
|
||||||
- `system-images` - (Docker, Rust) A suite of utility Docker images that are preloaded with EmbassyOS to assist with functions relating to services (eg. configuration, backups, health checks).
|
|
||||||
- [`setup-wizard`](frontend/README.md)- Code for the user interface that is displayed during the setup and recovery process for EmbassyOS.
|
|
||||||
- [`diagnostic-ui`](frontend/README.md) - Code for the user interface that is displayed when something has gone wrong with starting up EmbassyOS, which provides helpful debugging tools.
|
|
||||||
|
|
||||||
### Your First Code Contribution
|
|
||||||
|
|
||||||
#### Setting Up Your Development Environment
|
|
||||||
|
|
||||||
First, clone the EmbassyOS repository and from the project root, pull in the submodules for dependent libraries.
|
|
||||||
|
|
||||||
```sh
|
|
||||||
git clone https://github.com/Start9Labs/embassy-os.git
|
|
||||||
git submodule update --init --recursive
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Depending on which component of the ecosystem you are interested in contributing to, follow the installation requirements listed in that component's README (linked [above](#project-structure))
|
See component READMEs for details:
|
||||||
|
|
||||||
#### Building The Image
|
- [`core`](core/README.md)
|
||||||
This step is for setting up an environment in which to test your code changes if you do not yet have a EmbassyOS.
|
- [`web`](web/README.md)
|
||||||
|
- [`build`](build/README.md)
|
||||||
|
- [`patch-db`](https://github.com/Start9Labs/patch-db)
|
||||||
|
|
||||||
- Requirements
|
## Environment Setup
|
||||||
- `ext4fs` (available if running on the Linux kernel)
|
|
||||||
- [Docker](https://docs.docker.com/get-docker/)
|
|
||||||
- GNU Make
|
|
||||||
- Building
|
|
||||||
- see setup instructions [here](build/README.md)
|
|
||||||
- run `make` from the project root
|
|
||||||
|
|
||||||
### Improving The Documentation
|
### Installing Dependencies (Debian/Ubuntu)
|
||||||
You can find the repository for Start9's documentation [here](https://github.com/Start9Labs/documentation). If there is something you would like to see added, let us know, or create an issue yourself. Welcome are contributions for lacking or incorrect information, broken links, requested additions, or general style improvements.
|
|
||||||
|
|
||||||
Contributions in the form of setup guides for integrations with external applications are highly encouraged. If you struggled through a process and would like to share your steps with others, check out the docs for each [service](https://github.com/Start9Labs/documentation/blob/master/source/user-manuals/available-services/index.rst) we support. The wrapper repos contain sections for adding integration guides, such as this [one](https://github.com/Start9Labs/bitcoind-wrapper/tree/master/docs). These not only help out others in the community, but inform how we can create a more seamless and intuitive experience.
|
> Debian/Ubuntu is the only officially supported build environment.
|
||||||
|
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install).
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y ca-certificates curl gpg build-essential
|
||||||
|
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||||
|
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
|
||||||
|
sudo mkdir -p /etc/debspawn/
|
||||||
|
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
|
||||||
|
sudo usermod -aG docker $USER
|
||||||
|
sudo su $USER
|
||||||
|
docker run --privileged --rm tonistiigi/binfmt --install all
|
||||||
|
docker buildx create --use
|
||||||
|
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
|
||||||
|
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
|
||||||
|
source ~/.bashrc
|
||||||
|
nvm install 24
|
||||||
|
nvm use 24
|
||||||
|
nvm alias default 24 # this prevents your machine from reverting back to another version
|
||||||
|
```
|
||||||
|
|
||||||
|
### Cloning the Repository
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
|
||||||
|
cd start-os
|
||||||
|
```
|
||||||
|
|
||||||
|
### Development Mode
|
||||||
|
|
||||||
|
For faster iteration during development:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
. ./devmode.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
This sets `ENVIRONMENT=dev` and `GIT_BRANCH_AS_HASH=1` to prevent rebuilds on every commit.
|
||||||
|
|
||||||
|
## Building
|
||||||
|
|
||||||
|
All builds can be performed on any operating system that can run Docker.
|
||||||
|
|
||||||
|
This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
- [GNU Make](https://www.gnu.org/software/make/)
|
||||||
|
- [Docker](https://docs.docker.com/get-docker/) or [Podman](https://podman.io/)
|
||||||
|
- [NodeJS v20.16.0](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
|
||||||
|
- [Rust](https://rustup.rs/) (nightly for formatting)
|
||||||
|
- [sed](https://www.gnu.org/software/sed/), [grep](https://www.gnu.org/software/grep/), [awk](https://www.gnu.org/software/gawk/)
|
||||||
|
- [jq](https://jqlang.github.io/jq/)
|
||||||
|
- [gzip](https://www.gnu.org/software/gzip/), [brotli](https://github.com/google/brotli)
|
||||||
|
|
||||||
|
### Environment Variables
|
||||||
|
|
||||||
|
| Variable | Description |
|
||||||
|
| -------------------- | --------------------------------------------------------------------------------------------------- |
|
||||||
|
| `PLATFORM` | Target platform: `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `riscv64`, `raspberrypi` |
|
||||||
|
| `ENVIRONMENT` | Hyphen-separated feature flags (see below) |
|
||||||
|
| `PROFILE` | Build profile: `release` (default) or `dev` |
|
||||||
|
| `GIT_BRANCH_AS_HASH` | Set to `1` to use git branch name as version hash (avoids rebuilds) |
|
||||||
|
|
||||||
|
**ENVIRONMENT flags:**
|
||||||
|
|
||||||
|
- `dev` - Enables password SSH before setup, skips frontend compression
|
||||||
|
- `unstable` - Enables assertions and debugging with performance penalty
|
||||||
|
- `console` - Enables tokio-console for async debugging
|
||||||
|
|
||||||
|
**Platform notes:**
|
||||||
|
|
||||||
|
- `-nonfree` variants include proprietary firmware and drivers
|
||||||
|
- `raspberrypi` includes non-free components by necessity
|
||||||
|
- Platform is remembered between builds if not specified
|
||||||
|
|
||||||
|
### Make Targets
|
||||||
|
|
||||||
|
#### Building
|
||||||
|
|
||||||
|
| Target | Description |
|
||||||
|
| ------------- | ---------------------------------------------- |
|
||||||
|
| `iso` | Create full `.iso` image (not for raspberrypi) |
|
||||||
|
| `img` | Create full `.img` image (raspberrypi only) |
|
||||||
|
| `deb` | Build Debian package |
|
||||||
|
| `all` | Build all Rust binaries |
|
||||||
|
| `uis` | Build all web UIs |
|
||||||
|
| `ui` | Build main UI only |
|
||||||
|
| `ts-bindings` | Generate TypeScript bindings from Rust types |
|
||||||
|
|
||||||
|
#### Deploying to Device
|
||||||
|
|
||||||
|
For devices on the same network:
|
||||||
|
|
||||||
|
| Target | Description |
|
||||||
|
| ------------------------------------ | ----------------------------------------------- |
|
||||||
|
| `update-startbox REMOTE=start9@<ip>` | Deploy binary + UI only (fastest) |
|
||||||
|
| `update-deb REMOTE=start9@<ip>` | Deploy full Debian package |
|
||||||
|
| `update REMOTE=start9@<ip>` | OTA-style update |
|
||||||
|
| `reflash REMOTE=start9@<ip>` | Reflash as if using live ISO |
|
||||||
|
| `update-overlay REMOTE=start9@<ip>` | Deploy to in-memory overlay (reverts on reboot) |
|
||||||
|
|
||||||
|
For devices on different networks (uses [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)):
|
||||||
|
|
||||||
|
| Target | Description |
|
||||||
|
| ------------------- | -------------------- |
|
||||||
|
| `wormhole` | Send startbox binary |
|
||||||
|
| `wormhole-deb` | Send Debian package |
|
||||||
|
| `wormhole-squashfs` | Send squashfs image |
|
||||||
|
|
||||||
|
### Creating a VM
|
||||||
|
|
||||||
|
Install virt-manager:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y virt-manager
|
||||||
|
sudo usermod -aG libvirt $USER
|
||||||
|
sudo su $USER
|
||||||
|
virt-manager
|
||||||
|
```
|
||||||
|
|
||||||
|
Follow the screenshot walkthrough in [`assets/create-vm/`](assets/create-vm/) to create a new virtual machine. Key steps:
|
||||||
|
|
||||||
|
1. Create a new virtual machine
|
||||||
|
2. Browse for the ISO — create a storage pool pointing to your `results/` directory
|
||||||
|
3. Select "Generic or unknown OS"
|
||||||
|
4. Set memory and CPUs
|
||||||
|
5. Create a disk and name the VM
|
||||||
|
|
||||||
|
Build an ISO first:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Other
|
||||||
|
|
||||||
|
| Target | Description |
|
||||||
|
| ------------------------ | ------------------------------------------- |
|
||||||
|
| `format` | Run code formatting (Rust nightly required) |
|
||||||
|
| `test` | Run all automated tests |
|
||||||
|
| `test-core` | Run Rust tests |
|
||||||
|
| `test-sdk` | Run SDK tests |
|
||||||
|
| `test-container-runtime` | Run container runtime tests |
|
||||||
|
| `clean` | Delete all compiled artifacts |
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make test # All tests
|
||||||
|
make test-core # Rust tests (via ./core/run-tests.sh)
|
||||||
|
make test-sdk # SDK tests
|
||||||
|
make test-container-runtime # Container runtime tests
|
||||||
|
|
||||||
|
# Run specific Rust test
|
||||||
|
cd core && cargo test <test_name> --features=test
|
||||||
|
```
|
||||||
|
|
||||||
|
## Code Formatting
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Rust (requires nightly)
|
||||||
|
make format
|
||||||
|
|
||||||
|
# TypeScript/HTML/SCSS (web)
|
||||||
|
cd web && npm run format
|
||||||
|
```
|
||||||
|
|
||||||
|
## Code Style Guidelines
|
||||||
|
|
||||||
## Styleguides
|
|
||||||
### Formatting
|
### Formatting
|
||||||
Each component of EmbassyOS contains its own style guide. Code must be formatted with the formatter designated for each component. These are outlined within each component folder's README.
|
|
||||||
|
|
||||||
### Atomic Commits
|
Run the formatters before committing. Configuration is handled by `rustfmt.toml` (Rust) and prettier configs (TypeScript).
|
||||||
Commits [should be atomic](https://en.wikipedia.org/wiki/Atomic_commit#Atomic_commit_convention) and diffs should be easy to read.
|
|
||||||
Do not mix any formatting fixes or code moves with actual code changes.
|
### Documentation & Comments
|
||||||
|
|
||||||
|
**Rust:**
|
||||||
|
|
||||||
|
- Add doc comments (`///`) to public APIs, structs, and non-obvious functions
|
||||||
|
- Use `//` comments sparingly for complex logic that isn't self-evident
|
||||||
|
- Prefer self-documenting code (clear naming, small functions) over comments
|
||||||
|
|
||||||
|
**TypeScript:**
|
||||||
|
|
||||||
|
- Document exported functions and complex types with JSDoc
|
||||||
|
- Keep comments focused on "why" rather than "what"
|
||||||
|
|
||||||
|
**General:**
|
||||||
|
|
||||||
|
- Don't add comments that just restate the code
|
||||||
|
- Update or remove comments when code changes
|
||||||
|
- TODOs should include context: `// TODO(username): reason`
|
||||||
|
|
||||||
### Commit Messages
|
### Commit Messages
|
||||||
If a commit touches only 1 component, prefix the message with the affected component. i.e. `backend: update to tokio v0.3`.
|
|
||||||
|
|
||||||
### Pull Requests
|
Use [Conventional Commits](https://www.conventionalcommits.org/):
|
||||||
The body of a pull request should contain sufficient description of what the changes do, as well as a justification.
|
|
||||||
You should include references to any relevant [issues](https://github.com/Start9Labs/embassy-os/issues).
|
|
||||||
|
|
||||||
### Rebasing Changes
|
```
|
||||||
When a pull request conflicts with the target branch, you may be asked to rebase it on top of the current target branch. The `git rebase` command will take care of rebuilding your commits on top of the new base.
|
<type>(<scope>): <description>
|
||||||
|
|
||||||
This project aims to have a clean git history, where code changes are only made in non-merge commits. This simplifies auditability because merge commits can be assumed to not contain arbitrary code changes.
|
[optional body]
|
||||||
|
|
||||||
## Join The Discussion
|
[optional footer]
|
||||||
Current or aspiring contributors? Join our community developer [Matrix channel](https://matrix.to/#/#community-dev:matrix.start9labs.com).
|
```
|
||||||
|
|
||||||
Just interested in or using the project? Join our community [Telegram](https://t.me/start9_labs) or [Matrix](https://matrix.to/#/#community:matrix.start9labs.com).
|
**Types:**
|
||||||
|
|
||||||
## Join The Project Team
|
- `feat` - New feature
|
||||||
Interested in becoming a part of the Start9 Labs team? Send an email to <jobs@start9labs.com>
|
- `fix` - Bug fix
|
||||||
|
- `docs` - Documentation only
|
||||||
|
- `style` - Formatting, no code change
|
||||||
|
- `refactor` - Code change that neither fixes a bug nor adds a feature
|
||||||
|
- `test` - Adding or updating tests
|
||||||
|
- `chore` - Build process, dependencies, etc.
|
||||||
|
|
||||||
<!-- omit in toc -->
|
**Examples:**
|
||||||
## Attribution
|
|
||||||
This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
|
```
|
||||||
|
feat(web): add dark mode toggle
|
||||||
|
fix(core): resolve race condition in service startup
|
||||||
|
docs: update CONTRIBUTING.md with style guidelines
|
||||||
|
refactor(sdk): simplify package validation logic
|
||||||
|
```
|
||||||
|
|||||||
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2023 Start9 Labs, Inc.
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
25
LICENSE.md
@@ -1,25 +0,0 @@
|
|||||||
# START9 PERSONAL USE LICENSE v1.0
|
|
||||||
|
|
||||||
This license governs the use of the accompanying Software. If you use the Software, you accept this license. If you do not accept the license, do not use the Software.
|
|
||||||
|
|
||||||
1. **Definitions.**
|
|
||||||
1. “Licensor” means the copyright owner, Start9 Labs, Inc, or its successor(s) in interest, or a future assignee of the copyright.
|
|
||||||
2. “Source Code” means the preferred form of the Software for making modifications to it.
|
|
||||||
3. “Object Code” means any non-source form of the Software, including the machine-language output by a compiler or assembler.
|
|
||||||
4. “Distribute” means to convey or to publish and generally has the same meaning here as under U.S. Copyright law.
|
|
||||||
5. “Sell” means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/support services related to the Software), a product or service whose value derives, entirely or substantially, from the functionality of the Software.
|
|
||||||
|
|
||||||
2. **Grant of Rights.** Subject to the terms of this license, the Licensor grants you, the licensee, a non-exclusive, worldwide, royalty-free copyright license to:
|
|
||||||
1. Access, audit, copy, modify, compile, or distribute the Source Code or modifications to the Source Code.
|
|
||||||
2. Run, test, or otherwise use the Object Code.
|
|
||||||
|
|
||||||
3. **Limitations.**
|
|
||||||
1. The grant of rights under the License will NOT include, and the License does NOT grant you the right to:
|
|
||||||
1. Sell the Software or any derivative works based thereon.
|
|
||||||
2. Distribute the Object Code.
|
|
||||||
2. If you Distribute the Source Code, or if permission is separately granted to Distribute the Object Code, you expressly undertake not to remove, or modify, in any manner, the copyright notices attached to the Source Code, and displayed in any output of the Object Code when run, and to reproduce these notices, in an identical manner, in any distributed copies of the Software together with a copy of this license. If you Distribute a modified copy of the Software, or a derivative work based thereon, the work must carry prominent notices stating that you modified it, and giving a relevant date.
|
|
||||||
3. The terms of this license will apply to anyone who comes into possession of a copy of the Software, and any modifications or derivative works based thereon, made by anyone.
|
|
||||||
|
|
||||||
4. **Contributions.** You hereby grant to Licensor a perpetual, irrevocable, worldwide, non-exclusive, royalty-free license to use and exploit any modifications or derivative works based on the Source Code of which you are the author.
|
|
||||||
|
|
||||||
5. **Disclaimer.** THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. LICENSOR HAS NO OBLIGATION TO SUPPORT RECIPIENTS OF THE SOFTWARE.
|
|
||||||
416
Makefile
@@ -1,87 +1,381 @@
|
|||||||
EMBASSY_BINS := backend/target/aarch64-unknown-linux-gnu/release/embassyd backend/target/aarch64-unknown-linux-gnu/release/embassy-init backend/target/aarch64-unknown-linux-gnu/release/embassy-cli backend/target/aarch64-unknown-linux-gnu/release/embassy-sdk
|
ls-files = $(shell git ls-files --cached --others --exclude-standard $1)
|
||||||
EMBASSY_UIS := frontend/dist/ui frontend/dist/setup-wizard frontend/dist/diagnostic-ui
|
PROFILE = release
|
||||||
EMBASSY_SRC := raspios.img product_key.txt $(EMBASSY_BINS) backend/embassyd.service backend/embassy-init.service $(EMBASSY_UIS) $(shell find build)
|
|
||||||
COMPAT_SRC := $(shell find system-images/compat/src)
|
PLATFORM_FILE := $(shell ./build/env/check-platform.sh)
|
||||||
UTILS_SRC := $(shell find system-images/utils/Dockerfile)
|
ENVIRONMENT_FILE := $(shell ./build/env/check-environment.sh)
|
||||||
BACKEND_SRC := $(shell find backend/src) $(shell find patch-db/*/src) $(shell find rpc-toolkit/*/src) backend/Cargo.toml backend/Cargo.lock
|
GIT_HASH_FILE := $(shell ./build/env/check-git-hash.sh)
|
||||||
FRONTEND_SRC := $(shell find frontend/projects) $(shell find frontend/assets)
|
VERSION_FILE := $(shell ./build/env/check-version.sh)
|
||||||
PATCH_DB_CLIENT_SRC = $(shell find patch-db/client -not -path patch-db/client/dist)
|
BASENAME := $(shell PROJECT=startos ./build/env/basename.sh)
|
||||||
GIT_REFS := $(shell find .git/refs/heads)
|
PLATFORM := $(shell if [ -f $(PLATFORM_FILE) ]; then cat $(PLATFORM_FILE); else echo unknown; fi)
|
||||||
TMP_FILE := $(shell mktemp)
|
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
|
||||||
|
RUST_ARCH := $(shell if [ "$(ARCH)" = "riscv64" ]; then echo riscv64gc; else echo $(ARCH); fi)
|
||||||
|
REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./build/env/basename.sh)
|
||||||
|
TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./build/env/basename.sh)
|
||||||
|
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
|
||||||
|
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
|
||||||
|
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
|
||||||
|
FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
|
||||||
|
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
|
||||||
|
IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
|
||||||
|
STARTD_SRC := core/startd.service $(BUILD_SRC)
|
||||||
|
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
|
||||||
|
WEB_SHARED_SRC := $(call ls-files, web/projects/shared) $(call ls-files, web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
|
||||||
|
WEB_UI_SRC := $(call ls-files, web/projects/ui)
|
||||||
|
WEB_SETUP_WIZARD_SRC := $(call ls-files, web/projects/setup-wizard)
|
||||||
|
WEB_START_TUNNEL_SRC := $(call ls-files, web/projects/start-tunnel)
|
||||||
|
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
|
||||||
|
GZIP_BIN := $(shell which pigz || which gzip)
|
||||||
|
TAR_BIN := $(shell which gtar || which tar)
|
||||||
|
COMPILED_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container container-runtime/rootfs.$(ARCH).squashfs
|
||||||
|
STARTOS_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs $(PLATFORM_FILE) \
|
||||||
|
$(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then \
|
||||||
|
echo target/aarch64-unknown-linux-musl/release/pi-beep; \
|
||||||
|
fi) \
|
||||||
|
$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then \
|
||||||
|
echo target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph; \
|
||||||
|
fi') \
|
||||||
|
$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]; then \
|
||||||
|
echo target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console; \
|
||||||
|
fi')
|
||||||
|
REGISTRY_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox core/start-registryd.service
|
||||||
|
TUNNEL_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service
|
||||||
|
|
||||||
|
ifeq ($(REMOTE),)
|
||||||
|
mkdir = mkdir -p $1
|
||||||
|
rm = rm -rf $1
|
||||||
|
cp = cp -r $1 $2
|
||||||
|
ln = ln -sf $1 $2
|
||||||
|
else
|
||||||
|
ifeq ($(SSHPASS),)
|
||||||
|
ssh = ssh $(REMOTE) $1
|
||||||
|
else
|
||||||
|
ssh = sshpass -p $(SSHPASS) ssh $(REMOTE) $1
|
||||||
|
endif
|
||||||
|
mkdir = $(call ssh,'sudo mkdir -p $1')
|
||||||
|
rm = $(call ssh,'sudo rm -rf $1')
|
||||||
|
ln = $(call ssh,'sudo ln -sf $1 $2')
|
||||||
|
define cp
|
||||||
|
$(TAR_BIN) --transform "s|^$1|x|" -czv -f- $1 | $(call ssh,"sudo tar --transform 's|^x|$2|' -xzv -f- -C /")
|
||||||
|
endef
|
||||||
|
endif
|
||||||
|
|
||||||
.DELETE_ON_ERROR:
|
.DELETE_ON_ERROR:
|
||||||
|
|
||||||
all: eos.img
|
.PHONY: all metadata install clean format install-cli cli uis ui reflash deb $(IMAGE_TYPE) squashfs wormhole wormhole-deb test test-core test-sdk test-container-runtime registry install-registry tunnel install-tunnel ts-bindings
|
||||||
|
|
||||||
gzip: eos.img
|
all: $(STARTOS_TARGETS)
|
||||||
gzip -k eos.img
|
|
||||||
|
touch:
|
||||||
|
touch $(STARTOS_TARGETS)
|
||||||
|
|
||||||
|
metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -f eos.img
|
rm -rf core/target
|
||||||
rm -f ubuntu.img
|
rm -rf core/bindings
|
||||||
rm -f product_key.txt
|
rm -rf web/.angular
|
||||||
rm -f system-images/**/*.tar
|
rm -f web/config.json
|
||||||
sudo rm -f $(EMBASSY_BINS)
|
rm -rf web/node_modules
|
||||||
rm -f frontend/config.json
|
rm -rf web/dist
|
||||||
rm -rf frontend/node_modules
|
|
||||||
rm -rf frontend/dist
|
|
||||||
rm -rf patch-db/client/node_modules
|
rm -rf patch-db/client/node_modules
|
||||||
rm -rf patch-db/client/dist
|
rm -rf patch-db/client/dist
|
||||||
|
rm -rf patch-db/target
|
||||||
|
rm -rf target
|
||||||
|
rm -rf dpkg-workdir
|
||||||
|
rm -rf image-recipe/deb
|
||||||
|
rm -rf results
|
||||||
|
rm -rf build/lib/firmware
|
||||||
|
rm -rf container-runtime/dist
|
||||||
|
rm -rf container-runtime/node_modules
|
||||||
|
rm -f container-runtime/*.squashfs
|
||||||
|
(cd sdk && make clean)
|
||||||
|
rm -f env/*.txt
|
||||||
|
|
||||||
sdk:
|
format:
|
||||||
cd backend/ && ./install-sdk.sh
|
cd core && cargo +nightly fmt
|
||||||
|
|
||||||
eos.img: $(EMBASSY_SRC) system-images/compat/compat.tar system-images/utils/utils.tar
|
test: | test-core test-sdk test-container-runtime
|
||||||
! test -f eos.img || rm eos.img
|
|
||||||
if [ "$(NO_KEY)" = "1" ]; then NO_KEY=1 ./build/make-image.sh; else ./build/make-image.sh; fi
|
|
||||||
|
|
||||||
system-images/compat/compat.tar: $(COMPAT_SRC)
|
test-core: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||||
cd system-images/compat && ./build.sh
|
./core/run-tests.sh
|
||||||
cd system-images/compat && DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/compat --platform=linux/arm64 -o type=docker,dest=compat.tar .
|
|
||||||
|
|
||||||
system-images/utils/utils.tar: $(UTILS_SRC)
|
test-sdk: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
|
||||||
cd system-images/utils && DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/utils --platform=linux/arm64 -o type=docker,dest=utils.tar .
|
cd sdk && make test
|
||||||
|
|
||||||
raspios.img:
|
test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
|
||||||
wget --continue https://downloads.raspberrypi.org/raspios_lite_arm64/images/raspios_lite_arm64-2022-01-28/2022-01-28-raspios-bullseye-arm64-lite.zip
|
cd container-runtime && npm test
|
||||||
unzip 2022-01-28-raspios-bullseye-arm64-lite.zip
|
|
||||||
mv 2022-01-28-raspios-bullseye-arm64-lite.img raspios.img
|
|
||||||
|
|
||||||
product_key.txt:
|
install-cli: $(GIT_HASH_FILE)
|
||||||
$(shell which echo) -n "X" > product_key.txt
|
./core/build/build-cli.sh --install
|
||||||
cat /dev/urandom | base32 | head -c11 | tr '[:upper:]' '[:lower:]' >> product_key.txt
|
|
||||||
if [ "$(KEY)" != "" ]; then $(shell which echo) -n "$(KEY)" > product_key.txt; fi
|
|
||||||
echo >> product_key.txt
|
|
||||||
|
|
||||||
snapshots: libs/snapshot-creator/Cargo.toml
|
cli: $(GIT_HASH_FILE)
|
||||||
cd libs/ && ./build-v8-snapshot.sh
|
./core/build/build-cli.sh
|
||||||
cd libs/ && ./build-arm-v8-snapshot.sh
|
|
||||||
|
|
||||||
$(EMBASSY_BINS): $(BACKEND_SRC)
|
registry: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox
|
||||||
cd backend && ./build-prod.sh
|
|
||||||
|
|
||||||
frontend/node_modules: frontend/package.json
|
install-registry: $(REGISTRY_TARGETS)
|
||||||
npm --prefix frontend ci
|
$(call mkdir,$(DESTDIR)/usr/bin)
|
||||||
|
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox,$(DESTDIR)/usr/bin/start-registrybox)
|
||||||
|
$(call ln,/usr/bin/start-registrybox,$(DESTDIR)/usr/bin/start-registryd)
|
||||||
|
$(call ln,/usr/bin/start-registrybox,$(DESTDIR)/usr/bin/start-registry)
|
||||||
|
|
||||||
$(EMBASSY_UIS): $(FRONTEND_SRC) frontend/node_modules patch-db/client patch-db/client/dist frontend/config.json
|
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||||
npm --prefix frontend run build:all
|
$(call cp,core/start-registryd.service,$(DESTDIR)/lib/systemd/system/start-registryd.service)
|
||||||
|
|
||||||
frontend/config.json: .git/HEAD $(GIT_REFS)
|
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||||
jq '.useMocks = false' frontend/config-sample.json > frontend/config.json
|
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-registrybox.sh
|
||||||
npm --prefix frontend run-script build-config
|
|
||||||
|
|
||||||
patch-db/client/node_modules: patch-db/client/package.json
|
tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox
|
||||||
npm --prefix patch-db/client install
|
|
||||||
|
|
||||||
patch-db/client/dist: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules
|
install-tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service
|
||||||
! test -d patch-db/client/dist || rm -rf patch-db/client/dist
|
$(call mkdir,$(DESTDIR)/usr/bin)
|
||||||
|
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox,$(DESTDIR)/usr/bin/start-tunnelbox)
|
||||||
|
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunneld)
|
||||||
|
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunnel)
|
||||||
|
|
||||||
|
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||||
|
$(call cp,core/start-tunneld.service,$(DESTDIR)/lib/systemd/system/start-tunneld.service)
|
||||||
|
|
||||||
|
$(call mkdir,$(DESTDIR)/usr/lib/startos/scripts)
|
||||||
|
$(call cp,build/lib/scripts/forward-port,$(DESTDIR)/usr/lib/startos/scripts/forward-port)
|
||||||
|
|
||||||
|
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox: $(CORE_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) web/dist/static/start-tunnel/index.html
|
||||||
|
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-tunnelbox.sh
|
||||||
|
|
||||||
|
deb: results/$(BASENAME).deb
|
||||||
|
|
||||||
|
results/$(BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/startos) $(STARTOS_TARGETS)
|
||||||
|
PLATFORM=$(PLATFORM) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
|
||||||
|
|
||||||
|
registry-deb: results/$(REGISTRY_BASENAME).deb
|
||||||
|
|
||||||
|
results/$(REGISTRY_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-registry) $(REGISTRY_TARGETS)
|
||||||
|
PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
|
||||||
|
|
||||||
|
tunnel-deb: results/$(TUNNEL_BASENAME).deb
|
||||||
|
|
||||||
|
results/$(TUNNEL_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-tunnel) $(TUNNEL_TARGETS) build/lib/scripts/forward-port
|
||||||
|
PROJECT=start-tunnel PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=wireguard-tools,iptables,conntrack ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
|
||||||
|
|
||||||
|
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
|
||||||
|
|
||||||
|
squashfs: results/$(BASENAME).squashfs
|
||||||
|
|
||||||
|
results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_SRC) results/$(BASENAME).deb
|
||||||
|
ARCH=$(ARCH) ./build/image-recipe/run-local-build.sh "results/$(BASENAME).deb"
|
||||||
|
|
||||||
|
# For creating os images. DO NOT USE
|
||||||
|
install: $(STARTOS_TARGETS)
|
||||||
|
$(call mkdir,$(DESTDIR)/usr/bin)
|
||||||
|
$(call mkdir,$(DESTDIR)/usr/sbin)
|
||||||
|
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox,$(DESTDIR)/usr/bin/startbox)
|
||||||
|
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
|
||||||
|
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
|
||||||
|
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,target/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
|
||||||
|
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
|
||||||
|
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph,$(DESTDIR)/usr/bin/flamegraph); \
|
||||||
|
fi
|
||||||
|
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]'; then \
|
||||||
|
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); \
|
||||||
|
fi
|
||||||
|
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
|
||||||
|
$(call ln,/usr/bin/startos-backup-fs,$(DESTDIR)/usr/sbin/mount.backup-fs)
|
||||||
|
|
||||||
|
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||||
|
$(call cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
|
||||||
|
|
||||||
|
$(call mkdir,$(DESTDIR)/usr/lib)
|
||||||
|
$(call rm,$(DESTDIR)/usr/lib/startos)
|
||||||
|
$(call cp,build/lib,$(DESTDIR)/usr/lib/startos)
|
||||||
|
$(call mkdir,$(DESTDIR)/usr/lib/startos/container-runtime)
|
||||||
|
$(call cp,container-runtime/rootfs.$(ARCH).squashfs,$(DESTDIR)/usr/lib/startos/container-runtime/rootfs.squashfs)
|
||||||
|
|
||||||
|
$(call cp,build/env/PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
|
||||||
|
$(call cp,build/env/ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
|
||||||
|
$(call cp,build/env/GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
|
||||||
|
$(call cp,build/env/VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
|
||||||
|
|
||||||
|
update-overlay: $(STARTOS_TARGETS)
|
||||||
|
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
|
||||||
|
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
@if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat $(VERSION_FILE)`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
|
||||||
|
$(call ssh,"sudo systemctl stop startd")
|
||||||
|
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
|
||||||
|
$(call ssh,"sudo systemctl start startd")
|
||||||
|
|
||||||
|
wormhole: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
|
||||||
|
@echo "Paste the following command into the shell of your StartOS server:"
|
||||||
|
@echo
|
||||||
|
@wormhole send core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
|
||||||
|
|
||||||
|
wormhole-deb: results/$(BASENAME).deb
|
||||||
|
@echo "Paste the following command into the shell of your StartOS server:"
|
||||||
|
@echo
|
||||||
|
@wormhole send results/$(BASENAME).deb 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade '"'"'cd $$(mktemp -d) && wormhole receive --accept-file %s && apt-get install -y --reinstall ./$(BASENAME).deb'"'"'\n", $$3 }'
|
||||||
|
|
||||||
|
wormhole-squashfs: results/$(BASENAME).squashfs
|
||||||
|
$(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs | head -c 32))
|
||||||
|
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
|
||||||
|
@echo "Paste the following command into the shell of your StartOS server:"
|
||||||
|
@echo
|
||||||
|
@wormhole send results/$(BASENAME).squashfs 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo sh -c '"'"'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE) && /usr/lib/startos/scripts/prune-boot && cd /media/startos/images && wormhole receive --accept-file %s && CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade ./$(BASENAME).squashfs'"'"'\n", $$3 }'
|
||||||
|
|
||||||
|
update: $(STARTOS_TARGETS)
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
|
||||||
|
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
|
||||||
|
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
|
||||||
|
|
||||||
|
update-startbox: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox # only update binary (faster than full update)
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
|
||||||
|
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox,/media/startos/next/usr/bin/startbox)
|
||||||
|
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync true')
|
||||||
|
|
||||||
|
update-deb: results/$(BASENAME).deb # better than update, but only available from debian
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
|
||||||
|
$(call mkdir,/media/startos/next/var/tmp/startos-deb)
|
||||||
|
$(call cp,results/$(BASENAME).deb,/media/startos/next/var/tmp/startos-deb/$(BASENAME).deb)
|
||||||
|
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /var/tmp/startos-deb/$(BASENAME).deb"')
|
||||||
|
|
||||||
|
update-squashfs: results/$(BASENAME).squashfs
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs))
|
||||||
|
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
|
||||||
|
$(call ssh,'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE)')
|
||||||
|
$(call ssh,'/usr/lib/startos/scripts/prune-boot')
|
||||||
|
$(call cp,results/$(BASENAME).squashfs,/media/startos/images/next.rootfs)
|
||||||
|
$(call ssh,'sudo CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade /media/startos/images/next.rootfs')
|
||||||
|
|
||||||
|
emulate-reflash: $(STARTOS_TARGETS)
|
||||||
|
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||||
|
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
|
||||||
|
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
|
||||||
|
$(call ssh,'sudo rm -f /media/startos/config/disk.guid /media/startos/config/overlay/etc/hostname')
|
||||||
|
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
|
||||||
|
|
||||||
|
upload-ota: results/$(BASENAME).squashfs
|
||||||
|
TARGET=$(TARGET) KEY=$(KEY) ./build/upload-ota.sh
|
||||||
|
|
||||||
|
container-runtime/debian.$(ARCH).squashfs: ./container-runtime/download-base-image.sh
|
||||||
|
ARCH=$(ARCH) ./container-runtime/download-base-image.sh
|
||||||
|
|
||||||
|
container-runtime/package-lock.json: sdk/dist/package.json
|
||||||
|
npm --prefix container-runtime i
|
||||||
|
touch container-runtime/package-lock.json
|
||||||
|
|
||||||
|
container-runtime/node_modules/.package-lock.json: container-runtime/package-lock.json
|
||||||
|
npm --prefix container-runtime ci
|
||||||
|
touch container-runtime/node_modules/.package-lock.json
|
||||||
|
|
||||||
|
ts-bindings: core/bindings/index.ts
|
||||||
|
mkdir -p sdk/base/lib/osBindings
|
||||||
|
rsync -ac --delete core/bindings/ sdk/base/lib/osBindings/
|
||||||
|
|
||||||
|
core/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
|
||||||
|
rm -rf core/bindings
|
||||||
|
./core/build/build-ts.sh
|
||||||
|
ls core/bindings/*.ts | sed 's/core\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/bindings/index.ts
|
||||||
|
npm --prefix sdk exec -- prettier --config ./sdk/base/package.json -w ./core/bindings/*.ts
|
||||||
|
touch core/bindings/index.ts
|
||||||
|
|
||||||
|
sdk/dist/package.json sdk/baseDist/package.json: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
|
||||||
|
(cd sdk && make bundle)
|
||||||
|
touch sdk/dist/package.json
|
||||||
|
touch sdk/baseDist/package.json
|
||||||
|
|
||||||
|
# TODO: make container-runtime its own makefile?
|
||||||
|
container-runtime/dist/index.js: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
|
||||||
|
npm --prefix container-runtime run build
|
||||||
|
|
||||||
|
container-runtime/dist/node_modules/.package-lock.json container-runtime/dist/package.json container-runtime/dist/package-lock.json: container-runtime/package.json container-runtime/package-lock.json sdk/dist/package.json container-runtime/install-dist-deps.sh
|
||||||
|
./container-runtime/install-dist-deps.sh
|
||||||
|
touch container-runtime/dist/node_modules/.package-lock.json
|
||||||
|
|
||||||
|
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/update-image-local.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules/.package-lock.json core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container
|
||||||
|
ARCH=$(ARCH) ./container-runtime/update-image-local.sh
|
||||||
|
|
||||||
|
build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(shell ls build/dpkg-deps/*)
|
||||||
|
PLATFORM=$(PLATFORM) ARCH=$(ARCH) build/dpkg-deps/generate.sh
|
||||||
|
|
||||||
|
$(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE)
|
||||||
|
./build/download-firmware.sh $(PLATFORM)
|
||||||
|
|
||||||
|
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
|
||||||
|
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh
|
||||||
|
touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
|
||||||
|
|
||||||
|
core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||||
|
ARCH=$(ARCH) ./core/build/build-start-container.sh
|
||||||
|
touch core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container
|
||||||
|
|
||||||
|
web/package-lock.json: web/package.json sdk/baseDist/package.json
|
||||||
|
npm --prefix web i
|
||||||
|
touch web/package-lock.json
|
||||||
|
|
||||||
|
web/node_modules/.package-lock.json: web/package-lock.json
|
||||||
|
npm --prefix web ci
|
||||||
|
touch web/node_modules/.package-lock.json
|
||||||
|
|
||||||
|
web/.angular/.updated: patch-db/client/dist/index.js sdk/baseDist/package.json web/node_modules/.package-lock.json
|
||||||
|
rm -rf web/.angular
|
||||||
|
mkdir -p web/.angular
|
||||||
|
touch web/.angular/.updated
|
||||||
|
|
||||||
|
web/.i18n-checked: $(WEB_SHARED_SRC) $(WEB_UI_SRC) $(WEB_SETUP_WIZARD_SRC) $(WEB_START_TUNNEL_SRC)
|
||||||
|
npm --prefix web run check:i18n
|
||||||
|
touch web/.i18n-checked
|
||||||
|
|
||||||
|
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
|
||||||
|
npm --prefix web run build:ui
|
||||||
|
touch web/dist/raw/ui/index.html
|
||||||
|
|
||||||
|
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
|
||||||
|
npm --prefix web run build:setup
|
||||||
|
touch web/dist/raw/setup-wizard/index.html
|
||||||
|
|
||||||
|
web/dist/raw/start-tunnel/index.html: $(WEB_START_TUNNEL_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
|
||||||
|
npm --prefix web run build:tunnel
|
||||||
|
touch web/dist/raw/start-tunnel/index.html
|
||||||
|
|
||||||
|
web/dist/static/%/index.html: web/dist/raw/%/index.html
|
||||||
|
./web/compress-uis.sh $*
|
||||||
|
|
||||||
|
web/config.json: $(GIT_HASH_FILE) $(ENVIRONMENT_FILE) web/config-sample.json web/update-config.sh
|
||||||
|
./web/update-config.sh
|
||||||
|
|
||||||
|
patch-db/client/node_modules/.package-lock.json: patch-db/client/package.json
|
||||||
|
npm --prefix patch-db/client ci
|
||||||
|
touch patch-db/client/node_modules/.package-lock.json
|
||||||
|
|
||||||
|
patch-db/client/dist/index.js: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules/.package-lock.json
|
||||||
|
rm -rf patch-db/client/dist
|
||||||
npm --prefix patch-db/client run build
|
npm --prefix patch-db/client run build
|
||||||
|
touch patch-db/client/dist/index.js
|
||||||
|
|
||||||
# this is a convenience step to build all frontends - it is not referenced elsewhere in this file
|
# used by github actions
|
||||||
frontends: frontend/node_modules frontend/config.json $(EMBASSY_UIS)
|
compiled-$(ARCH).tar: $(COMPILED_TARGETS) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
|
||||||
|
tar -cvf $@ $^
|
||||||
|
|
||||||
|
# this is a convenience step to build all web uis - it is not referenced elsewhere in this file
|
||||||
|
uis: $(WEB_UIS)
|
||||||
|
|
||||||
# this is a convenience step to build the UI
|
# this is a convenience step to build the UI
|
||||||
ui: frontend/node_modules frontend/config.json frontend/dist/ui
|
ui: web/dist/raw/ui
|
||||||
|
|
||||||
# this is a convenience step to build the backend
|
target/aarch64-unknown-linux-musl/release/pi-beep: ./build/build-cargo-dep.sh
|
||||||
backend: $(EMBASSY_BINS)
|
ARCH=aarch64 ./build/build-cargo-dep.sh pi-beep
|
||||||
|
|
||||||
|
target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console: ./build/build-cargo-dep.sh
|
||||||
|
ARCH=$(ARCH) ./build/build-cargo-dep.sh tokio-console
|
||||||
|
touch $@
|
||||||
|
|
||||||
|
target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs: ./build/build-cargo-dep.sh
|
||||||
|
ARCH=$(ARCH) ./build/build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
|
||||||
|
touch $@
|
||||||
|
|
||||||
|
target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph: ./build/build-cargo-dep.sh
|
||||||
|
ARCH=$(ARCH) ./build/build-cargo-dep.sh flamegraph
|
||||||
|
touch $@
|
||||||
|
|||||||
97
README.md
@@ -1,49 +1,70 @@
|
|||||||
# EmbassyOS
|
<div align="center">
|
||||||
[](https://github.com/Start9Labs/embassy-os/releases)
|
<img src="web/projects/shared/assets/img/icon.png" alt="StartOS Logo" width="16%" />
|
||||||
[](https://matrix.to/#/#community:matrix.start9labs.com)
|
<h1 style="margin-top: 0;">StartOS</h1>
|
||||||
[](https://t.me/start9_labs)
|
<a href="https://github.com/Start9Labs/start-os/releases">
|
||||||
[](https://docs.start9labs.com)
|
<img alt="GitHub release (with filter)" src="https://img.shields.io/github/v/release/start9labs/start-os?logo=github">
|
||||||
[](https://matrix.to/#/#community-dev:matrix.start9labs.com)
|
</a>
|
||||||
[](https://start9labs.com)
|
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
|
||||||
|
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
|
||||||
|
</a>
|
||||||
|
<a href="https://heyapollo.com/product/startos">
|
||||||
|
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
|
||||||
|
</a>
|
||||||
|
<a href="https://twitter.com/start9labs">
|
||||||
|
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
|
||||||
|
</a>
|
||||||
|
<a href="https://docs.start9.com">
|
||||||
|
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
|
||||||
|
</a>
|
||||||
|
<a href="https://matrix.to/#/#dev-startos:matrix.start9labs.com">
|
||||||
|
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
|
||||||
|
</a>
|
||||||
|
<a href="https://start9.com">
|
||||||
|
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
|
||||||
[](http://mastodon.start9labs.com)
|
## What is StartOS?
|
||||||
[](https://twitter.com/start9labs)
|
|
||||||
|
|
||||||
### _Welcome to the era of Sovereign Computing_ ###
|
StartOS is an open-source Linux distribution for running a personal server. It handles discovery, installation, network configuration, data backup, dependency management, and health monitoring of self-hosted services.
|
||||||
|
|
||||||
EmbassyOS is a browser-based, graphical operating system for a personal server. EmbassyOS facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services. It is the most advanced, secure, reliable, and user friendly personal server OS in the world.
|
**Tech stack:** Rust backend (Tokio/Axum), Angular frontend, Node.js container runtime with LXC, and a custom diff-based database ([Patch-DB](https://github.com/Start9Labs/patch-db)) for reactive state synchronization.
|
||||||
|
|
||||||
## Running EmbassyOS
|
Services run in isolated LXC containers, packaged as [S9PKs](https://github.com/Start9Labs/start-os/blob/master/core/s9pk-structure.md) — a signed, merkle-archived format that supports partial downloads and cryptographic verification.
|
||||||
There are multiple ways to get your hands on EmbassyOS.
|
|
||||||
|
|
||||||
### :moneybag: Buy an Embassy
|
## What can you do with it?
|
||||||
This is the most convenient option. Simply [buy an Embassy](https://start9.com) from Start9 and plug it in. Depending on where you live, shipping costs and import duties will vary.
|
|
||||||
|
|
||||||
### :construction_worker: Build your own Embassy
|
StartOS lets you self-host services that would otherwise depend on third-party cloud providers — giving you full ownership of your data and infrastructure.
|
||||||
While not as convenient as buying an Embassy, this option is easier than you might imagine, and there are 4 reasons why you might prefer it:
|
|
||||||
1. You already have a Raspberry Pi and would like to re-purpose it.
|
|
||||||
1. You want to save on shipping costs.
|
|
||||||
1. You prefer not to divulge your physical address.
|
|
||||||
1. You just like building things.
|
|
||||||
|
|
||||||
To pursue this option, follow this [guide](https://start9.com/latest/diy).
|
Browse available services on the [Start9 Marketplace](https://marketplace.start9.com/), including:
|
||||||
|
|
||||||
### :hammer_and_wrench: Build EmbassyOS from Source
|
- **Bitcoin & Lightning** — Run a full Bitcoin node, Lightning node, BTCPay Server, and other payment infrastructure
|
||||||
|
- **Communication** — Self-host Matrix, SimpleX, or other messaging platforms
|
||||||
|
- **Cloud Storage** — Run Nextcloud, Vaultwarden, and other productivity tools
|
||||||
|
|
||||||
EmbassyOS can be built from source, for personal use, for free.
|
Services are added by the community. If a service you want isn't available, you can [package it yourself](https://github.com/Start9Labs/ai-service-packaging/).
|
||||||
A detailed guide for doing so can be found [here](https://github.com/Start9Labs/embassy-os/blob/master/build/README.md).
|
|
||||||
|
|
||||||
## :heart: Contributing
|
## Getting StartOS
|
||||||
There are multiple ways to contribute: work directly on EmbassyOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://github.com/Start9Labs/embassy-os/blob/master/CONTRIBUTING.md).
|
|
||||||
|
|
||||||
## UI Screenshots
|
### Buy a Start9 server
|
||||||
<p align="center">
|
|
||||||
<img src="assets/EmbassyOS.png" alt="EmbassyOS" width="65%">
|
The easiest path. [Buy a server](https://store.start9.com) from Start9 and plug it in.
|
||||||
</p>
|
|
||||||
<p align="center">
|
### Build your own
|
||||||
<img src="assets/eos-services.png" alt="Embassy Services" width="45%">
|
|
||||||
<img src="assets/eos-preferences.png" alt="Embassy Preferences" width="45%">
|
Install StartOS on your own hardware. Follow one of the [DIY guides](https://start9.com/latest/diy). Reasons to go this route:
|
||||||
</p>
|
|
||||||
<p align="center">
|
1. You already have compatible hardware
|
||||||
<img src="assets/eos-bitcoind-health-check.png" alt="Embassy Bitcoin Health Checks" width="45%"> <img src="assets/eos-logs.png" alt="Embassy Logs" width="45%">
|
2. You want to save on shipping costs
|
||||||
</p>
|
3. You prefer not to share your physical address
|
||||||
|
4. You enjoy building things
|
||||||
|
|
||||||
|
### Build from source
|
||||||
|
|
||||||
|
See [CONTRIBUTING.md](CONTRIBUTING.md) for environment setup, build instructions, and development workflow.
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. See [CONTRIBUTING.md](CONTRIBUTING.md) or visit [start9.com/contribute](https://start9.com/contribute/).
|
||||||
|
|
||||||
|
To report security issues, email [security@start9.com](mailto:security@start9.com).
|
||||||
|
|||||||
261
TODO.md
Normal file
@@ -0,0 +1,261 @@
|
|||||||
|
# AI Agent TODOs
|
||||||
|
|
||||||
|
Pending tasks for AI agents. Remove items when completed.
|
||||||
|
|
||||||
|
## Unreviewed CLAUDE.md Sections
|
||||||
|
|
||||||
|
- [ ] Architecture - Web (`/web`) - @MattDHill
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- [ ] Support preferred external ports besides 443 - @dr-bonez
|
||||||
|
|
||||||
|
**Problem**: Currently, port 443 is the only preferred external port that is actually honored. When a
|
||||||
|
service requests `preferred_external_port: 8443` (or any non-443 value) for SSL, the system ignores
|
||||||
|
the preference and assigns a dynamic-range port (49152-65535). The `preferred_external_port` is only
|
||||||
|
used as a label for Tor mappings and as a trigger for the port-443 special case in `update()`.
|
||||||
|
|
||||||
|
**Goal**: Honor `preferred_external_port` for both SSL and non-SSL binds when the requested port is
|
||||||
|
available, with proper conflict resolution and fallback to dynamic-range allocation.
|
||||||
|
|
||||||
|
### Design
|
||||||
|
|
||||||
|
**Key distinction**: There are two separate concepts for SSL port usage:
|
||||||
|
|
||||||
|
1. **Port ownership** (`assigned_ssl_port`) — A port exclusively owned by a binding, allocated from
|
||||||
|
`AvailablePorts`. Used for server hostnames (`.local`, mDNS, etc.) and iptables forwards.
|
||||||
|
2. **Domain SSL port** — The port used for domain-based vhost entries. A binding does NOT need to own
|
||||||
|
a port to have a domain vhost on it. The VHostController already supports multiple hostnames on the
|
||||||
|
same port via SNI. Any binding can create a domain vhost entry on any SSL port that the
|
||||||
|
VHostController has a listener for, regardless of who "owns" that port.
|
||||||
|
|
||||||
|
For example: the OS owns port 443 as its `assigned_ssl_port`. A service with
|
||||||
|
`preferred_external_port: 443` won't get 443 as its `assigned_ssl_port` (it's taken), but it CAN
|
||||||
|
still have domain vhost entries on port 443 — SNI routes by hostname.
|
||||||
|
|
||||||
|
#### 1. Preferred Port Allocation for Ownership ✅ DONE
|
||||||
|
|
||||||
|
`AvailablePorts::try_alloc(port) -> Option<u16>` added to `forward.rs`. `BindInfo::new()` and
|
||||||
|
`BindInfo::update()` attempt the preferred port first, falling back to dynamic-range allocation.
|
||||||
|
|
||||||
|
#### 2. Per-Address Enable/Disable ✅ DONE
|
||||||
|
|
||||||
|
Gateway-level `private_disabled`/`public_enabled` on `NetInfo` replaced with per-address
|
||||||
|
`DerivedAddressInfo` on `BindInfo`. `hostname_info` removed from `Host` — computed addresses now
|
||||||
|
live in `BindInfo.addresses.possible`.
|
||||||
|
|
||||||
|
**`DerivedAddressInfo` struct** (on `BindInfo`):
|
||||||
|
|
||||||
|
```rust
|
||||||
|
pub struct DerivedAddressInfo {
|
||||||
|
pub private_disabled: BTreeSet<HostnameInfo>,
|
||||||
|
pub public_enabled: BTreeSet<HostnameInfo>,
|
||||||
|
pub possible: BTreeSet<HostnameInfo>, // COMPUTED by update()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
`DerivedAddressInfo::enabled()` returns `possible` filtered by the two sets. `HostnameInfo` derives
|
||||||
|
`Ord` for `BTreeSet` usage. `AddressFilter` (implementing `InterfaceFilter`) derives enabled
|
||||||
|
gateway set from `DerivedAddressInfo` for vhost/forward filtering.
|
||||||
|
|
||||||
|
**RPC endpoint**: `set-gateway-enabled` replaced with `set-address-enabled` (on both
|
||||||
|
`server.host.binding` and `package.host.binding`).
|
||||||
|
|
||||||
|
**How disabling works per address type** (enforcement deferred to Section 3):
|
||||||
|
|
||||||
|
- **WAN/LAN IP:port**: Will be enforced via **source-IP gating** in the vhost layer (Section 3).
|
||||||
|
- **Hostname-based addresses** (`.local`, domains): Disabled by **not creating the vhost/SNI
|
||||||
|
entry** for that hostname.
|
||||||
|
|
||||||
|
#### 3. Eliminate the Port 5443 Hack: Source-IP-Based WAN Blocking (`vhost.rs`, `net_controller.rs`)
|
||||||
|
|
||||||
|
**Current problem**: The `if ssl.preferred_external_port == 443` branch (line 341 of
|
||||||
|
`net_controller.rs`) creates a bespoke dual-vhost setup: port 5443 for private-only access and port
|
||||||
|
443 for public (or public+private). This exists because both public and private traffic arrive on the
|
||||||
|
same port 443 listener, and the current `InterfaceFilter`/`PublicFilter` model distinguishes
|
||||||
|
public/private by which *network interface* the connection arrived on — which doesn't work when both
|
||||||
|
traffic types share a listener.
|
||||||
|
|
||||||
|
**Solution**: Determine public vs private based on **source IP** at the vhost level. Traffic arriving
|
||||||
|
from the gateway IP should be treated as public (the gateway may MASQUERADE/NAT internet traffic, so
|
||||||
|
anything from the gateway is potentially public). Traffic from LAN IPs is private.
|
||||||
|
|
||||||
|
This applies to **all** vhost targets, not just port 443:
|
||||||
|
|
||||||
|
- **Add a `public` field to `ProxyTarget`** (or an enum: `Public`, `Private`, `Both`) indicating
|
||||||
|
what traffic this target accepts, derived from the binding's user-controlled `public` field.
|
||||||
|
- **Modify `VHostTarget::filter()`** (`vhost.rs:342`): Instead of (or in addition to) checking the
|
||||||
|
network interface via `GatewayInfo`, check the source IP of the TCP connection against known gateway
|
||||||
|
IPs. If the source IP matches a gateway or IP outside the subnet, the connection is public;
|
||||||
|
otherwise it's private. Use this to gate against the target's `public` field.
|
||||||
|
- **Eliminate the 5443 port entirely**: A single vhost entry on port 443 (or any shared SSL port) can
|
||||||
|
serve both public and private traffic, with per-target source-IP gating determining which backend
|
||||||
|
handles which connections.
|
||||||
|
|
||||||
|
#### 4. Port Forward Mapping in Patch-DB
|
||||||
|
|
||||||
|
When a binding is marked `public = true`, StartOS must record the required port forwards in patch-db
|
||||||
|
so the frontend can display them to the user. The user then configures these on their router manually.
|
||||||
|
|
||||||
|
For each public binding, store:
|
||||||
|
- The external port the router should forward (the actual vhost port used for domains, or the
|
||||||
|
`assigned_port` / `assigned_ssl_port` for non-domain access)
|
||||||
|
- The protocol (TCP/UDP)
|
||||||
|
- The StartOS LAN IP as the forward target
|
||||||
|
- Which service/binding this forward is for (for display purposes)
|
||||||
|
|
||||||
|
This mapping should be in the public database model so the frontend can read and display it.
|
||||||
|
|
||||||
|
#### 5. Simplify `update()` Domain Vhost Logic (`net_controller.rs`)
|
||||||
|
|
||||||
|
With source-IP gating in the vhost controller:
|
||||||
|
|
||||||
|
- **Remove the `== 443` special case** and the 5443 secondary vhost.
|
||||||
|
- For **server hostnames** (`.local`, mDNS, embassy, startos, localhost): use `assigned_ssl_port`
|
||||||
|
(the port the binding owns).
|
||||||
|
- For **domain-based vhost entries**: attempt to use `preferred_external_port` as the vhost port.
|
||||||
|
This succeeds if the port is either unused or already has an SSL listener (SNI handles sharing).
|
||||||
|
It fails only if the port is already in use by a non-SSL binding, or is a restricted port. On
|
||||||
|
failure, fall back to `assigned_ssl_port`.
|
||||||
|
- The binding's `public` field determines the `ProxyTarget`'s public/private gating.
|
||||||
|
- Hostname info must exactly match the actual vhost port used: for server hostnames, report
|
||||||
|
`ssl_port: assigned_ssl_port`. For domains, report `ssl_port: preferred_external_port` if it was
|
||||||
|
successfully used for the domain vhost, otherwise report `ssl_port: assigned_ssl_port`.
|
||||||
|
|
||||||
|
#### 6. Frontend: Interfaces Page Overhaul (View/Manage Split)
|
||||||
|
|
||||||
|
The current interfaces page is a single page showing gateways (with toggle), addresses, public
|
||||||
|
domains, and private domains. It gets split into two pages: **View** and **Manage**.
|
||||||
|
|
||||||
|
**SDK**: `preferredExternalPort` is already exposed. No additional SDK changes needed.
|
||||||
|
|
||||||
|
##### View Page
|
||||||
|
|
||||||
|
Displays all computed addresses for the interface (from `BindInfo.addresses`) as a flat list. For each
|
||||||
|
address, show: URL, type (IPv4, IPv6, .local, domain), access level (public/private),
|
||||||
|
gateway name, SSL indicator, enable/disable state, port forward info for public addresses, and a test button
|
||||||
|
for reachability (see Section 7).
|
||||||
|
|
||||||
|
No gateway-level toggles. The old `gateways.component.ts` toggle UI is removed.
|
||||||
|
|
||||||
|
**Note**: Exact UI element placement (where toggles, buttons, info badges go) is sensitive.
|
||||||
|
Prompt the user for specific placement decisions during implementation.
|
||||||
|
|
||||||
|
##### Manage Page
|
||||||
|
|
||||||
|
Simple CRUD interface for configuring which addresses exist. Two sections:
|
||||||
|
|
||||||
|
- **Public domains**: Add/remove. Uses existing RPC endpoints:
|
||||||
|
- `{server,package}.host.address.domain.public.add`
|
||||||
|
- `{server,package}.host.address.domain.public.remove`
|
||||||
|
- **Private domains**: Add/remove. Uses existing RPC endpoints:
|
||||||
|
- `{server,package}.host.address.domain.private.add`
|
||||||
|
- `{server,package}.host.address.domain.private.remove`
|
||||||
|
|
||||||
|
##### Key Frontend Files to Modify
|
||||||
|
|
||||||
|
| File | Change |
|
||||||
|
|------|--------|
|
||||||
|
| `web/projects/ui/src/app/routes/portal/components/interfaces/` | Overhaul: split into view/manage |
|
||||||
|
| `web/projects/ui/src/app/routes/portal/components/interfaces/gateways.component.ts` | Remove (replaced by per-address toggles on View page) |
|
||||||
|
| `web/projects/ui/src/app/routes/portal/components/interfaces/interface.service.ts` | Update `MappedServiceInterface` to compute enabled addresses from `DerivedAddressInfo` |
|
||||||
|
| `web/projects/ui/src/app/routes/portal/components/interfaces/addresses/` | Refactor for View page with overflow menu (enable/disable) and test buttons |
|
||||||
|
| `web/projects/ui/src/app/routes/portal/routes/services/services.routes.ts` | Add routes for view/manage sub-pages |
|
||||||
|
| `web/projects/ui/src/app/routes/portal/routes/system/system.routes.ts` | Add routes for view/manage sub-pages |
|
||||||
|
|
||||||
|
#### 7. Reachability Test Endpoint
|
||||||
|
|
||||||
|
New RPC endpoint that tests whether an address is actually reachable, with diagnostic info on
|
||||||
|
failure.
|
||||||
|
|
||||||
|
**RPC endpoint** (`binding.rs` or new file):
|
||||||
|
|
||||||
|
- **`test-address`** — Test reachability of a specific address.
|
||||||
|
|
||||||
|
```ts
|
||||||
|
interface BindingTestAddressParams {
|
||||||
|
internalPort: number
|
||||||
|
address: HostnameInfo
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The backend simply performs the raw checks and returns the results. The **frontend** owns all
|
||||||
|
interpretation — it already knows the address type, expected IP, expected port, etc. from the
|
||||||
|
`HostnameInfo` data, so it can compare against the backend results and construct fix messaging.
|
||||||
|
|
||||||
|
```ts
|
||||||
|
interface TestAddressResult {
|
||||||
|
dns: string[] | null // resolved IPs, null if not a domain address or lookup failed
|
||||||
|
portOpen: boolean | null // TCP connect result, null if not applicable
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This yields two RPC methods:
|
||||||
|
- `server.host.binding.test-address`
|
||||||
|
- `package.host.binding.test-address`
|
||||||
|
|
||||||
|
The frontend already has the full `HostnameInfo` context (expected IP, domain, port, gateway,
|
||||||
|
public/private). It compares the backend's raw results against the expected state and constructs
|
||||||
|
localized fix instructions. For example:
|
||||||
|
- `dns` returned but doesn't contain the expected WAN IP → "Update DNS A record for {domain}
|
||||||
|
to {wanIp}"
|
||||||
|
- `dns` is `null` for a domain address → "DNS lookup failed for {domain}"
|
||||||
|
- `portOpen` is `false` → "Configure port forward on your router: external {port} TCP →
|
||||||
|
{lanIp}:{port}"
|
||||||
|
|
||||||
|
### Key Files
|
||||||
|
|
||||||
|
| File | Role |
|
||||||
|
|------|------|
|
||||||
|
| `core/src/net/forward.rs` | `AvailablePorts` — port pool allocation, `try_alloc()` for preferred ports |
|
||||||
|
| `core/src/net/host/binding.rs` | `Bindings` (Map wrapper for patchdb), `BindInfo`/`NetInfo`/`DerivedAddressInfo`/`AddressFilter` — per-address enable/disable, `set-address-enabled` RPC |
|
||||||
|
| `core/src/net/net_controller.rs:259` | `NetServiceData::update()` — computes `DerivedAddressInfo.possible`, vhost/forward/DNS reconciliation, 5443 hack removal |
|
||||||
|
| `core/src/net/vhost.rs` | `VHostController` / `ProxyTarget` — source-IP gating for public/private |
|
||||||
|
| `core/src/net/gateway.rs` | `InterfaceFilter` trait and filter types (`AddressFilter`, `PublicFilter`, etc.) |
|
||||||
|
| `core/src/net/service_interface.rs` | `HostnameInfo` — derives `Ord` for `BTreeSet` usage |
|
||||||
|
| `core/src/net/host/address.rs` | `HostAddress` (flattened struct), domain CRUD endpoints |
|
||||||
|
| `sdk/base/lib/interfaces/Host.ts` | SDK `MultiHost.bindPort()` — no changes needed |
|
||||||
|
| `core/src/db/model/public.rs` | Public DB model — port forward mapping |
|
||||||
|
|
||||||
|
- [ ] Extract TS-exported types into a lightweight sub-crate for fast binding generation
|
||||||
|
|
||||||
|
**Problem**: `make ts-bindings` compiles the entire `start-os` crate (with all dependencies: tokio,
|
||||||
|
axum, openssl, etc.) just to run test functions that serialize type definitions to `.ts` files.
|
||||||
|
Even in debug mode, this takes minutes. The generated output is pure type info — no runtime code
|
||||||
|
is needed.
|
||||||
|
|
||||||
|
**Goal**: Generate TS bindings in seconds by isolating exported types in a small crate with minimal
|
||||||
|
dependencies.
|
||||||
|
|
||||||
|
**Approach**: Create a `core/bindings-types/` sub-crate containing (or re-exporting) all 168
|
||||||
|
`#[ts(export)]` types. This crate depends only on `serde`, `ts-rs`, `exver`, and other type-only
|
||||||
|
crates — not on tokio, axum, openssl, etc. Then `build-ts.sh` runs `cargo test -p bindings-types`
|
||||||
|
instead of `cargo test -p start-os`.
|
||||||
|
|
||||||
|
**Challenge**: The exported types are scattered across `core/src/` and reference each other and
|
||||||
|
other crate types. Extracting them requires either moving the type definitions into the sub-crate
|
||||||
|
(and importing them back into `start-os`) or restructuring to share a common types crate.
|
||||||
|
|
||||||
|
- [ ] Use auto-generated RPC types in the frontend instead of manual duplicates
|
||||||
|
|
||||||
|
**Problem**: The web frontend manually defines ~755 lines of API request/response types in
|
||||||
|
`web/projects/ui/src/app/services/api/api.types.ts` that can drift from the actual Rust types.
|
||||||
|
|
||||||
|
**Current state**: The Rust backend already has `#[ts(export)]` on RPC param types (e.g.
|
||||||
|
`AddTunnelParams`, `SetWifiEnabledParams`, `LoginParams`), and they are generated into
|
||||||
|
`core/bindings/`. However, commit `71b83245b` ("Chore/unexport api ts #2585", April 2024)
|
||||||
|
deliberately stopped building them into the SDK and had the frontend maintain its own types.
|
||||||
|
|
||||||
|
**Goal**: Reverse that decision — pipe the generated RPC types through the SDK into the frontend
|
||||||
|
so `api.types.ts` can import them instead of duplicating them. This eliminates drift between
|
||||||
|
backend and frontend API contracts.
|
||||||
|
|
||||||
|
- [ ] Auto-configure port forwards via UPnP/NAT-PMP/PCP - @dr-bonez
|
||||||
|
|
||||||
|
**Blocked by**: "Support preferred external ports besides 443" (must be implemented and tested
|
||||||
|
end-to-end first).
|
||||||
|
|
||||||
|
**Goal**: When a binding is marked public, automatically configure port forwards on the user's router
|
||||||
|
using UPnP, NAT-PMP, or PCP, instead of requiring manual router configuration. Fall back to
|
||||||
|
displaying manual instructions (the port forward mapping from patch-db) when auto-configuration is
|
||||||
|
unavailable or fails.
|
||||||
|
Before Width: | Height: | Size: 285 KiB |
BIN
assets/create-vm/step-1.png
Normal file
|
After Width: | Height: | Size: 27 KiB |
BIN
assets/create-vm/step-10.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
BIN
assets/create-vm/step-11.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
assets/create-vm/step-12.png
Normal file
|
After Width: | Height: | Size: 48 KiB |
BIN
assets/create-vm/step-2.png
Normal file
|
After Width: | Height: | Size: 46 KiB |
BIN
assets/create-vm/step-3.png
Normal file
|
After Width: | Height: | Size: 46 KiB |
BIN
assets/create-vm/step-4.png
Normal file
|
After Width: | Height: | Size: 50 KiB |
BIN
assets/create-vm/step-5.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
assets/create-vm/step-6.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
assets/create-vm/step-7.png
Normal file
|
After Width: | Height: | Size: 64 KiB |
BIN
assets/create-vm/step-8.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
assets/create-vm/step-9.png
Normal file
|
After Width: | Height: | Size: 44 KiB |
|
Before Width: | Height: | Size: 334 KiB |
|
Before Width: | Height: | Size: 1.2 MiB |
|
Before Width: | Height: | Size: 347 KiB |
|
Before Width: | Height: | Size: 599 KiB |
10
backend/.gitignore
vendored
@@ -1,10 +0,0 @@
|
|||||||
/target
|
|
||||||
**/*.rs.bk
|
|
||||||
.DS_Store
|
|
||||||
.vscode
|
|
||||||
secrets.db
|
|
||||||
*.s9pk
|
|
||||||
*.sqlite3
|
|
||||||
.env
|
|
||||||
.editorconfig
|
|
||||||
proptest-regressions/*
|
|
||||||
5388
backend/Cargo.lock
generated
@@ -1,149 +0,0 @@
|
|||||||
[package]
|
|
||||||
authors = ["Aiden McClelland <me@drbonez.dev>"]
|
|
||||||
description = "The core of the Start9 Embassy Operating System"
|
|
||||||
documentation = "https://docs.rs/embassy-os"
|
|
||||||
edition = "2018"
|
|
||||||
keywords = [
|
|
||||||
"self-hosted",
|
|
||||||
"raspberry-pi",
|
|
||||||
"privacy",
|
|
||||||
"bitcoin",
|
|
||||||
"full-node",
|
|
||||||
"lightning",
|
|
||||||
]
|
|
||||||
name = "embassy-os"
|
|
||||||
readme = "README.md"
|
|
||||||
repository = "https://github.com/Start9Labs/embassy-os"
|
|
||||||
version = "0.3.1"
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
name = "embassy"
|
|
||||||
path = "src/lib.rs"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "embassyd"
|
|
||||||
path = "src/bin/embassyd.rs"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "embassy-init"
|
|
||||||
path = "src/bin/embassy-init.rs"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "embassy-sdk"
|
|
||||||
path = "src/bin/embassy-sdk.rs"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "embassy-cli"
|
|
||||||
path = "src/bin/embassy-cli.rs"
|
|
||||||
|
|
||||||
[features]
|
|
||||||
avahi = ["avahi-sys"]
|
|
||||||
default = ["avahi", "sound", "metal", "js_engine"]
|
|
||||||
dev = []
|
|
||||||
metal = []
|
|
||||||
sound = []
|
|
||||||
unstable = ["patch-db/unstable"]
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
aes = { version = "0.7.5", features = ["ctr"] }
|
|
||||||
async-stream = "0.3.3"
|
|
||||||
async-trait = "0.1.51"
|
|
||||||
avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [
|
|
||||||
"dynamic",
|
|
||||||
], optional = true }
|
|
||||||
base32 = "0.4.0"
|
|
||||||
base64 = "0.13.0"
|
|
||||||
base64ct = "1.5.0"
|
|
||||||
basic-cookies = "0.1.4"
|
|
||||||
bollard = "0.11.0"
|
|
||||||
chrono = { version = "0.4.19", features = ["serde"] }
|
|
||||||
clap = "2.33"
|
|
||||||
color-eyre = "0.5"
|
|
||||||
cookie_store = "0.15.0"
|
|
||||||
digest = "0.10.3"
|
|
||||||
digest-old = { package = "digest", version = "0.9.0" }
|
|
||||||
divrem = "1.0.0"
|
|
||||||
ed25519 = { version = "1.5.2", features = ["pkcs8", "pem", "alloc"] }
|
|
||||||
ed25519-dalek = { version = "1.0.1", features = ["serde"] }
|
|
||||||
emver = { version = "0.1.6", features = ["serde"] }
|
|
||||||
fd-lock-rs = "0.1.3"
|
|
||||||
futures = "0.3.17"
|
|
||||||
git-version = "0.3.5"
|
|
||||||
helpers = { path = "../libs/helpers" }
|
|
||||||
hex = "0.4.3"
|
|
||||||
hmac = "0.12.1"
|
|
||||||
http = "0.2.5"
|
|
||||||
hyper = "0.14.13"
|
|
||||||
hyper-ws-listener = { git = "https://github.com/Start9Labs/hyper-ws-listener.git", branch = "main" }
|
|
||||||
imbl = "1.0.1"
|
|
||||||
indexmap = { version = "1.8.1", features = ["serde"] }
|
|
||||||
isocountry = "0.3.2"
|
|
||||||
itertools = "0.10.1"
|
|
||||||
js_engine = { path = '../libs/js_engine', optional = true }
|
|
||||||
jsonpath_lib = "0.3.0"
|
|
||||||
lazy_static = "1.4"
|
|
||||||
libc = "0.2.103"
|
|
||||||
log = "0.4.14"
|
|
||||||
models = { version = "*", path = "../libs/models" }
|
|
||||||
nix = "0.23.0"
|
|
||||||
nom = "7.0.0"
|
|
||||||
num = "0.4.0"
|
|
||||||
num_enum = "0.5.4"
|
|
||||||
openssh-keys = "0.5.0"
|
|
||||||
openssl = { version = "0.10.36", features = ["vendored"] }
|
|
||||||
patch-db = { version = "*", path = "../patch-db/patch-db", features = [
|
|
||||||
"trace",
|
|
||||||
] }
|
|
||||||
pbkdf2 = "0.11.0"
|
|
||||||
pin-project = "1.0.8"
|
|
||||||
pkcs8 = { version = "0.9.0", features = ["std"] }
|
|
||||||
platforms = "1.1.0"
|
|
||||||
prettytable-rs = "0.8.0"
|
|
||||||
proptest = "1.0.0"
|
|
||||||
proptest-derive = "0.3.0"
|
|
||||||
rand = "0.7.3"
|
|
||||||
regex = "1.5.4"
|
|
||||||
reqwest = { version = "0.11.4", features = ["stream", "json", "socks"] }
|
|
||||||
reqwest_cookie_store = "0.2.0"
|
|
||||||
rpassword = "5.0.1"
|
|
||||||
rpc-toolkit = { version = "*", path = "../rpc-toolkit/rpc-toolkit" }
|
|
||||||
rust-argon2 = "0.8.3"
|
|
||||||
scopeguard = "1.1" # because avahi-sys fucks your shit up
|
|
||||||
serde = { version = "1.0.130", features = ["derive", "rc"] }
|
|
||||||
serde_cbor = { package = "ciborium", version = "0.2.0" }
|
|
||||||
serde_json = "1.0.68"
|
|
||||||
serde_toml = { package = "toml", version = "0.5.8" }
|
|
||||||
serde_yaml = "0.8.21"
|
|
||||||
sha2 = "0.10.2"
|
|
||||||
sha2-old = { package = "sha2", version = "0.9.8" }
|
|
||||||
simple-logging = "2.0"
|
|
||||||
sqlx = { version = "0.5.11", features = [
|
|
||||||
"chrono",
|
|
||||||
"offline",
|
|
||||||
"runtime-tokio-rustls",
|
|
||||||
"sqlite",
|
|
||||||
] }
|
|
||||||
stderrlog = "0.5.1"
|
|
||||||
tar = "0.4.37"
|
|
||||||
thiserror = "1.0.29"
|
|
||||||
tokio = { version = "1.15.0", features = ["full"] }
|
|
||||||
tokio-compat-02 = "0.2.0"
|
|
||||||
tokio-stream = { version = "0.1.7", features = ["io-util", "sync"] }
|
|
||||||
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
|
|
||||||
tokio-tungstenite = "0.14.0"
|
|
||||||
tokio-util = { version = "0.6.8", features = ["io"] }
|
|
||||||
torut = "0.2.0"
|
|
||||||
tracing = "0.1"
|
|
||||||
tracing-error = "0.1"
|
|
||||||
tracing-futures = "0.2"
|
|
||||||
tracing-subscriber = "0.2"
|
|
||||||
trust-dns-server = "0.21.2"
|
|
||||||
typed-builder = "0.9.1"
|
|
||||||
url = { version = "2.2.2", features = ["serde"] }
|
|
||||||
|
|
||||||
[dependencies.serde_with]
|
|
||||||
features = ["macros", "json"]
|
|
||||||
version = "1.10.0"
|
|
||||||
|
|
||||||
[profile.dev.package.backtrace]
|
|
||||||
opt-level = 3
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
# EmbassyOS Backend
|
|
||||||
|
|
||||||
- Requirements:
|
|
||||||
- [Install Rust](https://rustup.rs)
|
|
||||||
- Recommended: [rust-analyzer](https://rust-analyzer.github.io/)
|
|
||||||
- [Docker](https://docs.docker.com/get-docker/)
|
|
||||||
- [Rust ARM64 Build Container](https://github.com/Start9Labs/rust-arm-builder)
|
|
||||||
- Scripts (run withing the `./backend` directory)
|
|
||||||
- `build-prod.sh` - compiles a release build of the artifacts for running on ARM64
|
|
||||||
- `build-dev.sh` - compiles a development build of the artifacts for running on ARM64
|
|
||||||
- A Linux computer or VM
|
|
||||||
|
|
||||||
## Structure
|
|
||||||
|
|
||||||
The EmbassyOS backend is broken up into 4 different binaries:
|
|
||||||
|
|
||||||
- embassyd: This is the main workhorse of EmbassyOS - any new functionality you want will likely go here
|
|
||||||
- embassy-init: This is the component responsible for allowing you to set up your device, and handles system initialization on startup
|
|
||||||
- embassy-cli: This is a CLI tool that will allow you to issue commands to embassyd and control it similarly to the UI
|
|
||||||
- embassy-sdk: This is a CLI tool that aids in building and packaging services you wish to deploy to the Embassy
|
|
||||||
|
|
||||||
Finally there is a library `embassy` that supports all four of these tools.
|
|
||||||
|
|
||||||
See [here](/backend/Cargo.toml) for details.
|
|
||||||
|
|
||||||
## Building
|
|
||||||
|
|
||||||
You can build the entire operating system image using `make` from the root of the EmbassyOS project. This will subsequently invoke the build scripts above to actually create the requisite binaries and put them onto the final operating system image.
|
|
||||||
|
|
||||||
## Questions
|
|
||||||
|
|
||||||
If you have questions about how various pieces of the backend system work. Open an issue and tag the following people
|
|
||||||
|
|
||||||
- dr-bonez
|
|
||||||
- ProofOfKeags
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./build-dev.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
USE_TTY=
|
|
||||||
if tty -s; then
|
|
||||||
USE_TTY="-it"
|
|
||||||
fi
|
|
||||||
|
|
||||||
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
rust-arm64-builder sh -c "(cd backend && cargo build)"
|
|
||||||
cd backend
|
|
||||||
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./build-portable-dev.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
USE_TTY=
|
|
||||||
if tty -s; then
|
|
||||||
USE_TTY="-it"
|
|
||||||
fi
|
|
||||||
|
|
||||||
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
rust-musl-builder sh -c "(cd backend && cargo +beta build --target=x86_64-unknown-linux-musl --no-default-features)"
|
|
||||||
cd backend
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./build-portable.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
USE_TTY=
|
|
||||||
if tty -s; then
|
|
||||||
USE_TTY="-it"
|
|
||||||
fi
|
|
||||||
|
|
||||||
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
rust-musl-builder sh -c "(cd backend && cargo +beta build --release --target=x86_64-unknown-linux-musl --no-default-features)"
|
|
||||||
cd backend
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./build-prod.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
USE_TTY=
|
|
||||||
if tty -s; then
|
|
||||||
USE_TTY="-it"
|
|
||||||
fi
|
|
||||||
|
|
||||||
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P start9/rust-arm-cross:aarch64'
|
|
||||||
|
|
||||||
cd ..
|
|
||||||
FLAGS=""
|
|
||||||
if [[ "$ENVIRONMENT" =~ (^|-)unstable($|-) ]]; then
|
|
||||||
FLAGS="unstable,$FLAGS"
|
|
||||||
fi
|
|
||||||
if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then
|
|
||||||
FLAGS="dev,$FLAGS"
|
|
||||||
fi
|
|
||||||
if [[ "$FLAGS" = "" ]]; then
|
|
||||||
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release)"
|
|
||||||
else
|
|
||||||
echo "FLAGS=$FLAGS"
|
|
||||||
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS)"
|
|
||||||
fi
|
|
||||||
cd backend
|
|
||||||
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd
|
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
[licenses]
|
|
||||||
unlicensed = "warn"
|
|
||||||
allow-osi-fsf-free = "neither"
|
|
||||||
copyleft = "deny"
|
|
||||||
confidence-threshold = 0.93
|
|
||||||
allow = [
|
|
||||||
"Apache-2.0",
|
|
||||||
"Apache-2.0 WITH LLVM-exception",
|
|
||||||
"MIT",
|
|
||||||
"ISC",
|
|
||||||
"MPL-2.0",
|
|
||||||
"CC0-1.0",
|
|
||||||
"BSD-2-Clause",
|
|
||||||
"BSD-3-Clause",
|
|
||||||
"LGPL-3.0",
|
|
||||||
"OpenSSL",
|
|
||||||
]
|
|
||||||
|
|
||||||
clarify = [
|
|
||||||
{ name = "webpki", expression = "ISC", license-files = [ { path = "LICENSE", hash = 0x001c7e6c } ] },
|
|
||||||
{ name = "ring", expression = "OpenSSL", license-files = [ { path = "LICENSE", hash = 0xbd0eed23 } ] },
|
|
||||||
]
|
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Embassy Init
|
|
||||||
After=network.target
|
|
||||||
Requires=network.target
|
|
||||||
Wants=avahi-daemon.service nginx.service tor.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=oneshot
|
|
||||||
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug
|
|
||||||
ExecStart=/usr/local/bin/embassy-init
|
|
||||||
RemainAfterExit=true
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=embassyd.service
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Embassy Daemon
|
|
||||||
After=embassy-init.service
|
|
||||||
Requires=embassy-init.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=simple
|
|
||||||
Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug
|
|
||||||
ExecStart=/usr/local/bin/embassyd
|
|
||||||
Restart=always
|
|
||||||
RestartSec=3
|
|
||||||
ManagedOOMPreference=avoid
|
|
||||||
CPUAccounting=true
|
|
||||||
CPUWeight=1000
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
shopt -s expand_aliases
|
|
||||||
|
|
||||||
if [ "$0" != "./install-sdk.sh" ]; then
|
|
||||||
>&2 echo "Must be run from backend directory"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
cargo install --bin=embassy-sdk --bin=embassy-cli --path=. --no-default-features --features=js_engine
|
|
||||||
@@ -1,58 +0,0 @@
|
|||||||
-- Add migration script here
|
|
||||||
CREATE TABLE IF NOT EXISTS tor
|
|
||||||
(
|
|
||||||
package TEXT NOT NULL,
|
|
||||||
interface TEXT NOT NULL,
|
|
||||||
key BLOB NOT NULL CHECK (length(key) = 64),
|
|
||||||
PRIMARY KEY (package, interface)
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS session
|
|
||||||
(
|
|
||||||
id TEXT NOT NULL PRIMARY KEY,
|
|
||||||
logged_in TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
|
||||||
logged_out TIMESTAMP,
|
|
||||||
last_active TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
|
||||||
user_agent TEXT,
|
|
||||||
metadata TEXT NOT NULL DEFAULT 'null'
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS account
|
|
||||||
(
|
|
||||||
id INTEGER PRIMARY KEY CHECK (id = 0),
|
|
||||||
password TEXT NOT NULL,
|
|
||||||
tor_key BLOB NOT NULL CHECK (length(tor_key) = 64)
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS ssh_keys
|
|
||||||
(
|
|
||||||
fingerprint TEXT NOT NULL,
|
|
||||||
openssh_pubkey TEXT NOT NULL,
|
|
||||||
created_at TEXT NOT NULL,
|
|
||||||
PRIMARY KEY (fingerprint)
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS certificates
|
|
||||||
(
|
|
||||||
id INTEGER PRIMARY KEY, -- Root = 0, Int = 1, Other = 2..
|
|
||||||
priv_key_pem TEXT NOT NULL,
|
|
||||||
certificate_pem TEXT NOT NULL,
|
|
||||||
lookup_string TEXT UNIQUE,
|
|
||||||
created_at TEXT,
|
|
||||||
updated_at TEXT
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS notifications
|
|
||||||
(
|
|
||||||
id INTEGER PRIMARY KEY,
|
|
||||||
package_id TEXT,
|
|
||||||
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
|
||||||
code INTEGER NOT NULL,
|
|
||||||
level TEXT NOT NULL,
|
|
||||||
title TEXT NOT NULL,
|
|
||||||
message TEXT NOT NULL,
|
|
||||||
data TEXT
|
|
||||||
);
|
|
||||||
CREATE TABLE IF NOT EXISTS cifs_shares
|
|
||||||
(
|
|
||||||
id INTEGER PRIMARY KEY,
|
|
||||||
hostname TEXT NOT NULL,
|
|
||||||
path TEXT NOT NULL,
|
|
||||||
username TEXT NOT NULL,
|
|
||||||
password TEXT
|
|
||||||
);
|
|
||||||
@@ -1,715 +0,0 @@
|
|||||||
{
|
|
||||||
"db": "SQLite",
|
|
||||||
"10350f5a16f1b2a6ce91672ae5dc6acc46691bd8f901861545ec83c326a8ccef": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 3
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES (?, ?, ?)"
|
|
||||||
},
|
|
||||||
"118d59de5cf930d5a3b5667b2220e9a3d593bd84276beb2b76c93b2694b0fd72": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 3
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO session (id, user_agent, metadata) VALUES (?, ?, ?)"
|
|
||||||
},
|
|
||||||
"165daa7d6a60cb42122373b2c5ac7d39399bcc99992f0002ee7bfef50a8daceb": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM certificates WHERE id = 0 OR id = 1;"
|
|
||||||
},
|
|
||||||
"177c4b9cc7901a3b906e5969b86b1c11e6acbfb8e86e98f197d7333030b17964": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM notifications WHERE id = ?"
|
|
||||||
},
|
|
||||||
"1b2242afa55e730b37b00929b656d80940b457ec86c234ddd0de917bd8872611": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id: u32",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Int64"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 4
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES (?, ?, ?, ?) RETURNING id AS \"id: u32\""
|
|
||||||
},
|
|
||||||
"1eee1fdc793919c391008854407143d7a11b4668486c11a760b49af49992f9f8": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 2
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "REPLACE INTO tor (package, interface, key) VALUES (?, 'main', ?)"
|
|
||||||
},
|
|
||||||
"2932aa02735b6422fca4ba889abfb3de8598178d4690076dc278898753d9df62": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = ?"
|
|
||||||
},
|
|
||||||
"3502e58f2ab48fb4566d21c920c096f81acfa3ff0d02f970626a4dcd67bac71d": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "tor_key",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Blob"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT tor_key FROM account"
|
|
||||||
},
|
|
||||||
"3e57a0e52b69f33e9411c13b03a5d82c5856d63f0375eb4c23b255a09c54f8b1": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "key",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Blob"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 2
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT key FROM tor WHERE package = ? AND interface = ?"
|
|
||||||
},
|
|
||||||
"4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "logged_in",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Datetime"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "logged_out",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Datetime"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "last_active",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Datetime"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "user_agent",
|
|
||||||
"ordinal": 4,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "metadata",
|
|
||||||
"ordinal": 5,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true,
|
|
||||||
false,
|
|
||||||
true,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
|
|
||||||
},
|
|
||||||
"530192a2a530ee6b92e5b98e1eb1bf6d1426c7b0cb2578593a367cb0bf2c3ca8": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 3
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE certificates SET priv_key_pem = ?, certificate_pem = ?, updated_at = datetime('now') WHERE lookup_string = ?"
|
|
||||||
},
|
|
||||||
"56b986f2a2b7091d9c3acdd78f75d9842242de1f4da8f3672f2793d9fb256928": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM tor WHERE package = ?"
|
|
||||||
},
|
|
||||||
"5b114c450073f77f466c980a2541293f30087b57301c379630326e5e5c2fb792": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 3
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "REPLACE INTO tor (package, interface, key) VALUES (?, ?, ?)"
|
|
||||||
},
|
|
||||||
"5c47da44b9c84468e95a13fc47301989900f130b3b5899d1ee6664df3ed812ac": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 2
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, ?, ?, NULL, datetime('now'), datetime('now'))"
|
|
||||||
},
|
|
||||||
"629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "password",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT password FROM account"
|
|
||||||
},
|
|
||||||
"63785dc5f193ea31e6f641a910c75857ccd288a3f6e9c4f704331531e4f0689f": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = ? AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
|
|
||||||
},
|
|
||||||
"6440354d73a67c041ea29508b43b5f309d45837a44f1a562051ad540d894c7d6": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM ssh_keys WHERE fingerprint = ?"
|
|
||||||
},
|
|
||||||
"65e6c3fbb138da5cf385af096fdd3c062b6e826e12a8a4b23e16fcc773004c29": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Int64"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "package_id",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "created_at",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Datetime"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "code",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Int64"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "level",
|
|
||||||
"ordinal": 4,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "title",
|
|
||||||
"ordinal": 5,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "message",
|
|
||||||
"ordinal": 6,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "data",
|
|
||||||
"ordinal": 7,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
true,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 2
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < ? ORDER BY id DESC LIMIT ?"
|
|
||||||
},
|
|
||||||
"668f39c868f90cdbcc635858bac9e55ed73192ed2aec5c52dcfba9800a7a4a41": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id: u32",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Int64"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "hostname",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "path",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "username",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "password",
|
|
||||||
"ordinal": 4,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT id AS \"id: u32\", hostname, path, username, password FROM cifs_shares"
|
|
||||||
},
|
|
||||||
"6b9abc9e079cff975f8a7f07ff70548c7877ecae3be0d0f2d3f439a6713326c0": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM notifications WHERE id < ?"
|
|
||||||
},
|
|
||||||
"6c96d76bffcc5f03290d8d8544a58521345ed2a843a509b17bbcd6257bb81821": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "priv_key_pem",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "certificate_pem",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;"
|
|
||||||
},
|
|
||||||
"7d548d2472fa3707bd17364b4800e229b9c2b1c0a22e245bf4e635b9b16b8c24": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 3
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (?, ?, ?, datetime('now'), datetime('now'))"
|
|
||||||
},
|
|
||||||
"82a8fa7eae8a73b5345015c72af024b4f21489b1d9b42235398d7eb8977fb132": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE account SET password = ?"
|
|
||||||
},
|
|
||||||
"8595651866e7db772260bd79e19d55b7271fd795b82a99821c935a9237c1aa16": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "interface",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "key",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Blob"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT interface, key FROM tor WHERE package = ?"
|
|
||||||
},
|
|
||||||
"9496e17a73672ac3675e02efa7c4bf8bd479b866c0d31fa1e3a85ef159310a57": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "priv_key_pem",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "certificate_pem",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = ?"
|
|
||||||
},
|
|
||||||
"9fcedab1ba34daa2c6ae97c5953c09821b35b55be75b0c66045ab31a2cf4553e": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 3
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "REPLACE INTO account (id, password, tor_key) VALUES (?, ?, ?)"
|
|
||||||
},
|
|
||||||
"a1cbaac36d8e14c8c3e7276237c4824bff18861f91b0b08aa5791704c492acb7": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 2
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, ?, ?, NULL, datetime('now'), datetime('now'))"
|
|
||||||
},
|
|
||||||
"a4e7162322b28508310b9de7ebc891e619b881ff6d3ea09eba13da39626ab12f": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 5
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "UPDATE cifs_shares SET hostname = ?, path = ?, username = ?, password = ? WHERE id = ?"
|
|
||||||
},
|
|
||||||
"a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "fingerprint",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "openssh_pubkey",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "created_at",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys"
|
|
||||||
},
|
|
||||||
"abfdeea8cd10343b85f647d7abc5dc3bd0b5891101b143485938192ee3b8c907": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "id",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Int64"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "package_id",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "created_at",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Datetime"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "code",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Int64"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "level",
|
|
||||||
"ordinal": 4,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "title",
|
|
||||||
"ordinal": 5,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "message",
|
|
||||||
"ordinal": 6,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "data",
|
|
||||||
"ordinal": 7,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
true,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT ?"
|
|
||||||
},
|
|
||||||
"b376d9e77e0861a9af2d1081ca48d14e83abc5a1546213d15bb570972c403beb": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "-- Add migration script here\nCREATE TABLE IF NOT EXISTS tor\n(\n package TEXT NOT NULL,\n interface TEXT NOT NULL,\n key BLOB NOT NULL CHECK (length(key) = 64),\n PRIMARY KEY (package, interface)\n);\nCREATE TABLE IF NOT EXISTS session\n(\n id TEXT NOT NULL PRIMARY KEY,\n logged_in TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n logged_out TIMESTAMP,\n last_active TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n user_agent TEXT,\n metadata TEXT NOT NULL DEFAULT 'null'\n);\nCREATE TABLE IF NOT EXISTS account\n(\n id INTEGER PRIMARY KEY CHECK (id = 0),\n password TEXT NOT NULL,\n tor_key BLOB NOT NULL CHECK (length(tor_key) = 64)\n);\nCREATE TABLE IF NOT EXISTS ssh_keys\n(\n fingerprint TEXT NOT NULL,\n openssh_pubkey TEXT NOT NULL,\n created_at TEXT NOT NULL,\n PRIMARY KEY (fingerprint)\n);\nCREATE TABLE IF NOT EXISTS certificates\n(\n id INTEGER PRIMARY KEY, -- Root = 0, Int = 1, Other = 2..\n priv_key_pem TEXT NOT NULL,\n certificate_pem TEXT NOT NULL,\n lookup_string TEXT UNIQUE,\n created_at TEXT,\n updated_at TEXT\n);\nCREATE TABLE IF NOT EXISTS notifications\n(\n id INTEGER PRIMARY KEY,\n package_id TEXT,\n created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n code INTEGER NOT NULL,\n level TEXT NOT NULL,\n title TEXT NOT NULL,\n message TEXT NOT NULL,\n data TEXT\n);\nCREATE TABLE IF NOT EXISTS cifs_shares\n(\n id INTEGER PRIMARY KEY,\n hostname TEXT NOT NULL,\n path TEXT NOT NULL,\n username TEXT NOT NULL,\n password TEXT\n);"
|
|
||||||
},
|
|
||||||
"cc33fe2958fe7caeac6999a217f918a68b45ad596664170b4d07671c6ea49566": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "hostname",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "path",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "username",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "password",
|
|
||||||
"ordinal": 3,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
true
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = ?"
|
|
||||||
},
|
|
||||||
"d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "openssh_pubkey",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT openssh_pubkey FROM ssh_keys"
|
|
||||||
},
|
|
||||||
"d54bd5b53f8c760e1f8cde604aa8b1bdc66e4e025a636bc44ffbcd788b5168fd": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 6
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES (?, ?, ?, ?, ?, ?)"
|
|
||||||
},
|
|
||||||
"d79d608ceb862c15b741a6040044c6dd54a837a3a0c5594d15a6041c7bc68ea8": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 3
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "INSERT OR IGNORE INTO tor (package, interface, key) VALUES (?, ?, ?)"
|
|
||||||
},
|
|
||||||
"de2a5e90798d606047ab8180c044baac05469c0cdf151316bd58ee8c7196fdef": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "fingerprint",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "openssh_pubkey",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "created_at",
|
|
||||||
"ordinal": 2,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT * FROM ssh_keys WHERE fingerprint = ?"
|
|
||||||
},
|
|
||||||
"ed848affa5bf92997cd441e3a50b3616b6724df3884bd9d199b3225e0bea8a54": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [
|
|
||||||
{
|
|
||||||
"name": "priv_key_pem",
|
|
||||||
"ordinal": 0,
|
|
||||||
"type_info": "Text"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "certificate_pem",
|
|
||||||
"ordinal": 1,
|
|
||||||
"type_info": "Text"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"nullable": [
|
|
||||||
false,
|
|
||||||
false
|
|
||||||
],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 0
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;"
|
|
||||||
},
|
|
||||||
"f63c8c5a8754b34a49ef5d67802fa2b72aa409bbec92ecc6901492092974b71a": {
|
|
||||||
"describe": {
|
|
||||||
"columns": [],
|
|
||||||
"nullable": [],
|
|
||||||
"parameters": {
|
|
||||||
"Right": 1
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"query": "DELETE FROM cifs_shares WHERE id = ?"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,157 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use indexmap::IndexSet;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::config::{Config, ConfigSpec};
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::id::{ ImageId};
|
|
||||||
use crate::procedure::{PackageProcedure, ProcedureName};
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
|
|
||||||
use crate::util::Version;
|
|
||||||
use crate::volume::Volumes;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
pub use models::ActionId;
|
|
||||||
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
|
|
||||||
pub struct Actions(pub BTreeMap<ActionId, Action>);
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
#[serde(tag = "version")]
|
|
||||||
pub enum ActionResult {
|
|
||||||
#[serde(rename = "0")]
|
|
||||||
V0(ActionResultV0),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
pub struct ActionResultV0 {
|
|
||||||
pub message: String,
|
|
||||||
pub value: Option<String>,
|
|
||||||
pub copyable: bool,
|
|
||||||
pub qr: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Deserialize, Serialize)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub enum DockerStatus {
|
|
||||||
Running,
|
|
||||||
Stopped,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct Action {
|
|
||||||
pub name: String,
|
|
||||||
pub description: String,
|
|
||||||
#[serde(default)]
|
|
||||||
pub warning: Option<String>,
|
|
||||||
pub implementation: PackageProcedure,
|
|
||||||
pub allowed_statuses: IndexSet<DockerStatus>,
|
|
||||||
#[serde(default)]
|
|
||||||
pub input_spec: ConfigSpec,
|
|
||||||
}
|
|
||||||
impl Action {
|
|
||||||
#[instrument]
|
|
||||||
pub fn validate(&self, volumes: &Volumes, image_ids: &BTreeSet<ImageId>) -> Result<(), Error> {
|
|
||||||
self.implementation
|
|
||||||
.validate(volumes, image_ids, true)
|
|
||||||
.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
crate::ErrorKind::ValidateS9pk,
|
|
||||||
format!("Action {}", self.name),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn execute(
|
|
||||||
&self,
|
|
||||||
ctx: &RpcContext,
|
|
||||||
pkg_id: &PackageId,
|
|
||||||
pkg_version: &Version,
|
|
||||||
action_id: &ActionId,
|
|
||||||
volumes: &Volumes,
|
|
||||||
input: Option<Config>,
|
|
||||||
) -> Result<ActionResult, Error> {
|
|
||||||
if let Some(ref input) = input {
|
|
||||||
self.input_spec
|
|
||||||
.matches(&input)
|
|
||||||
.with_kind(crate::ErrorKind::ConfigSpecViolation)?;
|
|
||||||
}
|
|
||||||
self.implementation
|
|
||||||
.execute(
|
|
||||||
ctx,
|
|
||||||
pkg_id,
|
|
||||||
pkg_version,
|
|
||||||
ProcedureName::Action(action_id.clone()),
|
|
||||||
volumes,
|
|
||||||
input,
|
|
||||||
true,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
.map_err(|e| Error::new(eyre!("{}", e.1), crate::ErrorKind::Action))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn display_action_result(action_result: ActionResult, matches: &ArgMatches<'_>) {
|
|
||||||
if matches.is_present("format") {
|
|
||||||
return display_serializable(action_result, matches);
|
|
||||||
}
|
|
||||||
match action_result {
|
|
||||||
ActionResult::V0(ar) => {
|
|
||||||
println!(
|
|
||||||
"{}: {}",
|
|
||||||
ar.message,
|
|
||||||
serde_json::to_string(&ar.value).unwrap()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// RPC command: executes a named action on an installed package.
///
/// Looks the package up in the database (erroring with `NotFound` if it is
/// not installed), then finds `action_id` in its manifest and runs it with
/// the optional config read from stdin.
#[command(about = "Executes an action", display(display_action_result))]
#[instrument(skip(ctx))]
pub async fn action(
    #[context] ctx: RpcContext,
    #[arg(rename = "id")] pkg_id: PackageId,
    #[arg(rename = "action-id")] action_id: ActionId,
    #[arg(stdin, parse(parse_stdin_deserializable))] input: Option<Config>,
    // `format` is consumed by the display hook, not by this function.
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<ActionResult, Error> {
    let mut db = ctx.db.handle();
    // Fetch the installed package's manifest; a missing package or one that
    // is not installed maps to NotFound.
    let manifest = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&pkg_id)
        .and_then(|p| p.installed())
        .expect(&mut db)
        .await
        .with_kind(crate::ErrorKind::NotFound)?
        .manifest()
        .get(&mut db, true)
        .await?
        .to_owned();
    if let Some(action) = manifest.actions.0.get(&action_id) {
        action
            .execute(
                &ctx,
                &manifest.id,
                &manifest.version,
                &action_id,
                &manifest.volumes,
                input,
            )
            .await
    } else {
        Err(Error::new(
            eyre!("Action not found in manifest"),
            crate::ErrorKind::NotFound,
        ))
    }
}
|
|
||||||
@@ -1,369 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::marker::PhantomData;
|
|
||||||
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use patch_db::{DbHandle, LockReceipt};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use rpc_toolkit::command_helpers::prelude::{RequestParts, ResponseParts};
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use sqlx::{Executor, Sqlite};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::context::{CliContext, RpcContext};
|
|
||||||
use crate::middleware::auth::{AsLogoutSessionId, HasLoggedOutSessions, HashSessionToken};
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::{display_serializable, IoFormat};
|
|
||||||
use crate::{ensure_code, Error, ResultExt};
|
|
||||||
|
|
||||||
/// Parent RPC command grouping the authentication subcommands
/// (`login`, `logout`, `session`, `reset-password`). Does nothing itself.
#[command(subcommands(login, logout, session, reset_password))]
pub fn auth() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
/// CLI-side parser for the login `metadata` argument.
///
/// Ignores its inputs and always reports the CLI platform, since the help
/// text states this value cannot be overridden from the cli.
pub fn parse_metadata(_: &str, _: &ArgMatches<'_>) -> Result<Value, Error> {
    Ok(serde_json::json!({
        "platforms": ["cli"],
    }))
}
|
|
||||||
|
|
||||||
/// Dev helper test: prints a fresh argon2 hash of "testing1234" with a
/// random 16-byte salt (useful for seeding test databases by hand).
#[test]
fn gen_pwd() {
    println!(
        "{:?}",
        argon2::hash_encoded(
            b"testing1234",
            &rand::random::<[u8; 16]>()[..],
            &argon2::Config::default()
        )
        .unwrap()
    )
}
|
|
||||||
|
|
||||||
/// CLI front-end for `auth.login`.
///
/// Prompts for the password on the terminal when it was not supplied as an
/// argument, then forwards the call to the server over RPC.
#[instrument(skip(ctx, password))]
async fn cli_login(
    ctx: CliContext,
    password: Option<String>,
    metadata: Value,
) -> Result<(), RpcError> {
    let password = if let Some(password) = password {
        password
    } else {
        rpassword::prompt_password_stdout("Password: ")?
    };

    rpc_toolkit::command_helpers::call_remote(
        ctx,
        "auth.login",
        serde_json::json!({ "password": password, "metadata": metadata }),
        PhantomData::<()>,
    )
    .await?
    .result?;

    Ok(())
}
|
|
||||||
|
|
||||||
pub fn check_password(hash: &str, password: &str) -> Result<(), Error> {
|
|
||||||
ensure_code!(
|
|
||||||
argon2::verify_encoded(&hash, password.as_bytes()).map_err(|_| {
|
|
||||||
Error::new(
|
|
||||||
eyre!("Password Incorrect"),
|
|
||||||
crate::ErrorKind::IncorrectPassword,
|
|
||||||
)
|
|
||||||
})?,
|
|
||||||
crate::ErrorKind::IncorrectPassword,
|
|
||||||
"Password Incorrect"
|
|
||||||
);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fetches the single account password hash from the secrets database and
/// verifies `password` against it via [`check_password`].
///
/// Assumes the `account` table contains exactly one row — `fetch_one` will
/// error otherwise.
pub async fn check_password_against_db<Ex>(secrets: &mut Ex, password: &str) -> Result<(), Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
{
    let pw_hash = sqlx::query!("SELECT password FROM account")
        .fetch_one(secrets)
        .await?
        .password;
    check_password(&pw_hash, password)?;
    Ok(())
}
|
|
||||||
|
|
||||||
/// RPC command: authenticates the caller and creates a session.
///
/// On success a new session row (hashed token, user agent, caller-supplied
/// metadata) is inserted and the session token is returned to the client via
/// a `set-cookie` response header. Unauthenticated by design.
#[command(
    custom_cli(cli_login(async, context(CliContext))),
    display(display_none),
    metadata(authenticated = false)
)]
#[instrument(skip(ctx, password))]
pub async fn login(
    #[context] ctx: RpcContext,
    #[request] req: &RequestParts,
    #[response] res: &mut ResponseParts,
    #[arg] password: Option<String>,
    #[arg(
        parse(parse_metadata),
        default = "",
        help = "RPC Only: This value cannot be overidden from the cli"
    )]
    metadata: Value,
) -> Result<(), Error> {
    // A missing password is treated as the empty string and will simply fail
    // verification unless that actually is the stored password.
    let password = password.unwrap_or_default();
    let mut handle = ctx.secret_store.acquire().await?;
    check_password_against_db(&mut handle, &password).await?;

    let hash_token = HashSessionToken::new();
    let user_agent = req.headers.get("user-agent").and_then(|h| h.to_str().ok());
    // Metadata is stored as a JSON string in the session row.
    let metadata = serde_json::to_string(&metadata).with_kind(crate::ErrorKind::Database)?;
    // Only the hash of the token is persisted; the raw token goes back to
    // the client in the cookie below.
    let hash_token_hashed = hash_token.hashed();
    sqlx::query!(
        "INSERT INTO session (id, user_agent, metadata) VALUES (?, ?, ?)",
        hash_token_hashed,
        user_agent,
        metadata,
    )
    .execute(&mut handle)
    .await?;
    res.headers.insert(
        "set-cookie",
        hash_token.header_value()?, // Should be impossible, but don't want to panic
    );

    Ok(())
}
|
|
||||||
|
|
||||||
#[command(display(display_none), metadata(authenticated = false))]
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn logout(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[request] req: &RequestParts,
|
|
||||||
) -> Result<Option<HasLoggedOutSessions>, Error> {
|
|
||||||
let auth = match HashSessionToken::from_request_parts(req) {
|
|
||||||
Err(_) => return Ok(None),
|
|
||||||
Ok(a) => a,
|
|
||||||
};
|
|
||||||
Ok(Some(HasLoggedOutSessions::new(vec![auth], &ctx).await?))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A single authenticated session as reported by `auth.session.list`.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Session {
    /// When the session was created.
    logged_in: DateTime<Utc>,
    /// Last time the session was used.
    last_active: DateTime<Utc>,
    /// User-agent header captured at login, if any.
    user_agent: Option<String>,
    /// Arbitrary JSON metadata supplied at login.
    metadata: Value,
}
|
|
||||||
|
|
||||||
/// Result of `auth.session.list`: all live sessions plus which one is the
/// caller's own (so the UI can highlight it).
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SessionList {
    /// Hashed token of the session making the request.
    current: String,
    /// All active sessions keyed by hashed token.
    sessions: BTreeMap<String, Session>,
}
|
|
||||||
|
|
||||||
/// Parent RPC command grouping the session subcommands (`list`, `kill`).
/// Does nothing itself.
#[command(subcommands(list, kill))]
pub async fn session() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
fn display_sessions(arg: SessionList, matches: &ArgMatches<'_>) {
|
|
||||||
use prettytable::*;
|
|
||||||
|
|
||||||
if matches.is_present("format") {
|
|
||||||
return display_serializable(arg, matches);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut table = Table::new();
|
|
||||||
table.add_row(row![bc =>
|
|
||||||
"ID",
|
|
||||||
"LOGGED IN",
|
|
||||||
"LAST ACTIVE",
|
|
||||||
"USER AGENT",
|
|
||||||
"METADATA",
|
|
||||||
]);
|
|
||||||
for (id, session) in arg.sessions {
|
|
||||||
let mut row = row![
|
|
||||||
&id,
|
|
||||||
&format!("{}", session.logged_in),
|
|
||||||
&format!("{}", session.last_active),
|
|
||||||
session.user_agent.as_deref().unwrap_or("N/A"),
|
|
||||||
&format!("{}", session.metadata),
|
|
||||||
];
|
|
||||||
if id == arg.current {
|
|
||||||
row.iter_mut()
|
|
||||||
.map(|c| c.style(Attr::ForegroundColor(color::GREEN)))
|
|
||||||
.collect::<()>()
|
|
||||||
}
|
|
||||||
table.add_row(row);
|
|
||||||
}
|
|
||||||
table.print_tty(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// RPC command: lists all sessions that are not logged out (or whose logout
/// is still in the future), keyed by hashed session token.
#[command(display(display_sessions))]
#[instrument(skip(ctx))]
pub async fn list(
    #[context] ctx: RpcContext,
    #[request] req: &RequestParts,
    // `format` is consumed by the display hook, not by this function.
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<SessionList, Error> {
    Ok(SessionList {
        current: HashSessionToken::from_request_parts(req)?.as_hash(),
        sessions: sqlx::query!(
            "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
        )
        .fetch_all(&mut ctx.secret_store.acquire().await?)
        .await?
        .into_iter()
        .map(|row| {
            Ok((
                row.id,
                Session {
                    // DB timestamps are naive; interpret them as UTC.
                    logged_in: DateTime::from_utc(row.logged_in, Utc),
                    last_active: DateTime::from_utc(row.last_active, Utc),
                    user_agent: row.user_agent,
                    metadata: serde_json::from_str(&row.metadata)
                        .with_kind(crate::ErrorKind::Database)?,
                },
            ))
        })
        .collect::<Result<_, Error>>()?,
    })
}
|
|
||||||
|
|
||||||
/// Parses a comma-separated CLI argument into a list of trimmed strings.
fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<Vec<String>, RpcError> {
    let ids = arg.split(',').map(|id| id.trim().to_owned()).collect();
    Ok(ids)
}
|
|
||||||
|
|
||||||
/// Newtype over a hashed session id targeted by `auth.session.kill`.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct KillSessionId(String);
|
|
||||||
|
|
||||||
impl AsLogoutSessionId for KillSessionId {
    /// The wrapped string already is the hashed session id.
    fn as_logout_session_id(self) -> String {
        self.0
    }
}
|
|
||||||
|
|
||||||
/// RPC command: forcibly logs out the sessions with the given
/// (comma-separated) hashed ids.
#[command(display(display_none))]
#[instrument(skip(ctx))]
pub async fn kill(
    #[context] ctx: RpcContext,
    #[arg(parse(parse_comma_separated))] ids: Vec<String>,
) -> Result<(), Error> {
    HasLoggedOutSessions::new(ids.into_iter().map(KillSessionId), &ctx).await?;
    Ok(())
}
|
|
||||||
|
|
||||||
/// CLI front-end for `auth.reset-password`.
///
/// Prompts for the current and new passwords when not supplied, requiring
/// the new password to be typed twice, then forwards the call over RPC.
#[instrument(skip(ctx, old_password, new_password))]
async fn cli_reset_password(
    ctx: CliContext,
    old_password: Option<String>,
    new_password: Option<String>,
) -> Result<(), RpcError> {
    let old_password = if let Some(old_password) = old_password {
        old_password
    } else {
        rpassword::prompt_password_stdout("Current Password: ")?
    };

    let new_password = if let Some(new_password) = new_password {
        new_password
    } else {
        let new_password = rpassword::prompt_password_stdout("New Password: ")?;
        // Confirmation prompt only applies to interactively-entered passwords.
        if new_password != rpassword::prompt_password_stdout("Confirm: ")? {
            return Err(Error::new(
                eyre!("Passwords do not match"),
                crate::ErrorKind::IncorrectPassword,
            )
            .into());
        }
        new_password
    };

    rpc_toolkit::command_helpers::call_remote(
        ctx,
        "auth.reset-password",
        serde_json::json!({ "old-password": old_password, "new-password": new_password }),
        PhantomData::<()>,
    )
    .await?
    .result?;

    Ok(())
}
|
|
||||||
|
|
||||||
/// Receipt proving a write lock is held on the `password_hash` field of the
/// server-info model, required by [`set_password`].
pub struct SetPasswordReceipt(LockReceipt<String, ()>);
impl SetPasswordReceipt {
    /// Acquires the password-hash write lock directly on `db`.
    pub async fn new<Db: DbHandle>(db: &mut Db) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the password-hash lock target in `locks` and returns a
    /// verifier closure that turns the acquired locks into a receipt.
    /// This two-phase shape lets callers batch several lock targets into a
    /// single `lock_all` call.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let password_hash = crate::db::DatabaseModel::new()
            .server_info()
            .password_hash()
            .make_locker(patch_db::LockType::Write)
            .add_to_keys(locks);
        move |skeleton_key| Ok(Self(password_hash.verify(skeleton_key)?))
    }
}
|
|
||||||
|
|
||||||
/// Hashes `password` with argon2 (random 16-byte salt) and writes the result
/// to both the secrets database (`account.password`) and the patch-db
/// server-info model (via the pre-acquired `receipt` lock).
///
/// # Errors
/// `PasswordHashGeneration` if hashing fails, otherwise database errors from
/// either store.
pub async fn set_password<Db: DbHandle, Ex>(
    db: &mut Db,
    receipt: &SetPasswordReceipt,
    secrets: &mut Ex,
    password: &str,
) -> Result<(), Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
{
    let password = argon2::hash_encoded(
        password.as_bytes(),
        &rand::random::<[u8; 16]>()[..],
        &argon2::Config::default(),
    )
    .with_kind(crate::ErrorKind::PasswordHashGeneration)?;

    sqlx::query!("UPDATE account SET password = ?", password,)
        .execute(secrets)
        .await?;

    receipt.0.set(db, password).await?;

    Ok(())
}
|
|
||||||
|
|
||||||
/// RPC command: changes the account password after verifying the old one.
#[command(
    rename = "reset-password",
    custom_cli(cli_reset_password(async, context(CliContext))),
    display(display_none)
)]
#[instrument(skip(ctx, old_password, new_password))]
pub async fn reset_password(
    #[context] ctx: RpcContext,
    #[arg(rename = "old-password")] old_password: Option<String>,
    #[arg(rename = "new-password")] new_password: Option<String>,
) -> Result<(), Error> {
    // Missing passwords degrade to empty strings; the old one will then fail
    // verification unless the stored password is actually empty.
    let old_password = old_password.unwrap_or_default();
    let new_password = new_password.unwrap_or_default();

    let mut secrets = ctx.secret_store.acquire().await?;
    check_password_against_db(&mut secrets, &old_password).await?;

    let mut db = ctx.db.handle();

    // Lock the password-hash field before writing (see SetPasswordReceipt).
    let set_password_receipt = SetPasswordReceipt::new(&mut db).await?;

    set_password(&mut db, &set_password_receipt, &mut secrets, &new_password).await?;

    Ok(())
}
|
|
||||||
@@ -1,459 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use chrono::Utc;
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use openssl::pkey::{PKey, Private};
|
|
||||||
use openssl::x509::X509;
|
|
||||||
use patch_db::{DbHandle, LockType, PatchDbHandle, Revision};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use tokio::io::AsyncWriteExt;
|
|
||||||
use torut::onion::TorSecretKeyV3;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::target::BackupTargetId;
|
|
||||||
use super::PackageBackupReport;
|
|
||||||
use crate::auth::check_password_against_db;
|
|
||||||
use crate::backup::{BackupReport, ServerBackupReport};
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::db::model::BackupProgress;
|
|
||||||
use crate::db::util::WithRevision;
|
|
||||||
use crate::disk::mount::backup::BackupMountGuard;
|
|
||||||
use crate::disk::mount::filesystem::ReadWrite;
|
|
||||||
use crate::disk::mount::guard::TmpMountGuard;
|
|
||||||
use crate::notifications::NotificationLevel;
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::status::MainStatus;
|
|
||||||
use crate::util::serde::IoFormat;
|
|
||||||
use crate::util::{display_none, AtomicFile};
|
|
||||||
use crate::version::VersionT;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// OS-level state stored alongside package backups so a server can be
/// restored wholesale: Tor identity, root CA, and UI database contents.
#[derive(Debug)]
pub struct OsBackup {
    /// The server's v3 onion-service secret key.
    pub tor_key: TorSecretKeyV3,
    /// Private key of the root certificate authority.
    pub root_ca_key: PKey<Private>,
    /// Certificate of the root certificate authority.
    pub root_ca_cert: X509,
    /// Snapshot of the `ui` subtree of the database.
    pub ui: Value,
}
|
|
||||||
impl<'de> Deserialize<'de> for OsBackup {
|
|
||||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
|
||||||
where
|
|
||||||
D: serde::Deserializer<'de>,
|
|
||||||
{
|
|
||||||
#[derive(Deserialize)]
|
|
||||||
#[serde(rename = "kebab-case")]
|
|
||||||
struct OsBackupDe {
|
|
||||||
tor_key: String,
|
|
||||||
root_ca_key: String,
|
|
||||||
root_ca_cert: String,
|
|
||||||
ui: Value,
|
|
||||||
}
|
|
||||||
let int = OsBackupDe::deserialize(deserializer)?;
|
|
||||||
let key_vec = base32::decode(base32::Alphabet::RFC4648 { padding: true }, &int.tor_key)
|
|
||||||
.ok_or_else(|| {
|
|
||||||
serde::de::Error::invalid_value(
|
|
||||||
serde::de::Unexpected::Str(&int.tor_key),
|
|
||||||
&"an RFC4648 encoded string",
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
if key_vec.len() != 64 {
|
|
||||||
return Err(serde::de::Error::invalid_value(
|
|
||||||
serde::de::Unexpected::Str(&int.tor_key),
|
|
||||||
&"a 64 byte value encoded as an RFC4648 string",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let mut key_slice = [0; 64];
|
|
||||||
key_slice.clone_from_slice(&key_vec);
|
|
||||||
Ok(OsBackup {
|
|
||||||
tor_key: TorSecretKeyV3::from(key_slice),
|
|
||||||
root_ca_key: PKey::<Private>::private_key_from_pem(int.root_ca_key.as_bytes())
|
|
||||||
.map_err(serde::de::Error::custom)?,
|
|
||||||
root_ca_cert: X509::from_pem(int.root_ca_cert.as_bytes())
|
|
||||||
.map_err(serde::de::Error::custom)?,
|
|
||||||
ui: int.ui,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Serialize for OsBackup {
    /// Serializes via an intermediate string form (mirror of the
    /// `Deserialize` impl): Tor key as padded RFC4648 base32, CA key as
    /// PKCS#8 PEM, CA cert as PEM.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        #[derive(Serialize)]
        // NOTE(review): `rename` (not `rename_all`) renames the *container*,
        // so field names stay snake_case on the wire — likely intended to be
        // `rename_all = "kebab-case"`, but changing it would break existing
        // backups; confirm before fixing.
        #[serde(rename = "kebab-case")]
        struct OsBackupSer<'a> {
            tor_key: String,
            root_ca_key: String,
            root_ca_cert: String,
            ui: &'a Value,
        }
        OsBackupSer {
            tor_key: base32::encode(
                base32::Alphabet::RFC4648 { padding: true },
                &self.tor_key.as_bytes(),
            ),
            root_ca_key: String::from_utf8(
                self.root_ca_key
                    .private_key_to_pem_pkcs8()
                    .map_err(serde::ser::Error::custom)?,
            )
            .map_err(serde::ser::Error::custom)?,
            root_ca_cert: String::from_utf8(
                self.root_ca_cert
                    .to_pem()
                    .map_err(serde::ser::Error::custom)?,
            )
            .map_err(serde::ser::Error::custom)?,
            ui: &self.ui,
        }
        .serialize(serializer)
    }
}
|
|
||||||
|
|
||||||
fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<BTreeSet<PackageId>, Error> {
|
|
||||||
arg.split(',')
|
|
||||||
.map(|s| s.trim().parse().map_err(Error::from))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// RPC command: starts a full backup to the given target.
///
/// Verifies the password, mounts the backup target (using `old-password`
/// for an existing backup whose password is being rotated), marks the
/// selected packages as backing-up, then spawns a background task that runs
/// [`perform_backup`] and reports the outcome via a notification. Returns
/// immediately with the revision from the backing-up state change.
#[command(rename = "create", display(display_none))]
#[instrument(skip(ctx, old_password, password))]
pub async fn backup_all(
    #[context] ctx: RpcContext,
    #[arg(rename = "target-id")] target_id: BackupTargetId,
    #[arg(rename = "old-password", long = "old-password")] old_password: Option<String>,
    #[arg(
        rename = "package-ids",
        long = "package-ids",
        parse(parse_comma_separated)
    )]
    package_ids: Option<BTreeSet<PackageId>>,
    #[arg] password: String,
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?;
    let fs = target_id
        .load(&mut ctx.secret_store.acquire().await?)
        .await?;
    // Unlock the backup with the old password when rotating, otherwise the
    // current one.
    let mut backup_guard = BackupMountGuard::mount(
        TmpMountGuard::mount(&fs, ReadWrite).await?,
        old_password.as_ref().unwrap_or(&password),
    )
    .await?;
    // Default to backing up every installed package.
    let all_packages = crate::db::DatabaseModel::new()
        .package_data()
        .get(&mut db, false)
        .await?
        .0
        .keys()
        .into_iter()
        .cloned()
        .collect();
    let package_ids = package_ids.unwrap_or(all_packages);
    if old_password.is_some() {
        backup_guard.change_password(&password)?;
    }
    let revision = assure_backing_up(&mut db, &package_ids).await?;
    // The actual backup runs detached; progress/outcome are surfaced through
    // the db status-info subtree and the notification manager.
    tokio::task::spawn(async move {
        let backup_res = perform_backup(&ctx, &mut db, backup_guard, &package_ids).await;
        let backup_progress = crate::db::DatabaseModel::new()
            .server_info()
            .status_info()
            .backup_progress();
        backup_progress
            .clone()
            .lock(&mut db, LockType::Write)
            .await
            .expect("failed to lock server status");
        match backup_res {
            // Every package backed up cleanly.
            Ok(report) if report.iter().all(|(_, rep)| rep.error.is_none()) => ctx
                .notification_manager
                .notify(
                    &mut db,
                    None,
                    NotificationLevel::Success,
                    "Backup Complete".to_owned(),
                    "Your backup has completed".to_owned(),
                    BackupReport {
                        server: ServerBackupReport {
                            attempted: true,
                            error: None,
                        },
                        packages: report,
                    },
                    None,
                )
                .await
                .expect("failed to send notification"),
            // Backup finished but at least one package reported an error.
            Ok(report) => ctx
                .notification_manager
                .notify(
                    &mut db,
                    None,
                    NotificationLevel::Warning,
                    "Backup Complete".to_owned(),
                    "Your backup has completed, but some package(s) failed to backup".to_owned(),
                    BackupReport {
                        server: ServerBackupReport {
                            attempted: true,
                            error: None,
                        },
                        packages: report,
                    },
                    None,
                )
                .await
                .expect("failed to send notification"),
            // The backup as a whole failed before producing a report.
            Err(e) => {
                tracing::error!("Backup Failed: {}", e);
                tracing::debug!("{:?}", e);
                ctx.notification_manager
                    .notify(
                        &mut db,
                        None,
                        NotificationLevel::Error,
                        "Backup Failed".to_owned(),
                        "Your backup failed to complete.".to_owned(),
                        BackupReport {
                            server: ServerBackupReport {
                                attempted: true,
                                error: Some(e.to_string()),
                            },
                            packages: BTreeMap::new(),
                        },
                        None,
                    )
                    .await
                    .expect("failed to send notification");
            }
        }
        // Clear the backing-up marker regardless of outcome.
        backup_progress
            .delete(&mut db)
            .await
            .expect("failed to change server status");
    });
    Ok(WithRevision {
        response: (),
        revision,
    })
}
|
|
||||||
|
|
||||||
#[instrument(skip(db, packages))]
|
|
||||||
async fn assure_backing_up(
|
|
||||||
db: &mut PatchDbHandle,
|
|
||||||
packages: impl IntoIterator<Item = &PackageId>,
|
|
||||||
) -> Result<Option<Arc<Revision>>, Error> {
|
|
||||||
let mut tx = db.begin().await?;
|
|
||||||
let mut backing_up = crate::db::DatabaseModel::new()
|
|
||||||
.server_info()
|
|
||||||
.status_info()
|
|
||||||
.backup_progress()
|
|
||||||
.get_mut(&mut tx)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
if backing_up
|
|
||||||
.iter()
|
|
||||||
.flat_map(|x| x.values())
|
|
||||||
.fold(false, |acc, x| {
|
|
||||||
if !x.complete {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
acc
|
|
||||||
})
|
|
||||||
{
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Server is already backing up!"),
|
|
||||||
crate::ErrorKind::InvalidRequest,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
*backing_up = Some(
|
|
||||||
packages
|
|
||||||
.into_iter()
|
|
||||||
.map(|x| (x.clone(), BackupProgress { complete: false }))
|
|
||||||
.collect(),
|
|
||||||
);
|
|
||||||
backing_up.save(&mut tx).await?;
|
|
||||||
Ok(tx.commit(None).await?)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Backs up each requested (and installed) package, then writes the OS-level
/// backup file and finalizes the backup metadata.
///
/// Per package: the service is flipped to `BackingUp`, its manager is
/// synchronized, its backup procedure runs against a mounted per-package
/// backup dir, and the prior main status is restored. Individual package
/// failures are recorded in the returned report rather than aborting the
/// whole backup.
#[instrument(skip(ctx, db, backup_guard))]
async fn perform_backup<Db: DbHandle>(
    ctx: &RpcContext,
    mut db: Db,
    mut backup_guard: BackupMountGuard<TmpMountGuard>,
    package_ids: &BTreeSet<PackageId>,
) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> {
    let mut backup_report = BTreeMap::new();

    for package_id in crate::db::DatabaseModel::new()
        .package_data()
        .keys(&mut db, false)
        .await?
        .into_iter()
        .filter(|id| package_ids.contains(id))
    {
        let mut tx = db.begin().await?; // for lock scope
        // Skip packages that are present but not installed.
        let installed_model = if let Some(installed_model) = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(&package_id)
            .and_then(|m| m.installed())
            .check(&mut tx)
            .await?
        {
            installed_model
        } else {
            continue;
        };
        let main_status_model = installed_model.clone().status().main();

        main_status_model.lock(&mut tx, LockType::Write).await?;
        // Remember the pre-backup status so it can be restored afterwards.
        let (started, health) = match main_status_model.get(&mut tx, true).await?.into_owned() {
            MainStatus::Starting { .. } => (Some(Utc::now()), Default::default()),
            MainStatus::Running { started, health } => (Some(started), health.clone()),
            MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restarting => {
                (None, Default::default())
            }
            MainStatus::BackingUp { .. } => {
                backup_report.insert(
                    package_id,
                    PackageBackupReport {
                        error: Some(
                            "Can't do backup because service is in a backing up state".to_owned(),
                        ),
                    },
                );
                continue;
            }
        };
        main_status_model
            .put(
                &mut tx,
                &MainStatus::BackingUp {
                    started,
                    health: health.clone(),
                },
            )
            .await?;
        tx.save().await?; // drop locks

        let manifest = installed_model
            .clone()
            .manifest()
            .get(&mut db, false)
            .await?;

        // Let the package manager react to the BackingUp status before the
        // backup procedure runs.
        ctx.managers
            .get(&(manifest.id.clone(), manifest.version.clone()))
            .await
            .ok_or_else(|| {
                Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest)
            })?
            .synchronize()
            .await;

        let mut tx = db.begin().await?;

        installed_model.lock(&mut tx, LockType::Write).await?;

        let guard = backup_guard.mount_package_backup(&package_id).await?;
        let res = manifest
            .backup
            .create(
                ctx,
                &package_id,
                &manifest.title,
                &manifest.version,
                &manifest.interfaces,
                &manifest.volumes,
            )
            .await;
        guard.unmount().await?;
        // Record success/failure for this package; errors do not abort the loop.
        backup_report.insert(
            package_id.clone(),
            PackageBackupReport {
                error: res.as_ref().err().map(|e| e.to_string()),
            },
        );

        if let Ok(pkg_meta) = res {
            installed_model
                .last_backup()
                .put(&mut tx, &Some(pkg_meta.timestamp))
                .await?;
            backup_guard
                .metadata
                .package_backups
                .insert(package_id.clone(), pkg_meta);
        }

        // Restore the pre-backup main status.
        main_status_model
            .put(
                &mut tx,
                &match started {
                    Some(started) => MainStatus::Running { started, health },
                    None => MainStatus::Stopped,
                },
            )
            .await?;

        // Mark this package's backup-progress entry complete.
        let mut backup_progress = crate::db::DatabaseModel::new()
            .server_info()
            .status_info()
            .backup_progress()
            .get_mut(&mut tx)
            .await?;
        if backup_progress.is_none() {
            *backup_progress = Some(Default::default());
        }
        if let Some(mut backup_progress) = backup_progress
            .as_mut()
            .and_then(|bp| bp.get_mut(&package_id))
        {
            (*backup_progress).complete = true;
        }
        backup_progress.save(&mut tx).await?;
        tx.save().await?;
    }

    crate::db::DatabaseModel::new()
        .lock(&mut db, LockType::Write)
        .await?;

    // Write the OS-level backup (Tor key, root CA, UI db) atomically.
    let (root_ca_key, root_ca_cert) = ctx.net_controller.ssl.export_root_ca().await?;
    let mut os_backup_file = AtomicFile::new(backup_guard.as_ref().join("os-backup.cbor")).await?;
    os_backup_file
        .write_all(
            &IoFormat::Cbor.to_vec(&OsBackup {
                tor_key: ctx.net_controller.tor.embassyd_tor_key().await,
                root_ca_key,
                root_ca_cert,
                ui: crate::db::DatabaseModel::new()
                    .ui()
                    .get(&mut db, true)
                    .await?
                    .into_owned(),
            })?,
        )
        .await?;
    os_backup_file.save().await?;

    let timestamp = Some(Utc::now());

    // Stamp both the encrypted and unencrypted metadata with the current
    // OS version and completion time.
    backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into();
    backup_guard.unencrypted_metadata.full = true;
    backup_guard.metadata.version = crate::version::Current::new().semver().into();
    backup_guard.metadata.timestamp = timestamp;

    backup_guard.save_and_unmount().await?;

    crate::db::DatabaseModel::new()
        .server_info()
        .last_backup()
        .put(&mut db, &timestamp)
        .await?;
    Ok(backup_report)
}
|
|
||||||
@@ -1,248 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use patch_db::{DbHandle, HasModel, LockType};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sqlx::{Executor, Sqlite};
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::io::AsyncWriteExt;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use self::target::PackageBackupInfo;
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::dependencies::reconfigure_dependents_with_live_pointers;
|
|
||||||
use crate::id::ImageId;
|
|
||||||
use crate::install::PKG_ARCHIVE_DIR;
|
|
||||||
use crate::net::interface::{InterfaceId, Interfaces};
|
|
||||||
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::util::serde::IoFormat;
|
|
||||||
use crate::util::{AtomicFile, Version};
|
|
||||||
use crate::version::{Current, VersionT};
|
|
||||||
use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR};
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
pub mod backup_bulk;
|
|
||||||
pub mod restore;
|
|
||||||
pub mod target;
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, Serialize)]
|
|
||||||
pub struct BackupReport {
|
|
||||||
server: ServerBackupReport,
|
|
||||||
packages: BTreeMap<PackageId, PackageBackupReport>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, Serialize)]
|
|
||||||
pub struct ServerBackupReport {
|
|
||||||
attempted: bool,
|
|
||||||
error: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, Serialize)]
|
|
||||||
pub struct PackageBackupReport {
|
|
||||||
error: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(subcommands(backup_bulk::backup_all, target::target))]
|
|
||||||
pub fn backup() -> Result<(), Error> {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(rename = "backup", subcommands(restore::restore_packages_rpc))]
|
|
||||||
pub fn package_backup() -> Result<(), Error> {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Deserialize, Serialize)]
|
|
||||||
struct BackupMetadata {
|
|
||||||
pub timestamp: DateTime<Utc>,
|
|
||||||
pub tor_keys: BTreeMap<InterfaceId, String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
|
|
||||||
pub struct BackupActions {
|
|
||||||
pub create: PackageProcedure,
|
|
||||||
pub restore: PackageProcedure,
|
|
||||||
}
|
|
||||||
impl BackupActions {
|
|
||||||
pub fn validate(&self, volumes: &Volumes, image_ids: &BTreeSet<ImageId>) -> Result<(), Error> {
|
|
||||||
self.create
|
|
||||||
.validate(volumes, image_ids, false)
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Create"))?;
|
|
||||||
self.restore
|
|
||||||
.validate(volumes, image_ids, false)
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Restore"))?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn create(
|
|
||||||
&self,
|
|
||||||
ctx: &RpcContext,
|
|
||||||
pkg_id: &PackageId,
|
|
||||||
pkg_title: &str,
|
|
||||||
pkg_version: &Version,
|
|
||||||
interfaces: &Interfaces,
|
|
||||||
volumes: &Volumes,
|
|
||||||
) -> Result<PackageBackupInfo, Error> {
|
|
||||||
let mut volumes = volumes.to_readonly();
|
|
||||||
volumes.insert(VolumeId::Backup, Volume::Backup { readonly: false });
|
|
||||||
let backup_dir = backup_dir(pkg_id);
|
|
||||||
if tokio::fs::metadata(&backup_dir).await.is_err() {
|
|
||||||
tokio::fs::create_dir_all(&backup_dir).await?
|
|
||||||
}
|
|
||||||
self.create
|
|
||||||
.execute::<(), NoOutput>(
|
|
||||||
ctx,
|
|
||||||
pkg_id,
|
|
||||||
pkg_version,
|
|
||||||
ProcedureName::CreateBackup,
|
|
||||||
&volumes,
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
.map_err(|e| eyre!("{}", e.1))
|
|
||||||
.with_kind(crate::ErrorKind::Backup)?;
|
|
||||||
let tor_keys = interfaces
|
|
||||||
.tor_keys(&mut ctx.secret_store.acquire().await?, pkg_id)
|
|
||||||
.await?
|
|
||||||
.into_iter()
|
|
||||||
.map(|(id, key)| {
|
|
||||||
(
|
|
||||||
id,
|
|
||||||
base32::encode(base32::Alphabet::RFC4648 { padding: true }, &key.as_bytes()),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
let tmp_path = Path::new(BACKUP_DIR)
|
|
||||||
.join(pkg_id)
|
|
||||||
.join(format!("{}.s9pk", pkg_id));
|
|
||||||
let s9pk_path = ctx
|
|
||||||
.datadir
|
|
||||||
.join(PKG_ARCHIVE_DIR)
|
|
||||||
.join(pkg_id)
|
|
||||||
.join(pkg_version.as_str())
|
|
||||||
.join(format!("{}.s9pk", pkg_id));
|
|
||||||
let mut infile = File::open(&s9pk_path).await?;
|
|
||||||
let mut outfile = AtomicFile::new(&tmp_path).await?;
|
|
||||||
tokio::io::copy(&mut infile, &mut *outfile)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
crate::ErrorKind::Filesystem,
|
|
||||||
format!("cp {} -> {}", s9pk_path.display(), tmp_path.display()),
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
outfile.save().await?;
|
|
||||||
let timestamp = Utc::now();
|
|
||||||
let metadata_path = Path::new(BACKUP_DIR).join(pkg_id).join("metadata.cbor");
|
|
||||||
let mut outfile = AtomicFile::new(&metadata_path).await?;
|
|
||||||
outfile
|
|
||||||
.write_all(&IoFormat::Cbor.to_vec(&BackupMetadata {
|
|
||||||
timestamp,
|
|
||||||
tor_keys,
|
|
||||||
})?)
|
|
||||||
.await?;
|
|
||||||
outfile.save().await?;
|
|
||||||
Ok(PackageBackupInfo {
|
|
||||||
os_version: Current::new().semver().into(),
|
|
||||||
title: pkg_title.to_owned(),
|
|
||||||
version: pkg_version.clone(),
|
|
||||||
timestamp,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx, db, secrets))]
|
|
||||||
pub async fn restore<Ex, Db: DbHandle>(
|
|
||||||
&self,
|
|
||||||
ctx: &RpcContext,
|
|
||||||
db: &mut Db,
|
|
||||||
secrets: &mut Ex,
|
|
||||||
pkg_id: &PackageId,
|
|
||||||
pkg_version: &Version,
|
|
||||||
interfaces: &Interfaces,
|
|
||||||
volumes: &Volumes,
|
|
||||||
) -> Result<(), Error>
|
|
||||||
where
|
|
||||||
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
|
|
||||||
{
|
|
||||||
let mut volumes = volumes.clone();
|
|
||||||
volumes.insert(VolumeId::Backup, Volume::Backup { readonly: true });
|
|
||||||
self.restore
|
|
||||||
.execute::<(), NoOutput>(
|
|
||||||
ctx,
|
|
||||||
pkg_id,
|
|
||||||
pkg_version,
|
|
||||||
ProcedureName::RestoreBackup,
|
|
||||||
&volumes,
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
.map_err(|e| eyre!("{}", e.1))
|
|
||||||
.with_kind(crate::ErrorKind::Restore)?;
|
|
||||||
let metadata_path = Path::new(BACKUP_DIR).join(pkg_id).join("metadata.cbor");
|
|
||||||
let metadata: BackupMetadata = IoFormat::Cbor.from_slice(
|
|
||||||
&tokio::fs::read(&metadata_path).await.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
crate::ErrorKind::Filesystem,
|
|
||||||
metadata_path.display().to_string(),
|
|
||||||
)
|
|
||||||
})?,
|
|
||||||
)?;
|
|
||||||
for (iface, key) in metadata.tor_keys {
|
|
||||||
let key_vec = base32::decode(base32::Alphabet::RFC4648 { padding: true }, &key)
|
|
||||||
.ok_or_else(|| {
|
|
||||||
Error::new(
|
|
||||||
eyre!("invalid base32 string"),
|
|
||||||
crate::ErrorKind::Deserialization,
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
sqlx::query!(
|
|
||||||
"REPLACE INTO tor (package, interface, key) VALUES (?, ?, ?)",
|
|
||||||
**pkg_id,
|
|
||||||
*iface,
|
|
||||||
key_vec,
|
|
||||||
)
|
|
||||||
.execute(&mut *secrets)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.lock(db, LockType::Write)
|
|
||||||
.await?;
|
|
||||||
crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.idx_model(pkg_id)
|
|
||||||
.expect(db)
|
|
||||||
.await?
|
|
||||||
.installed()
|
|
||||||
.expect(db)
|
|
||||||
.await?
|
|
||||||
.interface_addresses()
|
|
||||||
.put(db, &interfaces.install(&mut *secrets, pkg_id).await?)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let entry = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.idx_model(pkg_id)
|
|
||||||
.expect(db)
|
|
||||||
.await?
|
|
||||||
.installed()
|
|
||||||
.expect(db)
|
|
||||||
.await?
|
|
||||||
.get(db, true)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let receipts = crate::config::ConfigReceipts::new(db).await?;
|
|
||||||
reconfigure_dependents_with_live_pointers(ctx, db, &receipts, &entry).await?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,420 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::path::Path;
|
|
||||||
use std::sync::atomic::Ordering;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use futures::future::BoxFuture;
|
|
||||||
use futures::FutureExt;
|
|
||||||
use openssl::x509::X509;
|
|
||||||
use patch_db::{DbHandle, PatchDbHandle, Revision};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::task::JoinHandle;
|
|
||||||
use torut::onion::OnionAddressV3;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::target::BackupTargetId;
|
|
||||||
use crate::backup::backup_bulk::OsBackup;
|
|
||||||
use crate::context::{RpcContext, SetupContext};
|
|
||||||
use crate::db::model::{PackageDataEntry, StaticFiles};
|
|
||||||
use crate::db::util::WithRevision;
|
|
||||||
use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard};
|
|
||||||
use crate::disk::mount::filesystem::ReadOnly;
|
|
||||||
use crate::disk::mount::guard::TmpMountGuard;
|
|
||||||
use crate::install::progress::InstallProgress;
|
|
||||||
use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR};
|
|
||||||
use crate::net::ssl::SslManager;
|
|
||||||
use crate::notifications::NotificationLevel;
|
|
||||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
|
||||||
use crate::s9pk::reader::S9pkReader;
|
|
||||||
use crate::setup::RecoveryStatus;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::io::dir_size;
|
|
||||||
use crate::util::serde::IoFormat;
|
|
||||||
use crate::volume::{backup_dir, BACKUP_DIR, PKG_VOLUME_DIR};
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<Vec<PackageId>, Error> {
|
|
||||||
arg.split(',')
|
|
||||||
.map(|s| s.trim().parse().map_err(Error::from))
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(rename = "restore", display(display_none))]
|
|
||||||
#[instrument(skip(ctx, password))]
|
|
||||||
pub async fn restore_packages_rpc(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[arg(parse(parse_comma_separated))] ids: Vec<PackageId>,
|
|
||||||
#[arg(rename = "target-id")] target_id: BackupTargetId,
|
|
||||||
#[arg] password: String,
|
|
||||||
) -> Result<WithRevision<()>, Error> {
|
|
||||||
let mut db = ctx.db.handle();
|
|
||||||
let fs = target_id
|
|
||||||
.load(&mut ctx.secret_store.acquire().await?)
|
|
||||||
.await?;
|
|
||||||
let backup_guard = BackupMountGuard::mount(
|
|
||||||
TmpMountGuard::mount(&fs, ReadOnly).await?,
|
|
||||||
&password,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let (revision, backup_guard, tasks, _) =
|
|
||||||
restore_packages(&ctx, &mut db, backup_guard, ids).await?;
|
|
||||||
|
|
||||||
tokio::spawn(async {
|
|
||||||
futures::future::join_all(tasks).await;
|
|
||||||
if let Err(e) = backup_guard.unmount().await {
|
|
||||||
tracing::error!("Error unmounting backup drive: {}", e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(WithRevision {
|
|
||||||
response: (),
|
|
||||||
revision,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn approximate_progress(
|
|
||||||
rpc_ctx: &RpcContext,
|
|
||||||
progress: &mut ProgressInfo,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
for (id, size) in &mut progress.target_volume_size {
|
|
||||||
let dir = rpc_ctx.datadir.join(PKG_VOLUME_DIR).join(id).join("data");
|
|
||||||
if tokio::fs::metadata(&dir).await.is_err() {
|
|
||||||
*size = 0;
|
|
||||||
} else {
|
|
||||||
*size = dir_size(&dir).await?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn approximate_progress_loop(
|
|
||||||
ctx: &SetupContext,
|
|
||||||
rpc_ctx: &RpcContext,
|
|
||||||
mut starting_info: ProgressInfo,
|
|
||||||
) {
|
|
||||||
loop {
|
|
||||||
if let Err(e) = approximate_progress(rpc_ctx, &mut starting_info).await {
|
|
||||||
tracing::error!("Failed to approximate restore progress: {}", e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
} else {
|
|
||||||
*ctx.recovery_status.write().await = Some(Ok(starting_info.flatten()));
|
|
||||||
}
|
|
||||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Default)]
|
|
||||||
struct ProgressInfo {
|
|
||||||
package_installs: BTreeMap<PackageId, Arc<InstallProgress>>,
|
|
||||||
src_volume_size: BTreeMap<PackageId, u64>,
|
|
||||||
target_volume_size: BTreeMap<PackageId, u64>,
|
|
||||||
}
|
|
||||||
impl ProgressInfo {
|
|
||||||
fn flatten(&self) -> RecoveryStatus {
|
|
||||||
let mut total_bytes = 0;
|
|
||||||
let mut bytes_transferred = 0;
|
|
||||||
|
|
||||||
for progress in self.package_installs.values() {
|
|
||||||
total_bytes += ((progress.size.unwrap_or(0) as f64) * 2.2) as u64;
|
|
||||||
bytes_transferred += progress.downloaded.load(Ordering::SeqCst);
|
|
||||||
bytes_transferred += ((progress.validated.load(Ordering::SeqCst) as f64) * 0.2) as u64;
|
|
||||||
bytes_transferred += progress.unpacked.load(Ordering::SeqCst);
|
|
||||||
}
|
|
||||||
|
|
||||||
for size in self.src_volume_size.values() {
|
|
||||||
total_bytes += *size;
|
|
||||||
}
|
|
||||||
|
|
||||||
for size in self.target_volume_size.values() {
|
|
||||||
bytes_transferred += *size;
|
|
||||||
}
|
|
||||||
|
|
||||||
if bytes_transferred > total_bytes {
|
|
||||||
bytes_transferred = total_bytes;
|
|
||||||
}
|
|
||||||
|
|
||||||
RecoveryStatus {
|
|
||||||
total_bytes,
|
|
||||||
bytes_transferred,
|
|
||||||
complete: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn recover_full_embassy(
|
|
||||||
ctx: SetupContext,
|
|
||||||
disk_guid: Arc<String>,
|
|
||||||
embassy_password: String,
|
|
||||||
recovery_source: TmpMountGuard,
|
|
||||||
recovery_password: Option<String>,
|
|
||||||
) -> Result<(OnionAddressV3, X509, BoxFuture<'static, Result<(), Error>>), Error> {
|
|
||||||
let backup_guard = BackupMountGuard::mount(
|
|
||||||
recovery_source,
|
|
||||||
recovery_password.as_deref().unwrap_or_default(),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let os_backup_path = backup_guard.as_ref().join("os-backup.cbor");
|
|
||||||
let os_backup: OsBackup =
|
|
||||||
IoFormat::Cbor.from_slice(&tokio::fs::read(&os_backup_path).await.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
crate::ErrorKind::Filesystem,
|
|
||||||
os_backup_path.display().to_string(),
|
|
||||||
)
|
|
||||||
})?)?;
|
|
||||||
|
|
||||||
let password = argon2::hash_encoded(
|
|
||||||
embassy_password.as_bytes(),
|
|
||||||
&rand::random::<[u8; 16]>()[..],
|
|
||||||
&argon2::Config::default(),
|
|
||||||
)
|
|
||||||
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
|
|
||||||
let key_vec = os_backup.tor_key.as_bytes().to_vec();
|
|
||||||
let secret_store = ctx.secret_store().await?;
|
|
||||||
sqlx::query!(
|
|
||||||
"REPLACE INTO account (id, password, tor_key) VALUES (?, ?, ?)",
|
|
||||||
0,
|
|
||||||
password,
|
|
||||||
key_vec,
|
|
||||||
)
|
|
||||||
.execute(&mut secret_store.acquire().await?)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
SslManager::import_root_ca(
|
|
||||||
secret_store.clone(),
|
|
||||||
os_backup.root_ca_key,
|
|
||||||
os_backup.root_ca_cert.clone(),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
secret_store.close().await;
|
|
||||||
|
|
||||||
Ok((
|
|
||||||
os_backup.tor_key.public().get_onion_address(),
|
|
||||||
os_backup.root_ca_cert,
|
|
||||||
async move {
|
|
||||||
let rpc_ctx = RpcContext::init(ctx.config_path.as_ref(), disk_guid).await?;
|
|
||||||
let mut db = rpc_ctx.db.handle();
|
|
||||||
|
|
||||||
let ids = backup_guard
|
|
||||||
.metadata
|
|
||||||
.package_backups
|
|
||||||
.keys()
|
|
||||||
.cloned()
|
|
||||||
.collect();
|
|
||||||
let (_, backup_guard, tasks, progress_info) = restore_packages(
|
|
||||||
&rpc_ctx,
|
|
||||||
&mut db,
|
|
||||||
backup_guard,
|
|
||||||
ids,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
tokio::select! {
|
|
||||||
res = futures::future::join_all(tasks) => {
|
|
||||||
for res in res {
|
|
||||||
match res.with_kind(crate::ErrorKind::Unknown) {
|
|
||||||
Ok((Ok(_), _)) => (),
|
|
||||||
Ok((Err(err), package_id)) => {
|
|
||||||
if let Err(err) = rpc_ctx.notification_manager.notify(
|
|
||||||
&mut db,
|
|
||||||
Some(package_id.clone()),
|
|
||||||
NotificationLevel::Error,
|
|
||||||
"Restoration Failure".to_string(), format!("Error restoring package {}: {}", package_id,err), (), None).await{
|
|
||||||
tracing::error!("Failed to notify: {}", err);
|
|
||||||
tracing::debug!("{:?}", err);
|
|
||||||
};
|
|
||||||
tracing::error!("Error restoring package {}: {}", package_id, err);
|
|
||||||
tracing::debug!("{:?}", err);
|
|
||||||
},
|
|
||||||
Err(e) => {
|
|
||||||
if let Err(err) = rpc_ctx.notification_manager.notify(
|
|
||||||
&mut db,
|
|
||||||
None,
|
|
||||||
NotificationLevel::Error,
|
|
||||||
"Restoration Failure".to_string(), format!("Error restoring ?: {}", e), (), None).await {
|
|
||||||
|
|
||||||
tracing::error!("Failed to notify: {}", err);
|
|
||||||
tracing::debug!("{:?}", err);
|
|
||||||
}
|
|
||||||
tracing::error!("Error restoring packages: {}", e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
},
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
_ = approximate_progress_loop(&ctx, &rpc_ctx, progress_info) => unreachable!(concat!(module_path!(), "::approximate_progress_loop should not terminate")),
|
|
||||||
}
|
|
||||||
|
|
||||||
backup_guard.unmount().await?;
|
|
||||||
rpc_ctx.shutdown().await
|
|
||||||
}.boxed()
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn restore_packages(
|
|
||||||
ctx: &RpcContext,
|
|
||||||
db: &mut PatchDbHandle,
|
|
||||||
backup_guard: BackupMountGuard<TmpMountGuard>,
|
|
||||||
ids: Vec<PackageId>,
|
|
||||||
) -> Result<
|
|
||||||
(
|
|
||||||
Option<Arc<Revision>>,
|
|
||||||
BackupMountGuard<TmpMountGuard>,
|
|
||||||
Vec<JoinHandle<(Result<(), Error>, PackageId)>>,
|
|
||||||
ProgressInfo,
|
|
||||||
),
|
|
||||||
Error,
|
|
||||||
> {
|
|
||||||
let (revision, guards) = assure_restoring(ctx, db, ids, &backup_guard).await?;
|
|
||||||
|
|
||||||
let mut progress_info = ProgressInfo::default();
|
|
||||||
|
|
||||||
let mut tasks = Vec::with_capacity(guards.len());
|
|
||||||
for (manifest, guard) in guards {
|
|
||||||
let id = manifest.id.clone();
|
|
||||||
let (progress, task) = restore_package(ctx.clone(), manifest, guard).await?;
|
|
||||||
progress_info.package_installs.insert(id.clone(), progress);
|
|
||||||
progress_info
|
|
||||||
.src_volume_size
|
|
||||||
.insert(id.clone(), dir_size(backup_dir(&id)).await?);
|
|
||||||
progress_info.target_volume_size.insert(id.clone(), 0);
|
|
||||||
let package_id = id.clone();
|
|
||||||
tasks.push(tokio::spawn(
|
|
||||||
async move {
|
|
||||||
if let Err(e) = task.await {
|
|
||||||
tracing::error!("Error restoring package {}: {}", id, e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
Err(e)
|
|
||||||
} else {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.map(|x| (x, package_id)),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok((revision, backup_guard, tasks, progress_info))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx, db, backup_guard))]
|
|
||||||
async fn assure_restoring(
|
|
||||||
ctx: &RpcContext,
|
|
||||||
db: &mut PatchDbHandle,
|
|
||||||
ids: Vec<PackageId>,
|
|
||||||
backup_guard: &BackupMountGuard<TmpMountGuard>,
|
|
||||||
) -> Result<
|
|
||||||
(
|
|
||||||
Option<Arc<Revision>>,
|
|
||||||
Vec<(Manifest, PackageBackupMountGuard)>,
|
|
||||||
),
|
|
||||||
Error,
|
|
||||||
> {
|
|
||||||
let mut tx = db.begin().await?;
|
|
||||||
|
|
||||||
let mut guards = Vec::with_capacity(ids.len());
|
|
||||||
|
|
||||||
for id in ids {
|
|
||||||
let mut model = crate::db::DatabaseModel::new()
|
|
||||||
.package_data()
|
|
||||||
.idx_model(&id)
|
|
||||||
.get_mut(&mut tx)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
if !model.is_none() {
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Can't restore over existing package: {}", id),
|
|
||||||
crate::ErrorKind::InvalidRequest,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let guard = backup_guard.mount_package_backup(&id).await?;
|
|
||||||
let s9pk_path = Path::new(BACKUP_DIR).join(&id).join(format!("{}.s9pk", id));
|
|
||||||
let mut rdr = S9pkReader::open(&s9pk_path, false).await?;
|
|
||||||
|
|
||||||
let manifest = rdr.manifest().await?;
|
|
||||||
let version = manifest.version.clone();
|
|
||||||
let progress = InstallProgress::new(Some(tokio::fs::metadata(&s9pk_path).await?.len()));
|
|
||||||
|
|
||||||
let public_dir_path = ctx
|
|
||||||
.datadir
|
|
||||||
.join(PKG_PUBLIC_DIR)
|
|
||||||
.join(&id)
|
|
||||||
.join(version.as_str());
|
|
||||||
tokio::fs::create_dir_all(&public_dir_path).await?;
|
|
||||||
|
|
||||||
let license_path = public_dir_path.join("LICENSE.md");
|
|
||||||
let mut dst = File::create(&license_path).await?;
|
|
||||||
tokio::io::copy(&mut rdr.license().await?, &mut dst).await?;
|
|
||||||
dst.sync_all().await?;
|
|
||||||
|
|
||||||
let instructions_path = public_dir_path.join("INSTRUCTIONS.md");
|
|
||||||
let mut dst = File::create(&instructions_path).await?;
|
|
||||||
tokio::io::copy(&mut rdr.instructions().await?, &mut dst).await?;
|
|
||||||
dst.sync_all().await?;
|
|
||||||
|
|
||||||
let icon_path = Path::new("icon").with_extension(&manifest.assets.icon_type());
|
|
||||||
let icon_path = public_dir_path.join(&icon_path);
|
|
||||||
let mut dst = File::create(&icon_path).await?;
|
|
||||||
tokio::io::copy(&mut rdr.icon().await?, &mut dst).await?;
|
|
||||||
dst.sync_all().await?;
|
|
||||||
|
|
||||||
*model = Some(PackageDataEntry::Restoring {
|
|
||||||
install_progress: progress.clone(),
|
|
||||||
static_files: StaticFiles::local(&id, &version, manifest.assets.icon_type()),
|
|
||||||
manifest: manifest.clone(),
|
|
||||||
});
|
|
||||||
model.save(&mut tx).await?;
|
|
||||||
|
|
||||||
guards.push((manifest, guard));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok((tx.commit(None).await?, guards))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx, guard))]
|
|
||||||
async fn restore_package<'a>(
|
|
||||||
ctx: RpcContext,
|
|
||||||
manifest: Manifest,
|
|
||||||
guard: PackageBackupMountGuard,
|
|
||||||
) -> Result<(Arc<InstallProgress>, BoxFuture<'static, Result<(), Error>>), Error> {
|
|
||||||
let s9pk_path = Path::new(BACKUP_DIR)
|
|
||||||
.join(&manifest.id)
|
|
||||||
.join(format!("{}.s9pk", manifest.id));
|
|
||||||
let len = tokio::fs::metadata(&s9pk_path)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
crate::ErrorKind::Filesystem,
|
|
||||||
s9pk_path.display().to_string(),
|
|
||||||
)
|
|
||||||
})?
|
|
||||||
.len();
|
|
||||||
let file = File::open(&s9pk_path).await.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
crate::ErrorKind::Filesystem,
|
|
||||||
s9pk_path.display().to_string(),
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let progress = InstallProgress::new(Some(len));
|
|
||||||
|
|
||||||
Ok((
|
|
||||||
progress.clone(),
|
|
||||||
async move {
|
|
||||||
download_install_s9pk(&ctx, &manifest, None, progress, file).await?;
|
|
||||||
|
|
||||||
guard.unmount().await?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
.boxed(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
@@ -1,212 +0,0 @@
|
|||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use futures::TryStreamExt;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sqlx::{Executor, Sqlite};
|
|
||||||
|
|
||||||
use super::{BackupTarget, BackupTargetId};
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::disk::mount::filesystem::cifs::Cifs;
|
|
||||||
use crate::disk::mount::filesystem::ReadOnly;
|
|
||||||
use crate::disk::mount::guard::TmpMountGuard;
|
|
||||||
use crate::disk::util::{recovery_info, EmbassyOsRecoveryInfo};
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::KeyVal;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, Serialize)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct CifsBackupTarget {
|
|
||||||
hostname: String,
|
|
||||||
path: PathBuf,
|
|
||||||
username: String,
|
|
||||||
mountable: bool,
|
|
||||||
embassy_os: Option<EmbassyOsRecoveryInfo>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(subcommands(add, update, remove))]
|
|
||||||
pub fn cifs() -> Result<(), Error> {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(display(display_none))]
|
|
||||||
pub async fn add(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[arg] hostname: String,
|
|
||||||
#[arg] path: PathBuf,
|
|
||||||
#[arg] username: String,
|
|
||||||
#[arg] password: Option<String>,
|
|
||||||
) -> Result<KeyVal<BackupTargetId, BackupTarget>, Error> {
|
|
||||||
let cifs = Cifs {
|
|
||||||
hostname,
|
|
||||||
path,
|
|
||||||
username,
|
|
||||||
password,
|
|
||||||
};
|
|
||||||
let guard = TmpMountGuard::mount(&cifs, ReadOnly).await?;
|
|
||||||
let embassy_os = recovery_info(&guard).await?;
|
|
||||||
guard.unmount().await?;
|
|
||||||
let path_string = Path::new("/").join(&cifs.path).display().to_string();
|
|
||||||
let id: u32 = sqlx::query!(
|
|
||||||
"INSERT INTO cifs_shares (hostname, path, username, password) VALUES (?, ?, ?, ?) RETURNING id AS \"id: u32\"",
|
|
||||||
cifs.hostname,
|
|
||||||
path_string,
|
|
||||||
cifs.username,
|
|
||||||
cifs.password,
|
|
||||||
)
|
|
||||||
.fetch_one(&ctx.secret_store)
|
|
||||||
.await?.id;
|
|
||||||
Ok(KeyVal {
|
|
||||||
key: BackupTargetId::Cifs { id },
|
|
||||||
value: BackupTarget::Cifs(CifsBackupTarget {
|
|
||||||
hostname: cifs.hostname,
|
|
||||||
path: cifs.path,
|
|
||||||
username: cifs.username,
|
|
||||||
mountable: true,
|
|
||||||
embassy_os,
|
|
||||||
}),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(display(display_none))]
|
|
||||||
pub async fn update(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[arg] id: BackupTargetId,
|
|
||||||
#[arg] hostname: String,
|
|
||||||
#[arg] path: PathBuf,
|
|
||||||
#[arg] username: String,
|
|
||||||
#[arg] password: Option<String>,
|
|
||||||
) -> Result<KeyVal<BackupTargetId, BackupTarget>, Error> {
|
|
||||||
let id = if let BackupTargetId::Cifs { id } = id {
|
|
||||||
id
|
|
||||||
} else {
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Backup Target ID {} Not Found", id),
|
|
||||||
crate::ErrorKind::NotFound,
|
|
||||||
));
|
|
||||||
};
|
|
||||||
let cifs = Cifs {
|
|
||||||
hostname,
|
|
||||||
path,
|
|
||||||
username,
|
|
||||||
password,
|
|
||||||
};
|
|
||||||
let guard = TmpMountGuard::mount(&cifs, ReadOnly).await?;
|
|
||||||
let embassy_os = recovery_info(&guard).await?;
|
|
||||||
guard.unmount().await?;
|
|
||||||
let path_string = Path::new("/").join(&cifs.path).display().to_string();
|
|
||||||
if sqlx::query!(
|
|
||||||
"UPDATE cifs_shares SET hostname = ?, path = ?, username = ?, password = ? WHERE id = ?",
|
|
||||||
cifs.hostname,
|
|
||||||
path_string,
|
|
||||||
cifs.username,
|
|
||||||
cifs.password,
|
|
||||||
id,
|
|
||||||
)
|
|
||||||
.execute(&ctx.secret_store)
|
|
||||||
.await?
|
|
||||||
.rows_affected()
|
|
||||||
== 0
|
|
||||||
{
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
|
|
||||||
crate::ErrorKind::NotFound,
|
|
||||||
));
|
|
||||||
};
|
|
||||||
Ok(KeyVal {
|
|
||||||
key: BackupTargetId::Cifs { id },
|
|
||||||
value: BackupTarget::Cifs(CifsBackupTarget {
|
|
||||||
hostname: cifs.hostname,
|
|
||||||
path: cifs.path,
|
|
||||||
username: cifs.username,
|
|
||||||
mountable: true,
|
|
||||||
embassy_os,
|
|
||||||
}),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(display(display_none))]
|
|
||||||
pub async fn remove(#[context] ctx: RpcContext, #[arg] id: BackupTargetId) -> Result<(), Error> {
|
|
||||||
let id = if let BackupTargetId::Cifs { id } = id {
|
|
||||||
id
|
|
||||||
} else {
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Backup Target ID {} Not Found", id),
|
|
||||||
crate::ErrorKind::NotFound,
|
|
||||||
));
|
|
||||||
};
|
|
||||||
if sqlx::query!("DELETE FROM cifs_shares WHERE id = ?", id)
|
|
||||||
.execute(&ctx.secret_store)
|
|
||||||
.await?
|
|
||||||
.rows_affected()
|
|
||||||
== 0
|
|
||||||
{
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
|
|
||||||
crate::ErrorKind::NotFound,
|
|
||||||
));
|
|
||||||
};
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn load<Ex>(secrets: &mut Ex, id: u32) -> Result<Cifs, Error>
|
|
||||||
where
|
|
||||||
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
|
|
||||||
{
|
|
||||||
let record = sqlx::query!(
|
|
||||||
"SELECT hostname, path, username, password FROM cifs_shares WHERE id = ?",
|
|
||||||
id
|
|
||||||
)
|
|
||||||
.fetch_one(secrets)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(Cifs {
|
|
||||||
hostname: record.hostname,
|
|
||||||
path: PathBuf::from(record.path),
|
|
||||||
username: record.username,
|
|
||||||
password: record.password,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn list<Ex>(secrets: &mut Ex) -> Result<Vec<(u32, CifsBackupTarget)>, Error>
|
|
||||||
where
|
|
||||||
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
|
|
||||||
{
|
|
||||||
let mut records = sqlx::query!(
|
|
||||||
"SELECT id AS \"id: u32\", hostname, path, username, password FROM cifs_shares"
|
|
||||||
)
|
|
||||||
.fetch_many(secrets);
|
|
||||||
|
|
||||||
let mut cifs = Vec::new();
|
|
||||||
while let Some(query_result) = records.try_next().await? {
|
|
||||||
if let Some(record) = query_result.right() {
|
|
||||||
let mount_info = Cifs {
|
|
||||||
hostname: record.hostname,
|
|
||||||
path: PathBuf::from(record.path),
|
|
||||||
username: record.username,
|
|
||||||
password: record.password,
|
|
||||||
};
|
|
||||||
let embassy_os = async {
|
|
||||||
let guard = TmpMountGuard::mount(&mount_info, ReadOnly).await?;
|
|
||||||
let embassy_os = recovery_info(&guard).await?;
|
|
||||||
guard.unmount().await?;
|
|
||||||
Ok::<_, Error>(embassy_os)
|
|
||||||
}
|
|
||||||
.await;
|
|
||||||
cifs.push((
|
|
||||||
record.id,
|
|
||||||
CifsBackupTarget {
|
|
||||||
hostname: mount_info.hostname,
|
|
||||||
path: mount_info.path,
|
|
||||||
username: mount_info.username,
|
|
||||||
mountable: embassy_os.is_ok(),
|
|
||||||
embassy_os: embassy_os.ok().and_then(|a| a),
|
|
||||||
},
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(cifs)
|
|
||||||
}
|
|
||||||
@@ -1,249 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use digest::generic_array::GenericArray;
|
|
||||||
use digest::{Digest, OutputSizeUser};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sha2::Sha256;
|
|
||||||
use sqlx::{Executor, Sqlite};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use self::cifs::CifsBackupTarget;
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::disk::mount::backup::BackupMountGuard;
|
|
||||||
use crate::disk::mount::filesystem::block_dev::BlockDev;
|
|
||||||
use crate::disk::mount::filesystem::cifs::Cifs;
|
|
||||||
use crate::disk::mount::filesystem::{FileSystem, MountType, ReadOnly};
|
|
||||||
use crate::disk::mount::guard::TmpMountGuard;
|
|
||||||
use crate::disk::util::PartitionInfo;
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::util::serde::{deserialize_from_str, display_serializable, serialize_display};
|
|
||||||
use crate::util::Version;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
pub mod cifs;
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, Serialize)]
|
|
||||||
#[serde(tag = "type")]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub enum BackupTarget {
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
Disk {
|
|
||||||
vendor: Option<String>,
|
|
||||||
model: Option<String>,
|
|
||||||
#[serde(flatten)]
|
|
||||||
partition_info: PartitionInfo,
|
|
||||||
},
|
|
||||||
Cifs(CifsBackupTarget),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
|
|
||||||
pub enum BackupTargetId {
|
|
||||||
Disk { logicalname: PathBuf },
|
|
||||||
Cifs { id: u32 },
|
|
||||||
}
|
|
||||||
impl BackupTargetId {
|
|
||||||
pub async fn load<Ex>(self, secrets: &mut Ex) -> Result<BackupTargetFS, Error>
|
|
||||||
where
|
|
||||||
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
|
|
||||||
{
|
|
||||||
Ok(match self {
|
|
||||||
BackupTargetId::Disk { logicalname } => {
|
|
||||||
BackupTargetFS::Disk(BlockDev::new(logicalname))
|
|
||||||
}
|
|
||||||
BackupTargetId::Cifs { id } => BackupTargetFS::Cifs(cifs::load(secrets, id).await?),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl std::fmt::Display for BackupTargetId {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
match self {
|
|
||||||
BackupTargetId::Disk { logicalname } => write!(f, "disk-{}", logicalname.display()),
|
|
||||||
BackupTargetId::Cifs { id } => write!(f, "cifs-{}", id),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl std::str::FromStr for BackupTargetId {
|
|
||||||
type Err = Error;
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
match s.split_once("-") {
|
|
||||||
Some(("disk", logicalname)) => Ok(BackupTargetId::Disk {
|
|
||||||
logicalname: Path::new(logicalname).to_owned(),
|
|
||||||
}),
|
|
||||||
Some(("cifs", id)) => Ok(BackupTargetId::Cifs { id: id.parse()? }),
|
|
||||||
_ => Err(Error::new(
|
|
||||||
eyre!("Invalid Backup Target ID"),
|
|
||||||
crate::ErrorKind::InvalidBackupTargetId,
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<'de> Deserialize<'de> for BackupTargetId {
|
|
||||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
|
||||||
where
|
|
||||||
D: serde::Deserializer<'de>,
|
|
||||||
{
|
|
||||||
deserialize_from_str(deserializer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Serialize for BackupTargetId {
|
|
||||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: serde::Serializer,
|
|
||||||
{
|
|
||||||
serialize_display(self, serializer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, Serialize)]
|
|
||||||
#[serde(tag = "type")]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub enum BackupTargetFS {
|
|
||||||
Disk(BlockDev<PathBuf>),
|
|
||||||
Cifs(Cifs),
|
|
||||||
}
|
|
||||||
#[async_trait]
|
|
||||||
impl FileSystem for BackupTargetFS {
|
|
||||||
async fn mount<P: AsRef<Path> + Send + Sync>(
|
|
||||||
&self,
|
|
||||||
mountpoint: P,
|
|
||||||
mount_type: MountType,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
match self {
|
|
||||||
BackupTargetFS::Disk(a) => a.mount(mountpoint, mount_type).await,
|
|
||||||
BackupTargetFS::Cifs(a) => a.mount(mountpoint, mount_type).await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
async fn source_hash(
|
|
||||||
&self,
|
|
||||||
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
|
|
||||||
match self {
|
|
||||||
BackupTargetFS::Disk(a) => a.source_hash().await,
|
|
||||||
BackupTargetFS::Cifs(a) => a.source_hash().await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(subcommands(cifs::cifs, list, info))]
|
|
||||||
pub fn target() -> Result<(), Error> {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: incorporate reconnect into this response as well
|
|
||||||
#[command(display(display_serializable))]
|
|
||||||
pub async fn list(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
) -> Result<BTreeMap<BackupTargetId, BackupTarget>, Error> {
|
|
||||||
let mut sql_handle = ctx.secret_store.acquire().await?;
|
|
||||||
let (disks_res, cifs) =
|
|
||||||
tokio::try_join!(crate::disk::util::list(), cifs::list(&mut sql_handle),)?;
|
|
||||||
Ok(disks_res
|
|
||||||
.disks
|
|
||||||
.into_iter()
|
|
||||||
.flat_map(|mut disk| {
|
|
||||||
std::mem::take(&mut disk.partitions)
|
|
||||||
.into_iter()
|
|
||||||
.map(|part| {
|
|
||||||
(
|
|
||||||
BackupTargetId::Disk {
|
|
||||||
logicalname: part.logicalname.clone(),
|
|
||||||
},
|
|
||||||
BackupTarget::Disk {
|
|
||||||
vendor: disk.vendor.clone(),
|
|
||||||
model: disk.model.clone(),
|
|
||||||
partition_info: part,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
})
|
|
||||||
.chain(
|
|
||||||
cifs.into_iter()
|
|
||||||
.map(|(id, cifs)| (BackupTargetId::Cifs { id }, BackupTarget::Cifs(cifs))),
|
|
||||||
)
|
|
||||||
.collect())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct BackupInfo {
|
|
||||||
pub version: Version,
|
|
||||||
pub timestamp: Option<DateTime<Utc>>,
|
|
||||||
pub package_backups: BTreeMap<PackageId, PackageBackupInfo>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct PackageBackupInfo {
|
|
||||||
pub title: String,
|
|
||||||
pub version: Version,
|
|
||||||
pub os_version: Version,
|
|
||||||
pub timestamp: DateTime<Utc>,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn display_backup_info(info: BackupInfo, matches: &ArgMatches<'_>) {
|
|
||||||
use prettytable::*;
|
|
||||||
|
|
||||||
if matches.is_present("format") {
|
|
||||||
return display_serializable(info, matches);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut table = Table::new();
|
|
||||||
table.add_row(row![bc =>
|
|
||||||
"ID",
|
|
||||||
"VERSION",
|
|
||||||
"OS VERSION",
|
|
||||||
"TIMESTAMP",
|
|
||||||
]);
|
|
||||||
table.add_row(row![
|
|
||||||
"EMBASSY OS",
|
|
||||||
info.version.as_str(),
|
|
||||||
info.version.as_str(),
|
|
||||||
&if let Some(ts) = &info.timestamp {
|
|
||||||
ts.to_string()
|
|
||||||
} else {
|
|
||||||
"N/A".to_owned()
|
|
||||||
},
|
|
||||||
]);
|
|
||||||
for (id, info) in info.package_backups {
|
|
||||||
let row = row![
|
|
||||||
id.as_str(),
|
|
||||||
info.version.as_str(),
|
|
||||||
info.os_version.as_str(),
|
|
||||||
&info.timestamp.to_string(),
|
|
||||||
];
|
|
||||||
table.add_row(row);
|
|
||||||
}
|
|
||||||
table.print_tty(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[command(display(display_backup_info))]
|
|
||||||
#[instrument(skip(ctx, password))]
|
|
||||||
pub async fn info(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[arg(rename = "target-id")] target_id: BackupTargetId,
|
|
||||||
#[arg] password: String,
|
|
||||||
) -> Result<BackupInfo, Error> {
|
|
||||||
let guard = BackupMountGuard::mount(
|
|
||||||
TmpMountGuard::mount(
|
|
||||||
&target_id
|
|
||||||
.load(&mut ctx.secret_store.acquire().await?)
|
|
||||||
.await?,
|
|
||||||
ReadOnly,
|
|
||||||
)
|
|
||||||
.await?,
|
|
||||||
&password,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let res = guard.metadata.clone();
|
|
||||||
|
|
||||||
guard.unmount().await?;
|
|
||||||
|
|
||||||
Ok(res)
|
|
||||||
}
|
|
||||||
@@ -1,57 +0,0 @@
|
|||||||
use clap::Arg;
|
|
||||||
use embassy::context::CliContext;
|
|
||||||
use embassy::util::logger::EmbassyLogger;
|
|
||||||
use embassy::version::{Current, VersionT};
|
|
||||||
use embassy::Error;
|
|
||||||
use rpc_toolkit::run_cli;
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
fn inner_main() -> Result<(), Error> {
|
|
||||||
run_cli!({
|
|
||||||
command: embassy::main_api,
|
|
||||||
app: app => app
|
|
||||||
.name("Embassy CLI")
|
|
||||||
.version(Current::new().semver().to_string().as_str())
|
|
||||||
.arg(
|
|
||||||
clap::Arg::with_name("config")
|
|
||||||
.short("c")
|
|
||||||
.long("config")
|
|
||||||
.takes_value(true),
|
|
||||||
)
|
|
||||||
.arg(Arg::with_name("host").long("host").short("h").takes_value(true))
|
|
||||||
.arg(Arg::with_name("proxy").long("proxy").short("p").takes_value(true)),
|
|
||||||
context: matches => {
|
|
||||||
EmbassyLogger::init();
|
|
||||||
CliContext::init(matches)?
|
|
||||||
},
|
|
||||||
exit: |e: RpcError| {
|
|
||||||
match e.data {
|
|
||||||
Some(Value::String(s)) => eprintln!("{}: {}", e.message, s),
|
|
||||||
Some(Value::Object(o)) => if let Some(Value::String(s)) = o.get("details") {
|
|
||||||
eprintln!("{}: {}", e.message, s);
|
|
||||||
if let Some(Value::String(s)) = o.get("debug") {
|
|
||||||
tracing::debug!("{}", s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(a) => eprintln!("{}: {}", e.message, a),
|
|
||||||
None => eprintln!("{}", e.message),
|
|
||||||
}
|
|
||||||
|
|
||||||
std::process::exit(e.code);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
match inner_main() {
|
|
||||||
Ok(_) => (),
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("{}", e.source);
|
|
||||||
tracing::debug!("{:?}", e.source);
|
|
||||||
drop(e.source);
|
|
||||||
std::process::exit(e.kind as i32)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,240 +0,0 @@
|
|||||||
use std::path::Path;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use embassy::context::rpc::RpcContextConfig;
|
|
||||||
use embassy::context::{DiagnosticContext, SetupContext};
|
|
||||||
use embassy::disk::fsck::RepairStrategy;
|
|
||||||
use embassy::disk::main::DEFAULT_PASSWORD;
|
|
||||||
use embassy::disk::REPAIR_DISK_PATH;
|
|
||||||
use embassy::hostname::get_product_key;
|
|
||||||
use embassy::middleware::cors::cors;
|
|
||||||
use embassy::middleware::diagnostic::diagnostic;
|
|
||||||
use embassy::middleware::encrypt::encrypt;
|
|
||||||
#[cfg(feature = "avahi")]
|
|
||||||
use embassy::net::mdns::MdnsController;
|
|
||||||
use embassy::shutdown::Shutdown;
|
|
||||||
use embassy::sound::CHIME;
|
|
||||||
use embassy::util::logger::EmbassyLogger;
|
|
||||||
use embassy::util::Invoke;
|
|
||||||
use embassy::{Error, ResultExt};
|
|
||||||
use http::StatusCode;
|
|
||||||
use rpc_toolkit::rpc_server;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
fn status_fn(_: i32) -> StatusCode {
|
|
||||||
StatusCode::OK
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument]
|
|
||||||
async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
|
|
||||||
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_err() {
|
|
||||||
#[cfg(feature = "avahi")]
|
|
||||||
let _mdns = MdnsController::init();
|
|
||||||
tokio::fs::write(
|
|
||||||
"/etc/nginx/sites-available/default",
|
|
||||||
include_str!("../nginx/setup-wizard.conf"),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
embassy::ErrorKind::Filesystem,
|
|
||||||
"/etc/nginx/sites-available/default",
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
Command::new("systemctl")
|
|
||||||
.arg("reload")
|
|
||||||
.arg("nginx")
|
|
||||||
.invoke(embassy::ErrorKind::Nginx)
|
|
||||||
.await?;
|
|
||||||
let ctx = SetupContext::init(cfg_path).await?;
|
|
||||||
let keysource_ctx = ctx.clone();
|
|
||||||
let keysource = move || {
|
|
||||||
let ctx = keysource_ctx.clone();
|
|
||||||
async move { ctx.product_key().await }
|
|
||||||
};
|
|
||||||
let encrypt = encrypt(keysource);
|
|
||||||
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
|
|
||||||
CHIME.play().await?;
|
|
||||||
rpc_server!({
|
|
||||||
command: embassy::setup_api,
|
|
||||||
context: ctx.clone(),
|
|
||||||
status: status_fn,
|
|
||||||
middleware: [
|
|
||||||
cors,
|
|
||||||
encrypt,
|
|
||||||
]
|
|
||||||
})
|
|
||||||
.with_graceful_shutdown({
|
|
||||||
let mut shutdown = ctx.shutdown.subscribe();
|
|
||||||
async move {
|
|
||||||
shutdown.recv().await.expect("context dropped");
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.with_kind(embassy::ErrorKind::Network)?;
|
|
||||||
} else {
|
|
||||||
let cfg = RpcContextConfig::load(cfg_path).await?;
|
|
||||||
let guid_string = tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
|
|
||||||
.await?;
|
|
||||||
let guid = guid_string.trim();
|
|
||||||
let requires_reboot = embassy::disk::main::import(
|
|
||||||
guid,
|
|
||||||
cfg.datadir(),
|
|
||||||
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
|
|
||||||
RepairStrategy::Aggressive
|
|
||||||
} else {
|
|
||||||
RepairStrategy::Preen
|
|
||||||
},
|
|
||||||
DEFAULT_PASSWORD,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
|
|
||||||
tokio::fs::remove_file(REPAIR_DISK_PATH)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| (embassy::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
|
|
||||||
}
|
|
||||||
if requires_reboot.0 {
|
|
||||||
embassy::disk::main::export(guid, cfg.datadir()).await?;
|
|
||||||
Command::new("reboot")
|
|
||||||
.invoke(embassy::ErrorKind::Unknown)
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
tracing::info!("Loaded Disk");
|
|
||||||
embassy::init::init(&cfg, &get_product_key().await?).await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
|
|
||||||
let script = path.as_ref();
|
|
||||||
if script.exists() {
|
|
||||||
match Command::new("/bin/bash").arg(script).spawn() {
|
|
||||||
Ok(mut c) => {
|
|
||||||
if let Err(e) = c.wait().await {
|
|
||||||
tracing::error!("Error Running {}: {}", script.display(), e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("Error Running {}: {}", script.display(), e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument]
|
|
||||||
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
|
|
||||||
embassy::sound::BEP.play().await?;
|
|
||||||
|
|
||||||
run_script_if_exists("/embassy-os/preinit.sh").await;
|
|
||||||
|
|
||||||
let res = if let Err(e) = setup_or_init(cfg_path).await {
|
|
||||||
async {
|
|
||||||
tracing::error!("{}", e.source);
|
|
||||||
tracing::debug!("{}", e.source);
|
|
||||||
embassy::sound::BEETHOVEN.play().await?;
|
|
||||||
#[cfg(feature = "avahi")]
|
|
||||||
let _mdns = MdnsController::init();
|
|
||||||
tokio::fs::write(
|
|
||||||
"/etc/nginx/sites-available/default",
|
|
||||||
include_str!("../nginx/diagnostic-ui.conf"),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
embassy::ErrorKind::Filesystem,
|
|
||||||
"/etc/nginx/sites-available/default",
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
Command::new("systemctl")
|
|
||||||
.arg("reload")
|
|
||||||
.arg("nginx")
|
|
||||||
.invoke(embassy::ErrorKind::Nginx)
|
|
||||||
.await?;
|
|
||||||
let ctx = DiagnosticContext::init(
|
|
||||||
cfg_path,
|
|
||||||
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() {
|
|
||||||
Some(Arc::new(
|
|
||||||
tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
|
|
||||||
.await?
|
|
||||||
.trim()
|
|
||||||
.to_owned(),
|
|
||||||
))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
},
|
|
||||||
e,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
let mut shutdown_recv = ctx.shutdown.subscribe();
|
|
||||||
rpc_server!({
|
|
||||||
command: embassy::diagnostic_api,
|
|
||||||
context: ctx.clone(),
|
|
||||||
status: status_fn,
|
|
||||||
middleware: [
|
|
||||||
cors,
|
|
||||||
diagnostic,
|
|
||||||
]
|
|
||||||
})
|
|
||||||
.with_graceful_shutdown({
|
|
||||||
let mut shutdown = ctx.shutdown.subscribe();
|
|
||||||
async move {
|
|
||||||
shutdown.recv().await.expect("context dropped");
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.with_kind(embassy::ErrorKind::Network)?;
|
|
||||||
|
|
||||||
Ok::<_, Error>(
|
|
||||||
shutdown_recv
|
|
||||||
.recv()
|
|
||||||
.await
|
|
||||||
.with_kind(embassy::ErrorKind::Network)?,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
.await
|
|
||||||
} else {
|
|
||||||
Ok(None)
|
|
||||||
};
|
|
||||||
|
|
||||||
run_script_if_exists("/embassy-os/postinit.sh").await;
|
|
||||||
|
|
||||||
res
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let matches = clap::App::new("embassyd")
|
|
||||||
.arg(
|
|
||||||
clap::Arg::with_name("config")
|
|
||||||
.short("c")
|
|
||||||
.long("config")
|
|
||||||
.takes_value(true),
|
|
||||||
)
|
|
||||||
.get_matches();
|
|
||||||
|
|
||||||
EmbassyLogger::init();
|
|
||||||
|
|
||||||
let cfg_path = matches.value_of("config");
|
|
||||||
let res = {
|
|
||||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
|
||||||
.enable_all()
|
|
||||||
.build()
|
|
||||||
.expect("failed to initialize runtime");
|
|
||||||
rt.block_on(inner_main(cfg_path))
|
|
||||||
};
|
|
||||||
|
|
||||||
match res {
|
|
||||||
Ok(Some(shutdown)) => shutdown.execute(),
|
|
||||||
Ok(None) => (),
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("{}", e.source);
|
|
||||||
tracing::debug!("{:?}", e.source);
|
|
||||||
drop(e.source);
|
|
||||||
std::process::exit(e.kind as i32)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,56 +0,0 @@
|
|||||||
use embassy::context::SdkContext;
|
|
||||||
use embassy::util::logger::EmbassyLogger;
|
|
||||||
use embassy::version::{Current, VersionT};
|
|
||||||
use embassy::Error;
|
|
||||||
use rpc_toolkit::run_cli;
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
fn inner_main() -> Result<(), Error> {
|
|
||||||
run_cli!({
|
|
||||||
command: embassy::portable_api,
|
|
||||||
app: app => app
|
|
||||||
.name("Embassy SDK")
|
|
||||||
.version(Current::new().semver().to_string().as_str())
|
|
||||||
.arg(
|
|
||||||
clap::Arg::with_name("config")
|
|
||||||
.short("c")
|
|
||||||
.long("config")
|
|
||||||
.takes_value(true),
|
|
||||||
),
|
|
||||||
context: matches => {
|
|
||||||
if let Err(_) = std::env::var("RUST_LOG") {
|
|
||||||
std::env::set_var("RUST_LOG", "embassy=warn,js_engine=warn");
|
|
||||||
}
|
|
||||||
EmbassyLogger::init();
|
|
||||||
SdkContext::init(matches)?
|
|
||||||
},
|
|
||||||
exit: |e: RpcError| {
|
|
||||||
match e.data {
|
|
||||||
Some(Value::String(s)) => eprintln!("{}: {}", e.message, s),
|
|
||||||
Some(Value::Object(o)) => if let Some(Value::String(s)) = o.get("details") {
|
|
||||||
eprintln!("{}: {}", e.message, s);
|
|
||||||
if let Some(Value::String(s)) = o.get("debug") {
|
|
||||||
tracing::debug!("{}", s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(a) => eprintln!("{}: {}", e.message, a),
|
|
||||||
None => eprintln!("{}", e.message),
|
|
||||||
}
|
|
||||||
std::process::exit(e.code);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
match inner_main() {
|
|
||||||
Ok(_) => (),
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("{}", e.source);
|
|
||||||
tracing::debug!("{:?}", e.source);
|
|
||||||
drop(e.source);
|
|
||||||
std::process::exit(e.kind as i32)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,381 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use embassy::context::{DiagnosticContext, RpcContext};
|
|
||||||
use embassy::core::rpc_continuations::RequestGuid;
|
|
||||||
use embassy::db::subscribe;
|
|
||||||
use embassy::middleware::auth::auth;
|
|
||||||
use embassy::middleware::cors::cors;
|
|
||||||
use embassy::middleware::diagnostic::diagnostic;
|
|
||||||
#[cfg(feature = "avahi")]
|
|
||||||
use embassy::net::mdns::MdnsController;
|
|
||||||
use embassy::net::tor::tor_health_check;
|
|
||||||
use embassy::shutdown::Shutdown;
|
|
||||||
use embassy::system::launch_metrics_task;
|
|
||||||
use embassy::util::logger::EmbassyLogger;
|
|
||||||
use embassy::util::{daemon, Invoke};
|
|
||||||
use embassy::{static_server, Error, ErrorKind, ResultExt};
|
|
||||||
use futures::{FutureExt, TryFutureExt};
|
|
||||||
use reqwest::{Client, Proxy};
|
|
||||||
use rpc_toolkit::hyper::{Body, Response, Server, StatusCode};
|
|
||||||
use rpc_toolkit::rpc_server;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tokio::signal::unix::signal;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
fn status_fn(_: i32) -> StatusCode {
|
|
||||||
StatusCode::OK
|
|
||||||
}
|
|
||||||
|
|
||||||
fn err_to_500(e: Error) -> Response<Body> {
|
|
||||||
tracing::error!("{}", e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
Response::builder()
|
|
||||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
|
||||||
.body(Body::empty())
|
|
||||||
.unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument]
|
|
||||||
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
|
|
||||||
let (rpc_ctx, shutdown) = {
|
|
||||||
embassy::hostname::sync_hostname().await?;
|
|
||||||
let rpc_ctx = RpcContext::init(
|
|
||||||
cfg_path,
|
|
||||||
Arc::new(
|
|
||||||
tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
|
|
||||||
.await?
|
|
||||||
.trim()
|
|
||||||
.to_owned(),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
let mut shutdown_recv = rpc_ctx.shutdown.subscribe();
|
|
||||||
|
|
||||||
let sig_handler_ctx = rpc_ctx.clone();
|
|
||||||
let sig_handler = tokio::spawn(async move {
|
|
||||||
use tokio::signal::unix::SignalKind;
|
|
||||||
futures::future::select_all(
|
|
||||||
[
|
|
||||||
SignalKind::interrupt(),
|
|
||||||
SignalKind::quit(),
|
|
||||||
SignalKind::terminate(),
|
|
||||||
]
|
|
||||||
.iter()
|
|
||||||
.map(|s| {
|
|
||||||
async move {
|
|
||||||
signal(*s)
|
|
||||||
.expect(&format!("register {:?} handler", s))
|
|
||||||
.recv()
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
.boxed()
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
sig_handler_ctx
|
|
||||||
.shutdown
|
|
||||||
.send(None)
|
|
||||||
.map_err(|_| ())
|
|
||||||
.expect("send shutdown signal");
|
|
||||||
});
|
|
||||||
|
|
||||||
let mut db = rpc_ctx.db.handle();
|
|
||||||
let receipts = embassy::context::rpc::RpcSetNginxReceipts::new(&mut db).await?;
|
|
||||||
|
|
||||||
rpc_ctx.set_nginx_conf(&mut db, receipts).await?;
|
|
||||||
drop(db);
|
|
||||||
let auth = auth(rpc_ctx.clone());
|
|
||||||
let ctx = rpc_ctx.clone();
|
|
||||||
let server = rpc_server!({
|
|
||||||
command: embassy::main_api,
|
|
||||||
context: ctx,
|
|
||||||
status: status_fn,
|
|
||||||
middleware: [
|
|
||||||
cors,
|
|
||||||
auth,
|
|
||||||
]
|
|
||||||
})
|
|
||||||
.with_graceful_shutdown({
|
|
||||||
let mut shutdown = rpc_ctx.shutdown.subscribe();
|
|
||||||
async move {
|
|
||||||
shutdown.recv().await.expect("context dropped");
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
let metrics_ctx = rpc_ctx.clone();
|
|
||||||
let metrics_task = tokio::spawn(async move {
|
|
||||||
launch_metrics_task(&metrics_ctx.metrics_cache, || {
|
|
||||||
metrics_ctx.shutdown.subscribe()
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
});
|
|
||||||
|
|
||||||
let rev_cache_ctx = rpc_ctx.clone();
|
|
||||||
let revision_cache_task = tokio::spawn(async move {
|
|
||||||
let mut sub = rev_cache_ctx.db.subscribe();
|
|
||||||
let mut shutdown = rev_cache_ctx.shutdown.subscribe();
|
|
||||||
loop {
|
|
||||||
let rev = match tokio::select! {
|
|
||||||
a = sub.recv() => a,
|
|
||||||
_ = shutdown.recv() => break,
|
|
||||||
} {
|
|
||||||
Ok(a) => a,
|
|
||||||
Err(_) => {
|
|
||||||
rev_cache_ctx.revision_cache.write().await.truncate(0);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}; // TODO: handle falling behind
|
|
||||||
let mut cache = rev_cache_ctx.revision_cache.write().await;
|
|
||||||
cache.push_back(rev);
|
|
||||||
if cache.len() > rev_cache_ctx.revision_cache_size {
|
|
||||||
cache.pop_front();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
let ws_ctx = rpc_ctx.clone();
|
|
||||||
let ws_server = {
|
|
||||||
let builder = Server::bind(&ws_ctx.bind_ws);
|
|
||||||
|
|
||||||
let make_svc = ::rpc_toolkit::hyper::service::make_service_fn(move |_| {
|
|
||||||
let ctx = ws_ctx.clone();
|
|
||||||
async move {
|
|
||||||
Ok::<_, ::rpc_toolkit::hyper::Error>(::rpc_toolkit::hyper::service::service_fn(
|
|
||||||
move |req| {
|
|
||||||
let ctx = ctx.clone();
|
|
||||||
async move {
|
|
||||||
tracing::debug!("Request to {}", req.uri().path());
|
|
||||||
match req.uri().path() {
|
|
||||||
"/ws/db" => {
|
|
||||||
Ok(subscribe(ctx, req).await.unwrap_or_else(err_to_500))
|
|
||||||
}
|
|
||||||
path if path.starts_with("/rest/rpc/") => {
|
|
||||||
match RequestGuid::from(
|
|
||||||
path.strip_prefix("/rest/rpc/").unwrap(),
|
|
||||||
) {
|
|
||||||
None => {
|
|
||||||
tracing::debug!("No Guid Path");
|
|
||||||
Response::builder()
|
|
||||||
.status(StatusCode::BAD_REQUEST)
|
|
||||||
.body(Body::empty())
|
|
||||||
}
|
|
||||||
Some(guid) => {
|
|
||||||
match ctx
|
|
||||||
.rpc_stream_continuations
|
|
||||||
.lock()
|
|
||||||
.await
|
|
||||||
.remove(&guid)
|
|
||||||
{
|
|
||||||
None => Response::builder()
|
|
||||||
.status(StatusCode::NOT_FOUND)
|
|
||||||
.body(Body::empty()),
|
|
||||||
Some(cont) => match (cont.handler)(req).await {
|
|
||||||
Ok(r) => Ok(r),
|
|
||||||
Err(e) => Response::builder()
|
|
||||||
.status(
|
|
||||||
StatusCode::INTERNAL_SERVER_ERROR,
|
|
||||||
)
|
|
||||||
.body(Body::from(format!("{}", e))),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => Response::builder()
|
|
||||||
.status(StatusCode::NOT_FOUND)
|
|
||||||
.body(Body::empty()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
))
|
|
||||||
}
|
|
||||||
});
|
|
||||||
builder.serve(make_svc)
|
|
||||||
}
|
|
||||||
.with_graceful_shutdown({
|
|
||||||
let mut shutdown = rpc_ctx.shutdown.subscribe();
|
|
||||||
async move {
|
|
||||||
shutdown.recv().await.expect("context dropped");
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
let file_server_ctx = rpc_ctx.clone();
|
|
||||||
let file_server = {
|
|
||||||
static_server::init(file_server_ctx, {
|
|
||||||
let mut shutdown = rpc_ctx.shutdown.subscribe();
|
|
||||||
async move {
|
|
||||||
shutdown.recv().await.expect("context dropped");
|
|
||||||
}
|
|
||||||
})
|
|
||||||
};
|
|
||||||
|
|
||||||
let tor_health_ctx = rpc_ctx.clone();
|
|
||||||
let tor_client = Client::builder()
|
|
||||||
.proxy(
|
|
||||||
Proxy::http(format!(
|
|
||||||
"socks5h://{}:{}",
|
|
||||||
rpc_ctx.tor_socks.ip(),
|
|
||||||
rpc_ctx.tor_socks.port()
|
|
||||||
))
|
|
||||||
.with_kind(crate::ErrorKind::Network)?,
|
|
||||||
)
|
|
||||||
.build()
|
|
||||||
.with_kind(crate::ErrorKind::Network)?;
|
|
||||||
let tor_health_daemon = daemon(
|
|
||||||
move || {
|
|
||||||
let ctx = tor_health_ctx.clone();
|
|
||||||
let client = tor_client.clone();
|
|
||||||
async move { tor_health_check(&client, &ctx.net_controller.tor).await }
|
|
||||||
},
|
|
||||||
Duration::from_secs(300),
|
|
||||||
rpc_ctx.shutdown.subscribe(),
|
|
||||||
);
|
|
||||||
|
|
||||||
embassy::sound::CHIME.play().await?;
|
|
||||||
|
|
||||||
futures::try_join!(
|
|
||||||
server
|
|
||||||
.map_err(|e| Error::new(e, ErrorKind::Network))
|
|
||||||
.map_ok(|_| tracing::debug!("RPC Server Shutdown")),
|
|
||||||
metrics_task
|
|
||||||
.map_err(|e| Error::new(
|
|
||||||
eyre!("{}", e).wrap_err("Metrics daemon panicked!"),
|
|
||||||
ErrorKind::Unknown
|
|
||||||
))
|
|
||||||
.map_ok(|_| tracing::debug!("Metrics daemon Shutdown")),
|
|
||||||
revision_cache_task
|
|
||||||
.map_err(|e| Error::new(
|
|
||||||
eyre!("{}", e).wrap_err("Revision Cache daemon panicked!"),
|
|
||||||
ErrorKind::Unknown
|
|
||||||
))
|
|
||||||
.map_ok(|_| tracing::debug!("Revision Cache daemon Shutdown")),
|
|
||||||
ws_server
|
|
||||||
.map_err(|e| Error::new(e, ErrorKind::Network))
|
|
||||||
.map_ok(|_| tracing::debug!("WebSocket Server Shutdown")),
|
|
||||||
file_server
|
|
||||||
.map_err(|e| Error::new(e, ErrorKind::Network))
|
|
||||||
.map_ok(|_| tracing::debug!("Static File Server Shutdown")),
|
|
||||||
tor_health_daemon
|
|
||||||
.map_err(|e| Error::new(
|
|
||||||
e.wrap_err("Tor Health daemon panicked!"),
|
|
||||||
ErrorKind::Unknown
|
|
||||||
))
|
|
||||||
.map_ok(|_| tracing::debug!("Tor Health daemon Shutdown")),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut shutdown = shutdown_recv
|
|
||||||
.recv()
|
|
||||||
.await
|
|
||||||
.with_kind(crate::ErrorKind::Unknown)?;
|
|
||||||
|
|
||||||
sig_handler.abort();
|
|
||||||
|
|
||||||
if let Some(shutdown) = &mut shutdown {
|
|
||||||
drop(shutdown.db_handle.take());
|
|
||||||
}
|
|
||||||
|
|
||||||
(rpc_ctx, shutdown)
|
|
||||||
};
|
|
||||||
rpc_ctx.shutdown().await?;
|
|
||||||
|
|
||||||
Ok(shutdown)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let matches = clap::App::new("embassyd")
|
|
||||||
.arg(
|
|
||||||
clap::Arg::with_name("config")
|
|
||||||
.short("c")
|
|
||||||
.long("config")
|
|
||||||
.takes_value(true),
|
|
||||||
)
|
|
||||||
.get_matches();
|
|
||||||
|
|
||||||
EmbassyLogger::init();
|
|
||||||
|
|
||||||
let cfg_path = matches.value_of("config");
|
|
||||||
|
|
||||||
let res = {
|
|
||||||
let rt = tokio::runtime::Builder::new_multi_thread()
|
|
||||||
.enable_all()
|
|
||||||
.build()
|
|
||||||
.expect("failed to initialize runtime");
|
|
||||||
rt.block_on(async {
|
|
||||||
match inner_main(cfg_path).await {
|
|
||||||
Ok(a) => Ok(a),
|
|
||||||
Err(e) => {
|
|
||||||
(|| async {
|
|
||||||
tracing::error!("{}", e.source);
|
|
||||||
tracing::debug!("{:?}", e.source);
|
|
||||||
embassy::sound::BEETHOVEN.play().await?;
|
|
||||||
#[cfg(feature = "avahi")]
|
|
||||||
let _mdns = MdnsController::init();
|
|
||||||
tokio::fs::write(
|
|
||||||
"/etc/nginx/sites-available/default",
|
|
||||||
include_str!("../nginx/diagnostic-ui.conf"),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.with_ctx(|_| {
|
|
||||||
(
|
|
||||||
embassy::ErrorKind::Filesystem,
|
|
||||||
"/etc/nginx/sites-available/default",
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
Command::new("systemctl")
|
|
||||||
.arg("reload")
|
|
||||||
.arg("nginx")
|
|
||||||
.invoke(embassy::ErrorKind::Nginx)
|
|
||||||
.await?;
|
|
||||||
let ctx = DiagnosticContext::init(
|
|
||||||
cfg_path,
|
|
||||||
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() {
|
|
||||||
Some(Arc::new(
|
|
||||||
tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
|
|
||||||
.await?
|
|
||||||
.trim()
|
|
||||||
.to_owned(),
|
|
||||||
))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
},
|
|
||||||
e,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
rpc_server!({
|
|
||||||
command: embassy::diagnostic_api,
|
|
||||||
context: ctx.clone(),
|
|
||||||
status: status_fn,
|
|
||||||
middleware: [
|
|
||||||
cors,
|
|
||||||
diagnostic,
|
|
||||||
]
|
|
||||||
})
|
|
||||||
.with_graceful_shutdown({
|
|
||||||
let mut shutdown = ctx.shutdown.subscribe();
|
|
||||||
async move {
|
|
||||||
shutdown.recv().await.expect("context dropped");
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.with_kind(embassy::ErrorKind::Network)?;
|
|
||||||
Ok::<_, Error>(None)
|
|
||||||
})()
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
};
|
|
||||||
|
|
||||||
match res {
|
|
||||||
Ok(None) => (),
|
|
||||||
Ok(Some(s)) => s.execute(),
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("{}", e.source);
|
|
||||||
tracing::debug!("{:?}", e.source);
|
|
||||||
drop(e.source);
|
|
||||||
std::process::exit(e.kind as i32)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,115 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use nix::sys::signal::Signal;
|
|
||||||
use patch_db::HasModel;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::{Config, ConfigSpec};
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::dependencies::Dependencies;
|
|
||||||
use crate::id::ImageId;
|
|
||||||
use crate::procedure::{PackageProcedure, ProcedureName};
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::status::health_check::HealthCheckId;
|
|
||||||
use crate::util::Version;
|
|
||||||
use crate::volume::Volumes;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Result of running a package's config `get` procedure.
///
/// Serialized in kebab-case for the RPC API.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct ConfigRes {
    // Current configuration, if the package has ever been configured.
    pub config: Option<Config>,
    // Specification describing the shape and constraints of valid configs.
    pub spec: ConfigSpec,
}
|
|
||||||
|
|
||||||
/// The pair of procedures a package declares for reading and writing its config.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
pub struct ConfigActions {
    // Procedure that produces the current config and its spec.
    pub get: PackageProcedure,
    // Procedure that applies a new config to the package.
    pub set: PackageProcedure,
}
|
|
||||||
impl ConfigActions {
|
|
||||||
#[instrument]
|
|
||||||
pub fn validate(&self, volumes: &Volumes, image_ids: &BTreeSet<ImageId>) -> Result<(), Error> {
|
|
||||||
self.get
|
|
||||||
.validate(volumes, image_ids, true)
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Get"))?;
|
|
||||||
self.set
|
|
||||||
.validate(volumes, image_ids, true)
|
|
||||||
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Set"))?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn get(
|
|
||||||
&self,
|
|
||||||
ctx: &RpcContext,
|
|
||||||
pkg_id: &PackageId,
|
|
||||||
pkg_version: &Version,
|
|
||||||
volumes: &Volumes,
|
|
||||||
) -> Result<ConfigRes, Error> {
|
|
||||||
self.get
|
|
||||||
.execute(
|
|
||||||
ctx,
|
|
||||||
pkg_id,
|
|
||||||
pkg_version,
|
|
||||||
ProcedureName::GetConfig,
|
|
||||||
volumes,
|
|
||||||
None::<()>,
|
|
||||||
false,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.and_then(|res| {
|
|
||||||
res.map_err(|e| Error::new(eyre!("{}", e.1), crate::ErrorKind::ConfigGen))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn set(
|
|
||||||
&self,
|
|
||||||
ctx: &RpcContext,
|
|
||||||
pkg_id: &PackageId,
|
|
||||||
pkg_version: &Version,
|
|
||||||
dependencies: &Dependencies,
|
|
||||||
volumes: &Volumes,
|
|
||||||
input: &Config,
|
|
||||||
) -> Result<SetResult, Error> {
|
|
||||||
let res: SetResult = self
|
|
||||||
.set
|
|
||||||
.execute(
|
|
||||||
ctx,
|
|
||||||
pkg_id,
|
|
||||||
pkg_version,
|
|
||||||
ProcedureName::SetConfig,
|
|
||||||
volumes,
|
|
||||||
Some(input),
|
|
||||||
false,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.and_then(|res| {
|
|
||||||
res.map_err(|e| {
|
|
||||||
Error::new(eyre!("{}", e.1), crate::ErrorKind::ConfigRulesViolation)
|
|
||||||
})
|
|
||||||
})?;
|
|
||||||
Ok(SetResult {
|
|
||||||
signal: res.signal,
|
|
||||||
depends_on: res
|
|
||||||
.depends_on
|
|
||||||
.into_iter()
|
|
||||||
.filter(|(pkg, _)| dependencies.0.contains_key(pkg))
|
|
||||||
.collect(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Result of a package's config `set` procedure.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetResult {
    // Optional signal to deliver to the running service so it reloads its
    // config; parsed from / rendered as a string via the custom (de)serializers.
    #[serde(default)]
    #[serde(deserialize_with = "crate::util::serde::deserialize_from_str_opt")]
    #[serde(serialize_with = "crate::util::serde::serialize_display_opt")]
    pub signal: Option<Signal>,
    // Packages this config now depends on, with the health checks that must
    // pass for each dependency.
    pub depends_on: BTreeMap<PackageId, BTreeSet<HealthCheckId>>,
}
|
|
||||||
@@ -1,832 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use futures::future::{BoxFuture, FutureExt};
|
|
||||||
use indexmap::IndexSet;
|
|
||||||
use itertools::Itertools;
|
|
||||||
use patch_db::{DbHandle, LockReceipt, LockTarget, LockTargetId, LockType, Verifier};
|
|
||||||
use rand::SeedableRng;
|
|
||||||
use regex::Regex;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use serde_json::Value;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents};
|
|
||||||
use crate::db::util::WithRevision;
|
|
||||||
use crate::dependencies::{
|
|
||||||
add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
|
|
||||||
BreakTransitiveReceipts, BreakageRes, Dependencies, DependencyConfig, DependencyError,
|
|
||||||
DependencyErrors, DependencyReceipt, TaggedDependencyError, TryHealReceipts,
|
|
||||||
};
|
|
||||||
use crate::install::cleanup::{remove_from_current_dependents_lists, UpdateDependencyReceipts};
|
|
||||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
pub mod action;
|
|
||||||
pub mod spec;
|
|
||||||
pub mod util;
|
|
||||||
|
|
||||||
pub use spec::{ConfigSpec, Defaultable};
|
|
||||||
use util::NumRange;
|
|
||||||
|
|
||||||
use self::action::{ConfigActions, ConfigRes};
|
|
||||||
use self::spec::{ConfigPointerReceipts, PackagePointerSpec, ValueSpecPointer};
|
|
||||||
|
|
||||||
/// A package configuration: a JSON object of key/value pairs.
pub type Config = serde_json::Map<String, Value>;

/// Names the JSON type of a value using this module's spec vocabulary
/// (e.g. "list" rather than "array").
pub trait TypeOf {
    fn type_of(&self) -> &'static str;
}
|
|
||||||
impl TypeOf for Value {
|
|
||||||
fn type_of(&self) -> &'static str {
|
|
||||||
match self {
|
|
||||||
Value::Array(_) => "list",
|
|
||||||
Value::Bool(_) => "boolean",
|
|
||||||
Value::Null => "null",
|
|
||||||
Value::Number(_) => "number",
|
|
||||||
Value::Object(_) => "object",
|
|
||||||
Value::String(_) => "string",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Errors that can occur while generating, matching, or applying a config.
#[derive(Debug, thiserror::Error)]
pub enum ConfigurationError {
    // Config generation exceeded its time budget.
    #[error("Timeout Error")]
    TimeoutError(#[from] TimeoutError),
    // The config value did not match the spec at some path.
    #[error("No Match: {0}")]
    NoMatch(#[from] NoMatchWithPath),
    // An underlying system-level failure; retains the wrapped error's kind
    // when converted to `Error` (see the `From` impl below).
    #[error("System Error: {0}")]
    SystemError(Error),
    // A pointer tried to access data it is not allowed to read.
    #[error("Permission Denied: {0}")]
    PermissionDenied(ValueSpecPointer),
}
|
|
||||||
impl From<ConfigurationError> for Error {
|
|
||||||
fn from(err: ConfigurationError) -> Self {
|
|
||||||
let kind = match &err {
|
|
||||||
ConfigurationError::SystemError(e) => e.kind,
|
|
||||||
_ => crate::ErrorKind::ConfigGen,
|
|
||||||
};
|
|
||||||
crate::Error::new(err, kind)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Marker error: config generation ran past its allotted timeout.
#[derive(Clone, Copy, Debug, thiserror::Error)]
#[error("Timeout Error")]
pub struct TimeoutError;
|
|
||||||
|
|
||||||
/// A spec-mismatch error tagged with the config path where it occurred.
#[derive(Clone, Debug, thiserror::Error)]
pub struct NoMatchWithPath {
    // Path segments, stored innermost-first; `Display` reverses them so the
    // rendered path reads outermost-first (see `prepend`).
    pub path: Vec<String>,
    // The underlying mismatch.
    pub error: MatchError,
}
|
|
||||||
impl NoMatchWithPath {
|
|
||||||
pub fn new(error: MatchError) -> Self {
|
|
||||||
NoMatchWithPath {
|
|
||||||
path: Vec::new(),
|
|
||||||
error,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn prepend(mut self, seg: String) -> Self {
|
|
||||||
self.path.push(seg);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl std::fmt::Display for NoMatchWithPath {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
write!(f, "{}: {}", self.path.iter().rev().join("."), self.error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl From<NoMatchWithPath> for Error {
|
|
||||||
fn from(e: NoMatchWithPath) -> Self {
|
|
||||||
ConfigurationError::from(e).into()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Every way a config value can fail to match its spec.
///
/// The `#[error]` message strings are user-facing; they are surfaced verbatim
/// through `NoMatchWithPath` / `ConfigurationError`.
#[derive(Clone, Debug, thiserror::Error)]
pub enum MatchError {
    // String did not match the spec's regex pattern.
    #[error("String {0:?} Does Not Match Pattern {1}")]
    Pattern(String, Regex),
    // String is not one of the allowed enum values.
    #[error("String {0:?} Is Not In Enum {1:?}")]
    Enum(String, IndexSet<String>),
    // Null supplied for a non-nullable field.
    #[error("Field Is Not Nullable")]
    NotNullable,
    // List length outside the allowed range.
    #[error("Length Mismatch: expected {0}, actual: {1}")]
    LengthMismatch(NumRange<usize>, usize),
    // JSON type differs from the spec's expected type (names per `TypeOf`).
    #[error("Invalid Type: expected {0}, actual: {1}")]
    InvalidType(&'static str, &'static str),
    // Number outside the allowed numeric range.
    #[error("Number Out Of Range: expected {0}, actual: {1}")]
    OutOfRange(NumRange<f64>, f64),
    // Integer expected but a fractional number was supplied.
    #[error("Number Is Not Integral: {0}")]
    NonIntegral(f64),
    // Union tag value is not a known variant.
    #[error("Variant {0:?} Is Not In Union {1:?}")]
    Union(String, IndexSet<String>),
    // Union value is missing its discriminant tag field.
    #[error("Variant Is Missing Tag {0:?}")]
    MissingTag(String),
    // A variant property shadows the union's tag field.
    #[error("Property {0:?} Of Variant {1:?} Conflicts With Union Tag")]
    PropertyMatchesUnionTag(String, String),
    // An object property shadows the map's tag name.
    #[error("Name of Property {0:?} Conflicts With Map Tag Name")]
    PropertyNameMatchesMapTag(String),
    // A value-spec pointer could not be resolved.
    #[error("Pointer Is Invalid: {0}")]
    InvalidPointer(spec::ValueSpecPointer),
    // Object key is not permitted.
    #[error("Object Key Is Invalid: {0}")]
    InvalidKey(String),
    // A list marked unique contains a duplicate value.
    #[error("Value In List Is Not Unique")]
    ListUniquenessViolation,
}
|
|
||||||
|
|
||||||
#[command(rename = "config-spec", cli_only, blocking, display(display_none))]
|
|
||||||
pub fn verify_spec(#[arg] path: PathBuf) -> Result<(), Error> {
|
|
||||||
let mut file = std::fs::File::open(&path)?;
|
|
||||||
let format = match path.extension().and_then(|s| s.to_str()) {
|
|
||||||
Some("yaml") | Some("yml") => IoFormat::Yaml,
|
|
||||||
Some("json") => IoFormat::Json,
|
|
||||||
Some("toml") => IoFormat::Toml,
|
|
||||||
Some("cbor") => IoFormat::Cbor,
|
|
||||||
_ => {
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Unknown file format. Expected one of yaml, json, toml, cbor."),
|
|
||||||
crate::ErrorKind::Deserialization,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let _: ConfigSpec = format.from_reader(&mut file)?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parent command for the `get`/`set` config subcommands; simply forwards the
/// target package id to whichever subcommand runs.
#[command(subcommands(get, set))]
pub fn config(#[arg] id: PackageId) -> Result<PackageId, Error> {
    Ok(id)
}
|
|
||||||
|
|
||||||
/// Lock receipts needed by the config `get` command: the target package's
/// manifest volumes, version, and config actions.
pub struct ConfigGetReceipts {
    manifest_volumes: LockReceipt<crate::volume::Volumes, ()>,
    manifest_version: LockReceipt<crate::util::Version, ()>,
    // `None` when the package declares no config procedures.
    manifest_config: LockReceipt<Option<ConfigActions>, ()>,
}
|
|
||||||
|
|
||||||
impl ConfigGetReceipts {
    /// Registers and immediately acquires all locks needed for a config `get`
    /// on package `id`.
    pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks, id);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Appends the required lock targets to `locks` and returns a closure
    /// that, given the verifier from `lock_all`, yields the verified receipts.
    ///
    /// NOTE(review): all three lockers use `LockType::Write` even though `get`
    /// only reads — presumably to serialize against concurrent `set`; confirm
    /// before downgrading to `Read`.
    pub fn setup(
        locks: &mut Vec<LockTargetId>,
        id: &PackageId,
    ) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
        // /package-data/<id>/installed/manifest/version
        let manifest_version = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.manifest().version())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        // /package-data/<id>/installed/manifest/volumes
        let manifest_volumes = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.manifest().volumes())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        // /package-data/<id>/installed/manifest/config
        let manifest_config = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.manifest().config())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                manifest_volumes: manifest_volumes.verify(skeleton_key)?,
                manifest_version: manifest_version.verify(skeleton_key)?,
                manifest_config: manifest_config.verify(skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
#[command(display(display_serializable))]
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn get(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[parent_data] id: PackageId,
|
|
||||||
#[allow(unused_variables)]
|
|
||||||
#[arg(long = "format")]
|
|
||||||
format: Option<IoFormat>,
|
|
||||||
) -> Result<ConfigRes, Error> {
|
|
||||||
let mut db = ctx.db.handle();
|
|
||||||
let receipts = ConfigGetReceipts::new(&mut db, &id).await?;
|
|
||||||
let action = receipts
|
|
||||||
.manifest_config
|
|
||||||
.get(&mut db)
|
|
||||||
.await?
|
|
||||||
.ok_or_else(|| Error::new(eyre!("{} has no config", id), crate::ErrorKind::NotFound))?;
|
|
||||||
|
|
||||||
let volumes = receipts.manifest_volumes.get(&mut db).await?;
|
|
||||||
let version = receipts.manifest_version.get(&mut db).await?;
|
|
||||||
action.get(&ctx, &id, &version, &volumes).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// RPC command front-end for setting a package's config.
///
/// Only bundles the CLI/RPC arguments into a tuple for the `set_impl` /
/// `set_dry` subcommands; no work happens here. `config` arrives on stdin,
/// `timeout` is unwrapped from its serde newtype, and `expire-id` tags the
/// resulting revision.
#[command(
    subcommands(self(set_impl(async, context(RpcContext))), set_dry),
    display(display_none)
)]
#[instrument]
pub fn set(
    #[parent_data] id: PackageId,
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
    #[arg(long = "timeout")] timeout: Option<crate::util::serde::Duration>,
    #[arg(stdin, parse(parse_stdin_deserializable))] config: Option<Config>,
    #[arg(rename = "expire-id", long = "expire-id")] expire_id: Option<String>,
) -> Result<(PackageId, Option<Config>, Option<Duration>, Option<String>), Error> {
    Ok((id, config, timeout.map(|d| *d), expire_id))
}
|
|
||||||
|
|
||||||
/// The new locking scheme finds all possible locks up front and lifts them
/// into one bundle. The bundle is then passed down into the functions that
/// touch the db; instead of taking locks deep in the call tree, the locks are
/// already held and operations can run directly against the db.
///
/// Each `LockReceipt` has two type parameters: the value type read/written at
/// the locked path, and the key type that must be supplied on get/set to fill
/// in wildcard (`star()`) segments of the path — here `String` is the package
/// id segment, and `(String, String)` is (dependent id, dependency id).
pub struct ConfigReceipts {
    pub dependency_receipt: DependencyReceipt,
    pub config_receipts: ConfigPointerReceipts,
    pub update_dependency_receipts: UpdateDependencyReceipts,
    pub try_heal_receipts: TryHealReceipts,
    pub break_transitive_receipts: BreakTransitiveReceipts,
    // /package-data/*/installed/status/configured
    configured: LockReceipt<bool, String>,
    // /package-data/*/installed/manifest/config
    config_actions: LockReceipt<ConfigActions, String>,
    // /package-data/*/installed/manifest/dependencies
    dependencies: LockReceipt<Dependencies, String>,
    volumes: LockReceipt<crate::volume::Volumes, String>,
    version: LockReceipt<crate::util::Version, String>,
    manifest: LockReceipt<Manifest, String>,
    system_pointers: LockReceipt<Vec<spec::SystemPointerSpec>, String>,
    pub current_dependents: LockReceipt<CurrentDependents, String>,
    pub current_dependencies: LockReceipt<CurrentDependencies, String>,
    dependency_errors: LockReceipt<DependencyErrors, String>,
    // Keyed by (dependent package id, dependency package id).
    manifest_dependencies_config: LockReceipt<DependencyConfig, (String, String)>,
}
|
|
||||||
|
|
||||||
impl ConfigReceipts {
    /// Registers and immediately acquires every lock needed by `configure`.
    pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Appends all lock targets to `locks` (including those of the nested
    /// receipt bundles) and returns a closure that verifies them against the
    /// `Verifier` produced by `lock_all`.
    ///
    /// Every target below uses `star()` for the package-id segment, so the
    /// locks cover all packages; the concrete id is supplied at get/set time.
    pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
        // Nested receipt bundles register their own targets first.
        let dependency_receipt = DependencyReceipt::setup(locks);
        let config_receipts = ConfigPointerReceipts::setup(locks);
        let update_dependency_receipts = UpdateDependencyReceipts::setup(locks);
        let break_transitive_receipts = BreakTransitiveReceipts::setup(locks);
        let try_heal_receipts = TryHealReceipts::setup(locks);

        // /package-data/*/installed/status/configured (written by configure)
        let configured: LockTarget<bool, String> = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.status().configured())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        // /package-data/*/installed/manifest/config (read-only)
        let config_actions = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .and_then(|x| x.manifest().config())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        // /package-data/*/installed/manifest/dependencies (read-only)
        let dependencies = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.manifest().dependencies())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        // /package-data/*/installed/manifest/volumes (read-only)
        let volumes = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.manifest().volumes())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        // /package-data/*/installed/manifest/version (read-only)
        let version = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.manifest().version())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        // /package-data/*/installed/manifest (read-only, whole manifest)
        let manifest = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.manifest())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        // /package-data/*/installed/system-pointers (rewritten by configure)
        let system_pointers = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.system_pointers())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        // /package-data/*/installed/current-dependents (updated by configure)
        let current_dependents = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.current_dependents())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        // /package-data/*/installed/current-dependencies (updated by configure)
        let current_dependencies = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.current_dependencies())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        // /package-data/*/installed/status/dependency-errors (recomputed)
        let dependency_errors = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.status().dependency_errors())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        // /package-data/*/installed/manifest/dependencies/*/config — keyed by
        // (dependent, dependency).
        let manifest_dependencies_config = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .and_then(|x| x.manifest().dependencies().star().config())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        // Verify everything once the bundle of locks has actually been taken.
        move |skeleton_key| {
            Ok(Self {
                dependency_receipt: dependency_receipt(skeleton_key)?,
                config_receipts: config_receipts(skeleton_key)?,
                try_heal_receipts: try_heal_receipts(skeleton_key)?,
                break_transitive_receipts: break_transitive_receipts(skeleton_key)?,
                update_dependency_receipts: update_dependency_receipts(skeleton_key)?,
                configured: configured.verify(skeleton_key)?,
                config_actions: config_actions.verify(skeleton_key)?,
                dependencies: dependencies.verify(skeleton_key)?,
                volumes: volumes.verify(skeleton_key)?,
                version: version.verify(skeleton_key)?,
                manifest: manifest.verify(skeleton_key)?,
                system_pointers: system_pointers.verify(skeleton_key)?,
                current_dependents: current_dependents.verify(skeleton_key)?,
                current_dependencies: current_dependencies.verify(skeleton_key)?,
                dependency_errors: dependency_errors.verify(skeleton_key)?,
                manifest_dependencies_config: manifest_dependencies_config.verify(skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// Dry-run variant of config `set`: runs the full configure pass inside a
/// transaction, collects which dependents would break, then aborts the
/// transaction so no change persists. Returns the collected breakages.
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn set_dry(
    #[context] ctx: RpcContext,
    #[parent_data] (id, config, timeout, _): (
        PackageId,
        Option<Config>,
        Option<Duration>,
        Option<String>,
    ),
) -> Result<BreakageRes, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    let mut breakages = BTreeMap::new();
    let locks = ConfigReceipts::new(&mut tx).await?;
    configure(
        &ctx,
        &mut tx,
        &id,
        config,
        &timeout,
        true, // dry_run: skip the package's actual `set` procedure
        &mut BTreeMap::new(),
        &mut breakages,
        &locks,
    )
    .await?;

    // NOTE(review): `configure` already sets this flag; this second write
    // appears redundant and is discarded by the abort below anyway — confirm
    // before removing.
    locks.configured.set(&mut tx, true, &id).await?;
    // Dry run: throw away every change made inside the transaction.
    tx.abort().await?;
    Ok(BreakageRes(breakages))
}
|
|
||||||
|
|
||||||
/// Applies a config change for real: runs the configure pass inside a
/// transaction and commits it, returning the resulting db revision (tagged
/// with `expire_id` for client cache invalidation).
#[instrument(skip(ctx))]
pub async fn set_impl(
    ctx: RpcContext,
    (id, config, timeout, expire_id): (PackageId, Option<Config>, Option<Duration>, Option<String>),
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    let mut breakages = BTreeMap::new();
    let locks = ConfigReceipts::new(&mut tx).await?;
    configure(
        &ctx,
        &mut tx,
        &id,
        config,
        &timeout,
        false, // not a dry run: the package's `set` procedure is executed
        &mut BTreeMap::new(),
        &mut breakages,
        &locks,
    )
    .await?;
    Ok(WithRevision {
        response: (),
        revision: tx.commit(expire_id).await?,
    })
}
|
|
||||||
|
|
||||||
/// Top-level entry point for (re)configuring a package: runs the recursive
/// configure pass over the package and its dependents, then marks the package
/// as configured.
#[instrument(skip(ctx, db, receipts))]
pub async fn configure<'a, Db: DbHandle>(
    ctx: &RpcContext,
    db: &'a mut Db,
    id: &PackageId,
    // `None` means keep the existing config (or generate defaults from spec).
    config: Option<Config>,
    timeout: &Option<Duration>,
    // When true, skips the package's `set` procedure (see `configure_rec`).
    dry_run: bool,
    // Cache of configs already resolved during this pass, shared across the recursion.
    overrides: &mut BTreeMap<PackageId, Config>,
    // Accumulates dependents broken by this change.
    breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
    receipts: &ConfigReceipts,
) -> Result<(), Error> {
    configure_rec(
        ctx, db, id, config, timeout, dry_run, overrides, breakages, receipts,
    )
    .await?;
    // NOTE(review): `id` is already `&PackageId`, so `&id` passes a double
    // reference here — presumably accepted via Borrow/Deref on the receipt
    // API; confirm against LockReceipt::set.
    receipts.configured.set(db, true, &id).await?;
    Ok(())
}
|
|
||||||
|
|
||||||
/// Recursive body of `configure`. Returned as a `BoxFuture` because the
/// function calls itself for dependents (async recursion requires boxing).
///
/// Steps, in order: load the package's config action/dependencies/volumes/
/// version; fetch the old config and spec; pick the new config (explicit >
/// old > generated default); validate and dereference pointers; rebuild the
/// pointer backreferences and current-dependency set; run the package's `set`
/// procedure (unless `dry_run`); rewrite the dependents/dependencies indexes
/// and dependency errors; then re-check each dependent's config requirements
/// and recursively reconfigure dependents whose config pointers into this
/// package changed. Finally, delivers the reload signal to the running
/// service, if the `set` procedure requested one.
#[instrument(skip(ctx, db, receipts))]
pub fn configure_rec<'a, Db: DbHandle>(
    ctx: &'a RpcContext,
    db: &'a mut Db,
    id: &'a PackageId,
    config: Option<Config>,
    timeout: &'a Option<Duration>,
    dry_run: bool,
    overrides: &'a mut BTreeMap<PackageId, Config>,
    breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
    receipts: &'a ConfigReceipts,
) -> BoxFuture<'a, Result<(), Error>> {
    async move {
        // fetch data from db
        let action = receipts
            .config_actions
            .get(db, id)
            .await?
            .ok_or_else(not_found)?;
        let dependencies = receipts
            .dependencies
            .get(db, id)
            .await?
            .ok_or_else(not_found)?;
        let volumes = receipts.volumes.get(db, id).await?.ok_or_else(not_found)?;
        // True when the package has never been successfully configured.
        let is_needs_config = !receipts
            .configured
            .get(db, id)
            .await?
            .ok_or_else(not_found)?;
        let version = receipts.version.get(db, id).await?.ok_or_else(not_found)?;

        // get current config and current spec
        let ConfigRes {
            config: old_config,
            spec,
        } = action.get(ctx, id, &version, &volumes).await?;

        // determine new config to use: explicit input, else the existing
        // config, else defaults generated from the spec.
        let mut config = if let Some(config) = config.or_else(|| old_config.clone()) {
            config
        } else {
            spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)?
        };

        let manifest = receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;

        spec.validate(&manifest)?;
        spec.matches(&config)?; // check that new config matches spec
        spec.update(
            ctx,
            db,
            &manifest,
            &*overrides,
            &mut config,
            &receipts.config_receipts,
        )
        .await?; // dereference pointers in the new config

        // create backreferences to pointers: clear the stored system pointers
        // and seed current-dependencies with every required manifest dependency.
        let mut sys = receipts
            .system_pointers
            .get(db, &id)
            .await?
            .ok_or_else(not_found)?;
        sys.truncate(0);
        let mut current_dependencies: CurrentDependencies = CurrentDependencies(
            dependencies
                .0
                .iter()
                .filter_map(|(id, info)| {
                    if info.requirement.required() {
                        Some((id.clone(), CurrentDependencyInfo::default()))
                    } else {
                        None
                    }
                })
                .collect(),
        );
        // Record each pointer in the new config: package pointers attach to
        // the corresponding dependency entry, system pointers are re-stored.
        for ptr in spec.pointers(&config)? {
            match ptr {
                ValueSpecPointer::Package(pkg_ptr) => {
                    if let Some(current_dependency) =
                        current_dependencies.0.get_mut(pkg_ptr.package_id())
                    {
                        current_dependency.pointers.push(pkg_ptr);
                    } else {
                        current_dependencies.0.insert(
                            pkg_ptr.package_id().to_owned(),
                            CurrentDependencyInfo {
                                pointers: vec![pkg_ptr],
                                health_checks: BTreeSet::new(),
                            },
                        );
                    }
                }
                ValueSpecPointer::System(s) => sys.push(s),
            }
        }
        receipts.system_pointers.set(db, sys, &id).await?;

        let signal = if !dry_run {
            // run config action
            let res = action
                .set(ctx, id, &version, &dependencies, &volumes, &config)
                .await?;

            // track dependencies with no pointers
            for (package_id, health_checks) in res.depends_on.into_iter() {
                if let Some(current_dependency) = current_dependencies.0.get_mut(&package_id) {
                    current_dependency.health_checks.extend(health_checks);
                } else {
                    current_dependencies.0.insert(
                        package_id,
                        CurrentDependencyInfo {
                            pointers: Vec::new(),
                            health_checks,
                        },
                    );
                }
            }

            // track dependency health checks: drop (with a warning) any
            // dependency the manifest does not declare.
            current_dependencies = current_dependencies.map(|x| {
                x.into_iter()
                    .filter(|(dep_id, _)| {
                        if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) {
                            tracing::warn!("Illegal dependency specified: {}", dep_id);
                            false
                        } else {
                            true
                        }
                    })
                    .collect()
            });
            res.signal
        } else {
            // Dry run: the set procedure was skipped, so there is no signal.
            None
        };

        // update dependencies: swap this package out of every old dependency's
        // dependents list and into every new one's.
        let prev_current_dependencies = receipts
            .current_dependencies
            .get(db, &id)
            .await?
            .unwrap_or_default();
        remove_from_current_dependents_lists(
            db,
            id,
            &prev_current_dependencies,
            &receipts.current_dependents,
        )
        .await?; // remove previous
        add_dependent_to_current_dependents_lists(
            db,
            id,
            &current_dependencies,
            &receipts.current_dependents,
        )
        .await?; // add new
        // A package never depends on itself.
        current_dependencies.0.remove(id);
        receipts
            .current_dependencies
            .set(db, current_dependencies.clone(), &id)
            .await?;

        // Recompute dependency errors from scratch for the new dependency set.
        let errs = receipts
            .dependency_errors
            .get(db, &id)
            .await?
            .ok_or_else(not_found)?;
        tracing::warn!("Dependency Errors: {:?}", errs);
        let errs = DependencyErrors::init(
            ctx,
            db,
            &manifest,
            &current_dependencies,
            &receipts.dependency_receipt.try_heal,
        )
        .await?;
        receipts.dependency_errors.set(db, errs, &id).await?;

        // cache current config for dependents
        overrides.insert(id.clone(), config.clone());

        // handle dependents
        let dependents = receipts
            .current_dependents
            .get(db, id)
            .await?
            .ok_or_else(not_found)?;
        // Old and new configs as JSON values, for pointer-diffing below; a
        // never-configured package is treated as having had no old config.
        let prev = if is_needs_config { None } else { old_config }
            .map(Value::Object)
            .unwrap_or_default();
        let next = Value::Object(config.clone());
        for (dependent, dep_info) in dependents.0.iter().filter(|(dep_id, _)| dep_id != &id) {
            // check if config passes dependent check
            if let Some(cfg) = receipts
                .manifest_dependencies_config
                .get(db, (&dependent, &id))
                .await?
            {
                let manifest = receipts
                    .manifest
                    .get(db, &dependent)
                    .await?
                    .ok_or_else(not_found)?;
                if let Err(error) = cfg
                    .check(
                        ctx,
                        dependent,
                        &manifest.version,
                        &manifest.volumes,
                        id,
                        &config,
                    )
                    .await?
                {
                    let dep_err = DependencyError::ConfigUnsatisfied { error };
                    break_transitive(
                        db,
                        dependent,
                        id,
                        dep_err,
                        breakages,
                        &receipts.break_transitive_receipts,
                    )
                    .await?;
                }

                // handle backreferences: if a dependent's config pointer into
                // this package now resolves differently, reconfigure it too.
                for ptr in &dep_info.pointers {
                    if let PackagePointerSpec::Config(cfg_ptr) = ptr {
                        if cfg_ptr.select(&next) != cfg_ptr.select(&prev) {
                            if let Err(e) = configure_rec(
                                ctx, db, dependent, None, timeout, dry_run, overrides, breakages,
                                receipts,
                            )
                            .await
                            {
                                // Rules violations break the dependent; any
                                // other error aborts the whole pass.
                                if e.kind == crate::ErrorKind::ConfigRulesViolation {
                                    break_transitive(
                                        db,
                                        dependent,
                                        id,
                                        DependencyError::ConfigUnsatisfied {
                                            error: format!("{}", e),
                                        },
                                        breakages,
                                        &receipts.break_transitive_receipts,
                                    )
                                    .await?;
                                } else {
                                    return Err(e);
                                }
                            }
                        }
                    }
                }
                heal_all_dependents_transitive(ctx, db, id, &receipts.dependency_receipt).await?;
            }
        }

        if let Some(signal) = signal {
            match ctx.managers.get(&(id.clone(), version.clone())).await {
                None => {
                    // in theory this should never happen, which indicates this function should be moved behind the
                    // Manager interface
                    return Err(Error::new(
                        eyre!("Manager Not Found for package being configured"),
                        crate::ErrorKind::Incoherent,
                    ));
                }
                Some(m) => {
                    m.signal(&signal).await?;
                }
            }
        }

        Ok(())
    }
    .boxed()
}
|
|
||||||
#[instrument]
|
|
||||||
pub fn not_found() -> Error {
|
|
||||||
Error::new(eyre!("Could not find"), crate::ErrorKind::Incoherent)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// We want to have a double check that the paths are what we expect them to be.
|
|
||||||
/// Found that earlier the paths where not what we expected them to be.
|
|
||||||
#[tokio::test]
|
|
||||||
async fn ensure_creation_of_config_paths_makes_sense() {
|
|
||||||
let mut fake = patch_db::test_utils::NoOpDb();
|
|
||||||
let config_locks = ConfigReceipts::new(&mut fake).await.unwrap();
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.configured.lock.glob),
|
|
||||||
"/package-data/*/installed/status/configured"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.config_actions.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/config"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.dependencies.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/dependencies"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.volumes.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/volumes"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.version.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/version"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.volumes.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/volumes"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.manifest.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.manifest.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.system_pointers.lock.glob),
|
|
||||||
"/package-data/*/installed/system-pointers"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.current_dependents.lock.glob),
|
|
||||||
"/package-data/*/installed/current-dependents"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.dependency_errors.lock.glob),
|
|
||||||
"/package-data/*/installed/status/dependency-errors"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.manifest_dependencies_config.lock.glob),
|
|
||||||
"/package-data/*/installed/manifest/dependencies/*/config"
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
&format!("{}", config_locks.system_pointers.lock.glob),
|
|
||||||
"/package-data/*/installed/system-pointers"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
@@ -1,368 +0,0 @@
|
|||||||
use std::borrow::Cow;
|
|
||||||
use std::ops::{Bound, RangeBounds, RangeInclusive};
|
|
||||||
|
|
||||||
use rand::distributions::Distribution;
|
|
||||||
use rand::Rng;
|
|
||||||
use serde_json::Value;
|
|
||||||
|
|
||||||
use super::Config;
|
|
||||||
|
|
||||||
pub const STATIC_NULL: Value = Value::Null;
|
|
||||||
|
|
||||||
/// A weighted set of inclusive character ranges used for random character
/// generation.
///
/// Field 0: the ranges, each paired with the number of characters it
/// contains. Field 1: the total character count across all ranges (the sum
/// of the per-range counts) — kept so `gen` can pick uniformly.
#[derive(Clone, Debug)]
pub struct CharSet(pub Vec<(RangeInclusive<char>, usize)>, usize);
|
|
||||||
impl CharSet {
    /// Whether `c` falls inside any of the set's ranges.
    pub fn contains(&self, c: &char) -> bool {
        self.0.iter().any(|r| r.0.contains(c))
    }
    /// Draw a uniformly random character from the set.
    ///
    /// Picks an index in `0..self.1` (the total character count), walks the
    /// ranges subtracting each range's size until the index lands inside one,
    /// then samples uniformly within that range.
    ///
    /// NOTE(review): `gen_range(0, self.1)` is the pre-0.7 two-argument rand
    /// API — confirm against the rand version pinned in Cargo.toml.
    pub fn gen<R: Rng>(&self, rng: &mut R) -> char {
        let mut idx = rng.gen_range(0, self.1);
        for r in &self.0 {
            if idx < r.1 {
                // Sample a code point in the chosen range and convert back to
                // char; the unwrap assumes every range covers only valid
                // Unicode scalar values (true for the ASCII ranges produced
                // by the `Deserialize` impl).
                return std::convert::TryFrom::try_from(
                    rand::distributions::Uniform::new_inclusive(
                        u32::from(*r.0.start()),
                        u32::from(*r.0.end()),
                    )
                    .sample(rng),
                )
                .unwrap();
            } else {
                idx -= r.1;
            }
        }
        // Unreachable provided `self.1` equals the sum of the range sizes.
        unreachable!()
    }
}
|
|
||||||
impl Default for CharSet {
    /// Defaults to the printable ASCII range `'!'..='~'` (94 characters,
    /// space excluded).
    fn default() -> Self {
        CharSet(vec![('!'..='~', 94)], 94)
    }
}
|
|
||||||
impl<'de> serde::de::Deserialize<'de> for CharSet {
    /// Parse a charset specification string such as `"a-z,A-Z,0-9,_"`.
    ///
    /// Grammar: comma-separated items, each either a single character or a
    /// `start-end` range. The literal characters `,` and `-` may themselves
    /// be set members depending on position (e.g. a leading `-`, or `,` right
    /// after a range's `-`).
    ///
    /// Parser state: `a` holds a pending range start, `b` a pending range
    /// end, and `in_range` records that a `-` separator has been seen.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        let mut res = Vec::new();
        // Running total of characters across all committed ranges.
        let mut len = 0;
        let mut a: Option<char> = None;
        let mut b: Option<char> = None;
        let mut in_range = false;
        for c in s.chars() {
            match c {
                // A comma normally terminates the current item.
                ',' => match (a, b, in_range) {
                    // Complete `start-end` range: validate and commit it.
                    (Some(start), Some(end), _) => {
                        // NOTE(review): only the end of the range is checked
                        // for being ASCII — presumably the start is assumed
                        // to be <= end and therefore ASCII too; confirm.
                        if !end.is_ascii() {
                            return Err(serde::de::Error::custom("Invalid Character"));
                        }
                        if start >= end {
                            return Err(serde::de::Error::custom("Invalid Bounds"));
                        }
                        let l = u32::from(end) - u32::from(start) + 1;
                        res.push((start..=end, l as usize));
                        len += l as usize;
                        a = None;
                        b = None;
                        in_range = false;
                    }
                    // Single pending character: commit it as a 1-char range.
                    (Some(start), None, false) => {
                        len += 1;
                        res.push((start..=start, 1));
                        a = None;
                    }
                    // Right after `x-`, the comma is the range's end char.
                    (Some(_), None, true) => {
                        b = Some(',');
                    }
                    // At the start of an item, the comma is a literal member.
                    (None, None, false) => {
                        a = Some(',');
                    }
                    _ => {
                        return Err(serde::de::Error::custom("Syntax Error"));
                    }
                },
                '-' => {
                    if a.is_none() {
                        // Leading '-' is a literal member.
                        a = Some('-');
                    } else if !in_range {
                        // First '-' after a start character begins a range.
                        in_range = true;
                    } else if b.is_none() {
                        // Second '-' is the range's end character.
                        b = Some('-')
                    } else {
                        return Err(serde::de::Error::custom("Syntax Error"));
                    }
                }
                _ => {
                    if a.is_none() {
                        a = Some(c);
                    } else if in_range && b.is_none() {
                        b = Some(c);
                    } else {
                        return Err(serde::de::Error::custom("Syntax Error"));
                    }
                }
            }
        }
        // Flush whatever item was still pending when the input ended
        // (same commit logic as the ',' arm above).
        match (a, b) {
            (Some(start), Some(end)) => {
                if !end.is_ascii() {
                    return Err(serde::de::Error::custom("Invalid Character"));
                }
                if start >= end {
                    return Err(serde::de::Error::custom("Invalid Bounds"));
                }
                let l = u32::from(end) - u32::from(start) + 1;
                res.push((start..=end, l as usize));
                len += l as usize;
            }
            (Some(c), None) => {
                len += 1;
                res.push((c..=c, 1));
            }
            _ => (),
        }

        Ok(CharSet(res, len))
    }
}
|
|
||||||
impl serde::ser::Serialize for CharSet {
    /// Serialize back to the compact string form accepted by the
    /// `Deserialize` impl: single-character ranges are written bare,
    /// multi-character ranges as `start-end`, items joined with commas.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        let mut out = String::new();
        for (i, (range, count)) in self.0.iter().enumerate() {
            if i > 0 {
                out.push(',');
            }
            out.push(*range.start());
            if *count != 1 {
                out.push('-');
                out.push(*range.end());
            }
        }
        serializer.serialize_str(&out)
    }
}
|
|
||||||
|
|
||||||
pub mod serde_regex {
|
|
||||||
use regex::Regex;
|
|
||||||
use serde::*;
|
|
||||||
|
|
||||||
pub fn serialize<S>(regex: &Regex, serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: Serializer,
|
|
||||||
{
|
|
||||||
<&str>::serialize(®ex.as_str(), serializer)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn deserialize<'de, D>(deserializer: D) -> Result<Regex, D::Error>
|
|
||||||
where
|
|
||||||
D: Deserializer<'de>,
|
|
||||||
{
|
|
||||||
let s = String::deserialize(deserializer)?;
|
|
||||||
Regex::new(&s).map_err(|e| de::Error::custom(e))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A numeric interval stored as a pair of `Bound`s; parsed from and rendered
/// to interval notation (see the `Deserialize` and `Display` impls).
#[derive(Clone, Debug)]
pub struct NumRange<T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd>(
    pub (Bound<T>, Bound<T>),
);
|
|
||||||
impl<T> std::ops::Deref for NumRange<T>
where
    T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd,
{
    type Target = (Bound<T>, Bound<T>);

    /// Expose the underlying bound pair, which also makes the tuple's
    /// `RangeBounds` methods (`start_bound`/`end_bound`) available via
    /// auto-deref — the `Display` impl relies on this.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
|
|
||||||
impl<'de, T> serde::de::Deserialize<'de> for NumRange<T>
where
    T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd,
    <T as std::str::FromStr>::Err: std::fmt::Display,
{
    /// Parse interval notation such as `"[0,10)"` or `"(*,5]"`:
    /// `(` / `)` mark exclusive bounds, `[` / `]` inclusive bounds, and `*`
    /// an unbounded side; the two bounds are separated by a comma.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        let mut split = s.split(",");
        // Left bound: dispatch on the opening bracket character.
        let start = split
            .next()
            .map(|s| match s.get(..1) {
                Some("(") => match s.get(1..2) {
                    Some("*") => Ok(Bound::Unbounded),
                    _ => s[1..]
                        .trim()
                        .parse()
                        .map(Bound::Excluded)
                        .map_err(|e| serde::de::Error::custom(e)),
                },
                Some("[") => s[1..]
                    .trim()
                    .parse()
                    .map(Bound::Included)
                    .map_err(|e| serde::de::Error::custom(e)),
                _ => Err(serde::de::Error::custom(format!(
                    "Could not parse left bound: {}",
                    s
                ))),
            })
            .transpose()?
            // `split` always yields at least one segment, so this is Some.
            .unwrap();
        // Right bound: dispatch on the closing bracket character.
        // NOTE(review): `s.len() - 1` underflows when the segment after the
        // comma is empty (e.g. `"[0,"`), which panics in debug builds —
        // confirm inputs are validated upstream.
        let end = split
            .next()
            .map(|s| match s.get(s.len() - 1..) {
                Some(")") => match s.get(s.len() - 2..s.len() - 1) {
                    Some("*") => Ok(Bound::Unbounded),
                    _ => s[..s.len() - 1]
                        .trim()
                        .parse()
                        .map(Bound::Excluded)
                        .map_err(|e| serde::de::Error::custom(e)),
                },
                Some("]") => s[..s.len() - 1]
                    .trim()
                    .parse()
                    .map(Bound::Included)
                    .map_err(|e| serde::de::Error::custom(e)),
                _ => Err(serde::de::Error::custom(format!(
                    "Could not parse right bound: {}",
                    s
                ))),
            })
            .transpose()?
            // No comma at all: the right side is treated as unbounded.
            .unwrap_or(Bound::Unbounded);

        Ok(NumRange((start, end)))
    }
}
|
|
||||||
impl<T> std::fmt::Display for NumRange<T>
|
|
||||||
where
|
|
||||||
T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd,
|
|
||||||
{
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
match self.start_bound() {
|
|
||||||
Bound::Excluded(n) => write!(f, "({},", n)?,
|
|
||||||
Bound::Included(n) => write!(f, "[{},", n)?,
|
|
||||||
Bound::Unbounded => write!(f, "(*,")?,
|
|
||||||
};
|
|
||||||
match self.end_bound() {
|
|
||||||
Bound::Excluded(n) => write!(f, "{})", n),
|
|
||||||
Bound::Included(n) => write!(f, "{}]", n),
|
|
||||||
Bound::Unbounded => write!(f, "*)"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<T> serde::ser::Serialize for NumRange<T>
|
|
||||||
where
|
|
||||||
T: std::str::FromStr + std::fmt::Display + std::cmp::PartialOrd,
|
|
||||||
{
|
|
||||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: serde::ser::Serializer,
|
|
||||||
{
|
|
||||||
<&str>::serialize(&format!("{}", self).as_str(), serializer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Rule describing when two config values count as "the same" for
/// uniqueness purposes (see `UniqueBy::eq`).
#[derive(Clone, Debug)]
pub enum UniqueBy {
    /// Equal if any sub-rule considers them equal.
    Any(Vec<UniqueBy>),
    /// Equal only if every sub-rule considers them equal.
    All(Vec<UniqueBy>),
    /// Equal if the two configs agree on this key.
    Exactly(String),
    /// Never considered equal — uniqueness is not enforced.
    NotUnique,
}
|
|
||||||
impl UniqueBy {
|
|
||||||
pub fn eq(&self, lhs: &Config, rhs: &Config) -> bool {
|
|
||||||
match self {
|
|
||||||
UniqueBy::Any(any) => any.iter().any(|u| u.eq(lhs, rhs)),
|
|
||||||
UniqueBy::All(all) => all.iter().all(|u| u.eq(lhs, rhs)),
|
|
||||||
UniqueBy::Exactly(key) => lhs.get(key) == rhs.get(key),
|
|
||||||
UniqueBy::NotUnique => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Default for UniqueBy {
    /// Uniqueness is not enforced unless explicitly configured.
    fn default() -> Self {
        UniqueBy::NotUnique
    }
}
|
|
||||||
impl<'de> serde::de::Deserialize<'de> for UniqueBy {
|
|
||||||
fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
|
|
||||||
struct Visitor;
|
|
||||||
impl<'de> serde::de::Visitor<'de> for Visitor {
|
|
||||||
type Value = UniqueBy;
|
|
||||||
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
|
|
||||||
write!(formatter, "a key, an \"any\" object, or an \"all\" object")
|
|
||||||
}
|
|
||||||
fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> {
|
|
||||||
Ok(UniqueBy::Exactly(v.to_owned()))
|
|
||||||
}
|
|
||||||
fn visit_string<E: serde::de::Error>(self, v: String) -> Result<Self::Value, E> {
|
|
||||||
Ok(UniqueBy::Exactly(v))
|
|
||||||
}
|
|
||||||
fn visit_map<A: serde::de::MapAccess<'de>>(
|
|
||||||
self,
|
|
||||||
mut map: A,
|
|
||||||
) -> Result<Self::Value, A::Error> {
|
|
||||||
let mut variant = None;
|
|
||||||
while let Some(key) = map.next_key::<Cow<str>>()? {
|
|
||||||
match key.as_ref() {
|
|
||||||
"any" => {
|
|
||||||
return Ok(UniqueBy::Any(map.next_value()?));
|
|
||||||
}
|
|
||||||
"all" => {
|
|
||||||
return Ok(UniqueBy::All(map.next_value()?));
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
variant = Some(key);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(serde::de::Error::unknown_variant(
|
|
||||||
variant.unwrap_or_default().as_ref(),
|
|
||||||
&["any", "all"],
|
|
||||||
))
|
|
||||||
}
|
|
||||||
fn visit_unit<E: serde::de::Error>(self) -> Result<Self::Value, E> {
|
|
||||||
Ok(UniqueBy::NotUnique)
|
|
||||||
}
|
|
||||||
fn visit_none<E: serde::de::Error>(self) -> Result<Self::Value, E> {
|
|
||||||
Ok(UniqueBy::NotUnique)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
deserializer.deserialize_any(Visitor)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl serde::ser::Serialize for UniqueBy {
|
|
||||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: serde::ser::Serializer,
|
|
||||||
{
|
|
||||||
use serde::ser::SerializeMap;
|
|
||||||
|
|
||||||
match self {
|
|
||||||
UniqueBy::Any(any) => {
|
|
||||||
let mut map = serializer.serialize_map(Some(1))?;
|
|
||||||
map.serialize_key("any")?;
|
|
||||||
map.serialize_value(any)?;
|
|
||||||
map.end()
|
|
||||||
}
|
|
||||||
UniqueBy::All(all) => {
|
|
||||||
let mut map = serializer.serialize_map(Some(1))?;
|
|
||||||
map.serialize_key("all")?;
|
|
||||||
map.serialize_value(all)?;
|
|
||||||
map.end()
|
|
||||||
}
|
|
||||||
UniqueBy::Exactly(key) => serializer.serialize_str(key),
|
|
||||||
UniqueBy::NotUnique => serializer.serialize_unit(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,169 +0,0 @@
|
|||||||
use std::fs::File;
|
|
||||||
use std::io::BufReader;
|
|
||||||
use std::net::{Ipv4Addr, SocketAddr};
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use cookie_store::CookieStore;
|
|
||||||
use reqwest::Proxy;
|
|
||||||
use reqwest_cookie_store::CookieStoreMutex;
|
|
||||||
use rpc_toolkit::reqwest::{Client, Url};
|
|
||||||
use rpc_toolkit::url::Host;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::util::config::{load_config_from_paths, local_config_path};
|
|
||||||
use crate::ResultExt;
|
|
||||||
|
|
||||||
/// On-disk configuration for the CLI, merged from the `--config` files, the
/// local config path, and the default config path (see `CliContext::init`).
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct CliContextConfig {
    // RPC bind address of the local server; used to derive the target URL
    // when no explicit `host` is configured.
    pub bind_rpc: Option<SocketAddr>,
    // Base URL of the server to talk to.
    pub host: Option<Url>,
    // Optional proxy URL; deserialized from a string — presumably so an
    // empty/absent value is tolerated (see `test_cli_proxy_empty`).
    #[serde(deserialize_with = "crate::util::serde::deserialize_from_str_opt")]
    #[serde(default)]
    pub proxy: Option<Url>,
    // Where session cookies are persisted between CLI invocations.
    pub cookie_path: Option<PathBuf>,
}
|
|
||||||
|
|
||||||
/// Shared state behind a `CliContext`.
#[derive(Debug)]
pub struct CliContextSeed {
    // Server base URL (scheme/host/port).
    pub base_url: Url,
    // `base_url` with `/rpc/v1` appended (built in `CliContext::init`).
    pub rpc_url: Url,
    // HTTP client preconfigured with the cookie store and optional proxy.
    pub client: Client,
    // In-memory cookie jar, persisted to `cookie_path` by the Drop impl.
    pub cookie_store: Arc<CookieStoreMutex>,
    pub cookie_path: PathBuf,
}
|
|
||||||
impl Drop for CliContextSeed {
    /// Persist the cookie store to `cookie_path` when the CLI context is
    /// torn down, writing to a `.tmp` file first and renaming it into place
    /// so the existing cookie file is never left half-written.
    fn drop(&mut self) {
        let tmp = format!("{}.tmp", self.cookie_path.display());
        let parent_dir = self.cookie_path.parent().unwrap_or(Path::new("/"));
        if !parent_dir.exists() {
            std::fs::create_dir_all(&parent_dir).unwrap();
        }
        // Exclusive advisory lock guards against concurrent CLI invocations
        // writing the temp file at the same time.
        let mut writer = fd_lock_rs::FdLock::lock(
            File::create(&tmp).unwrap(),
            fd_lock_rs::LockType::Exclusive,
            true,
        )
        .unwrap();
        let store = self.cookie_store.lock().unwrap();
        store.save_json(&mut *writer).unwrap();
        writer.sync_all().unwrap();
        // NOTE(review): every step here unwraps inside `drop`; an I/O failure
        // panics during teardown (and aborts the process if already
        // unwinding) — consider logging and bailing out instead.
        std::fs::rename(tmp, &self.cookie_path).unwrap();
    }
}
|
|
||||||
|
|
||||||
const DEFAULT_HOST: Host<&'static str> = Host::Ipv4(Ipv4Addr::new(127, 0, 0, 1));
|
|
||||||
const DEFAULT_PORT: u16 = 5959;
|
|
||||||
|
|
||||||
/// Cheaply-clonable handle to the CLI's shared state.
#[derive(Debug, Clone)]
pub struct CliContext(Arc<CliContextSeed>);
impl CliContext {
    /// Build the CLI context from command-line flags and layered config
    /// files.
    ///
    /// Server URL precedence: `--host` flag, then the `host` config key,
    /// then `http://<bind-rpc>` (defaulting to `127.0.0.1:80`). The proxy is
    /// taken from `--proxy` or the config. Cookies load from `cookie-path`,
    /// defaulting to `.cookies.json` next to the config file.
    ///
    /// BLOCKING
    #[instrument(skip(matches))]
    pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
        let local_config_path = local_config_path();
        // Later paths in this chain act as fallbacks for earlier ones.
        let base: CliContextConfig = load_config_from_paths(
            matches
                .values_of("config")
                .into_iter()
                .flatten()
                .map(|p| Path::new(p))
                .chain(local_config_path.as_deref().into_iter())
                .chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
        )?;
        let mut url = if let Some(host) = matches.value_of("host") {
            host.parse()?
        } else if let Some(host) = base.host {
            host
        } else {
            format!(
                "http://{}",
                base.bind_rpc.unwrap_or(([127, 0, 0, 1], 80).into())
            )
            .parse()?
        };
        let proxy = if let Some(proxy) = matches.value_of("proxy") {
            Some(proxy.parse()?)
        } else {
            base.proxy
        };

        let cookie_path = base.cookie_path.unwrap_or_else(|| {
            local_config_path
                .as_deref()
                .unwrap_or_else(|| Path::new(crate::util::config::CONFIG_PATH))
                .parent()
                .unwrap_or(Path::new("/"))
                .join(".cookies.json")
        });
        // Reload cookies persisted by a previous invocation (they are written
        // back in `CliContextSeed::drop`).
        let cookie_store = Arc::new(CookieStoreMutex::new(if cookie_path.exists() {
            CookieStore::load_json(BufReader::new(File::open(&cookie_path)?))
                .map_err(|e| eyre!("{}", e))
                .with_kind(crate::ErrorKind::Deserialization)?
        } else {
            CookieStore::default()
        }));
        Ok(CliContext(Arc::new(CliContextSeed {
            base_url: url.clone(),
            // The RPC endpoint lives at `<base>/rpc/v1`.
            rpc_url: {
                url.path_segments_mut()
                    .map_err(|_| eyre!("Url cannot be base"))
                    .with_kind(crate::ErrorKind::ParseUrl)?
                    .push("rpc")
                    .push("v1");
                url
            },
            client: {
                let mut builder = Client::builder().cookie_provider(cookie_store.clone());
                if let Some(proxy) = proxy {
                    builder =
                        builder.proxy(Proxy::all(proxy).with_kind(crate::ErrorKind::ParseUrl)?)
                }
                builder.build().expect("cannot fail")
            },
            cookie_store,
            cookie_path,
        })))
    }
}
|
|
||||||
impl std::ops::Deref for CliContext {
    type Target = CliContextSeed;
    /// Allow `ctx.field` access to the shared seed without exposing the Arc.
    fn deref(&self) -> &Self::Target {
        &*self.0
    }
}
|
|
||||||
impl Context for CliContext {
    /// URL scheme of the configured server (e.g. "http").
    fn protocol(&self) -> &str {
        self.0.base_url.scheme()
    }
    /// Host portion of the base URL, defaulting to 127.0.0.1.
    fn host(&self) -> Host<&str> {
        self.0.base_url.host().unwrap_or(DEFAULT_HOST)
    }
    /// Port of the base URL, defaulting to `DEFAULT_PORT` (5959).
    fn port(&self) -> u16 {
        self.0.base_url.port().unwrap_or(DEFAULT_PORT)
    }
    /// Path of the RPC endpoint (`/rpc/v1`, built in `init`).
    fn path(&self) -> &str {
        self.0.rpc_url.path()
    }
    /// Full RPC endpoint URL.
    fn url(&self) -> Url {
        self.0.rpc_url.clone()
    }
    /// The shared HTTP client (cookies + optional proxy preconfigured).
    fn client(&self) -> &Client {
        &self.0.client
    }
}
|
|
||||||
/// When we had an empty proxy the system wasn't working like it used to, which allowed empty proxy
// Regression test: a config that sets a key with no value (and omits `proxy`
// entirely) must still deserialize instead of erroring out.
#[test]
fn test_cli_proxy_empty() {
    serde_yaml::from_str::<CliContextConfig>(
        "
bind_rpc:
",
    )
    .unwrap();
}
|
|
||||||
@@ -1,95 +0,0 @@
|
|||||||
use std::net::{IpAddr, SocketAddr};
|
|
||||||
use std::ops::Deref;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::sync::broadcast::Sender;
|
|
||||||
use tracing::instrument;
|
|
||||||
use url::Host;
|
|
||||||
|
|
||||||
use crate::shutdown::Shutdown;
|
|
||||||
use crate::util::io::from_toml_async_reader;
|
|
||||||
use crate::util::AsyncFileExt;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// On-disk configuration consumed by the diagnostic context.
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct DiagnosticContextConfig {
    // Address to serve the diagnostic RPC on.
    pub bind_rpc: Option<SocketAddr>,
    // Root of the data directory (defaults to /embassy-data).
    pub datadir: Option<PathBuf>,
}
|
|
||||||
impl DiagnosticContextConfig {
    /// Load the config from `path` (or the default config path). A missing
    /// file yields the all-default config rather than an error.
    ///
    /// NOTE(review): this parses the file as TOML, while
    /// `RpcContextConfig::load` reads the same default path as YAML —
    /// confirm which format is canonical.
    #[instrument(skip(path))]
    pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
        let cfg_path = path
            .as_ref()
            .map(|p| p.as_ref())
            .unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
        if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
        {
            from_toml_async_reader(f).await
        } else {
            Ok(Self::default())
        }
    }
    /// Configured data directory, falling back to `/embassy-data`.
    pub fn datadir(&self) -> &Path {
        self.datadir
            .as_deref()
            .unwrap_or_else(|| Path::new("/embassy-data"))
    }
}
|
|
||||||
|
|
||||||
/// Shared state behind a `DiagnosticContext`.
pub struct DiagnosticContextSeed {
    pub bind_rpc: SocketAddr,
    pub datadir: PathBuf,
    // Broadcast channel used to request shutdown; `None` presumably means a
    // plain exit rather than a system shutdown — confirm against subscribers.
    pub shutdown: Sender<Option<Shutdown>>,
    // The startup error supplied to `init` — presumably the reason the
    // system entered diagnostic mode.
    pub error: Arc<RpcError>,
    pub disk_guid: Option<Arc<String>>,
}
|
|
||||||
|
|
||||||
/// Cheaply-clonable handle to the diagnostic-mode state.
#[derive(Clone)]
pub struct DiagnosticContext(Arc<DiagnosticContextSeed>);
impl DiagnosticContext {
    /// Build the context from the optional config path plus the startup
    /// error and disk GUID collected before entering diagnostic mode.
    #[instrument(skip(path))]
    pub async fn init<P: AsRef<Path>>(
        path: Option<P>,
        disk_guid: Option<Arc<String>>,
        error: Error,
    ) -> Result<Self, Error> {
        let cfg = DiagnosticContextConfig::load(path).await?;

        // The receiver half is dropped; listeners subscribe later via the
        // stored Sender.
        let (shutdown, _) = tokio::sync::broadcast::channel(1);

        Ok(Self(Arc::new(DiagnosticContextSeed {
            // Default RPC bind: 127.0.0.1:5959.
            bind_rpc: cfg.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
            datadir: cfg.datadir().to_owned(),
            shutdown,
            disk_guid,
            error: Arc::new(error.into()),
        })))
    }
}
|
|
||||||
|
|
||||||
impl Context for DiagnosticContext {
    /// Host half of the configured RPC bind address.
    fn host(&self) -> Host<&str> {
        match self.0.bind_rpc.ip() {
            IpAddr::V4(a) => Host::Ipv4(a),
            IpAddr::V6(a) => Host::Ipv6(a),
        }
    }
    /// Port half of the configured RPC bind address.
    fn port(&self) -> u16 {
        self.0.bind_rpc.port()
    }
}
|
|
||||||
impl Deref for DiagnosticContext {
    type Target = DiagnosticContextSeed;
    /// Allow direct field access to the shared seed without exposing the Arc.
    fn deref(&self) -> &Self::Target {
        &*self.0
    }
}
|
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
pub mod cli;
|
|
||||||
pub mod diagnostic;
|
|
||||||
pub mod rpc;
|
|
||||||
pub mod sdk;
|
|
||||||
pub mod setup;
|
|
||||||
|
|
||||||
pub use cli::CliContext;
|
|
||||||
pub use diagnostic::DiagnosticContext;
|
|
||||||
pub use rpc::RpcContext;
|
|
||||||
pub use sdk::SdkContext;
|
|
||||||
pub use setup::SetupContext;
|
|
||||||
|
|
||||||
impl From<CliContext> for () {
|
|
||||||
fn from(_: CliContext) -> Self {
|
|
||||||
()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl From<DiagnosticContext> for () {
|
|
||||||
fn from(_: DiagnosticContext) -> Self {
|
|
||||||
()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl From<RpcContext> for () {
|
|
||||||
fn from(_: RpcContext) -> Self {
|
|
||||||
()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl From<SdkContext> for () {
|
|
||||||
fn from(_: SdkContext) -> Self {
|
|
||||||
()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl From<SetupContext> for () {
|
|
||||||
fn from(_: SetupContext) -> Self {
|
|
||||||
()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,412 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, VecDeque};
|
|
||||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4};
|
|
||||||
use std::ops::Deref;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use bollard::Docker;
|
|
||||||
use patch_db::json_ptr::JsonPointer;
|
|
||||||
use patch_db::{DbHandle, LockReceipt, LockType, PatchDb, Revision};
|
|
||||||
use reqwest::Url;
|
|
||||||
use rpc_toolkit::url::Host;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use sqlx::sqlite::SqliteConnectOptions;
|
|
||||||
use sqlx::SqlitePool;
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
|
|
||||||
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
|
|
||||||
use crate::hostname::{derive_hostname, derive_id, get_product_key};
|
|
||||||
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
|
|
||||||
use crate::manager::ManagerMap;
|
|
||||||
use crate::middleware::auth::HashSessionToken;
|
|
||||||
use crate::net::tor::os_key;
|
|
||||||
use crate::net::wifi::WpaCli;
|
|
||||||
use crate::net::NetController;
|
|
||||||
use crate::notifications::NotificationManager;
|
|
||||||
use crate::setup::password_hash;
|
|
||||||
use crate::shutdown::Shutdown;
|
|
||||||
use crate::status::{MainStatus, Status};
|
|
||||||
use crate::util::io::from_yaml_async_reader;
|
|
||||||
use crate::util::{AsyncFileExt, Invoke};
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Top-level daemon configuration (loaded from YAML by `load`).
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig {
    // Bind addresses for the RPC, websocket, and static-file servers.
    pub bind_rpc: Option<SocketAddr>,
    pub bind_ws: Option<SocketAddr>,
    pub bind_static: Option<SocketAddr>,
    // Tor control-port and SOCKS proxy addresses.
    pub tor_control: Option<SocketAddr>,
    pub tor_socks: Option<SocketAddr>,
    pub dns_bind: Option<Vec<SocketAddr>>,
    // Capacity of the in-memory patch-db revision cache.
    pub revision_cache_size: Option<usize>,
    // Root of the data directory (defaults to /embassy-data).
    pub datadir: Option<PathBuf>,
    pub log_server: Option<Url>,
}
|
|
||||||
impl RpcContextConfig {
    /// Load the config from `path` (or the default config path) as YAML;
    /// a missing file yields the all-default config rather than an error.
    pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
        let cfg_path = path
            .as_ref()
            .map(|p| p.as_ref())
            .unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
        if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
        {
            from_yaml_async_reader(f).await
        } else {
            Ok(Self::default())
        }
    }
    /// Configured data directory, falling back to `/embassy-data`.
    pub fn datadir(&self) -> &Path {
        self.datadir
            .as_deref()
            .unwrap_or_else(|| Path::new("/embassy-data"))
    }
    /// Open (and, on first run, initialize) the main patch-db database at
    /// `<datadir>/main/embassy.db`, seeding it from the product key and the
    /// secrets stored in `secret_store`.
    pub async fn db(&self, secret_store: &SqlitePool, product_key: &str) -> Result<PatchDb, Error> {
        let sid = derive_id(product_key);
        let hostname = derive_hostname(&sid);
        let db_path = self.datadir().join("main").join("embassy.db");
        let db = PatchDb::open(&db_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
        // A missing root document means the database was never initialized.
        if !db.exists(&<JsonPointer>::default()).await? {
            db.put(
                &<JsonPointer>::default(),
                &Database::init(
                    sid,
                    &hostname,
                    &os_key(&mut secret_store.acquire().await?).await?,
                    password_hash(&mut secret_store.acquire().await?).await?,
                ),
                None,
            )
            .await?;
        }
        Ok(db)
    }
    /// Open the sqlite secret store at `<datadir>/main/secrets.db`, creating
    /// it if necessary and running any pending sqlx migrations.
    #[instrument]
    pub async fn secret_store(&self) -> Result<SqlitePool, Error> {
        let secret_store = SqlitePool::connect_with(
            SqliteConnectOptions::new()
                .filename(self.datadir().join("main").join("secrets.db"))
                .create_if_missing(true)
                .busy_timeout(Duration::from_secs(30)),
        )
        .await?;
        sqlx::migrate!()
            .run(&secret_store)
            .await
            .with_kind(crate::ErrorKind::Database)?;
        Ok(secret_store)
    }
}
|
|
||||||
|
|
||||||
/// Shared state behind an `RpcContext` — one instance for the whole daemon.
pub struct RpcContextSeed {
    // Whether this context has been closed; set/read outside this chunk —
    // presumably flipped during shutdown, confirm at the usage sites.
    is_closed: AtomicBool,
    pub bind_rpc: SocketAddr,
    pub bind_ws: SocketAddr,
    pub bind_static: SocketAddr,
    pub datadir: PathBuf,
    pub disk_guid: Arc<String>,
    pub db: PatchDb,
    pub secret_store: SqlitePool,
    pub docker: Docker,
    pub net_controller: NetController,
    // Running package managers, looked up by (package id, version).
    pub managers: ManagerMap,
    // Bounded in-memory cache of recent patch-db revisions.
    pub revision_cache_size: usize,
    pub revision_cache: RwLock<VecDeque<Arc<Revision>>>,
    pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
    pub shutdown: broadcast::Sender<Option<Shutdown>>,
    pub tor_socks: SocketAddr,
    pub notification_manager: NotificationManager,
    // Per-session channels — presumably used to force-close authenticated
    // websockets when the session ends; confirm at the usage sites.
    pub open_authed_websockets: Mutex<BTreeMap<HashSessionToken, Vec<oneshot::Sender<()>>>>,
    pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
    pub wifi_manager: Arc<RwLock<WpaCli>>,
}
|
|
||||||
|
|
||||||
/// Lock receipts needed for package cleanup: the failed-cleanup receipts
/// plus write locks over the package-data map and its entries.
pub struct RpcCleanReceipts {
    cleanup_receipts: CleanupFailedReceipts,
    // Write lock over the whole package-data map.
    packages: LockReceipt<crate::db::model::AllPackageData, ()>,
    // Write lock over a single package entry, keyed by package id.
    package: LockReceipt<crate::db::model::PackageDataEntry, String>,
}

impl RpcCleanReceipts {
    /// Acquire every required lock in a single `lock_all` call and
    /// materialize the receipts.
    pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Register this type's lock targets in `locks` and return a closure
    /// that verifies and builds the receipts once the locks are held.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let cleanup_receipts = CleanupFailedReceipts::setup(locks);

        let packages = crate::db::DatabaseModel::new()
            .package_data()
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        let package = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                cleanup_receipts: cleanup_receipts(skeleton_key)?,
                packages: packages.verify(skeleton_key)?,
                package: package.verify(skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// Lock receipts for `RpcContext::set_nginx_conf`.
pub struct RpcSetNginxReceipts {
    /// Read lock on `server-info` (source of the LAN/tor hostnames).
    server_info: LockReceipt<crate::db::model::ServerInfo, ()>,
}

impl RpcSetNginxReceipts {
    /// Acquires the server-info read lock in a single `lock_all` call.
    pub async fn new(db: &'_ mut impl DbHandle) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the lock target in `locks` and returns a verifier closure
    /// that converts the granted lock into a receipt.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let server_info = crate::db::DatabaseModel::new()
            .server_info()
            .make_locker(LockType::Read)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                server_info: server_info.verify(skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// Cheaply cloneable handle to the daemon's shared state (an `Arc` around
/// `RpcContextSeed`; see `Deref`).
#[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
    /// Boots the full RPC context: loads config, opens the secret store and
    /// PatchDB, connects to Docker, starts the net controller, then runs
    /// `cleanup` and initializes all package managers.
    ///
    /// `cfg_path` overrides the default config location; `disk_guid` is the
    /// GUID of the data disk in use. Fails if any subsystem fails to start.
    #[instrument(skip(cfg_path))]
    pub async fn init<P: AsRef<Path>>(
        cfg_path: Option<P>,
        disk_guid: Arc<String>,
    ) -> Result<Self, Error> {
        let base = RpcContextConfig::load(cfg_path).await?;
        tracing::info!("Loaded Config");
        // Default tor SOCKS proxy: 127.0.0.1:9050.
        let tor_proxy = base.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
            Ipv4Addr::new(127, 0, 0, 1),
            9050,
        )));
        let (shutdown, _) = tokio::sync::broadcast::channel(1);
        let secret_store = base.secret_store().await?;
        tracing::info!("Opened Sqlite DB");
        let db = base.db(&secret_store, &get_product_key().await?).await?;
        tracing::info!("Opened PatchDB");
        let docker = Docker::connect_with_unix_defaults()?;
        tracing::info!("Connected to Docker");
        // Embassy UI on port 80; tor control defaults to 9051, DNS bind to port 53.
        let net_controller = NetController::init(
            ([127, 0, 0, 1], 80).into(),
            crate::net::tor::os_key(&mut secret_store.acquire().await?).await?,
            base.tor_control
                .unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
            base.dns_bind
                .as_ref()
                .map(|v| v.as_slice())
                .unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
            secret_store.clone(),
            None,
        )
        .await?;
        tracing::info!("Initialized Net Controller");
        let managers = ManagerMap::default();
        let metrics_cache = RwLock::new(None);
        let notification_manager = NotificationManager::new(secret_store.clone());
        tracing::info!("Initialized Notification Manager");
        let seed = Arc::new(RpcContextSeed {
            is_closed: AtomicBool::new(false),
            bind_rpc: base.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
            bind_ws: base.bind_ws.unwrap_or(([127, 0, 0, 1], 5960).into()),
            bind_static: base.bind_static.unwrap_or(([127, 0, 0, 1], 5961).into()),
            datadir: base.datadir().to_path_buf(),
            disk_guid,
            db,
            secret_store,
            docker,
            net_controller,
            managers,
            revision_cache_size: base.revision_cache_size.unwrap_or(512),
            revision_cache: RwLock::new(VecDeque::new()),
            metrics_cache,
            shutdown,
            tor_socks: tor_proxy,
            notification_manager,
            open_authed_websockets: Mutex::new(BTreeMap::new()),
            rpc_stream_continuations: Mutex::new(BTreeMap::new()),
            wifi_manager: Arc::new(RwLock::new(WpaCli::init("wlan0".to_string()))),
        });

        let res = Self(seed);
        // Normalize any package states left dangling by a previous run
        // before the managers come up.
        res.cleanup().await?;
        tracing::info!("Cleaned up transient states");
        res.managers
            .init(
                &res,
                &mut res.db.handle(),
                &mut res.secret_store.acquire().await?,
            )
            .await?;
        tracing::info!("Initialized Package Managers");
        Ok(res)
    }

    /// Regenerates `/etc/nginx/sites-available/default` from the bundled
    /// template (filling in the LAN and tor hostnames from `server-info`)
    /// and reloads nginx via systemctl.
    #[instrument(skip(self, db, receipts))]
    pub async fn set_nginx_conf<Db: DbHandle>(
        &self,
        db: &mut Db,
        receipts: RpcSetNginxReceipts,
    ) -> Result<(), Error> {
        tokio::fs::write("/etc/nginx/sites-available/default", {
            let info = receipts.server_info.get(db).await?;
            // NOTE(review): unwrap assumes both stored URLs always carry a
            // host component — TODO confirm against where they are written.
            format!(
                include_str!("../nginx/main-ui.conf.template"),
                lan_hostname = info.lan_address.host_str().unwrap(),
                tor_hostname = info.tor_address.host_str().unwrap(),
            )
        })
        .await
        .with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                "/etc/nginx/sites-available/default",
            )
        })?;
        Command::new("systemctl")
            .arg("reload")
            .arg("nginx")
            .invoke(crate::ErrorKind::Nginx)
            .await?;
        Ok(())
    }
    /// Tears down the context: stops all managers, closes the secret store,
    /// then flags the seed as closed (consumes `self`; other clones of this
    /// context will trip the "unstable" Deref check afterwards).
    #[instrument(skip(self))]
    pub async fn shutdown(self) -> Result<(), Error> {
        self.managers.empty().await?;
        self.secret_store.close().await;
        self.is_closed.store(true, Ordering::SeqCst);
        Ok(())
    }

    /// Sweeps `package-data` and normalizes states that cannot survive a
    /// restart: interrupted installs/restores/updates are cleaned up as
    /// failed, pending removals are completed, and installed packages that
    /// were Running (or mid-backup while started) are set back to Starting.
    /// Per-package failures are logged and do not abort the sweep.
    #[instrument(skip(self))]
    pub async fn cleanup(&self) -> Result<(), Error> {
        let mut db = self.db.handle();
        let receipts = RpcCleanReceipts::new(&mut db).await?;
        for (package_id, package) in receipts.packages.get(&mut db).await?.0 {
            if let Err(e) = async {
                match package {
                    PackageDataEntry::Installing { .. }
                    | PackageDataEntry::Restoring { .. }
                    | PackageDataEntry::Updating { .. } => {
                        // A transfer was in flight when we died: roll it back.
                        cleanup_failed(self, &mut db, &package_id, &receipts.cleanup_receipts)
                            .await?;
                    }
                    PackageDataEntry::Removing { .. } => {
                        // Finish the interrupted uninstall.
                        uninstall(
                            self,
                            &mut db,
                            &mut self.secret_store.acquire().await?,
                            &package_id,
                        )
                        .await?;
                    }
                    PackageDataEntry::Installed {
                        installed,
                        static_files,
                        manifest,
                    } => {
                        let status = installed.status;
                        let main = match status.main {
                            // Mid-backup: if the service had been started,
                            // resume it; otherwise leave it stopped.
                            MainStatus::BackingUp { started, .. } => {
                                if let Some(_) = started {
                                    MainStatus::Starting { restarting: false }
                                } else {
                                    MainStatus::Stopped
                                }
                            }
                            // "Running" is stale after a restart: the manager
                            // must bring it up again.
                            MainStatus::Running { .. } => {
                                MainStatus::Starting { restarting: false }
                            }
                            a => a.clone(),
                        };
                        let new_package = PackageDataEntry::Installed {
                            installed: InstalledPackageDataEntry {
                                status: Status { main, ..status },
                                ..installed
                            },
                            static_files,
                            manifest,
                        };
                        receipts
                            .package
                            .set(&mut db, new_package, &package_id)
                            .await?;
                    }
                }
                Ok::<_, Error>(())
            }
            .await
            {
                tracing::error!("Failed to clean up package {}: {}", package_id, e);
                tracing::debug!("{:?}", e);
            }
        }
        Ok(())
    }
}
|
|
||||||
impl Context for RpcContext {
|
|
||||||
fn host(&self) -> Host<&str> {
|
|
||||||
match self.0.bind_rpc.ip() {
|
|
||||||
IpAddr::V4(a) => Host::Ipv4(a),
|
|
||||||
IpAddr::V6(a) => Host::Ipv6(a),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn port(&self) -> u16 {
|
|
||||||
self.0.bind_rpc.port()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Deref for RpcContext {
    type Target = RpcContextSeed;
    fn deref(&self) -> &Self::Target {
        // Debug aid, compiled only with the "unstable" feature: panic with a
        // span trace if the context is dereferenced after `shutdown` ran.
        #[cfg(feature = "unstable")]
        if self.0.is_closed.load(Ordering::SeqCst) {
            panic!(
                "RpcContext used after shutdown! {}",
                tracing_error::SpanTrace::capture()
            );
        }
        &*self.0
    }
}
|
|
||||||
impl Drop for RpcContext {
    fn drop(&mut self) {
        // Debug aid, compiled only with the "unstable" feature: once shutdown
        // has been initiated, log how many other clones of the seed remain.
        #[cfg(feature = "unstable")]
        if self.0.is_closed.load(Ordering::SeqCst) {
            tracing::info!(
                "RpcContext dropped. {} left.",
                Arc::strong_count(&self.0) - 1
            );
        }
    }
}
|
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::Deserialize;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::util::config::{load_config_from_paths, local_config_path};
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// On-disk configuration for the `embassy-sdk` CLI (YAML, kebab-case keys).
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SdkContextConfig {
    /// Where the developer signing key lives; when absent, a default next to
    /// the config file is used (see `SdkContext::init`).
    pub developer_key_path: Option<PathBuf>,
}
|
|
||||||
|
|
||||||
/// Resolved, shared state behind `SdkContext`.
#[derive(Debug)]
pub struct SdkContextSeed {
    /// Fully resolved path to the developer's ed25519 signing key (PEM).
    pub developer_key_path: PathBuf,
}
|
|
||||||
|
|
||||||
/// Cheaply cloneable context for `embassy-sdk` CLI commands.
#[derive(Debug, Clone)]
pub struct SdkContext(Arc<SdkContextSeed>);
impl SdkContext {
    /// BLOCKING
    ///
    /// Builds the context from CLI `--config` paths, the user-local config
    /// path, and the compiled-in default `CONFIG_PATH`, layered in that order
    /// by `load_config_from_paths`. The developer key path defaults to
    /// `developer.key.pem` next to whichever config file applies.
    #[instrument(skip(matches))]
    pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
        let local_config_path = local_config_path();
        let base: SdkContextConfig = load_config_from_paths(
            matches
                .values_of("config")
                .into_iter()
                .flatten()
                .map(|p| Path::new(p))
                .chain(local_config_path.as_deref().into_iter())
                .chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
        )?;
        Ok(SdkContext(Arc::new(SdkContextSeed {
            developer_key_path: base.developer_key_path.unwrap_or_else(|| {
                local_config_path
                    .as_deref()
                    .unwrap_or_else(|| Path::new(crate::util::config::CONFIG_PATH))
                    .parent()
                    .unwrap_or(Path::new("/"))
                    .join("developer.key.pem")
            }),
        })))
    }
    /// BLOCKING
    ///
    /// Reads and parses the developer key as PKCS#8 PEM. When the stored key
    /// lacks a public half, it is re-derived from the secret key. Errors with
    /// `Uninitialized` if the key file does not exist.
    #[instrument]
    pub fn developer_key(&self) -> Result<ed25519_dalek::Keypair, Error> {
        if !self.developer_key_path.exists() {
            return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized));
        }
        let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
            &std::fs::read_to_string(&self.developer_key_path)?,
        )
        .with_kind(crate::ErrorKind::Pem)?;
        let secret = ed25519_dalek::SecretKey::from_bytes(&pair.secret_key[..])?;
        let public = if let Some(public) = pair.public_key {
            ed25519_dalek::PublicKey::from_bytes(&public[..])?
        } else {
            // Derive the public key from the secret when the PEM omits it.
            (&secret).into()
        };
        Ok(ed25519_dalek::Keypair { secret, public })
    }
}
|
|
||||||
impl std::ops::Deref for SdkContext {
|
|
||||||
type Target = SdkContextSeed;
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&*self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// The SDK runs no server; the trait's default host/port are sufficient.
impl Context for SdkContext {}
|
|
||||||
@@ -1,169 +0,0 @@
|
|||||||
use std::net::{IpAddr, SocketAddr};
|
|
||||||
use std::ops::Deref;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use patch_db::json_ptr::JsonPointer;
|
|
||||||
use patch_db::PatchDb;
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
use rpc_toolkit::Context;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sqlx::sqlite::SqliteConnectOptions;
|
|
||||||
use sqlx::SqlitePool;
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::sync::broadcast::Sender;
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
use tracing::instrument;
|
|
||||||
use url::Host;
|
|
||||||
|
|
||||||
use crate::db::model::Database;
|
|
||||||
use crate::hostname::{derive_hostname, derive_id, get_product_key};
|
|
||||||
use crate::net::tor::os_key;
|
|
||||||
use crate::setup::{password_hash, RecoveryStatus};
|
|
||||||
use crate::util::io::from_yaml_async_reader;
|
|
||||||
use crate::util::AsyncFileExt;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Payload returned to the UI when setup completes (kebab-case JSON/YAML).
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetupResult {
    /// The server's .onion address.
    pub tor_address: String,
    /// The server's LAN address.
    pub lan_address: String,
    /// Root CA certificate — presumably PEM-encoded; TODO confirm.
    pub root_ca: String,
}
|
|
||||||
|
|
||||||
/// Configuration for the setup daemon (YAML, kebab-case keys).
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetupContextConfig {
    /// Override for the RPC bind address (default 127.0.0.1:5959).
    pub bind_rpc: Option<SocketAddr>,
    /// Override for the data directory (default /embassy-data).
    pub datadir: Option<PathBuf>,
}
impl SetupContextConfig {
    /// Loads the config from `path` (or the compiled-in `CONFIG_PATH`);
    /// a missing file yields `Self::default()` rather than an error.
    #[instrument(skip(path))]
    pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
        let cfg_path = path
            .as_ref()
            .map(|p| p.as_ref())
            .unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
        if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
        {
            from_yaml_async_reader(f).await
        } else {
            Ok(Self::default())
        }
    }
    /// Effective data directory: the configured value or "/embassy-data".
    pub fn datadir(&self) -> &Path {
        self.datadir
            .as_deref()
            .unwrap_or_else(|| Path::new("/embassy-data"))
    }
}
|
|
||||||
|
|
||||||
/// Shared state behind `SetupContext` (the pre-install setup daemon).
pub struct SetupContextSeed {
    /// Path the config was loaded from, if one was supplied.
    pub config_path: Option<PathBuf>,
    /// RPC bind address (default 127.0.0.1:5959).
    pub bind_rpc: SocketAddr,
    /// Broadcast used to signal the setup server to shut down.
    pub shutdown: Sender<()>,
    /// Root data directory.
    pub datadir: PathBuf,
    /// Drive selected for recovery — presumably a 0.2.x data drive; TODO confirm.
    pub selected_v2_drive: RwLock<Option<PathBuf>>,
    /// Memoized product key; populated lazily by `SetupContext::product_key`.
    pub cached_product_key: RwLock<Option<Arc<String>>>,
    /// Status (or error) of an in-flight recovery, if one was started.
    pub recovery_status: RwLock<Option<Result<RecoveryStatus, RpcError>>>,
    /// Result of a completed setup; the `Arc<String>` companion value is not
    /// established by this chunk (looks like a disk guid — TODO confirm).
    pub setup_result: RwLock<Option<(Arc<String>, SetupResult)>>,
}
|
|
||||||
|
|
||||||
/// Cheaply cloneable handle to the setup daemon's shared state.
#[derive(Clone)]
pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext {
    /// Loads `SetupContextConfig` from `path` and builds the context with
    /// empty/unset runtime state.
    #[instrument(skip(path))]
    pub async fn init<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
        let cfg = SetupContextConfig::load(path.as_ref()).await?;
        let (shutdown, _) = tokio::sync::broadcast::channel(1);
        let datadir = cfg.datadir().to_owned();
        Ok(Self(Arc::new(SetupContextSeed {
            config_path: path.as_ref().map(|p| p.as_ref().to_owned()),
            bind_rpc: cfg.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
            shutdown,
            datadir,
            selected_v2_drive: RwLock::new(None),
            cached_product_key: RwLock::new(None),
            recovery_status: RwLock::new(None),
            setup_result: RwLock::new(None),
        })))
    }
    /// Opens (and, on first run, seeds) the PatchDB at
    /// `<datadir>/main/embassy.db`. An empty database is initialized with an
    /// id/hostname derived from the product key plus the OS tor key and
    /// password hash pulled from `secret_store`.
    #[instrument(skip(self))]
    pub async fn db(&self, secret_store: &SqlitePool) -> Result<PatchDb, Error> {
        let db_path = self.datadir.join("main").join("embassy.db");
        let db = PatchDb::open(&db_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
        // No root document yet => first boot: seed the database.
        if !db.exists(&<JsonPointer>::default()).await? {
            let pkey = self.product_key().await?;
            let sid = derive_id(&*pkey);
            let hostname = derive_hostname(&sid);
            db.put(
                &<JsonPointer>::default(),
                &Database::init(
                    sid,
                    &hostname,
                    &os_key(&mut secret_store.acquire().await?).await?,
                    password_hash(&mut secret_store.acquire().await?).await?,
                ),
                None,
            )
            .await?;
        }
        Ok(db)
    }
    /// Opens (creating if missing) the secrets SQLite database at
    /// `<datadir>/main/secrets.db` and applies pending migrations.
    #[instrument(skip(self))]
    pub async fn secret_store(&self) -> Result<SqlitePool, Error> {
        let secret_store = SqlitePool::connect_with(
            SqliteConnectOptions::new()
                .filename(self.datadir.join("main").join("secrets.db"))
                .create_if_missing(true)
                // Wait up to 30s on a locked database instead of failing fast.
                .busy_timeout(Duration::from_secs(30)),
        )
        .await?;
        sqlx::migrate!()
            .run(&secret_store)
            .await
            .with_kind(crate::ErrorKind::Database)?;
        Ok(secret_store)
    }
    /// Returns the product key, fetching it at most once and caching it in
    /// `cached_product_key` (read lock fast path, write lock on first fill).
    #[instrument(skip(self))]
    pub async fn product_key(&self) -> Result<Arc<String>, Error> {
        Ok(
            if let Some(k) = {
                // Take and release the read lock before any await on the
                // slow path below.
                let guard = self.cached_product_key.read().await;
                let res = guard.clone();
                drop(guard);
                res
            } {
                k
            } else {
                let k = Arc::new(get_product_key().await?);
                *self.cached_product_key.write().await = Some(k.clone());
                k
            },
        )
    }
}
|
|
||||||
|
|
||||||
impl Context for SetupContext {
|
|
||||||
fn host(&self) -> Host<&str> {
|
|
||||||
match self.0.bind_rpc.ip() {
|
|
||||||
IpAddr::V4(a) => Host::Ipv4(a),
|
|
||||||
IpAddr::V6(a) => Host::Ipv6(a),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn port(&self) -> u16 {
|
|
||||||
self.0.bind_rpc.port()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Deref for SetupContext {
|
|
||||||
type Target = SetupContextSeed;
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&*self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,216 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use patch_db::{DbHandle, LockReceipt, LockType};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::db::util::WithRevision;
|
|
||||||
use crate::dependencies::{
|
|
||||||
break_all_dependents_transitive, heal_all_dependents_transitive, BreakageRes, DependencyError,
|
|
||||||
DependencyReceipt, TaggedDependencyError,
|
|
||||||
};
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::status::MainStatus;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::display_serializable;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// Lock receipts needed by the `start` command for one package.
#[derive(Clone)]
pub struct StartReceipts {
    // Locks used by `heal_all_dependents_transitive`.
    dependency_receipt: DependencyReceipt,
    /// Write lock on the package's `status.main`.
    status: LockReceipt<MainStatus, ()>,
    /// Read lock on the package's manifest version (used to key the manager).
    version: LockReceipt<crate::util::Version, ()>,
}

impl StartReceipts {
    /// Acquires all locks for package `id` in a single `lock_all` call.
    pub async fn new(db: &mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks, id);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the lock targets for package `id` in `locks` and returns a
    /// verifier closure that converts the granted locks into receipts.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
        id: &PackageId,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let dependency_receipt = DependencyReceipt::setup(locks);
        let status = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.status().main())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        let version = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.manifest().version())
            .make_locker(LockType::Read)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                dependency_receipt: dependency_receipt(skeleton_key)?,
                status: status.verify(skeleton_key)?,
                version: version.verify(skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// RPC command: starts package `id`.
///
/// Sets `status.main` to `Starting`, heals dependents that were broken on
/// this package, commits, then asks the package's manager to synchronize to
/// the new desired state. Errors with `InvalidRequest` if no manager exists
/// for the (id, version) pair.
#[command(display(display_none))]
#[instrument(skip(ctx))]
pub async fn start(
    #[context] ctx: RpcContext,
    #[arg] id: PackageId,
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    let receipts = StartReceipts::new(&mut tx, &id).await?;
    let version = receipts.version.get(&mut tx).await?;
    receipts
        .status
        .set(&mut tx, MainStatus::Starting { restarting: false })
        .await?;
    heal_all_dependents_transitive(&ctx, &mut tx, &id, &receipts.dependency_receipt).await?;

    let revision = tx.commit(None).await?;
    // Release the locks before the (potentially slow) manager sync.
    drop(receipts);

    ctx.managers
        .get(&(id, version))
        .await
        .ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
        .synchronize()
        .await;

    Ok(WithRevision {
        revision,
        response: (),
    })
}
|
|
||||||
/// Lock receipts needed by the `stop` flow for one package.
#[derive(Clone)]
pub struct StopReceipts {
    // Locks used by `break_all_dependents_transitive`.
    breaks: crate::dependencies::BreakTransitiveReceipts,
    /// Write lock on the package's `status.main`.
    status: LockReceipt<MainStatus, ()>,
}

impl StopReceipts {
    /// Acquires all locks for package `id` in a single `lock_all` call.
    pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks, id);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the lock targets for package `id` in `locks` and returns a
    /// verifier closure that converts the granted locks into receipts.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
        id: &PackageId,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let breaks = crate::dependencies::BreakTransitiveReceipts::setup(locks);
        let status = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.status().main())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                breaks: breaks(skeleton_key)?,
                status: status.verify(skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// Shared body of `stop` and `stop_dry`: marks package `id` as Stopping in a
/// nested transaction (saved immediately), then cascades a `NotRunning`
/// dependency error to all transitive dependents, recording each affected
/// package in `breakages`.
#[instrument(skip(db))]
async fn stop_common<Db: DbHandle>(
    db: &mut Db,
    id: &PackageId,
    breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
) -> Result<(), Error> {
    let mut tx = db.begin().await?;
    let receipts = StopReceipts::new(&mut tx, id).await?;
    receipts.status.set(&mut tx, MainStatus::Stopping).await?;

    tx.save().await?;
    // Note: the breakage walk runs on the outer handle, after the status
    // change has been saved.
    break_all_dependents_transitive(
        db,
        id,
        DependencyError::NotRunning,
        breakages,
        &receipts.breaks,
    )
    .await?;

    Ok(())
}
|
|
||||||
|
|
||||||
/// RPC command: stop a package. The parent command just forwards the id to
/// `stop_impl` (the real action) or the `dry` subcommand.
#[command(subcommands(self(stop_impl(async)), stop_dry), display(display_none))]
pub fn stop(#[arg] id: PackageId) -> Result<PackageId, Error> {
    Ok(id)
}
|
|
||||||
|
|
||||||
/// `stop dry`: computes which dependents would break if the package were
/// stopped, then aborts the transaction so nothing is persisted.
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn stop_dry(
    #[context] ctx: RpcContext,
    #[parent_data] id: PackageId,
) -> Result<BreakageRes, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;

    let mut breakages = BTreeMap::new();
    stop_common(&mut tx, &id, &mut breakages).await?;

    // Dry run: throw away all changes made above.
    tx.abort().await?;

    Ok(BreakageRes(breakages))
}
|
|
||||||
|
|
||||||
/// Actually stops the package: same work as `stop_dry`, but the breakage map
/// is discarded and the transaction is committed.
#[instrument(skip(ctx))]
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;

    stop_common(&mut tx, &id, &mut BTreeMap::new()).await?;

    Ok(WithRevision {
        revision: tx.commit(None).await?,
        response: (),
    })
}
|
|
||||||
|
|
||||||
/// RPC command: restarts package `id`.
///
/// Only valid while the package is `Running`; sets `status.main` to
/// `Restarting` and commits. Errors with `InvalidRequest` otherwise.
#[command(display(display_none))]
pub async fn restart(
    #[context] ctx: RpcContext,
    #[arg] id: PackageId,
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;

    let mut status = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .and_then(|pde| pde.installed())
        .map(|i| i.status().main())
        .get_mut(&mut tx)
        .await?;
    // `None` here means the package is missing or not installed.
    if !matches!(&*status, Some(MainStatus::Running { .. })) {
        return Err(Error::new(
            eyre!("{} is not running", id),
            crate::ErrorKind::InvalidRequest,
        ));
    }
    *status = Some(MainStatus::Restarting);
    status.save(&mut tx).await?;

    Ok(WithRevision {
        revision: tx.commit(None).await?,
        response: (),
    })
}
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
pub mod rpc_continuations;
|
|
||||||
@@ -1,53 +0,0 @@
|
|||||||
use std::time::Instant;
|
|
||||||
|
|
||||||
use futures::future::BoxFuture;
|
|
||||||
use http::{Request, Response};
|
|
||||||
use hyper::Body;
|
|
||||||
use rand::RngCore;
|
|
||||||
|
|
||||||
/// Opaque identifier for an RPC continuation request: 64 characters of
/// unpadded RFC 4648 base32 (see `RequestGuid::new`/`from`).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
pub struct RequestGuid<T: AsRef<str> = String>(T);
|
|
||||||
impl RequestGuid {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
let mut buf = [0; 40];
|
|
||||||
rand::thread_rng().fill_bytes(&mut buf);
|
|
||||||
RequestGuid(base32::encode(
|
|
||||||
base32::Alphabet::RFC4648 { padding: false },
|
|
||||||
&buf,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn from(r: &str) -> Option<RequestGuid> {
|
|
||||||
if r.len() != 64 {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
for c in r.chars() {
|
|
||||||
if !(c >= 'A' && c <= 'Z' || c >= '2' && c <= '7') {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(RequestGuid(r.to_owned()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Smoke test: a freshly generated GUID should round-trip through `from`
// (output is printed rather than asserted).
#[test]
fn parse_guid() {
    println!(
        "{:?}",
        RequestGuid::from(&format!("{}", RequestGuid::new()))
    )
}
|
|
||||||
|
|
||||||
impl<T: AsRef<str>> std::fmt::Display for RequestGuid<T> {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
self.0.as_ref().fmt(f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A deferred HTTP handler, parked until its follow-up request arrives.
pub struct RpcContinuation {
    /// When the continuation was registered — presumably used to expire
    /// stale entries; TODO confirm against the consumer.
    pub created_at: Instant,
    /// One-shot handler invoked with the follow-up request; produces the
    /// response future.
    pub handler: Box<
        dyn FnOnce(Request<Body>) -> BoxFuture<'static, Result<Response<Body>, crate::Error>>
            + Send
            + Sync,
    >,
}
|
|
||||||
@@ -1,279 +0,0 @@
|
|||||||
pub mod model;
|
|
||||||
pub mod package;
|
|
||||||
pub mod util;
|
|
||||||
|
|
||||||
use std::borrow::Cow;
|
|
||||||
use std::future::Future;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use futures::{FutureExt, SinkExt, StreamExt};
|
|
||||||
use patch_db::json_ptr::JsonPointer;
|
|
||||||
use patch_db::{Dump, Revision};
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use rpc_toolkit::hyper::upgrade::Upgraded;
|
|
||||||
use rpc_toolkit::hyper::{Body, Error as HyperError, Request, Response};
|
|
||||||
use rpc_toolkit::yajrc::{GenericRpcMethod, RpcError, RpcResponse};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use tokio::sync::{broadcast, oneshot};
|
|
||||||
use tokio::task::JoinError;
|
|
||||||
use tokio_tungstenite::tungstenite::Message;
|
|
||||||
use tokio_tungstenite::WebSocketStream;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
pub use self::model::DatabaseModel;
|
|
||||||
use self::util::WithRevision;
|
|
||||||
use crate::context::RpcContext;
|
|
||||||
use crate::middleware::auth::{HasValidSession, HashSessionToken};
|
|
||||||
use crate::util::serde::{display_serializable, IoFormat};
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Serves one PatchDB-subscription websocket.
///
/// Waits for the upgraded stream, then loops until the client sends a text
/// frame containing its cookie header. The "session" cookie is verified; on
/// failure an RPC error is sent back and the socket is closed. On success the
/// initial database dump is sent and the connection hands off to
/// `deal_with_messages`, which streams revisions until the socket closes or
/// the session is killed.
#[instrument(skip(ctx, ws_fut))]
async fn ws_handler<
    WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
>(
    ctx: RpcContext,
    ws_fut: WSFut,
) -> Result<(), Error> {
    // Snapshot + revision subscription taken before auth, so no revision
    // between dump and subscribe is missed.
    let (dump, sub) = ctx.db.dump_and_sub().await;
    let mut stream = ws_fut
        .await
        .with_kind(crate::ErrorKind::Network)?
        .with_kind(crate::ErrorKind::Unknown)?;

    let (has_valid_session, token) = loop {
        // Non-text frames are ignored; only a text frame carrying the cookie
        // string advances authentication.
        if let Some(Message::Text(cookie)) = stream
            .next()
            .await
            .transpose()
            .with_kind(crate::ErrorKind::Network)?
        {
            // The cookie header arrives as a JSON-encoded string.
            let cookie_str = serde_json::from_str::<Cow<str>>(&cookie)
                .with_kind(crate::ErrorKind::Deserialization)?;

            let id = basic_cookies::Cookie::parse(&cookie_str)
                .with_kind(crate::ErrorKind::Authorization)?
                .into_iter()
                .find(|c| c.get_name() == "session")
                .ok_or_else(|| {
                    Error::new(eyre!("UNAUTHORIZED"), crate::ErrorKind::Authorization)
                })?;
            let authenticated_session = HashSessionToken::from_cookie(&id);
            match HasValidSession::from_session(&authenticated_session, &ctx).await {
                // Invalid session: report the error over the socket as an
                // RPC response, then close gracefully (Ok, not Err).
                Err(e) => {
                    stream
                        .send(Message::Text(
                            serde_json::to_string(
                                &RpcResponse::<GenericRpcMethod<String>>::from_result(Err::<
                                    _,
                                    RpcError,
                                >(
                                    e.into()
                                )),
                            )
                            .with_kind(crate::ErrorKind::Serialization)?,
                        ))
                        .await
                        .with_kind(crate::ErrorKind::Network)?;
                    return Ok(());
                }
                Ok(has_validation) => break (has_validation, authenticated_session),
            }
        }
    };
    // Register so this socket is killed if the session is invalidated later.
    let kill = subscribe_to_session_kill(&ctx, token).await;
    send_dump(has_valid_session, &mut stream, dump).await?;

    deal_with_messages(has_valid_session, kill, sub, stream).await?;
    Ok(())
}
|
|
||||||
|
|
||||||
async fn subscribe_to_session_kill(
|
|
||||||
ctx: &RpcContext,
|
|
||||||
token: HashSessionToken,
|
|
||||||
) -> oneshot::Receiver<()> {
|
|
||||||
let (send, recv) = oneshot::channel();
|
|
||||||
let mut guard = ctx.open_authed_websockets.lock().await;
|
|
||||||
if !guard.contains_key(&token) {
|
|
||||||
guard.insert(token, vec![send]);
|
|
||||||
} else {
|
|
||||||
guard.get_mut(&token).unwrap().push(send);
|
|
||||||
}
|
|
||||||
recv
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Pumps the authenticated WebSocket until the session is killed, the client
/// closes, or a transport/database error occurs.
///
/// Concurrently: forwards database revisions from `sub` to the client as
/// JSON-RPC payloads, answers pings, honors close frames, reacts to session
/// termination via `kill`, and pings the client after 10s of inactivity.
#[instrument(skip(_has_valid_authentication, kill, sub, stream))]
async fn deal_with_messages(
    _has_valid_authentication: HasValidSession,
    mut kill: oneshot::Receiver<()>,
    mut sub: broadcast::Receiver<Arc<Revision>>,
    mut stream: WebSocketStream<Upgraded>,
) -> Result<(), Error> {
    loop {
        futures::select! {
            // Session revoked (e.g. logout): stop serving immediately.
            _ = (&mut kill).fuse() => {
                tracing::info!("Closing WebSocket: Reason: Session Terminated");
                return Ok(())
            }
            // New database revision: serialize and forward to the client.
            new_rev = sub.recv().fuse() => {
                let rev = new_rev.with_kind(crate::ErrorKind::Database)?;
                stream
                    .send(Message::Text(
                        serde_json::to_string(
                            &RpcResponse::<GenericRpcMethod<String>>::from_result(Ok::<_, RpcError>(
                                serde_json::to_value(&rev).with_kind(crate::ErrorKind::Serialization)?,
                            )),
                        )
                        .with_kind(crate::ErrorKind::Serialization)?,
                    ))
                    .await
                    .with_kind(crate::ErrorKind::Network)?;
            }
            // Inbound client traffic: answer pings, honor close, ignore the rest.
            message = stream.next().fuse() => {
                let message = message.transpose().with_kind(crate::ErrorKind::Network)?;
                match message {
                    Some(Message::Ping(a)) => {
                        stream
                            .send(Message::Pong(a))
                            .await
                            .with_kind(crate::ErrorKind::Network)?;
                    }
                    Some(Message::Close(frame)) => {
                        if let Some(reason) = frame.as_ref() {
                            tracing::info!("Closing WebSocket: Reason: {} {}", reason.code, reason.reason);
                        } else {
                            tracing::info!("Closing WebSocket: Reason: Unknown");
                        }
                        return Ok(())
                    }
                    None => {
                        tracing::info!("Closing WebSocket: Stream Finished");
                        return Ok(())
                    }
                    _ => (),
                }
            }
            // Keep-alive: ping the client after 10s with no other activity.
            _ = tokio::time::sleep(Duration::from_secs(10)).fuse() => {
                stream
                    .send(Message::Ping(Vec::new()))
                    .await
                    .with_kind(crate::ErrorKind::Network)?;
            }
        }
    }
}
|
|
||||||
|
|
||||||
async fn send_dump(
|
|
||||||
_has_valid_authentication: HasValidSession,
|
|
||||||
stream: &mut WebSocketStream<Upgraded>,
|
|
||||||
dump: Dump,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
stream
|
|
||||||
.send(Message::Text(
|
|
||||||
serde_json::to_string(&RpcResponse::<GenericRpcMethod<String>>::from_result(Ok::<
|
|
||||||
_,
|
|
||||||
RpcError,
|
|
||||||
>(
|
|
||||||
serde_json::to_value(&dump).with_kind(crate::ErrorKind::Serialization)?,
|
|
||||||
)))
|
|
||||||
.with_kind(crate::ErrorKind::Serialization)?,
|
|
||||||
))
|
|
||||||
.await
|
|
||||||
.with_kind(crate::ErrorKind::Network)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<Body>, Error> {
|
|
||||||
let (parts, body) = req.into_parts();
|
|
||||||
let req = Request::from_parts(parts, body);
|
|
||||||
let (res, ws_fut) = hyper_ws_listener::create_ws(req).with_kind(crate::ErrorKind::Network)?;
|
|
||||||
if let Some(ws_fut) = ws_fut {
|
|
||||||
tokio::task::spawn(async move {
|
|
||||||
match ws_handler(ctx, ws_fut).await {
|
|
||||||
Ok(()) => (),
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!("WebSocket Closed: {}", e);
|
|
||||||
tracing::debug!("{:?}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(res)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parent command for the `db` subcommands (`revisions`, `dump`, `put`).
#[command(subcommands(revisions, dump, put))]
pub fn db() -> Result<(), RpcError> {
    Ok(())
}
|
|
||||||
|
|
||||||
/// Response for the `revisions` command: either the incremental list of
/// revisions since the requested id, or a full dump when the cache cannot
/// satisfy the request. Untagged: the JSON shape alone distinguishes them.
#[derive(Deserialize, Serialize)]
#[serde(untagged)]
pub enum RevisionsRes {
    Revisions(Vec<Arc<Revision>>),
    Dump(Dump),
}
|
|
||||||
|
|
||||||
#[command(display(display_serializable))]
|
|
||||||
pub async fn revisions(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[arg] since: u64,
|
|
||||||
#[allow(unused_variables)]
|
|
||||||
#[arg(long = "format")]
|
|
||||||
format: Option<IoFormat>,
|
|
||||||
) -> Result<RevisionsRes, RpcError> {
|
|
||||||
let cache = ctx.revision_cache.read().await;
|
|
||||||
if cache
|
|
||||||
.front()
|
|
||||||
.map(|rev| rev.id <= since + 1)
|
|
||||||
.unwrap_or(false)
|
|
||||||
{
|
|
||||||
Ok(RevisionsRes::Revisions(
|
|
||||||
cache
|
|
||||||
.iter()
|
|
||||||
.skip_while(|rev| rev.id < since + 1)
|
|
||||||
.cloned()
|
|
||||||
.collect(),
|
|
||||||
))
|
|
||||||
} else {
|
|
||||||
drop(cache);
|
|
||||||
Ok(RevisionsRes::Dump(ctx.db.dump().await))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a complete snapshot of the database.
#[command(display(display_serializable))]
pub async fn dump(
    #[context] ctx: RpcContext,
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
) -> Result<Dump, RpcError> {
    Ok(ctx.db.dump().await)
}
|
|
||||||
|
|
||||||
/// Parent command for `db put` subcommands (currently only `ui`).
#[command(subcommands(ui))]
pub fn put() -> Result<(), RpcError> {
    Ok(())
}
|
|
||||||
|
|
||||||
#[command(display(display_serializable))]
|
|
||||||
#[instrument(skip(ctx))]
|
|
||||||
pub async fn ui(
|
|
||||||
#[context] ctx: RpcContext,
|
|
||||||
#[arg] pointer: JsonPointer,
|
|
||||||
#[arg] value: Value,
|
|
||||||
#[allow(unused_variables)]
|
|
||||||
#[arg(long = "format")]
|
|
||||||
format: Option<IoFormat>,
|
|
||||||
) -> Result<WithRevision<()>, Error> {
|
|
||||||
let ptr = "/ui"
|
|
||||||
.parse::<JsonPointer>()
|
|
||||||
.with_kind(crate::ErrorKind::Database)?
|
|
||||||
+ &pointer;
|
|
||||||
Ok(WithRevision {
|
|
||||||
response: (),
|
|
||||||
revision: ctx.db.put(&ptr, &value, None).await?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,371 +0,0 @@
|
|||||||
use std::collections::{BTreeMap, BTreeSet};
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use chrono::{DateTime, Utc};
|
|
||||||
use emver::VersionRange;
|
|
||||||
use isocountry::CountryCode;
|
|
||||||
use patch_db::json_ptr::JsonPointer;
|
|
||||||
use patch_db::{HasModel, Map, MapModel, OptionModel};
|
|
||||||
use reqwest::Url;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use serde_json::Value;
|
|
||||||
use torut::onion::TorSecretKeyV3;
|
|
||||||
|
|
||||||
use crate::config::spec::{PackagePointerSpec, SystemPointerSpec};
|
|
||||||
use crate::install::progress::InstallProgress;
|
|
||||||
use crate::net::interface::InterfaceId;
|
|
||||||
use crate::s9pk::manifest::{Manifest, ManifestModel, PackageId};
|
|
||||||
use crate::status::health_check::HealthCheckId;
|
|
||||||
use crate::status::Status;
|
|
||||||
use crate::util::Version;
|
|
||||||
use crate::version::{Current, VersionT};
|
|
||||||
|
|
||||||
/// Root schema of the patch-db database.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct Database {
    /// Server-wide metadata and status.
    #[model]
    pub server_info: ServerInfo,
    /// State of every known package, keyed by package id.
    #[model]
    pub package_data: AllPackageData,
    /// Packages discovered from a previous install that can be re-installed.
    #[model]
    pub recovered_packages: BTreeMap<PackageId, RecoveredPackageInfo>,
    /// Free-form state owned by the front-end UI.
    pub ui: Value,
}
|
|
||||||
impl Database {
    /// Builds the initial database contents for a freshly set-up server.
    ///
    /// `id` is the server's unique identifier, `hostname` seeds the `.local`
    /// LAN address, `tor_key` determines the onion address, and
    /// `password_hash` is the (already hashed) admin password.
    pub fn init(
        id: String,
        hostname: &str,
        tor_key: &TorSecretKeyV3,
        password_hash: String,
    ) -> Self {
        // TODO
        Database {
            server_info: ServerInfo {
                id,
                version: Current::new().semver().into(),
                last_backup: None,
                last_wifi_region: None,
                eos_version_compat: Current::new().compat().clone(),
                // Both addresses are built from fixed formats, so parsing
                // cannot fail for valid inputs — hence the unwraps.
                lan_address: format!("https://{}.local", hostname).parse().unwrap(),
                tor_address: format!("http://{}", tor_key.public().get_onion_address())
                    .parse()
                    .unwrap(),
                status_info: ServerStatus {
                    backup_progress: None,
                    updated: false,
                    update_progress: None,
                },
                wifi: WifiInfo {
                    ssids: Vec::new(),
                    connected: None,
                    selected: None,
                },
                unread_notification_count: 0,
                connection_addresses: ConnectionAddresses {
                    tor: Vec::new(),
                    clearnet: Vec::new(),
                },
                password_hash,
            },
            package_data: AllPackageData::default(),
            recovered_packages: BTreeMap::new(),
            ui: Value::Object(Default::default()),
        }
    }
}
|
|
||||||
impl DatabaseModel {
    /// Model rooted at the top of the database (the empty JSON pointer).
    pub fn new() -> Self {
        Self::from(JsonPointer::default())
    }
}
|
|
||||||
|
|
||||||
/// Server-wide metadata, network addresses, and status.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct ServerInfo {
    pub id: String,
    pub version: Version,
    pub last_backup: Option<DateTime<Utc>>,
    /// Used in the wifi to determine the region to set the system to
    pub last_wifi_region: Option<CountryCode>,
    /// Populated from `Current::new().compat()` at init.
    pub eos_version_compat: VersionRange,
    pub lan_address: Url,
    pub tor_address: Url,
    #[model]
    #[serde(default)]
    pub status_info: ServerStatus,
    pub wifi: WifiInfo,
    pub unread_notification_count: u64,
    pub connection_addresses: ConnectionAddresses,
    /// Hashed admin password — never the plaintext.
    pub password_hash: String,
}
|
|
||||||
|
|
||||||
/// Per-package progress flag for an in-flight backup.
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
pub struct BackupProgress {
    pub complete: bool,
}
|
|
||||||
|
|
||||||
/// Transient server-level status flags.
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct ServerStatus {
    /// Per-package backup progress; `None` when no backup is in flight.
    #[model]
    pub backup_progress: Option<BTreeMap<PackageId, BackupProgress>>,
    pub updated: bool,
    /// Update download progress; `None` when no update is in flight.
    #[model]
    pub update_progress: Option<UpdateProgress>,
}
|
|
||||||
|
|
||||||
/// Progress of an OS update download.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct UpdateProgress {
    /// Total download size, if known.
    pub size: Option<u64>,
    pub downloaded: u64,
}
|
|
||||||
|
|
||||||
/// WiFi state: known SSIDs plus the currently selected/connected network.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct WifiInfo {
    pub ssids: Vec<String>,
    pub selected: Option<String>,
    pub connected: Option<String>,
}
|
|
||||||
|
|
||||||
/// Human-readable hardware description strings.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct ServerSpecs {
    pub cpu: String,
    pub disk: String,
    pub memory: String,
}
|
|
||||||
|
|
||||||
/// Addresses through which the server is reachable.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct ConnectionAddresses {
    pub tor: Vec<String>,
    pub clearnet: Vec<String>,
}
|
|
||||||
|
|
||||||
/// Map of package id -> entry for every package the system knows about.
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct AllPackageData(pub BTreeMap<PackageId, PackageDataEntry>);
impl Map for AllPackageData {
    type Key = PackageId;
    type Value = PackageDataEntry;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
// Modeled as a map so patch-db can address individual packages by key.
impl HasModel for AllPackageData {
    type Model = MapModel<Self>;
}
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, Serialize)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
pub struct StaticFiles {
|
|
||||||
license: String,
|
|
||||||
instructions: String,
|
|
||||||
icon: String,
|
|
||||||
}
|
|
||||||
impl StaticFiles {
|
|
||||||
pub fn local(id: &PackageId, version: &Version, icon_type: &str) -> Self {
|
|
||||||
StaticFiles {
|
|
||||||
license: format!("/public/package-data/{}/{}/LICENSE.md", id, version),
|
|
||||||
instructions: format!("/public/package-data/{}/{}/INSTRUCTIONS.md", id, version),
|
|
||||||
icon: format!("/public/package-data/{}/{}/icon.{}", id, version, icon_type),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Lifecycle state of a package; the serde `state` tag selects the variant.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(tag = "state")]
#[serde(rename_all = "kebab-case")]
pub enum PackageDataEntry {
    /// First-time install in progress.
    #[serde(rename_all = "kebab-case")]
    Installing {
        static_files: StaticFiles,
        manifest: Manifest,
        install_progress: Arc<InstallProgress>,
    },
    /// Update in progress; `installed` holds the pre-update entry.
    #[serde(rename_all = "kebab-case")]
    Updating {
        static_files: StaticFiles,
        manifest: Manifest,
        installed: InstalledPackageDataEntry,
        install_progress: Arc<InstallProgress>,
    },
    /// Restore from backup in progress.
    #[serde(rename_all = "kebab-case")]
    Restoring {
        static_files: StaticFiles,
        manifest: Manifest,
        install_progress: Arc<InstallProgress>,
    },
    /// Uninstall in progress; `removing` holds the entry being torn down.
    #[serde(rename_all = "kebab-case")]
    Removing {
        static_files: StaticFiles,
        manifest: Manifest,
        removing: InstalledPackageDataEntry,
    },
    /// Fully installed and at rest.
    #[serde(rename_all = "kebab-case")]
    Installed {
        static_files: StaticFiles,
        manifest: Manifest,
        installed: InstalledPackageDataEntry,
    },
}
|
|
||||||
impl PackageDataEntry {
|
|
||||||
pub fn installed(&self) -> Option<&InstalledPackageDataEntry> {
|
|
||||||
match self {
|
|
||||||
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
|
|
||||||
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn installed_mut(&mut self) -> Option<&mut InstalledPackageDataEntry> {
|
|
||||||
match self {
|
|
||||||
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
|
|
||||||
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn into_installed(self) -> Option<InstalledPackageDataEntry> {
|
|
||||||
match self {
|
|
||||||
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
|
|
||||||
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub fn manifest(self) -> Manifest {
|
|
||||||
match self {
|
|
||||||
PackageDataEntry::Installing { manifest, .. } => manifest,
|
|
||||||
PackageDataEntry::Updating { manifest, .. } => manifest,
|
|
||||||
PackageDataEntry::Restoring { manifest, .. } => manifest,
|
|
||||||
PackageDataEntry::Removing { manifest, .. } => manifest,
|
|
||||||
PackageDataEntry::Installed { manifest, .. } => manifest,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl PackageDataEntryModel {
    /// Model of the `installed` field (absent in some states).
    pub fn installed(self) -> OptionModel<InstalledPackageDataEntry> {
        self.0.child("installed").into()
    }
    /// Model of the `removing` field (absent in some states).
    pub fn removing(self) -> OptionModel<InstalledPackageDataEntry> {
        self.0.child("removing").into()
    }
    /// Model of the `install-progress` field (kebab-case key per serde rename).
    pub fn install_progress(self) -> OptionModel<InstallProgress> {
        self.0.child("install-progress").into()
    }
    /// Model of the `manifest` field (present in every state).
    pub fn manifest(self) -> ManifestModel {
        self.0.child("manifest").into()
    }
}
|
|
||||||
|
|
||||||
/// Persistent state for a fully installed package.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct InstalledPackageDataEntry {
    #[model]
    pub status: Status,
    pub marketplace_url: Option<Url>,
    /// Developer's ed25519 public key; (de)serialized via the helper module.
    #[serde(default)]
    #[serde(with = "crate::util::serde::ed25519_pubkey")]
    pub developer_key: ed25519_dalek::PublicKey,
    #[model]
    pub manifest: Manifest,
    pub last_backup: Option<DateTime<Utc>>,
    #[model]
    pub system_pointers: Vec<SystemPointerSpec>,
    /// Static info about each dependency, keyed by package id.
    #[model]
    pub dependency_info: BTreeMap<PackageId, StaticDependencyInfo>,
    /// Packages that currently depend on this one.
    #[model]
    pub current_dependents: CurrentDependents,
    /// Packages this one currently depends on.
    #[model]
    pub current_dependencies: CurrentDependencies,
    #[model]
    pub interface_addresses: InterfaceAddressMap,
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
|
||||||
pub struct CurrentDependents(pub BTreeMap<PackageId, CurrentDependencyInfo>);
|
|
||||||
impl CurrentDependents {
|
|
||||||
pub fn map(
|
|
||||||
mut self,
|
|
||||||
transform: impl Fn(
|
|
||||||
BTreeMap<PackageId, CurrentDependencyInfo>,
|
|
||||||
) -> BTreeMap<PackageId, CurrentDependencyInfo>,
|
|
||||||
) -> Self {
|
|
||||||
self.0 = transform(self.0);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Map for CurrentDependents {
|
|
||||||
type Key = PackageId;
|
|
||||||
type Value = CurrentDependencyInfo;
|
|
||||||
fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
|
|
||||||
self.0.get(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl HasModel for CurrentDependents {
|
|
||||||
type Model = MapModel<Self>;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
|
|
||||||
pub struct CurrentDependencies(pub BTreeMap<PackageId, CurrentDependencyInfo>);
|
|
||||||
impl CurrentDependencies {
|
|
||||||
pub fn map(
|
|
||||||
mut self,
|
|
||||||
transform: impl Fn(
|
|
||||||
BTreeMap<PackageId, CurrentDependencyInfo>,
|
|
||||||
) -> BTreeMap<PackageId, CurrentDependencyInfo>,
|
|
||||||
) -> Self {
|
|
||||||
self.0 = transform(self.0);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Map for CurrentDependencies {
|
|
||||||
type Key = PackageId;
|
|
||||||
type Value = CurrentDependencyInfo;
|
|
||||||
fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
|
|
||||||
self.0.get(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl HasModel for CurrentDependencies {
|
|
||||||
type Model = MapModel<Self>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Dependency metadata that does not change at runtime.
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct StaticDependencyInfo {
    /// Manifest of the dependency, when known.
    pub manifest: Option<Manifest>,
    pub icon: String,
}
|
|
||||||
|
|
||||||
/// Describes how one package currently depends on another.
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct CurrentDependencyInfo {
    /// Config pointer specs into the dependency.
    pub pointers: Vec<PackagePointerSpec>,
    /// Health checks of the dependency that this relationship relies on.
    pub health_checks: BTreeSet<HealthCheckId>,
}
|
|
||||||
|
|
||||||
/// Map of interface id -> addresses assigned to that interface.
#[derive(Debug, Deserialize, Serialize)]
pub struct InterfaceAddressMap(pub BTreeMap<InterfaceId, InterfaceAddresses>);
impl Map for InterfaceAddressMap {
    type Key = InterfaceId;
    type Value = InterfaceAddresses;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
// Modeled as a map so patch-db can address entries by interface id.
impl HasModel for InterfaceAddressMap {
    type Model = MapModel<Self>;
}
|
|
||||||
|
|
||||||
/// Addresses at which a single service interface is reachable.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct InterfaceAddresses {
    #[model]
    pub tor_address: Option<String>,
    #[model]
    pub lan_address: Option<String>,
}
|
|
||||||
|
|
||||||
/// Display info for a package recovered from a previous install.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct RecoveredPackageInfo {
    pub title: String,
    pub icon: String,
    pub version: Version,
}
|
|
||||||
@@ -1,75 +0,0 @@
|
|||||||
use patch_db::{DbHandle, LockReceipt, LockTargetId, LockType, Verifier};
|
|
||||||
|
|
||||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// Lock receipts needed to read the full package-data map.
pub struct PackageReceipts {
    package_data: LockReceipt<super::model::AllPackageData, ()>,
}

impl PackageReceipts {
    /// Acquires the required locks on `db` and verifies the receipts.
    pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the lock targets in `locks` and returns a closure that
    /// verifies them once the locks have been granted.
    pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
        let package_data = crate::db::DatabaseModel::new()
            .package_data()
            .make_locker(LockType::Read)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                package_data: package_data.verify(&skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// Lists the ids of all known packages (locks come from `PackageReceipts`).
pub async fn get_packages<Db: DbHandle>(
    db: &mut Db,
    receipts: &PackageReceipts,
) -> Result<Vec<PackageId>, Error> {
    let packages = receipts.package_data.get(db).await?;
    Ok(packages.0.keys().cloned().collect())
}
|
|
||||||
|
|
||||||
/// Lock receipt for reading a package manifest, keyed by package id string.
pub struct ManifestReceipts {
    manifest: LockReceipt<Manifest, String>,
}

impl ManifestReceipts {
    /// Acquires the required locks on `db` and verifies the receipt.
    pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks, id);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    /// Registers the lock targets and returns a closure that verifies them
    /// once granted.
    ///
    /// NOTE(review): `_id` is unused — the lock is taken via `.star()` over
    /// every package's manifest rather than scoped to the given id.
    pub fn setup(
        locks: &mut Vec<LockTargetId>,
        _id: &PackageId,
    ) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
        let manifest = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .manifest()
            .make_locker(LockType::Read)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                manifest: manifest.verify(&skeleton_key)?,
            })
        }
    }
}
|
|
||||||
|
|
||||||
/// Fetches the manifest for `pkg`, or `None` if the package is unknown.
pub async fn get_manifest<Db: DbHandle>(
    db: &mut Db,
    pkg: &PackageId,
    receipts: &ManifestReceipts,
) -> Result<Option<Manifest>, Error> {
    Ok(receipts.manifest.get(db, pkg).await?)
}
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use patch_db::Revision;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// Wraps an RPC response together with the database revision produced by the
/// operation, so revision-tracking clients can stay in sync.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct WithRevision<T> {
    pub response: T,
    /// Revision produced by the operation, if any.
    pub revision: Option<Arc<Revision>>,
}
|
|
||||||
@@ -1,45 +0,0 @@
|
|||||||
use std::fs::File;
|
|
||||||
use std::io::Write;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use ed25519::pkcs8::EncodePrivateKey;
|
|
||||||
use ed25519_dalek::Keypair;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::context::SdkContext;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Generates a developer signing key at `ctx.developer_key_path` if one does
/// not already exist (creating parent directories as needed), writing it as
/// PKCS#8 PEM and syncing the file to disk.
#[command(cli_only, blocking, display(display_none))]
#[instrument(skip(ctx))]
pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
    if !ctx.developer_key_path.exists() {
        let parent = ctx.developer_key_path.parent().unwrap_or(Path::new("/"));
        if !parent.exists() {
            std::fs::create_dir_all(parent)
                .with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
        }
        tracing::info!("Generating new developer key...");
        let keypair = Keypair::generate(&mut rand::thread_rng());
        tracing::info!("Writing key to {}", ctx.developer_key_path.display());
        // Repackage the dalek keypair into a PKCS#8 container (secret + public).
        let keypair_bytes = ed25519::KeypairBytes {
            secret_key: keypair.secret.to_bytes(),
            public_key: Some(keypair.public.to_bytes()),
        };
        let mut dev_key_file = File::create(&ctx.developer_key_path)?;
        dev_key_file.write_all(
            keypair_bytes
                .to_pkcs8_pem(base64ct::LineEnding::default())
                .with_kind(crate::ErrorKind::Pem)?
                .as_bytes(),
        )?;
        // Ensure the key is durably on disk before reporting success.
        dev_key_file.sync_all()?;
    }
    Ok(())
}
|
|
||||||
|
|
||||||
/// Parent command for verification subcommands (s9pk and config spec).
#[command(subcommands(crate::s9pk::verify, crate::config::verify_spec))]
pub fn verify() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
@@ -1,72 +0,0 @@
|
|||||||
use std::path::Path;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
|
|
||||||
use crate::context::DiagnosticContext;
|
|
||||||
use crate::disk::repair;
|
|
||||||
use crate::logs::{display_logs, fetch_logs, LogResponse, LogSource};
|
|
||||||
use crate::shutdown::Shutdown;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// Systemd unit whose journal the diagnostic `logs` command reads.
/// (`&str` — the `'static` lifetime on consts is implied; clippy::redundant_static_lifetimes.)
pub const SYSTEMD_UNIT: &str = "embassy-init";
|
|
||||||
|
|
||||||
/// Parent command for the diagnostic-mode subcommands.
#[command(subcommands(error, logs, exit, restart, forget_disk, disk))]
pub fn diagnostic() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
/// Returns the error that put the system into diagnostic mode.
#[command]
pub fn error(#[context] ctx: DiagnosticContext) -> Result<Arc<RpcError>, Error> {
    Ok(ctx.error.clone())
}
|
|
||||||
|
|
||||||
/// Fetches journal logs for the init unit, paginated via `cursor`/`limit`;
/// `before_flag` (default false) pages backwards from the cursor.
#[command(display(display_logs))]
pub async fn logs(
    #[arg] limit: Option<usize>,
    #[arg] cursor: Option<String>,
    #[arg] before_flag: Option<bool>,
) -> Result<LogResponse, Error> {
    Ok(fetch_logs(
        LogSource::Service(SYSTEMD_UNIT),
        limit,
        cursor,
        before_flag.unwrap_or(false),
    )
    .await?)
}
|
|
||||||
|
|
||||||
/// Shuts down diagnostic mode without requesting a restart.
#[command(display(display_none))]
pub fn exit(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
    // The shutdown receiver lives for the life of the process; a drop is a bug.
    ctx.shutdown.send(None).expect("receiver dropped");
    Ok(())
}
|
|
||||||
|
|
||||||
/// Shuts down diagnostic mode and requests a restart of the service.
#[command(display(display_none))]
pub fn restart(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
    ctx.shutdown
        .send(Some(Shutdown {
            datadir: ctx.datadir.clone(),
            disk_guid: ctx.disk_guid.clone(),
            db_handle: None,
            restart: true,
        }))
        // The shutdown receiver lives for the life of the process; a drop is a bug.
        .expect("receiver dropped");
    Ok(())
}
|
|
||||||
|
|
||||||
/// Parent command for diagnostic disk operations.
#[command(subcommands(forget_disk, repair))]
pub fn disk() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
#[command(rename = "forget", display(display_none))]
|
|
||||||
pub async fn forget_disk() -> Result<(), Error> {
|
|
||||||
let disk_guid = Path::new("/embassy-os/disk.guid");
|
|
||||||
if tokio::fs::metadata(disk_guid).await.is_ok() {
|
|
||||||
tokio::fs::remove_file(disk_guid).await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
@@ -1,120 +0,0 @@
|
|||||||
use std::ffi::OsStr;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use futures::future::BoxFuture;
|
|
||||||
use futures::FutureExt;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// Flag indicating a filesystem repair requires the system to reboot.
#[derive(Debug, Clone, Copy)]
#[must_use]
pub struct RequiresReboot(pub bool);
impl std::ops::BitOrAssign for RequiresReboot {
    /// `a |= b` — reboot is required if either operand requires it.
    fn bitor_assign(&mut self, rhs: Self) {
        *self = Self(self.0 | rhs.0);
    }
}
|
|
||||||
|
|
||||||
/// How aggressively `e2fsck` should repair a filesystem.
#[derive(Debug, Clone, Copy)]
pub enum RepairStrategy {
    /// `e2fsck -p`: safe, automatic "preen" fixes only.
    Preen,
    /// `e2fsck -y -z`: answer yes to all repairs, recording an undo file.
    Aggressive,
}
impl RepairStrategy {
    /// Runs `e2fsck` on `logicalname` according to the chosen strategy.
    pub async fn e2fsck(
        &self,
        logicalname: impl AsRef<Path> + std::fmt::Debug,
    ) -> Result<RequiresReboot, Error> {
        match self {
            RepairStrategy::Preen => e2fsck_preen(logicalname).await,
            RepairStrategy::Aggressive => e2fsck_aggressive(logicalname).await,
        }
    }
}
|
|
||||||
|
|
||||||
/// Runs `e2fsck -p` ("preen"): performs only safe, automatic repairs.
#[instrument]
pub async fn e2fsck_preen(
    logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
    e2fsck_runner(Command::new("e2fsck").arg("-p"), logicalname).await
}
|
|
||||||
|
|
||||||
/// If `path` exists, renames it out of the way to `<stem>.<ext>.bak`, first
/// recursively backing up any existing `.bak` so no prior file is clobbered.
///
/// Boxed because async recursion needs an indirected future type.
fn backup_existing_undo_file<'a>(path: &'a Path) -> BoxFuture<'a, Result<(), Error>> {
    async move {
        if tokio::fs::metadata(path).await.is_ok() {
            // Append ".bak" to the current extension (e.g. "e2undo" -> "e2undo.bak").
            let bak = path.with_extension(format!(
                "{}.bak",
                path.extension()
                    .and_then(|s| s.to_str())
                    .unwrap_or_default()
            ));
            // Back up the backup first, then move the original into its place.
            backup_existing_undo_file(&bak).await?;
            tokio::fs::rename(path, &bak).await?;
        }
        Ok(())
    }
    .boxed()
}
|
|
||||||
|
|
||||||
/// Runs `e2fsck -y` (answer yes to all repairs), recording an e2undo file
/// under `/embassy-os/` so the repair can be reverted; any pre-existing undo
/// file is rotated to `.bak` first.
#[instrument]
pub async fn e2fsck_aggressive(
    logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
    // Undo file path: /embassy-os/<device file name>.e2undo
    let undo_path = Path::new("/embassy-os")
        .join(
            logicalname
                .as_ref()
                .file_name()
                .unwrap_or(OsStr::new("unknown")),
        )
        .with_extension("e2undo");
    backup_existing_undo_file(&undo_path).await?;
    e2fsck_runner(
        Command::new("e2fsck").arg("-y").arg("-z").arg(undo_path),
        logicalname,
    )
    .await
}
|
|
||||||
|
|
||||||
/// Invokes the prepared `e2fsck` command on `logicalname` and interprets the
/// exit-code bitmask (see e2fsck(8)).
async fn e2fsck_runner(
    e2fsck_cmd: &mut Command,
    logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
    let e2fsck_out = e2fsck_cmd.arg(logicalname.as_ref()).output().await?;
    let e2fsck_stderr = String::from_utf8(e2fsck_out.stderr)?;
    // No exit code means the process was killed by a signal.
    let code = e2fsck_out.status.code().ok_or_else(|| {
        Error::new(
            eyre!("e2fsck: process terminated by signal"),
            crate::ErrorKind::DiskManagement,
        )
    })?;
    // Bit 4: filesystem errors were left uncorrected.
    if code & 4 != 0 {
        tracing::error!(
            "some filesystem errors NOT corrected on {}:\n{}",
            logicalname.as_ref().display(),
            e2fsck_stderr,
        );
    // Bit 1: errors were found and corrected.
    } else if code & 1 != 0 {
        tracing::warn!(
            "filesystem errors corrected on {}:\n{}",
            logicalname.as_ref().display(),
            e2fsck_stderr,
        );
    }
    // Codes >= 8 indicate an operational failure of e2fsck itself.
    if code < 8 {
        // Bit 2: the system should be rebooted.
        if code & 2 != 0 {
            tracing::warn!("reboot required");
            Ok(RequiresReboot(true))
        } else {
            Ok(RequiresReboot(false))
        }
    } else {
        Err(Error::new(
            eyre!("e2fsck: {}", e2fsck_stderr),
            crate::ErrorKind::DiskManagement,
        ))
    }
}
|
|
||||||
@@ -1,298 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::fsck::{RepairStrategy, RequiresReboot};
|
|
||||||
use super::util::pvscan;
|
|
||||||
use crate::disk::mount::filesystem::block_dev::mount;
|
|
||||||
use crate::disk::mount::filesystem::ReadWrite;
|
|
||||||
use crate::disk::mount::util::unmount;
|
|
||||||
use crate::util::Invoke;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
pub const PASSWORD_PATH: &'static str = "/etc/embassy/password";
|
|
||||||
pub const DEFAULT_PASSWORD: &'static str = "password";
|
|
||||||
pub const MAIN_FS_SIZE: FsSize = FsSize::Gigabytes(8);
|
|
||||||
|
|
||||||
/// Creates the storage pool on `disks`, creates all filesystems on it, and
/// exports the volume group; returns the new pool guid.
///
/// `pvscan` is the pre-scan of existing physical volumes, used to clean up
/// prior LVM metadata on the target disks. DESTRUCTIVE to the given disks.
#[instrument(skip(disks, datadir, password))]
pub async fn create<I, P>(
    disks: &I,
    pvscan: &BTreeMap<PathBuf, Option<String>>,
    datadir: impl AsRef<Path>,
    password: &str,
) -> Result<String, Error>
where
    for<'a> &'a I: IntoIterator<Item = &'a P>,
    P: AsRef<Path>,
{
    let guid = create_pool(disks, pvscan).await?;
    create_all_fs(&guid, &datadir, password).await?;
    export(&guid, datadir).await?;
    Ok(guid)
}
|
|
||||||
|
|
||||||
/// Destroys any existing LVM state on `disks` and creates a fresh volume
/// group spanning all of them, returning its randomly generated name.
///
/// WARNING: destructive — partition tables and physical-volume metadata on
/// every listed disk are wiped.
#[instrument(skip(disks))]
pub async fn create_pool<I, P>(
    disks: &I,
    pvscan: &BTreeMap<PathBuf, Option<String>>,
) -> Result<String, Error>
where
    for<'a> &'a I: IntoIterator<Item = &'a P>,
    P: AsRef<Path>,
{
    // Tear down all device-mapper nodes first so nothing holds the disks open.
    Command::new("dmsetup")
        .arg("remove_all") // TODO: find a higher finesse way to do this for portability reasons
        .invoke(crate::ErrorKind::DiskManagement)
        .await?;
    for disk in disks {
        // Only disks that pvscan reported as existing physical volumes need pvremove.
        if pvscan.contains_key(disk.as_ref()) {
            Command::new("pvremove")
                .arg("-yff")
                .arg(disk.as_ref())
                .invoke(crate::ErrorKind::DiskManagement)
                .await?;
        }
        tokio::fs::write(disk.as_ref(), &[0; 2048]).await?; // wipe partition table
        Command::new("pvcreate")
            .arg("-yff")
            .arg(disk.as_ref())
            .invoke(crate::ErrorKind::DiskManagement)
            .await?;
    }
    // Collision-resistant volume-group name: 32 random bytes, base32-encoded.
    let guid = format!(
        "EMBASSY_{}",
        base32::encode(
            base32::Alphabet::RFC4648 { padding: false },
            &rand::random::<[u8; 32]>(),
        )
    );
    let mut cmd = Command::new("vgcreate");
    cmd.arg("-y").arg(&guid);
    for disk in disks {
        cmd.arg(disk.as_ref());
    }
    cmd.invoke(crate::ErrorKind::DiskManagement).await?;
    Ok(guid)
}
|
|
||||||
|
|
||||||
/// How much space to allocate to a logical volume at creation time.
#[derive(Debug, Clone, Copy)]
pub enum FsSize {
    /// Fixed size in gigabytes (`lvcreate -L <n>G`).
    Gigabytes(usize),
    /// Percentage of the volume group's remaining free space (`lvcreate -l <n>%FREE`).
    FreePercentage(usize),
}
|
|
||||||
|
|
||||||
/// Creates one encrypted ext4 filesystem named `name` inside volume group
/// `guid` and leaves it mounted read-write at `<datadir>/<name>`.
///
/// Sequence: lvcreate → cryptsetup luksFormat → cryptsetup luksOpen →
/// mkfs.ext4 → mount. The passphrase is staged on disk at `PASSWORD_PATH`
/// so cryptsetup can read it via `--key-file`, and removed afterwards.
///
/// NOTE(review): if an intermediate command fails, the early `?` return
/// skips the cleanup and the passphrase file is left at `PASSWORD_PATH` —
/// confirm whether a caller is responsible for removing it on error.
#[instrument(skip(datadir, password))]
pub async fn create_fs<P: AsRef<Path>>(
    guid: &str,
    datadir: P,
    name: &str,
    size: FsSize,
    password: &str,
) -> Result<(), Error> {
    tokio::fs::write(PASSWORD_PATH, password)
        .await
        .with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
    let mut cmd = Command::new("lvcreate");
    match size {
        FsSize::Gigabytes(a) => cmd.arg("-L").arg(format!("{}G", a)),
        FsSize::FreePercentage(a) => cmd.arg("-l").arg(format!("{}%FREE", a)),
    };
    cmd.arg("-y")
        .arg("-n")
        .arg(name)
        .arg(guid)
        .invoke(crate::ErrorKind::DiskManagement)
        .await?;
    // Format the new LV as a LUKS container keyed by the staged passphrase.
    Command::new("cryptsetup")
        .arg("-q")
        .arg("luksFormat")
        .arg(format!("--key-file={}", PASSWORD_PATH))
        .arg(format!("--keyfile-size={}", password.len()))
        .arg(Path::new("/dev").join(guid).join(name))
        .invoke(crate::ErrorKind::DiskManagement)
        .await?;
    // Open it as /dev/mapper/<guid>_<name>.
    Command::new("cryptsetup")
        .arg("-q")
        .arg("luksOpen")
        .arg(format!("--key-file={}", PASSWORD_PATH))
        .arg(format!("--keyfile-size={}", password.len()))
        .arg(Path::new("/dev").join(guid).join(name))
        .arg(format!("{}_{}", guid, name))
        .invoke(crate::ErrorKind::DiskManagement)
        .await?;
    Command::new("mkfs.ext4")
        .arg(Path::new("/dev/mapper").join(format!("{}_{}", guid, name)))
        .invoke(crate::ErrorKind::DiskManagement)
        .await?;
    mount(
        Path::new("/dev/mapper").join(format!("{}_{}", guid, name)),
        datadir.as_ref().join(name),
        ReadWrite,
    )
    .await?;
    // Don't leave the passphrase sitting on disk.
    tokio::fs::remove_file(PASSWORD_PATH)
        .await
        .with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
    Ok(())
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir, password))]
|
|
||||||
pub async fn create_all_fs<P: AsRef<Path>>(
|
|
||||||
guid: &str,
|
|
||||||
datadir: P,
|
|
||||||
password: &str,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
create_fs(guid, &datadir, "main", MAIN_FS_SIZE, password).await?;
|
|
||||||
create_fs(
|
|
||||||
guid,
|
|
||||||
&datadir,
|
|
||||||
"package-data",
|
|
||||||
FsSize::FreePercentage(100),
|
|
||||||
password,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir))]
|
|
||||||
pub async fn unmount_fs<P: AsRef<Path>>(guid: &str, datadir: P, name: &str) -> Result<(), Error> {
|
|
||||||
unmount(datadir.as_ref().join(name)).await?;
|
|
||||||
Command::new("cryptsetup")
|
|
||||||
.arg("-q")
|
|
||||||
.arg("luksClose")
|
|
||||||
.arg(format!("{}_{}", guid, name))
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir))]
|
|
||||||
pub async fn unmount_all_fs<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> {
|
|
||||||
unmount_fs(guid, &datadir, "main").await?;
|
|
||||||
unmount_fs(guid, &datadir, "package-data").await?;
|
|
||||||
Command::new("dmsetup")
|
|
||||||
.arg("remove_all") // TODO: find a higher finesse way to do this for portability reasons
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir))]
|
|
||||||
pub async fn export<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> {
|
|
||||||
unmount_all_fs(guid, datadir).await?;
|
|
||||||
Command::new("vgchange")
|
|
||||||
.arg("-an")
|
|
||||||
.arg(guid)
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
Command::new("vgexport")
|
|
||||||
.arg(guid)
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir, password))]
|
|
||||||
pub async fn import<P: AsRef<Path>>(
|
|
||||||
guid: &str,
|
|
||||||
datadir: P,
|
|
||||||
repair: RepairStrategy,
|
|
||||||
password: &str,
|
|
||||||
) -> Result<RequiresReboot, Error> {
|
|
||||||
let scan = pvscan().await?;
|
|
||||||
if scan
|
|
||||||
.values()
|
|
||||||
.filter_map(|a| a.as_ref())
|
|
||||||
.filter(|a| a.starts_with("EMBASSY_"))
|
|
||||||
.next()
|
|
||||||
.is_none()
|
|
||||||
{
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Embassy disk not found."),
|
|
||||||
crate::ErrorKind::DiskNotAvailable,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
if !scan
|
|
||||||
.values()
|
|
||||||
.filter_map(|a| a.as_ref())
|
|
||||||
.any(|id| id == guid)
|
|
||||||
{
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("An Embassy disk was found, but it is not the correct disk for this device."),
|
|
||||||
crate::ErrorKind::IncorrectDisk,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
Command::new("dmsetup")
|
|
||||||
.arg("remove_all") // TODO: find a higher finesse way to do this for portability reasons
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
match Command::new("vgimport")
|
|
||||||
.arg(guid)
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(_) => Ok(()),
|
|
||||||
Err(e)
|
|
||||||
if format!("{}", e.source)
|
|
||||||
.lines()
|
|
||||||
.any(|l| l.trim() == format!("Volume group \"{}\" is not exported", guid)) =>
|
|
||||||
{
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
Err(e) => Err(e),
|
|
||||||
}?;
|
|
||||||
Command::new("vgchange")
|
|
||||||
.arg("-ay")
|
|
||||||
.arg(guid)
|
|
||||||
.invoke(crate::ErrorKind::DiskManagement)
|
|
||||||
.await?;
|
|
||||||
mount_all_fs(guid, datadir, repair, password).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Opens LUKS volume `<guid>/<name>`, runs e2fsck per `repair`, and mounts
/// it read-write at `<datadir>/<name>`.
///
/// Returns whether the fsck pass determined a reboot is required. The
/// passphrase is staged at `PASSWORD_PATH` for `cryptsetup --key-file`
/// and removed after a successful mount.
/// NOTE(review): an intermediate failure returns early via `?` and leaves
/// the passphrase file in place — confirm whether that is acceptable.
#[instrument(skip(datadir, password))]
pub async fn mount_fs<P: AsRef<Path>>(
    guid: &str,
    datadir: P,
    name: &str,
    repair: RepairStrategy,
    password: &str,
) -> Result<RequiresReboot, Error> {
    tokio::fs::write(PASSWORD_PATH, password)
        .await
        .with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
    Command::new("cryptsetup")
        .arg("-q")
        .arg("luksOpen")
        .arg(format!("--key-file={}", PASSWORD_PATH))
        .arg(format!("--keyfile-size={}", password.len()))
        .arg(Path::new("/dev").join(guid).join(name))
        .arg(format!("{}_{}", guid, name))
        .invoke(crate::ErrorKind::DiskManagement)
        .await?;
    let mapper_path = Path::new("/dev/mapper").join(format!("{}_{}", guid, name));
    // Check (and possibly repair) the filesystem before mounting it.
    let reboot = repair.e2fsck(&mapper_path).await?;
    mount(&mapper_path, datadir.as_ref().join(name), ReadWrite).await?;

    tokio::fs::remove_file(PASSWORD_PATH)
        .await
        .with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;

    Ok(reboot)
}
|
|
||||||
|
|
||||||
#[instrument(skip(datadir, password))]
|
|
||||||
pub async fn mount_all_fs<P: AsRef<Path>>(
|
|
||||||
guid: &str,
|
|
||||||
datadir: P,
|
|
||||||
repair: RepairStrategy,
|
|
||||||
password: &str,
|
|
||||||
) -> Result<RequiresReboot, Error> {
|
|
||||||
let mut reboot = RequiresReboot(false);
|
|
||||||
reboot |= mount_fs(guid, &datadir, "main", repair, password).await?;
|
|
||||||
reboot |= mount_fs(guid, &datadir, "package-data", repair, password).await?;
|
|
||||||
Ok(reboot)
|
|
||||||
}
|
|
||||||
@@ -1,90 +0,0 @@
|
|||||||
use clap::ArgMatches;
|
|
||||||
use rpc_toolkit::command;
|
|
||||||
|
|
||||||
use self::util::DiskListResponse;
|
|
||||||
use crate::util::display_none;
|
|
||||||
use crate::util::serde::{display_serializable, IoFormat};
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
pub mod fsck;
|
|
||||||
pub mod main;
|
|
||||||
pub mod mount;
|
|
||||||
pub mod quirks;
|
|
||||||
pub mod util;
|
|
||||||
|
|
||||||
pub const BOOT_RW_PATH: &str = "/media/boot-rw";
|
|
||||||
pub const REPAIR_DISK_PATH: &str = "/embassy-os/repair-disk";
|
|
||||||
|
|
||||||
/// Parent CLI command for the `disk` subcommands (`list`, `repair`).
#[command(subcommands(list, repair))]
pub fn disk() -> Result<(), Error> {
    Ok(())
}
|
|
||||||
|
|
||||||
fn display_disk_info(info: DiskListResponse, matches: &ArgMatches<'_>) {
|
|
||||||
use prettytable::*;
|
|
||||||
|
|
||||||
if matches.is_present("format") {
|
|
||||||
return display_serializable(info, matches);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut table = Table::new();
|
|
||||||
table.add_row(row![bc =>
|
|
||||||
"LOGICALNAME",
|
|
||||||
"LABEL",
|
|
||||||
"CAPACITY",
|
|
||||||
"USED",
|
|
||||||
"EMBASSY OS VERSION"
|
|
||||||
]);
|
|
||||||
for disk in info.disks {
|
|
||||||
let row = row![
|
|
||||||
disk.logicalname.display(),
|
|
||||||
"N/A",
|
|
||||||
&format!("{:.2} GiB", disk.capacity as f64 / 1024.0 / 1024.0 / 1024.0),
|
|
||||||
"N/A",
|
|
||||||
"N/A",
|
|
||||||
];
|
|
||||||
table.add_row(row);
|
|
||||||
for part in disk.partitions {
|
|
||||||
let row = row![
|
|
||||||
part.logicalname.display(),
|
|
||||||
if let Some(label) = part.label.as_ref() {
|
|
||||||
label
|
|
||||||
} else {
|
|
||||||
"N/A"
|
|
||||||
},
|
|
||||||
part.capacity,
|
|
||||||
if let Some(used) = part
|
|
||||||
.used
|
|
||||||
.map(|u| format!("{:.2} GiB", u as f64 / 1024.0 / 1024.0 / 1024.0))
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
used
|
|
||||||
} else {
|
|
||||||
"N/A"
|
|
||||||
},
|
|
||||||
if let Some(eos) = part.embassy_os.as_ref() {
|
|
||||||
eos.version.as_str()
|
|
||||||
} else {
|
|
||||||
"N/A"
|
|
||||||
},
|
|
||||||
];
|
|
||||||
table.add_row(row);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
table.print_tty(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// `disk list` — enumerates attached disks and partitions.
/// `format` is consumed by the display hook, not by this function.
#[command(display(display_disk_info))]
pub async fn list(
    #[allow(unused_variables)]
    #[arg]
    format: Option<IoFormat>,
) -> Result<DiskListResponse, Error> {
    crate::disk::util::list().await
}
|
|
||||||
|
|
||||||
/// `disk repair` — touches the marker file whose presence requests a disk
/// repair pass on the next boot (contents are irrelevant).
#[command(display(display_none))]
pub async fn repair() -> Result<(), Error> {
    tokio::fs::write(REPAIR_DISK_PATH, b"").await?;
    Ok(())
}
|
|
||||||
@@ -1,257 +0,0 @@
|
|||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use tokio::io::AsyncWriteExt;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::filesystem::ecryptfs::EcryptFS;
|
|
||||||
use super::guard::{GenericMountGuard, TmpMountGuard};
|
|
||||||
use super::util::{bind, unmount};
|
|
||||||
use crate::auth::check_password;
|
|
||||||
use crate::backup::target::BackupInfo;
|
|
||||||
use crate::disk::mount::filesystem::ReadWrite;
|
|
||||||
use crate::disk::util::EmbassyOsRecoveryInfo;
|
|
||||||
use crate::middleware::encrypt::{decrypt_slice, encrypt_slice};
|
|
||||||
use crate::s9pk::manifest::PackageId;
|
|
||||||
use crate::util::serde::IoFormat;
|
|
||||||
use crate::util::{AtomicFile, FileLock};
|
|
||||||
use crate::volume::BACKUP_DIR;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Guard over a mounted backup target: the raw backup-disk mount plus the
/// ecryptfs layer on top of it. Both are unmounted (best-effort) on drop.
pub struct BackupMountGuard<G: GenericMountGuard> {
    // `Option` so `unmount`/`drop` can take ownership; only `None` during teardown.
    backup_disk_mount_guard: Option<G>,
    encrypted_guard: Option<TmpMountGuard>,
    // ecryptfs passphrase protecting the backup contents
    enc_key: String,
    // stored on the backup disk outside the encrypted layer (hash + wrapped key)
    pub unencrypted_metadata: EmbassyOsRecoveryInfo,
    // stored inside the encrypted layer
    pub metadata: BackupInfo,
}
|
|
||||||
impl<G: GenericMountGuard> BackupMountGuard<G> {
|
|
||||||
fn backup_disk_path(&self) -> &Path {
|
|
||||||
if let Some(guard) = &self.backup_disk_mount_guard {
|
|
||||||
guard.as_ref()
|
|
||||||
} else {
|
|
||||||
unreachable!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
    /// Mounts a backup target and layers the encrypted backup filesystem on
    /// top of it.
    ///
    /// Reads `EmbassyBackups/unencrypted-metadata.cbor` from the disk (or
    /// starts from defaults for a fresh target). For an existing target the
    /// password is checked against the stored hash and the ecryptfs key is
    /// unwrapped; for a fresh one a new random key is minted and its hash +
    /// wrapped form recorded. Finally `EmbassyBackups/crypt` is mounted via
    /// ecryptfs and `metadata.cbor` is loaded from inside it.
    #[instrument(skip(password))]
    pub async fn mount(backup_disk_mount_guard: G, password: &str) -> Result<Self, Error> {
        let backup_disk_path = backup_disk_mount_guard.as_ref();
        let unencrypted_metadata_path =
            backup_disk_path.join("EmbassyBackups/unencrypted-metadata.cbor");
        let mut unencrypted_metadata: EmbassyOsRecoveryInfo =
            if tokio::fs::metadata(&unencrypted_metadata_path)
                .await
                .is_ok()
            {
                IoFormat::Cbor.from_slice(
                    &tokio::fs::read(&unencrypted_metadata_path)
                        .await
                        .with_ctx(|_| {
                            (
                                crate::ErrorKind::Filesystem,
                                unencrypted_metadata_path.display().to_string(),
                            )
                        })?,
                )?
            } else {
                Default::default()
            };
        // Existing target: authenticate and unwrap the stored key.
        // Fresh target: mint a brand-new random key.
        let enc_key = if let (Some(hash), Some(wrapped_key)) = (
            unencrypted_metadata.password_hash.as_ref(),
            unencrypted_metadata.wrapped_key.as_ref(),
        ) {
            let wrapped_key =
                base32::decode(base32::Alphabet::RFC4648 { padding: true }, wrapped_key)
                    .ok_or_else(|| {
                        Error::new(
                            eyre!("failed to decode wrapped key"),
                            crate::ErrorKind::Backup,
                        )
                    })?;
            check_password(hash, password)?;
            String::from_utf8(decrypt_slice(wrapped_key, password))?
        } else {
            base32::encode(
                base32::Alphabet::RFC4648 { padding: false },
                &rand::random::<[u8; 32]>()[..],
            )
        };

        // Persist the password hash and wrapped key if this is a new target.
        if unencrypted_metadata.password_hash.is_none() {
            unencrypted_metadata.password_hash = Some(
                argon2::hash_encoded(
                    password.as_bytes(),
                    &rand::random::<[u8; 16]>()[..],
                    &argon2::Config::default(),
                )
                .with_kind(crate::ErrorKind::PasswordHashGeneration)?,
            );
        }
        if unencrypted_metadata.wrapped_key.is_none() {
            unencrypted_metadata.wrapped_key = Some(base32::encode(
                base32::Alphabet::RFC4648 { padding: true },
                &encrypt_slice(&enc_key, password),
            ));
        }

        // Ensure the ciphertext directory exists before mounting over it.
        let crypt_path = backup_disk_path.join("EmbassyBackups/crypt");
        if tokio::fs::metadata(&crypt_path).await.is_err() {
            tokio::fs::create_dir_all(&crypt_path).await.with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    crypt_path.display().to_string(),
                )
            })?;
        }
        let encrypted_guard =
            TmpMountGuard::mount(&EcryptFS::new(&crypt_path, &enc_key), ReadWrite).await?;

        let metadata_path = encrypted_guard.as_ref().join("metadata.cbor");
        let metadata: BackupInfo = if tokio::fs::metadata(&metadata_path).await.is_ok() {
            IoFormat::Cbor.from_slice(&tokio::fs::read(&metadata_path).await.with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    metadata_path.display().to_string(),
                )
            })?)?
        } else {
            Default::default()
        };

        Ok(Self {
            backup_disk_mount_guard: Some(backup_disk_mount_guard),
            encrypted_guard: Some(encrypted_guard),
            enc_key,
            unencrypted_metadata,
            metadata,
        })
    }
|
|
||||||
|
|
||||||
pub fn change_password(&mut self, new_password: &str) -> Result<(), Error> {
|
|
||||||
self.unencrypted_metadata.password_hash = Some(
|
|
||||||
argon2::hash_encoded(
|
|
||||||
new_password.as_bytes(),
|
|
||||||
&rand::random::<[u8; 16]>()[..],
|
|
||||||
&argon2::Config::default(),
|
|
||||||
)
|
|
||||||
.with_kind(crate::ErrorKind::PasswordHashGeneration)?,
|
|
||||||
);
|
|
||||||
self.unencrypted_metadata.wrapped_key = Some(base32::encode(
|
|
||||||
base32::Alphabet::RFC4648 { padding: false },
|
|
||||||
&encrypt_slice(&self.enc_key, new_password),
|
|
||||||
));
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(self))]
|
|
||||||
pub async fn mount_package_backup(
|
|
||||||
&self,
|
|
||||||
id: &PackageId,
|
|
||||||
) -> Result<PackageBackupMountGuard, Error> {
|
|
||||||
let lock = FileLock::new(Path::new(BACKUP_DIR).join(format!("{}.lock", id)), false).await?;
|
|
||||||
let mountpoint = Path::new(BACKUP_DIR).join(id);
|
|
||||||
bind(self.as_ref().join(id), &mountpoint, false).await?;
|
|
||||||
Ok(PackageBackupMountGuard {
|
|
||||||
mountpoint: Some(mountpoint),
|
|
||||||
lock: Some(lock),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
    /// Atomically persists both metadata files: `metadata.cbor` inside the
    /// encrypted mount and `unencrypted-metadata.cbor` on the backup disk.
    #[instrument(skip(self))]
    pub async fn save(&self) -> Result<(), Error> {
        let metadata_path = self.as_ref().join("metadata.cbor");
        let backup_disk_path = self.backup_disk_path();
        let mut file = AtomicFile::new(&metadata_path).await?;
        file.write_all(&IoFormat::Cbor.to_vec(&self.metadata)?)
            .await?;
        file.save().await?;
        let unencrypted_metadata_path =
            backup_disk_path.join("EmbassyBackups/unencrypted-metadata.cbor");
        let mut file = AtomicFile::new(&unencrypted_metadata_path).await?;
        file.write_all(&IoFormat::Cbor.to_vec(&self.unencrypted_metadata)?)
            .await?;
        file.save().await?;
        Ok(())
    }
|
|
||||||
|
|
||||||
#[instrument(skip(self))]
|
|
||||||
pub async fn unmount(mut self) -> Result<(), Error> {
|
|
||||||
if let Some(guard) = self.encrypted_guard.take() {
|
|
||||||
guard.unmount().await?;
|
|
||||||
}
|
|
||||||
if let Some(guard) = self.backup_disk_mount_guard.take() {
|
|
||||||
guard.unmount().await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(self))]
|
|
||||||
pub async fn save_and_unmount(self) -> Result<(), Error> {
|
|
||||||
self.save().await?;
|
|
||||||
self.unmount().await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<G: GenericMountGuard> AsRef<Path> for BackupMountGuard<G> {
    /// Root of the decrypted backup filesystem; the guard is only `None`
    /// during teardown, when this is no longer called.
    fn as_ref(&self) -> &Path {
        match &self.encrypted_guard {
            Some(guard) => guard.as_ref(),
            None => unreachable!(),
        }
    }
}
|
|
||||||
impl<G: GenericMountGuard> Drop for BackupMountGuard<G> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
let first = self.encrypted_guard.take();
|
|
||||||
let second = self.backup_disk_mount_guard.take();
|
|
||||||
tokio::spawn(async move {
|
|
||||||
if let Some(guard) = first {
|
|
||||||
guard.unmount().await.unwrap();
|
|
||||||
}
|
|
||||||
if let Some(guard) = second {
|
|
||||||
guard.unmount().await.unwrap();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Guard over a single package's bind-mounted backup directory plus the
/// lock file serializing access to it.
pub struct PackageBackupMountGuard {
    // `Option` so `unmount`/`drop` can take ownership during teardown.
    mountpoint: Option<PathBuf>,
    lock: Option<FileLock>,
}
|
|
||||||
impl PackageBackupMountGuard {
|
|
||||||
pub async fn unmount(mut self) -> Result<(), Error> {
|
|
||||||
if let Some(mountpoint) = self.mountpoint.take() {
|
|
||||||
unmount(&mountpoint).await?;
|
|
||||||
}
|
|
||||||
if let Some(lock) = self.lock.take() {
|
|
||||||
lock.unlock().await?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl AsRef<Path> for PackageBackupMountGuard {
|
|
||||||
fn as_ref(&self) -> &Path {
|
|
||||||
if let Some(mountpoint) = &self.mountpoint {
|
|
||||||
mountpoint
|
|
||||||
} else {
|
|
||||||
unreachable!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Drop for PackageBackupMountGuard {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
let mountpoint = self.mountpoint.take();
|
|
||||||
let lock = self.lock.take();
|
|
||||||
tokio::spawn(async move {
|
|
||||||
if let Some(mountpoint) = mountpoint {
|
|
||||||
unmount(&mountpoint).await.unwrap();
|
|
||||||
}
|
|
||||||
if let Some(lock) = lock {
|
|
||||||
lock.unlock().await.unwrap();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,67 +0,0 @@
|
|||||||
use std::os::unix::ffi::OsStrExt;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use digest::generic_array::GenericArray;
|
|
||||||
use digest::{Digest, OutputSizeUser};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sha2::Sha256;
|
|
||||||
|
|
||||||
use super::{FileSystem, MountType, ReadOnly};
|
|
||||||
use crate::util::Invoke;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
pub async fn mount(
|
|
||||||
logicalname: impl AsRef<Path>,
|
|
||||||
mountpoint: impl AsRef<Path>,
|
|
||||||
mount_type: MountType,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
tokio::fs::create_dir_all(mountpoint.as_ref()).await?;
|
|
||||||
let mut cmd = tokio::process::Command::new("mount");
|
|
||||||
cmd.arg(logicalname.as_ref()).arg(mountpoint.as_ref());
|
|
||||||
if mount_type == ReadOnly {
|
|
||||||
cmd.arg("-o").arg("ro");
|
|
||||||
}
|
|
||||||
cmd.invoke(crate::ErrorKind::Filesystem).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A filesystem identified by the block device that backs it.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct BlockDev<LogicalName: AsRef<Path>> {
    logicalname: LogicalName,
}
impl<LogicalName: AsRef<Path>> BlockDev<LogicalName> {
    /// Wraps a device path (e.g. `/dev/sda1`).
    pub fn new(logicalname: LogicalName) -> Self {
        BlockDev { logicalname }
    }
}
|
|
||||||
#[async_trait]
impl<LogicalName: AsRef<Path> + Send + Sync> FileSystem for BlockDev<LogicalName> {
    async fn mount<P: AsRef<Path> + Send + Sync>(
        &self,
        mountpoint: P,
        mount_type: MountType,
    ) -> Result<(), Error> {
        mount(self.logicalname.as_ref(), mountpoint, mount_type).await
    }
    /// Identity hash: "BlockDev" + the canonicalized device path, so two
    /// handles to the same underlying device hash identically.
    async fn source_hash(
        &self,
    ) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
        let mut sha = Sha256::new();
        sha.update("BlockDev");
        sha.update(
            tokio::fs::canonicalize(self.logicalname.as_ref())
                .await
                .with_ctx(|_| {
                    (
                        crate::ErrorKind::Filesystem,
                        self.logicalname.as_ref().display().to_string(),
                    )
                })?
                .as_os_str()
                .as_bytes(),
        );
        Ok(sha.finalize())
    }
}
|
|
||||||
@@ -1,105 +0,0 @@
|
|||||||
use std::net::IpAddr;
|
|
||||||
use std::os::unix::ffi::OsStrExt;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use digest::generic_array::GenericArray;
|
|
||||||
use digest::{Digest, OutputSizeUser};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use sha2::Sha256;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::{FileSystem, MountType, ReadOnly};
|
|
||||||
use crate::disk::mount::guard::TmpMountGuard;
|
|
||||||
use crate::util::Invoke;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
async fn resolve_hostname(hostname: &str) -> Result<IpAddr, Error> {
|
|
||||||
#[cfg(feature = "avahi")]
|
|
||||||
if hostname.ends_with(".local") {
|
|
||||||
return Ok(IpAddr::V4(crate::net::mdns::resolve_mdns(hostname).await?));
|
|
||||||
}
|
|
||||||
Ok(String::from_utf8(
|
|
||||||
Command::new("nmblookup")
|
|
||||||
.arg(hostname)
|
|
||||||
.invoke(crate::ErrorKind::Network)
|
|
||||||
.await?,
|
|
||||||
)?
|
|
||||||
.split(" ")
|
|
||||||
.next()
|
|
||||||
.unwrap()
|
|
||||||
.trim()
|
|
||||||
.parse()?)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Mounts the CIFS/SMB share `//<hostname>/<path>` at `mountpoint`.
///
/// Credentials are passed via the `USER`/`PASSWD` environment variables so
/// the password never appears on the mount command line. `noserverino` is
/// always set; read-only mounts additionally get `ro`.
#[instrument(skip(path, password, mountpoint))]
pub async fn mount_cifs(
    hostname: &str,
    path: impl AsRef<Path>,
    username: &str,
    password: Option<&str>,
    mountpoint: impl AsRef<Path>,
    mount_type: MountType,
) -> Result<(), Error> {
    tokio::fs::create_dir_all(mountpoint.as_ref()).await?;
    let ip: IpAddr = resolve_hostname(hostname).await?;
    // Normalize the share path to be absolute on the server.
    let absolute_path = Path::new("/").join(path.as_ref());
    let mut cmd = Command::new("mount");
    cmd.arg("-t")
        .arg("cifs")
        .env("USER", username)
        .env("PASSWD", password.unwrap_or_default())
        .arg(format!("//{}{}", ip, absolute_path.display()))
        .arg(mountpoint.as_ref());
    if mount_type == ReadOnly {
        cmd.arg("-o").arg("ro,noserverino");
    } else {
        cmd.arg("-o").arg("noserverino");
    }
    cmd.invoke(crate::ErrorKind::Filesystem).await?;
    Ok(())
}
|
|
||||||
|
|
||||||
/// Connection parameters for a CIFS/SMB backup target.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Cifs {
    pub hostname: String,
    // share path on the server (made absolute at mount time)
    pub path: PathBuf,
    pub username: String,
    pub password: Option<String>,
}
|
|
||||||
impl Cifs {
|
|
||||||
pub async fn mountable(&self) -> Result<(), Error> {
|
|
||||||
let guard = TmpMountGuard::mount(self, ReadOnly).await?;
|
|
||||||
guard.unmount().await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#[async_trait]
impl FileSystem for Cifs {
    async fn mount<P: AsRef<std::path::Path> + Send + Sync>(
        &self,
        mountpoint: P,
        mount_type: MountType,
    ) -> Result<(), Error> {
        mount_cifs(
            &self.hostname,
            &self.path,
            &self.username,
            self.password.as_ref().map(|p| p.as_str()),
            mountpoint,
            mount_type,
        )
        .await
    }
    /// Identity hash: "Cifs" + hostname + share path. Credentials are not
    /// part of the identity.
    async fn source_hash(
        &self,
    ) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
        let mut sha = Sha256::new();
        sha.update("Cifs");
        sha.update(self.hostname.as_bytes());
        sha.update(self.path.as_os_str().as_bytes());
        Ok(sha.finalize())
    }
}
|
|
||||||
@@ -1,85 +0,0 @@
|
|||||||
use std::os::unix::ffi::OsStrExt;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use digest::generic_array::GenericArray;
|
|
||||||
use digest::{Digest, OutputSizeUser};
|
|
||||||
use sha2::Sha256;
|
|
||||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
|
||||||
|
|
||||||
use super::{FileSystem, MountType};
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Mounts `src` (the ciphertext lower directory) at `dst` via ecryptfs,
/// deriving keys from the `key` passphrase (AES, 32-byte keys, filename
/// encryption enabled).
///
/// `mount -t ecryptfs` prompts interactively; the prompt is answered with a
/// newline on stdin, and stderr is surfaced in the error on failure.
pub async fn mount_ecryptfs<P0: AsRef<Path>, P1: AsRef<Path>>(
    src: P0,
    dst: P1,
    key: &str,
) -> Result<(), Error> {
    tokio::fs::create_dir_all(dst.as_ref()).await?;
    let mut ecryptfs = tokio::process::Command::new("mount")
        .arg("-t")
        .arg("ecryptfs")
        .arg(src.as_ref())
        .arg(dst.as_ref())
        .arg("-o")
        // for more information `man ecryptfs`
        .arg(format!("key=passphrase:passphrase_passwd={},ecryptfs_cipher=aes,ecryptfs_key_bytes=32,ecryptfs_passthrough=n,ecryptfs_enable_filename_crypto=y,no_sig_cache", key))
        .stdin(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        .spawn()?;
    let mut stdin = ecryptfs.stdin.take().unwrap();
    let mut stderr = ecryptfs.stderr.take().unwrap();
    // Acknowledge the interactive confirmation prompt, then close stdin.
    stdin.write_all(b"\n").await?;
    stdin.flush().await?;
    stdin.shutdown().await?;
    drop(stdin);
    // Drain stderr before waiting so the child can't block on a full pipe.
    let mut err = String::new();
    stderr.read_to_string(&mut err).await?;
    if !ecryptfs.wait().await?.success() {
        Err(Error::new(eyre!("{}", err), crate::ErrorKind::Filesystem))
    } else {
        Ok(())
    }
}
|
|
||||||
|
|
||||||
/// An ecryptfs overlay: `encrypted_dir` holds the ciphertext and `key` is
/// the mount passphrase.
pub struct EcryptFS<EncryptedDir: AsRef<Path>, Key: AsRef<str>> {
    encrypted_dir: EncryptedDir,
    key: Key,
}
impl<EncryptedDir: AsRef<Path>, Key: AsRef<str>> EcryptFS<EncryptedDir, Key> {
    pub fn new(encrypted_dir: EncryptedDir, key: Key) -> Self {
        EcryptFS { encrypted_dir, key }
    }
}
|
|
||||||
#[async_trait]
impl<EncryptedDir: AsRef<Path> + Send + Sync, Key: AsRef<str> + Send + Sync> FileSystem
    for EcryptFS<EncryptedDir, Key>
{
    async fn mount<P: AsRef<Path> + Send + Sync>(
        &self,
        mountpoint: P,
        _mount_type: MountType, // ignored - inherited from parent fs
    ) -> Result<(), Error> {
        mount_ecryptfs(self.encrypted_dir.as_ref(), mountpoint, self.key.as_ref()).await
    }
    /// Identity hash: "EcryptFS" + the canonicalized ciphertext directory.
    /// The key is not part of the identity.
    async fn source_hash(
        &self,
    ) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
        let mut sha = Sha256::new();
        sha.update("EcryptFS");
        sha.update(
            tokio::fs::canonicalize(self.encrypted_dir.as_ref())
                .await
                .with_ctx(|_| {
                    (
                        crate::ErrorKind::Filesystem,
                        self.encrypted_dir.as_ref().display().to_string(),
                    )
                })?
                .as_os_str()
                .as_bytes(),
        );
        Ok(sha.finalize())
    }
}
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use digest::generic_array::GenericArray;
|
|
||||||
use digest::{Digest, OutputSizeUser};
|
|
||||||
use sha2::Sha256;
|
|
||||||
|
|
||||||
use super::{FileSystem, MountType, ReadOnly};
|
|
||||||
use crate::util::Invoke;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
pub async fn mount_label(
|
|
||||||
label: &str,
|
|
||||||
mountpoint: impl AsRef<Path>,
|
|
||||||
mount_type: MountType,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
tokio::fs::create_dir_all(mountpoint.as_ref()).await?;
|
|
||||||
let mut cmd = tokio::process::Command::new("mount");
|
|
||||||
cmd.arg("-L").arg(label).arg(mountpoint.as_ref());
|
|
||||||
if mount_type == ReadOnly {
|
|
||||||
cmd.arg("-o").arg("ro");
|
|
||||||
}
|
|
||||||
cmd.invoke(crate::ErrorKind::Filesystem).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A filesystem identified by its volume label (mounted with `mount -L`).
pub struct Label<S: AsRef<str>> {
    label: S,
}
impl<S: AsRef<str>> Label<S> {
    /// Wraps a volume label string.
    pub fn new(label: S) -> Self {
        Label { label }
    }
}
|
|
||||||
#[async_trait]
|
|
||||||
impl<S: AsRef<str> + Send + Sync> FileSystem for Label<S> {
|
|
||||||
async fn mount<P: AsRef<Path> + Send + Sync>(
|
|
||||||
&self,
|
|
||||||
mountpoint: P,
|
|
||||||
mount_type: MountType,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
mount_label(self.label.as_ref(), mountpoint, mount_type).await
|
|
||||||
}
|
|
||||||
async fn source_hash(
|
|
||||||
&self,
|
|
||||||
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
|
|
||||||
let mut sha = Sha256::new();
|
|
||||||
sha.update("Label");
|
|
||||||
sha.update(self.label.as_ref().as_bytes());
|
|
||||||
Ok(sha.finalize())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use digest::generic_array::GenericArray;
|
|
||||||
use digest::OutputSizeUser;
|
|
||||||
use sha2::Sha256;
|
|
||||||
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
pub mod block_dev;
|
|
||||||
pub mod cifs;
|
|
||||||
pub mod ecryptfs;
|
|
||||||
pub mod label;
|
|
||||||
|
|
||||||
/// Access mode requested for a mount.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MountType {
    ReadOnly,
    ReadWrite,
}
|
|
||||||
|
|
||||||
pub use MountType::*;
|
|
||||||
|
|
||||||
/// A mountable filesystem source.
#[async_trait]
pub trait FileSystem {
    /// Mounts this source at `mountpoint` with the requested access mode.
    async fn mount<P: AsRef<Path> + Send + Sync>(
        &self,
        mountpoint: P,
        mount_type: MountType,
    ) -> Result<(), Error>;
    /// A stable SHA-256 identity for this source, used to derive
    /// deterministic temporary mountpoints.
    async fn source_hash(
        &self,
    ) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error>;
}
|
|
||||||
@@ -1,129 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
use std::sync::{Arc, Weak};
|
|
||||||
|
|
||||||
use lazy_static::lazy_static;
|
|
||||||
use tokio::sync::Mutex;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::filesystem::{FileSystem, MountType, ReadOnly, ReadWrite};
|
|
||||||
use super::util::unmount;
|
|
||||||
use crate::util::Invoke;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// Root directory under which shared temporary mounts are created
/// (one subdirectory per source hash). `'static` is implied on consts,
/// so the explicit lifetime was redundant.
pub const TMP_MOUNTPOINT: &str = "/media/embassy-os/tmp";
|
|
||||||
|
|
||||||
/// Common interface for mount guards: something that *is* a mounted path and
/// can be explicitly unmounted (consuming the guard).
#[async_trait::async_trait]
pub trait GenericMountGuard: AsRef<Path> + std::fmt::Debug + Send + Sync + 'static {
    async fn unmount(mut self) -> Result<(), Error>;
}
|
|
||||||
|
|
||||||
/// RAII guard for a mounted filesystem: unmounts (best-effort, in a spawned
/// task) on drop unless it was explicitly unmounted first.
#[derive(Debug)]
pub struct MountGuard {
    mountpoint: PathBuf,
    mounted: bool, // false after an explicit unmount(), making Drop a no-op
}
impl MountGuard {
    /// Mounts `filesystem` at `mountpoint` and returns a guard over it.
    pub async fn mount(
        filesystem: &impl FileSystem,
        mountpoint: impl AsRef<Path>,
        mount_type: MountType,
    ) -> Result<Self, Error> {
        let mountpoint = mountpoint.as_ref().to_owned();
        filesystem.mount(&mountpoint, mount_type).await?;
        Ok(MountGuard {
            mountpoint,
            mounted: true,
        })
    }
    /// Unmounts now and reports any error. Preferred over relying on Drop,
    /// which can only fail inside a detached background task.
    pub async fn unmount(mut self) -> Result<(), Error> {
        if self.mounted {
            unmount(&self.mountpoint).await?;
            self.mounted = false;
        }
        Ok(())
    }
}
|
|
||||||
impl AsRef<Path> for MountGuard {
|
|
||||||
fn as_ref(&self) -> &Path {
|
|
||||||
&self.mountpoint
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Drop for MountGuard {
    fn drop(&mut self) {
        // Fallback cleanup: if the guard is dropped without an explicit
        // unmount(), detach in a background task. The unwrap() means a failed
        // unmount panics that task — it cannot be reported to the caller here.
        if self.mounted {
            let mountpoint = std::mem::take(&mut self.mountpoint);
            tokio::spawn(async move { unmount(mountpoint).await.unwrap() });
        }
    }
}
|
|
||||||
#[async_trait::async_trait]
impl GenericMountGuard for MountGuard {
    /// Delegates to the inherent [`MountGuard::unmount`].
    async fn unmount(mut self) -> Result<(), Error> {
        MountGuard::unmount(self).await
    }
}
|
|
||||||
|
|
||||||
/// Deterministic temporary mountpoint for a source: `TMP_MOUNTPOINT` joined
/// with the unpadded base32 (RFC 4648) encoding of the source's hash, so the
/// same source always maps to the same directory.
async fn tmp_mountpoint(source: &impl FileSystem) -> Result<PathBuf, Error> {
    Ok(Path::new(TMP_MOUNTPOINT).join(base32::encode(
        base32::Alphabet::RFC4648 { padding: false },
        &source.source_hash().await?,
    )))
}
|
|
||||||
|
|
||||||
lazy_static! {
    // Registry of live tmp mounts keyed by mountpoint. The Weak reference lets
    // concurrent users share one MountGuard without this map keeping the
    // filesystem mounted after the last user drops it.
    static ref TMP_MOUNTS: Mutex<BTreeMap<PathBuf, (MountType, Weak<MountGuard>)>> =
        Mutex::new(BTreeMap::new());
}
|
|
||||||
|
|
||||||
/// A shared, reference-counted mount under `TMP_MOUNTPOINT`; concurrent
/// mounts of the same source reuse one underlying [`MountGuard`].
#[derive(Debug)]
pub struct TmpMountGuard {
    guard: Arc<MountGuard>,
}
impl TmpMountGuard {
    /// DRAGONS: if you try to mount something as ro and rw at the same time, the ro mount will be upgraded to rw.
    #[instrument(skip(filesystem))]
    pub async fn mount(filesystem: &impl FileSystem, mount_type: MountType) -> Result<Self, Error> {
        let mountpoint = tmp_mountpoint(filesystem).await?;
        // The registry lock is held for the whole mount/upgrade so concurrent
        // mounts of the same source are serialized.
        let mut tmp_mounts = TMP_MOUNTS.lock().await;
        if !tmp_mounts.contains_key(&mountpoint) {
            tmp_mounts.insert(mountpoint.clone(), (mount_type, Weak::new()));
        }
        let (prev_mt, weak_slot) = tmp_mounts.get_mut(&mountpoint).unwrap();
        if let Some(guard) = weak_slot.upgrade() {
            // Already mounted by another live user: share it, remounting rw if
            // it was ro and rw is now requested (see DRAGONS above).
            // upgrade to rw
            if *prev_mt == ReadOnly && mount_type == ReadWrite {
                tokio::process::Command::new("mount")
                    .arg("-o")
                    .arg("remount,rw")
                    .arg(&mountpoint)
                    .invoke(crate::ErrorKind::Filesystem)
                    .await?;
                *prev_mt = ReadWrite;
            }
            Ok(TmpMountGuard { guard })
        } else {
            // No live users: perform a fresh mount and publish a Weak to it.
            let guard = Arc::new(MountGuard::mount(filesystem, &mountpoint, mount_type).await?);
            *weak_slot = Arc::downgrade(&guard);
            *prev_mt = mount_type;
            Ok(TmpMountGuard { guard })
        }
    }
    /// Unmounts only when this is the last reference to the shared guard;
    /// earlier callers just drop their handle.
    pub async fn unmount(self) -> Result<(), Error> {
        if let Ok(guard) = Arc::try_unwrap(self.guard) {
            guard.unmount().await?;
        }
        Ok(())
    }
}
|
|
||||||
impl AsRef<Path> for TmpMountGuard {
|
|
||||||
fn as_ref(&self) -> &Path {
|
|
||||||
(&*self.guard).as_ref()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#[async_trait::async_trait]
impl GenericMountGuard for TmpMountGuard {
    /// Delegates to the inherent [`TmpMountGuard::unmount`].
    async fn unmount(mut self) -> Result<(), Error> {
        TmpMountGuard::unmount(self).await
    }
}
|
|
||||||
@@ -1,60 +0,0 @@
|
|||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::util::Invoke;
|
|
||||||
use crate::{Error, ResultExt};
|
|
||||||
|
|
||||||
/// Bind-mounts `src` onto `dst` (optionally read-only), creating both
/// directories if needed and replacing any mount already present at `dst`.
#[instrument(skip(src, dst))]
pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
    src: P0,
    dst: P1,
    read_only: bool,
) -> Result<(), Error> {
    tracing::info!(
        "Binding {} to {}",
        src.as_ref().display(),
        dst.as_ref().display()
    );
    // If something is already mounted at dst, detach it first so the new bind
    // replaces it rather than stacking on top.
    let is_mountpoint = tokio::process::Command::new("mountpoint")
        .arg(dst.as_ref())
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status()
        .await?;
    if is_mountpoint.success() {
        unmount(dst.as_ref()).await?;
    }
    // Both endpoints must exist before `mount --bind`.
    tokio::fs::create_dir_all(&src).await?;
    tokio::fs::create_dir_all(&dst).await?;
    let mut mount_cmd = tokio::process::Command::new("mount");
    mount_cmd.arg("--bind");
    if read_only {
        mount_cmd.arg("-o").arg("ro");
    }
    mount_cmd
        .arg(src.as_ref())
        .arg(dst.as_ref())
        .invoke(crate::ErrorKind::Filesystem)
        .await?;
    Ok(())
}
|
|
||||||
|
|
||||||
/// Lazily unmounts `mountpoint` (`umount -l`) and then removes the mountpoint
/// directory.
#[instrument(skip(mountpoint))]
pub async fn unmount<P: AsRef<Path>>(mountpoint: P) -> Result<(), Error> {
    tracing::debug!("Unmounting {}.", mountpoint.as_ref().display());
    tokio::process::Command::new("umount")
        .arg("-l") // lazy: detach now even if the mount is still busy
        .arg(mountpoint.as_ref())
        .invoke(crate::ErrorKind::Filesystem)
        .await?;
    // NOTE(review): with a lazy unmount the detach may still be in progress
    // here; this assumes the directory is empty by the time we remove it —
    // confirm.
    tokio::fs::remove_dir_all(mountpoint.as_ref())
        .await
        .with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                format!("rm {}", mountpoint.as_ref().display()),
            )
        })?;
    Ok(())
}
|
|
||||||
@@ -1,170 +0,0 @@
|
|||||||
use std::collections::BTreeSet;
|
|
||||||
use std::num::ParseIntError;
|
|
||||||
use std::path::Path;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use tokio::io::AsyncWriteExt;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::BOOT_RW_PATH;
|
|
||||||
use crate::util::AtomicFile;
|
|
||||||
use crate::Error;
|
|
||||||
|
|
||||||
/// Sysfs file through which usb-storage quirks are configured at runtime.
/// `'static` is implied on consts, so the explicit lifetime was redundant.
pub const QUIRK_PATH: &str = "/sys/module/usb_storage/parameters/quirks";
|
|
||||||
|
|
||||||
/// USB (vendor, product) pairs exempted from quirk treatment — known-good
/// devices and the root hubs themselves.
pub const WHITELIST: [(VendorId, ProductId); 5] = [
    (VendorId(0x1d6b), ProductId(0x0002)), // root hub usb2
    (VendorId(0x1d6b), ProductId(0x0003)), // root hub usb3
    (VendorId(0x2109), ProductId(0x3431)),
    (VendorId(0x1058), ProductId(0x262f)), // western digital black HDD
    (VendorId(0x04e8), ProductId(0x4001)), // Samsung T7
];
|
|
||||||
|
|
||||||
/// USB vendor id (the sysfs `idVendor` attribute), stored as a raw `u16`.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct VendorId(u16);

impl std::str::FromStr for VendorId {
    type Err = ParseIntError;

    /// Parses a (possibly whitespace-padded) hex string, e.g. `"1d6b"`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let hex = s.trim();
        u16::from_str_radix(hex, 16).map(Self)
    }
}

impl std::fmt::Display for VendorId {
    /// Formats as zero-padded lowercase hex, matching sysfs/`lsusb` output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:04x}", self.0)
    }
}
|
|
||||||
|
|
||||||
/// USB product id (the sysfs `idProduct` attribute), stored as a raw `u16`.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProductId(u16);

impl std::str::FromStr for ProductId {
    type Err = ParseIntError;

    /// Parses a (possibly whitespace-padded) hex string, e.g. `"0002"`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let hex = s.trim();
        u16::from_str_radix(hex, 16).map(Self)
    }
}

impl std::fmt::Display for ProductId {
    /// Formats as zero-padded lowercase hex, matching sysfs/`lsusb` output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:04x}", self.0)
    }
}
|
|
||||||
|
|
||||||
/// The set of (vendor, product) pairs with the usb-storage `u` quirk applied.
#[derive(Clone, Debug)]
pub struct Quirks(BTreeSet<(VendorId, ProductId)>);
impl Quirks {
    /// Marks a device as quirked.
    pub fn add(&mut self, vendor: VendorId, product: ProductId) {
        self.0.insert((vendor, product));
    }
    /// Clears a device's quirk, if present.
    pub fn remove(&mut self, vendor: VendorId, product: ProductId) {
        self.0.remove(&(vendor, product));
    }
    /// Whether a device is currently quirked.
    pub fn contains(&self, vendor: VendorId, product: ProductId) -> bool {
        self.0.contains(&(vendor, product))
    }
}
|
|
||||||
impl std::fmt::Display for Quirks {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
let mut comma = false;
|
|
||||||
for (vendor, product) in &self.0 {
|
|
||||||
if comma {
|
|
||||||
write!(f, ",")?;
|
|
||||||
} else {
|
|
||||||
comma = true;
|
|
||||||
}
|
|
||||||
write!(f, "{}:{}:u", vendor, product)?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl std::str::FromStr for Quirks {
|
|
||||||
type Err = Error;
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
let s = s.trim();
|
|
||||||
let mut quirks = BTreeSet::new();
|
|
||||||
for item in s.split(",") {
|
|
||||||
if let [vendor, product, "u"] = item.splitn(3, ":").collect::<Vec<_>>().as_slice() {
|
|
||||||
quirks.insert((vendor.parse()?, product.parse()?));
|
|
||||||
} else {
|
|
||||||
return Err(Error::new(
|
|
||||||
eyre!("Invalid quirk: `{}`", item),
|
|
||||||
crate::ErrorKind::DiskManagement,
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(Quirks(quirks))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Scans /sys/bus/usb/devices, adds a quirk for every non-whitelisted device
/// that is not already quirked, writes the updated set to sysfs after each
/// addition, and soft-disconnects the device so the quirk takes effect when it
/// is reconnected. Returns human-readable names of devices to reconnect.
#[instrument]
pub async fn update_quirks(quirks: &mut Quirks) -> Result<Vec<String>, Error> {
    let mut usb_devices = tokio::fs::read_dir("/sys/bus/usb/devices/").await?;
    let mut to_reconnect = Vec::new();
    while let Some(usb_device) = usb_devices.next_entry().await? {
        // entries without idVendor are hubs/interfaces, not actual devices
        if tokio::fs::metadata(usb_device.path().join("idVendor"))
            .await
            .is_err()
        {
            continue;
        }
        let vendor = tokio::fs::read_to_string(usb_device.path().join("idVendor"))
            .await?
            .parse()?;
        let product = tokio::fs::read_to_string(usb_device.path().join("idProduct"))
            .await?
            .parse()?;
        if WHITELIST.contains(&(vendor, product)) {
            // known-good device: make sure no stale quirk remains
            quirks.remove(vendor, product);
            continue;
        }
        if quirks.contains(vendor, product) {
            continue;
        }
        quirks.add(vendor, product);
        {
            // write quirks to sysfs
            let mut quirk_file = tokio::fs::File::create(QUIRK_PATH).await?;
            quirk_file.write_all(quirks.to_string().as_bytes()).await?;
            quirk_file.sync_all().await?;
            drop(quirk_file);
        }

        // force the device to re-enumerate so the new quirk applies
        disconnect_usb(usb_device.path()).await?;
        let (vendor_name, product_name) = tokio::try_join!(
            tokio::fs::read_to_string(usb_device.path().join("manufacturer")),
            tokio::fs::read_to_string(usb_device.path().join("product")),
        )?;
        to_reconnect.push(format!("{} {}", vendor_name, product_name));
    }
    Ok(to_reconnect)
}
|
|
||||||
|
|
||||||
/// Soft-disconnects a USB device by writing `0` to its sysfs
/// `bConfigurationValue`, deconfiguring it until it is re-enumerated.
#[instrument(skip(usb_device_path))]
pub async fn disconnect_usb(usb_device_path: impl AsRef<Path>) -> Result<(), Error> {
    let authorized_path = usb_device_path.as_ref().join("bConfigurationValue");
    let mut authorized_file = tokio::fs::File::create(&authorized_path).await?;
    authorized_file.write_all(b"0").await?;
    // sync before drop so the write is flushed to sysfs immediately
    authorized_file.sync_all().await?;
    drop(authorized_file);
    Ok(())
}
|
|
||||||
|
|
||||||
/// Reads and parses the currently-active quirk set from sysfs.
#[instrument]
pub async fn fetch_quirks() -> Result<Quirks, Error> {
    Ok(tokio::fs::read_to_string(QUIRK_PATH).await?.parse()?)
}
|
|
||||||
|
|
||||||
/// Persists the quirk set into the boot kernel command line so it survives
/// reboot: snapshots a pristine `cmdline.txt.orig` on first run, then
/// atomically rewrites `cmdline.txt` with `usb-storage.quirks=...` prepended
/// to the original contents.
#[instrument]
pub async fn save_quirks(quirks: &Quirks) -> Result<(), Error> {
    let orig_path = Path::new(BOOT_RW_PATH).join("cmdline.txt.orig");
    let target_path = Path::new(BOOT_RW_PATH).join("cmdline.txt");
    // first run: keep an unmodified copy to rebuild from on later saves
    if tokio::fs::metadata(&orig_path).await.is_err() {
        tokio::fs::copy(&target_path, &orig_path).await?;
    }
    let cmdline = tokio::fs::read_to_string(&orig_path).await?;
    let mut target = AtomicFile::new(&target_path).await?;
    target
        .write_all(format!("usb-storage.quirks={} {}", quirks, cmdline).as_bytes())
        .await?;
    target.save().await?;

    Ok(())
}
|
|
||||||
@@ -1,435 +0,0 @@
|
|||||||
use std::collections::BTreeMap;
|
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use color_eyre::eyre::{self, eyre};
|
|
||||||
use futures::TryStreamExt;
|
|
||||||
use indexmap::IndexSet;
|
|
||||||
use nom::bytes::complete::{tag, take_till1};
|
|
||||||
use nom::character::complete::multispace1;
|
|
||||||
use nom::character::is_space;
|
|
||||||
use nom::combinator::{opt, rest};
|
|
||||||
use nom::sequence::{pair, preceded, terminated};
|
|
||||||
use nom::IResult;
|
|
||||||
use regex::Regex;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use super::mount::filesystem::block_dev::BlockDev;
|
|
||||||
use super::mount::filesystem::ReadOnly;
|
|
||||||
use super::mount::guard::TmpMountGuard;
|
|
||||||
use super::quirks::{fetch_quirks, save_quirks, update_quirks};
|
|
||||||
use crate::util::io::from_yaml_async_reader;
|
|
||||||
use crate::util::serde::IoFormat;
|
|
||||||
use crate::util::{Invoke, Version};
|
|
||||||
use crate::{Error, ResultExt as _};
|
|
||||||
|
|
||||||
/// Result of [`list`]: all external disks plus devices awaiting reconnection.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DiskListResponse {
    pub disks: Vec<DiskInfo>,
    // human-readable names of USB devices that were quirk-disconnected and
    // must be physically reconnected
    pub reconnect: Vec<String>,
}

/// A physical disk and its partitions.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DiskInfo {
    pub logicalname: PathBuf,
    pub vendor: Option<String>,
    pub model: Option<String>,
    pub partitions: Vec<PartitionInfo>,
    pub capacity: u64, // bytes
    // LVM volume-group name when the disk belongs to a VG (see pvscan)
    pub guid: Option<String>,
}

/// A single partition on a disk.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PartitionInfo {
    pub logicalname: PathBuf,
    pub label: Option<String>,
    pub capacity: u64, // bytes
    // bytes used, populated only when the fs could be mounted for inspection
    pub used: Option<u64>,
    pub embassy_os: Option<EmbassyOsRecoveryInfo>,
}

/// Metadata about an EmbassyOS install or backup found on a partition.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct EmbassyOsRecoveryInfo {
    pub version: Version,
    pub full: bool, // true for a full OS install, false for a backup
    pub password_hash: Option<String>,
    pub wrapped_key: Option<String>,
}
|
|
||||||
|
|
||||||
/// Directory of stable by-path symlinks for block devices. `'static` is
/// implied on consts, so the explicit lifetimes were redundant.
const DISK_PATH: &str = "/dev/disk/by-path";
/// Sysfs root for per-block-device metadata.
const SYS_BLOCK_PATH: &str = "/sys/block";
|
|
||||||
|
|
||||||
lazy_static::lazy_static! {
    // matches the "-partN" suffix of a by-path partition symlink name
    static ref PARTITION_REGEX: Regex = Regex::new("-part[0-9]+$").unwrap();
}
|
|
||||||
|
|
||||||
/// Vendor string for a block device, read from
/// `/sys/block/<dev>/device/vendor`; empty values become `None`.
#[instrument(skip(path))]
pub async fn get_vendor<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> {
    let vendor = tokio::fs::read_to_string(
        Path::new(SYS_BLOCK_PATH)
            // map /dev/<dev> -> /sys/block/<dev>; anything not under /dev is rejected
            .join(path.as_ref().strip_prefix("/dev").map_err(|_| {
                Error::new(
                    eyre!("not a canonical block device"),
                    crate::ErrorKind::BlockDevice,
                )
            })?)
            .join("device")
            .join("vendor"),
    )
    .await?
    .trim()
    .to_owned();
    Ok(if vendor.is_empty() {
        None
    } else {
        Some(vendor)
    })
}
|
|
||||||
|
|
||||||
/// Model string for a block device, read from
/// `/sys/block/<dev>/device/model`; empty values become `None`.
#[instrument(skip(path))]
pub async fn get_model<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> {
    let model = tokio::fs::read_to_string(
        Path::new(SYS_BLOCK_PATH)
            // map /dev/<dev> -> /sys/block/<dev>; anything not under /dev is rejected
            .join(path.as_ref().strip_prefix("/dev").map_err(|_| {
                Error::new(
                    eyre!("not a canonical block device"),
                    crate::ErrorKind::BlockDevice,
                )
            })?)
            .join("device")
            .join("model"),
    )
    .await?
    .trim()
    .to_owned();
    Ok(if model.is_empty() { None } else { Some(model) })
}
|
|
||||||
|
|
||||||
/// Size in bytes of a block device, via `blockdev --getsize64`.
#[instrument(skip(path))]
pub async fn get_capacity<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
    Ok(String::from_utf8(
        Command::new("blockdev")
            .arg("--getsize64")
            .arg(path.as_ref())
            .invoke(crate::ErrorKind::BlockDevice)
            .await?,
    )?
    .trim()
    .parse::<u64>()?)
}
|
|
||||||
|
|
||||||
/// Filesystem label for a partition, via `lsblk -no label`; empty output
/// becomes `None`.
#[instrument(skip(path))]
pub async fn get_label<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> {
    let label = String::from_utf8(
        Command::new("lsblk")
            .arg("-no")
            .arg("label")
            .arg(path.as_ref())
            .invoke(crate::ErrorKind::BlockDevice)
            .await?,
    )?
    .trim()
    .to_owned();
    Ok(if label.is_empty() { None } else { Some(label) })
}
|
|
||||||
|
|
||||||
#[instrument(skip(path))]
|
|
||||||
pub async fn get_used<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
|
|
||||||
Ok(String::from_utf8(
|
|
||||||
Command::new("df")
|
|
||||||
.arg("--output=used")
|
|
||||||
.arg("--block-size=1")
|
|
||||||
.arg(path.as_ref())
|
|
||||||
.invoke(crate::ErrorKind::Filesystem)
|
|
||||||
.await?,
|
|
||||||
)?
|
|
||||||
.lines()
|
|
||||||
.skip(1)
|
|
||||||
.next()
|
|
||||||
.unwrap_or_default()
|
|
||||||
.trim()
|
|
||||||
.parse::<u64>()?)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(path))]
|
|
||||||
pub async fn get_available<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
|
|
||||||
Ok(String::from_utf8(
|
|
||||||
Command::new("df")
|
|
||||||
.arg("--output=avail")
|
|
||||||
.arg("--block-size=1")
|
|
||||||
.arg(path.as_ref())
|
|
||||||
.invoke(crate::ErrorKind::Filesystem)
|
|
||||||
.await?,
|
|
||||||
)?
|
|
||||||
.lines()
|
|
||||||
.skip(1)
|
|
||||||
.next()
|
|
||||||
.unwrap_or_default()
|
|
||||||
.trim()
|
|
||||||
.parse::<u64>()?)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(skip(path))]
|
|
||||||
pub async fn get_percentage<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
|
|
||||||
Ok(String::from_utf8(
|
|
||||||
Command::new("df")
|
|
||||||
.arg("--output=pcent")
|
|
||||||
.arg(path.as_ref())
|
|
||||||
.invoke(crate::ErrorKind::Filesystem)
|
|
||||||
.await?,
|
|
||||||
)?
|
|
||||||
.lines()
|
|
||||||
.skip(1)
|
|
||||||
.next()
|
|
||||||
.unwrap_or_default()
|
|
||||||
.trim()
|
|
||||||
.strip_suffix("%")
|
|
||||||
.unwrap()
|
|
||||||
.parse::<u64>()?)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Runs `pvscan` and returns each LVM physical volume mapped to its
/// volume-group name (`None` when the PV belongs to no VG).
#[instrument]
pub async fn pvscan() -> Result<BTreeMap<PathBuf, Option<String>>, Error> {
    let pvscan_out = Command::new("pvscan")
        .invoke(crate::ErrorKind::DiskManagement)
        .await?;
    let pvscan_out_str = std::str::from_utf8(&pvscan_out)?;
    Ok(parse_pvscan_output(pvscan_out_str))
}
|
|
||||||
|
|
||||||
/// Inspects a mounted partition for EmbassyOS data: first a backup's
/// unencrypted metadata file, then a full OS install (appmgr version file).
/// Returns `None` if neither is present.
pub async fn recovery_info(
    mountpoint: impl AsRef<Path>,
) -> Result<Option<EmbassyOsRecoveryInfo>, Error> {
    let backup_unencrypted_metadata_path = mountpoint
        .as_ref()
        .join("EmbassyBackups/unencrypted-metadata.cbor");
    if tokio::fs::metadata(&backup_unencrypted_metadata_path)
        .await
        .is_ok()
    {
        return Ok(Some(
            IoFormat::Cbor.from_slice(
                &tokio::fs::read(&backup_unencrypted_metadata_path)
                    .await
                    .with_ctx(|_| {
                        (
                            crate::ErrorKind::Filesystem,
                            backup_unencrypted_metadata_path.display().to_string(),
                        )
                    })?,
            )?,
        ));
    }
    // full install: presence of the appmgr version file marks an OS partition
    let version_path = mountpoint.as_ref().join("root/appmgr/version");
    if tokio::fs::metadata(&version_path).await.is_ok() {
        return Ok(Some(EmbassyOsRecoveryInfo {
            version: from_yaml_async_reader(File::open(&version_path).await?).await?,
            full: true,
            password_hash: None,
            wrapped_key: None,
        }));
    }

    Ok(None)
}
|
|
||||||
|
|
||||||
/// Enumerates external disks and their partitions, applying USB quirks first,
/// then grouping /dev/disk/by-path entries into disk -> partitions, and
/// finally probing each partition (label, capacity, usage, EmbassyOS data)
/// via a temporary read-only mount.
#[instrument]
pub async fn list() -> Result<DiskListResponse, Error> {
    // make sure quirked devices are disconnected before enumerating
    let mut quirks = fetch_quirks().await?;
    let reconnect = update_quirks(&mut quirks).await?;
    save_quirks(&mut quirks).await?;
    let disk_guids = pvscan().await?;
    // fold the by-path directory into disk -> set of partition device paths
    let disks = tokio_stream::wrappers::ReadDirStream::new(
        tokio::fs::read_dir(DISK_PATH)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, DISK_PATH))?,
    )
    .map_err(|e| {
        Error::new(
            eyre::Error::from(e).wrap_err(DISK_PATH),
            crate::ErrorKind::Filesystem,
        )
    })
    .try_fold(BTreeMap::new(), |mut disks, dir_entry| async move {
        if let Some(disk_path) = dir_entry.path().file_name().and_then(|s| s.to_str()) {
            // split "...-partN" symlinks into (disk name, partition name)
            let (disk_path, part_path) = if let Some(end) = PARTITION_REGEX.find(disk_path) {
                (
                    disk_path.strip_suffix(end.as_str()).unwrap_or_default(),
                    Some(disk_path),
                )
            } else {
                (disk_path, None)
            };
            let disk_path = Path::new(DISK_PATH).join(disk_path);
            let disk = tokio::fs::canonicalize(&disk_path).await.with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    disk_path.display().to_string(),
                )
            })?;
            // skip the boot SD card
            if &*disk == Path::new("/dev/mmcblk0") {
                return Ok(disks);
            }
            if !disks.contains_key(&disk) {
                disks.insert(disk.clone(), IndexSet::new());
            }
            if let Some(part_path) = part_path {
                let part_path = Path::new(DISK_PATH).join(part_path);
                let part = tokio::fs::canonicalize(&part_path).await.with_ctx(|_| {
                    (
                        crate::ErrorKind::Filesystem,
                        part_path.display().to_string(),
                    )
                })?;
                disks.get_mut(&disk).unwrap().insert(part);
            }
        }
        Ok(disks)
    })
    .await?;

    let mut res = Vec::with_capacity(disks.len());
    for (disk, parts) in disks {
        let mut guid: Option<String> = None;
        let mut partitions = Vec::with_capacity(parts.len());
        // metadata failures are non-fatal: warn and fall back to defaults
        let vendor = get_vendor(&disk)
            .await
            .map_err(|e| tracing::warn!("Could not get vendor of {}: {}", disk.display(), e.source))
            .unwrap_or_default();
        let model = get_model(&disk)
            .await
            .map_err(|e| tracing::warn!("Could not get model of {}: {}", disk.display(), e.source))
            .unwrap_or_default();
        let capacity = get_capacity(&disk)
            .await
            .map_err(|e| {
                tracing::warn!("Could not get capacity of {}: {}", disk.display(), e.source)
            })
            .unwrap_or_default();
        if let Some(g) = disk_guids.get(&disk) {
            // disk is an LVM PV: report its VG and skip partition probing
            guid = g.clone();
        } else {
            for part in parts {
                let mut embassy_os = None;
                let label = get_label(&part).await?;
                let capacity = get_capacity(&part)
                    .await
                    .map_err(|e| {
                        tracing::warn!("Could not get capacity of {}: {}", part.display(), e.source)
                    })
                    .unwrap_or_default();
                let mut used = None;

                // mount read-only (shared tmp mount) to inspect usage and
                // EmbassyOS metadata; failure to mount is only a warning
                match TmpMountGuard::mount(&BlockDev::new(&part), ReadOnly).await {
                    Err(e) => tracing::warn!("Could not collect usage information: {}", e.source),
                    Ok(mount_guard) => {
                        used = get_used(&mount_guard)
                            .await
                            .map_err(|e| {
                                tracing::warn!(
                                    "Could not get usage of {}: {}",
                                    part.display(),
                                    e.source
                                )
                            })
                            .ok();
                        if let Some(recovery_info) = match recovery_info(&mount_guard).await {
                            Ok(a) => a,
                            Err(e) => {
                                tracing::error!(
                                    "Error fetching unencrypted backup metadata: {}",
                                    e
                                );
                                None
                            }
                        } {
                            embassy_os = Some(recovery_info)
                        }
                        mount_guard.unmount().await?;
                    }
                }

                partitions.push(PartitionInfo {
                    logicalname: part,
                    label,
                    capacity,
                    used,
                    embassy_os,
                });
            }
        }
        res.push(DiskInfo {
            logicalname: disk,
            vendor,
            model,
            partitions,
            capacity,
            guid,
        })
    }

    Ok(DiskListResponse {
        disks: res,
        reconnect,
    })
}
|
|
||||||
|
|
||||||
/// Parses `pvscan` stdout into a map of PV device path -> optional VG name.
/// Unparseable lines are logged and skipped.
fn parse_pvscan_output(pvscan_output: &str) -> BTreeMap<PathBuf, Option<String>> {
    // One report line: `  PV <dev>  [is in exported] VG <name>  <sizes...>`
    fn parse_line(line: &str) -> IResult<&str, (&str, Option<&str>)> {
        let pv_parse = preceded(
            tag("  PV "),
            terminated(take_till1(|c| is_space(c as u8)), multispace1),
        );
        let vg_parse = preceded(
            // exported VGs are reported with this extra prefix
            opt(tag("is in exported ")),
            preceded(
                tag("VG "),
                terminated(take_till1(|c| is_space(c as u8)), multispace1),
            ),
        );
        // the VG section is optional (a PV may belong to no VG);
        // `rest` discards the trailing size information
        let mut parser = terminated(pair(pv_parse, opt(vg_parse)), rest);
        parser(line)
    }
    let lines = pvscan_output.lines();
    let n = lines.clone().count();
    // the final line is the "Total: ..." summary — skip it
    let entries = lines.take(n.saturating_sub(1));
    let mut ret = BTreeMap::new();
    for entry in entries {
        match parse_line(entry) {
            Ok((_, (pv, vg))) => {
                ret.insert(PathBuf::from(pv), vg.map(|s| s.to_owned()));
            }
            Err(_) => {
                tracing::warn!("Failed to parse pvscan output line: {}", entry);
            }
        }
    }
    ret
}
|
|
||||||
|
|
||||||
/// Smoke test: runs the parser over captured `pvscan` outputs covering a VG
/// member, a VG-less PV, and an exported VG.
/// NOTE(review): this only prints the results; consider asserting on them.
#[test]
fn test_pvscan_parser() {
    let s1 = r#"  PV /dev/mapper/cryptdata VG data lvm2 [1.81 TiB / 0 free]
  PV /dev/sdb lvm2 [931.51 GiB]
  Total: 2 [2.72 TiB] / in use: 1 [1.81 TiB] / in no VG: 1 [931.51 GiB]
"#;
    let s2 = r#"  PV /dev/sdb VG EMBASSY_LZHJAENWGPCJJL6C6AXOD7OOOIJG7HFBV4GYRJH6HADXUCN4BRWQ lvm2 [931.51 GiB / 0 free]
  Total: 1 [931.51 GiB] / in use: 1 [931.51 GiB] / in no VG: 0 [0 ]
"#;
    let s3 = r#"  PV /dev/mapper/cryptdata VG data lvm2 [1.81 TiB / 0 free]
  Total: 1 [1.81 TiB] / in use: 1 [1.81 TiB] / in no VG: 0 [0 ]
"#;
    let s4 = r#"  PV /dev/sda is in exported VG EMBASSY_ZFHOCTYV3ZJMJW3OTFMG55LSQZLP667EDNZKDNUJKPJX5HE6S5HQ [931.51 GiB / 0 free]
  Total: 1 [931.51 GiB] / in use: 1 [931.51 GiB] / in no VG: 0 [0 ]
"#;
    println!("{:?}", parse_pvscan_output(s1));
    println!("{:?}", parse_pvscan_output(s2));
    println!("{:?}", parse_pvscan_output(s3));
    println!("{:?}", parse_pvscan_output(s4));
}
|
|
||||||
@@ -1,359 +0,0 @@
|
|||||||
use std::fmt::Display;
|
|
||||||
|
|
||||||
use color_eyre::eyre::eyre;
|
|
||||||
use models::InvalidId;
|
|
||||||
use patch_db::Revision;
|
|
||||||
use rpc_toolkit::yajrc::RpcError;
|
|
||||||
|
|
||||||
/// Every category of error the system can produce.
///
/// The discriminant doubles as the JSON-RPC error code (`From<Error> for
/// RpcError` casts the kind with `as i32`), so values must stay stable and
/// must never be reused for a different meaning.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorKind {
    Unknown = 1,
    Filesystem = 2,
    Docker = 3,
    ConfigSpecViolation = 4,
    ConfigRulesViolation = 5,
    NotFound = 6,
    IncorrectPassword = 7,
    VersionIncompatible = 8,
    Network = 9,
    Registry = 10,
    Serialization = 11,
    Deserialization = 12,
    Utf8 = 13,
    ParseVersion = 14,
    IncorrectDisk = 15,
    Nginx = 16,
    Dependency = 17,
    ParseS9pk = 18,
    ParseUrl = 19,
    DiskNotAvailable = 20,
    BlockDevice = 21,
    InvalidOnionAddress = 22,
    Pack = 23,
    ValidateS9pk = 24,
    DiskCorrupted = 25, // Remove
    Tor = 26,
    ConfigGen = 27,
    ParseNumber = 28,
    Database = 29,
    InvalidPackageId = 30,
    InvalidSignature = 31,
    Backup = 32,
    Restore = 33,
    Authorization = 34,
    AutoConfigure = 35,
    Action = 36,
    RateLimited = 37,
    InvalidRequest = 38,
    MigrationFailed = 39,
    Uninitialized = 40,
    ParseNetAddress = 41,
    ParseSshKey = 42,
    SoundError = 43,
    ParseTimestamp = 44,
    ParseSysInfo = 45,
    Wifi = 46,
    Journald = 47,
    DiskManagement = 48,
    OpenSsl = 49,
    PasswordHashGeneration = 50,
    DiagnosticMode = 51,
    ParseDbField = 52,
    Duplicate = 53,
    MultipleErrors = 54,
    Incoherent = 55,
    InvalidBackupTargetId = 56,
    ProductKeyMismatch = 57,
    LanPortConflict = 58,
    Javascript = 59,
    Pem = 60,
}
|
|
||||||
impl ErrorKind {
    /// Human-readable label for this error kind; also used as the JSON-RPC
    /// error `message` in `From<Error> for RpcError`.
    pub fn as_str(&self) -> &'static str {
        use ErrorKind::*;
        match self {
            Unknown => "Unknown Error",
            Filesystem => "Filesystem I/O Error",
            Docker => "Docker Error",
            ConfigSpecViolation => "Config Spec Violation",
            ConfigRulesViolation => "Config Rules Violation",
            NotFound => "Not Found",
            IncorrectPassword => "Incorrect Password",
            VersionIncompatible => "Version Incompatible",
            Network => "Network Error",
            Registry => "Registry Error",
            Serialization => "Serialization Error",
            Deserialization => "Deserialization Error",
            Utf8 => "UTF-8 Parse Error",
            ParseVersion => "Version Parsing Error",
            IncorrectDisk => "Incorrect Disk",
            Nginx => "Nginx Error",
            Dependency => "Dependency Error",
            ParseS9pk => "S9PK Parsing Error",
            ParseUrl => "URL Parsing Error",
            DiskNotAvailable => "Disk Not Available",
            BlockDevice => "Block Device Error",
            InvalidOnionAddress => "Invalid Onion Address",
            Pack => "Pack Error",
            ValidateS9pk => "S9PK Validation Error",
            DiskCorrupted => "Disk Corrupted", // Remove
            Tor => "Tor Daemon Error",
            ConfigGen => "Config Generation Error",
            ParseNumber => "Number Parsing Error",
            Database => "Database Error",
            InvalidPackageId => "Invalid Package ID",
            InvalidSignature => "Invalid Signature",
            Backup => "Backup Error",
            Restore => "Restore Error",
            Authorization => "Unauthorized",
            AutoConfigure => "Auto-Configure Error",
            Action => "Action Failed",
            RateLimited => "Rate Limited",
            InvalidRequest => "Invalid Request",
            MigrationFailed => "Migration Failed",
            Uninitialized => "Uninitialized",
            ParseNetAddress => "Net Address Parsing Error",
            ParseSshKey => "SSH Key Parsing Error",
            SoundError => "Sound Interface Error",
            ParseTimestamp => "Timestamp Parsing Error",
            ParseSysInfo => "System Info Parsing Error",
            Wifi => "WiFi Internal Error",
            Journald => "Journald Error",
            DiskManagement => "Disk Management Error",
            OpenSsl => "OpenSSL Internal Error",
            PasswordHashGeneration => "Password Hash Generation Error",
            DiagnosticMode => "Embassy is in Diagnostic Mode",
            ParseDbField => "Database Field Parse Error",
            Duplicate => "Duplication Error",
            MultipleErrors => "Multiple Errors",
            Incoherent => "Incoherent",
            InvalidBackupTargetId => "Invalid Backup Target ID",
            ProductKeyMismatch => "Incompatible Product Keys",
            LanPortConflict => "Incompatible LAN Port Configuration",
            Javascript => "Javascript Engine Error",
            Pem => "PEM Encoding Error",
        }
    }
}
|
|
||||||
impl Display for ErrorKind {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
write!(f, "{}", self.as_str())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The crate-wide error type: an `eyre` report tagged with an [`ErrorKind`]
/// and an optional database revision.
#[derive(Debug)]
pub struct Error {
    /// Underlying error report (carries the original error and any context).
    pub source: color_eyre::eyre::Error,
    /// Category of the error; determines the RPC error code and message.
    pub kind: ErrorKind,
    /// Database revision attached to the error, if any.
    /// NOTE(review): presumably reported to clients so they can stay in sync
    /// with patch-db — confirm with the RPC conversion's consumers.
    pub revision: Option<Revision>,
}
|
|
||||||
|
|
||||||
impl Display for Error {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
write!(f, "{}: {}", self.kind.as_str(), self.source)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl Error {
|
|
||||||
pub fn new<E: Into<color_eyre::eyre::Error>>(source: E, kind: ErrorKind) -> Self {
|
|
||||||
Error {
|
|
||||||
source: source.into(),
|
|
||||||
kind,
|
|
||||||
revision: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl From<InvalidId> for Error {
|
|
||||||
fn from(err: InvalidId) -> Self {
|
|
||||||
Error::new(err, crate::error::ErrorKind::InvalidPackageId)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Conversions from common std / parsing errors into `Error`, each tagged with
// the matching `ErrorKind` so `?` can be used directly on these results.
impl From<std::io::Error> for Error {
    fn from(e: std::io::Error) -> Self {
        Error::new(e, ErrorKind::Filesystem)
    }
}
impl From<std::str::Utf8Error> for Error {
    fn from(e: std::str::Utf8Error) -> Self {
        Error::new(e, ErrorKind::Utf8)
    }
}
impl From<std::string::FromUtf8Error> for Error {
    fn from(e: std::string::FromUtf8Error) -> Self {
        Error::new(e, ErrorKind::Utf8)
    }
}
impl From<emver::ParseError> for Error {
    fn from(e: emver::ParseError) -> Self {
        Error::new(e, ErrorKind::ParseVersion)
    }
}
impl From<rpc_toolkit::url::ParseError> for Error {
    fn from(e: rpc_toolkit::url::ParseError) -> Self {
        Error::new(e, ErrorKind::ParseUrl)
    }
}
impl From<std::num::ParseIntError> for Error {
    fn from(e: std::num::ParseIntError) -> Self {
        Error::new(e, ErrorKind::ParseNumber)
    }
}
impl From<std::num::ParseFloatError> for Error {
    fn from(e: std::num::ParseFloatError) -> Self {
        Error::new(e, ErrorKind::ParseNumber)
    }
}
|
|
||||||
// Conversions from subsystem errors (database, crypto, docker, tor, tls)
// into `Error`.
impl From<patch_db::Error> for Error {
    fn from(e: patch_db::Error) -> Self {
        Error::new(e, ErrorKind::Database)
    }
}
impl From<sqlx::Error> for Error {
    fn from(e: sqlx::Error) -> Self {
        Error::new(e, ErrorKind::Database)
    }
}
impl From<ed25519_dalek::SignatureError> for Error {
    fn from(e: ed25519_dalek::SignatureError) -> Self {
        Error::new(e, ErrorKind::InvalidSignature)
    }
}
impl From<bollard::errors::Error> for Error {
    fn from(e: bollard::errors::Error) -> Self {
        Error::new(e, ErrorKind::Docker)
    }
}
impl From<torut::control::ConnError> for Error {
    fn from(e: torut::control::ConnError) -> Self {
        // Wraps the Debug rendering of the error in a fresh report.
        // NOTE(review): presumably because `ConnError` does not satisfy the
        // bounds `Error::new` needs — confirm against the torut crate.
        Error::new(eyre!("{:?}", e), ErrorKind::Tor)
    }
}
impl From<std::net::AddrParseError> for Error {
    fn from(e: std::net::AddrParseError) -> Self {
        Error::new(e, ErrorKind::ParseNetAddress)
    }
}
impl From<openssl::error::ErrorStack> for Error {
    fn from(e: openssl::error::ErrorStack) -> Self {
        // The whole error stack is rendered into the message up front.
        Error::new(eyre!("OpenSSL ERROR:\n{}", e), ErrorKind::OpenSsl)
    }
}
|
|
||||||
impl From<Error> for RpcError {
|
|
||||||
fn from(e: Error) -> Self {
|
|
||||||
let mut data_object = serde_json::Map::with_capacity(3);
|
|
||||||
data_object.insert("details".to_owned(), format!("{}", e.source).into());
|
|
||||||
data_object.insert("debug".to_owned(), format!("{:?}", e.source).into());
|
|
||||||
data_object.insert(
|
|
||||||
"revision".to_owned(),
|
|
||||||
match serde_json::to_value(&e.revision) {
|
|
||||||
Ok(a) => a,
|
|
||||||
Err(e) => {
|
|
||||||
tracing::warn!("Error serializing revision for Error object: {}", e);
|
|
||||||
serde_json::Value::Null
|
|
||||||
}
|
|
||||||
},
|
|
||||||
);
|
|
||||||
RpcError {
|
|
||||||
code: e.kind as i32,
|
|
||||||
message: e.kind.as_str().into(),
|
|
||||||
data: Some(data_object.into()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Accumulates multiple [`Error`]s so that a batch operation can continue past
/// individual failures and report them all at the end.
#[derive(Debug, Default)]
pub struct ErrorCollection(Vec<Error>);
|
|
||||||
impl ErrorCollection {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self::default()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn handle<T, E: Into<Error>>(&mut self, result: Result<T, E>) -> Option<T> {
|
|
||||||
match result {
|
|
||||||
Ok(a) => Some(a),
|
|
||||||
Err(e) => {
|
|
||||||
self.0.push(e.into());
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn into_result(self) -> Result<(), Error> {
|
|
||||||
if self.0.is_empty() {
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(Error::new(eyre!("{}", self), ErrorKind::MultipleErrors))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl From<ErrorCollection> for Result<(), Error> {
|
|
||||||
fn from(e: ErrorCollection) -> Self {
|
|
||||||
e.into_result()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<T, E: Into<Error>> Extend<Result<T, E>> for ErrorCollection {
    /// Feeds every result through [`ErrorCollection::handle`], discarding the
    /// success values and accumulating the failures.
    fn extend<I: IntoIterator<Item = Result<T, E>>>(&mut self, iter: I) {
        iter.into_iter().for_each(|item| {
            self.handle(item);
        })
    }
}
|
|
||||||
impl std::fmt::Display for ErrorCollection {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
for (idx, e) in self.0.iter().enumerate() {
|
|
||||||
if idx > 0 {
|
|
||||||
write!(f, "; ")?;
|
|
||||||
}
|
|
||||||
write!(f, "{}", e)?;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Extension helpers for converting arbitrary `Result`s into
/// `Result<_, Error>`.
pub trait ResultExt<T, E>
where
    Self: Sized,
{
    /// Tags the error with `kind`, without adding any context message.
    fn with_kind(self, kind: ErrorKind) -> Result<T, Error>;
    /// Tags the error with a kind and a context message, both computed from
    /// the error value itself.
    fn with_ctx<F: FnOnce(&E) -> (ErrorKind, D), D: Display + Send + Sync + 'static>(
        self,
        f: F,
    ) -> Result<T, Error>;
}
|
|
||||||
impl<T, E> ResultExt<T, E> for Result<T, E>
where
    color_eyre::eyre::Error: From<E>,
{
    fn with_kind(self, kind: ErrorKind) -> Result<T, Error> {
        self.map_err(|e| Error {
            source: e.into(),
            kind,
            revision: None,
        })
    }

    fn with_ctx<F: FnOnce(&E) -> (ErrorKind, D), D: Display + Send + Sync + 'static>(
        self,
        f: F,
    ) -> Result<T, Error> {
        self.map_err(|e| {
            // Compute kind + context from the borrowed error before the error
            // is consumed to build the report.
            let (kind, ctx) = f(&e);
            let source = color_eyre::eyre::Error::from(e);
            // Bake the original error text into the context string so it stays
            // visible even when only the outermost message is shown.
            let ctx = format!("{}: {}", ctx, source);
            let source = source.wrap_err(ctx);
            Error {
                kind,
                source: source.into(),
                revision: None,
            }
        })
    }
}
|
|
||||||
|
|
||||||
/// Returns early with a `crate::Error` of kind `$c` when `$x` is false, with a
/// message built from `$fmt` and the remaining arguments — like `ensure!`,
/// but carrying an `ErrorKind`.
#[macro_export]
macro_rules! ensure_code {
    ($x:expr, $c:expr, $fmt:expr $(, $arg:expr)*) => {
        if !($x) {
            return Err(crate::Error::new(color_eyre::eyre::eyre!($fmt, $($arg, )*), $c));
        }
    };
}
|
|
||||||
@@ -1,78 +0,0 @@
|
|||||||
use digest::Digest;
|
|
||||||
use tokio::fs::File;
|
|
||||||
use tokio::io::AsyncWriteExt;
|
|
||||||
use tokio::process::Command;
|
|
||||||
use tracing::instrument;
|
|
||||||
|
|
||||||
use crate::util::Invoke;
|
|
||||||
use crate::{Error, ErrorKind, ResultExt};
|
|
||||||
|
|
||||||
/// Location of the product key file on disk.
// `&str` in a `const` already has a `'static` lifetime; the explicit
// `&'static str` was redundant (clippy::redundant_static_lifetimes).
pub const PRODUCT_KEY_PATH: &str = "/embassy-os/product_key.txt";
|
|
||||||
|
|
||||||
#[instrument]
|
|
||||||
pub async fn get_hostname() -> Result<String, Error> {
|
|
||||||
Ok(derive_hostname(&get_id().await?))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Builds the canonical hostname for a device id: `embassy-<id>`.
pub fn derive_hostname(id: &str) -> String {
    ["embassy-", id].concat()
}
|
|
||||||
|
|
||||||
/// Queries the OS for the currently configured hostname by running
/// `hostname(1)`.
#[instrument]
pub async fn get_current_hostname() -> Result<String, Error> {
    let out = Command::new("hostname")
        .invoke(ErrorKind::ParseSysInfo)
        .await?;
    let out_string = String::from_utf8(out)?;
    // Drop the trailing newline emitted by the command.
    Ok(out_string.trim().to_owned())
}
|
|
||||||
|
|
||||||
#[instrument]
|
|
||||||
pub async fn set_hostname(hostname: &str) -> Result<(), Error> {
|
|
||||||
let _out = Command::new("hostnamectl")
|
|
||||||
.arg("set-hostname")
|
|
||||||
.arg(hostname)
|
|
||||||
.invoke(ErrorKind::ParseSysInfo)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Reads the product key from [`PRODUCT_KEY_PATH`], trimming surrounding
/// whitespace (e.g. the trailing newline).
#[instrument]
pub async fn get_product_key() -> Result<String, Error> {
    let out = tokio::fs::read_to_string(PRODUCT_KEY_PATH)
        .await
        // Attach the path as context so the I/O failure is diagnosable.
        .with_ctx(|_| (crate::ErrorKind::Filesystem, PRODUCT_KEY_PATH))?;
    Ok(out.trim().to_owned())
}
|
|
||||||
|
|
||||||
#[instrument]
|
|
||||||
pub async fn set_product_key(key: &str) -> Result<(), Error> {
|
|
||||||
let mut pkey_file = File::create(PRODUCT_KEY_PATH).await?;
|
|
||||||
pkey_file.write_all(key.as_bytes()).await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn derive_id(key: &str) -> String {
|
|
||||||
let mut hasher = sha2::Sha256::new();
|
|
||||||
hasher.update(key.as_bytes());
|
|
||||||
let res = hasher.finalize();
|
|
||||||
hex::encode(&res[0..4])
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument]
|
|
||||||
pub async fn get_id() -> Result<String, Error> {
|
|
||||||
let key = get_product_key().await?;
|
|
||||||
Ok(derive_id(&key))
|
|
||||||
}
|
|
||||||
|
|
||||||
// cat /embassy-os/product_key.txt | shasum -a 256 | head -c 8 | awk '{print "embassy-"$1}' | xargs hostnamectl set-hostname && systemctl restart avahi-daemon
|
|
||||||
#[instrument]
|
|
||||||
pub async fn sync_hostname() -> Result<(), Error> {
|
|
||||||
set_hostname(&format!("embassy-{}", get_id().await?)).await?;
|
|
||||||
Command::new("systemctl")
|
|
||||||
.arg("restart")
|
|
||||||
.arg("avahi-daemon")
|
|
||||||
.invoke(crate::ErrorKind::Network)
|
|
||||||
.await?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
use std::fmt::Debug;
|
|
||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
use serde::{Deserialize, Deserializer, Serialize};
|
|
||||||
|
|
||||||
use crate::util::Version;
|
|
||||||
|
|
||||||
pub use models::{Id, InvalidId, IdUnchecked, SYSTEM_ID};
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/// Identifier of a docker image belonging to a package; a validated newtype
/// over [`Id`].
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct ImageId<S: AsRef<str> = String>(Id<S>);
|
|
||||||
impl<S: AsRef<str>> std::fmt::Display for ImageId<S> {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
write!(f, "{}", &self.0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<S: AsRef<str>> ImageId<S> {
|
|
||||||
pub fn for_package<PkgId: AsRef<crate::s9pk::manifest::PackageId<S0>>, S0: AsRef<str>>(
|
|
||||||
&self,
|
|
||||||
pkg_id: PkgId,
|
|
||||||
pkg_version: Option<&Version>,
|
|
||||||
) -> String {
|
|
||||||
format!(
|
|
||||||
"start9/{}/{}:{}",
|
|
||||||
pkg_id.as_ref(),
|
|
||||||
self.0,
|
|
||||||
pkg_version.map(|v| { v.as_str() }).unwrap_or("latest")
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl FromStr for ImageId {
|
|
||||||
type Err = InvalidId;
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
Ok(ImageId(Id::try_from(s.to_owned())?))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<'de, S> Deserialize<'de> for ImageId<S>
|
|
||||||
where
|
|
||||||
S: AsRef<str>,
|
|
||||||
Id<S>: Deserialize<'de>,
|
|
||||||
{
|
|
||||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
|
||||||
where
|
|
||||||
D: Deserializer<'de>,
|
|
||||||
{
|
|
||||||
Ok(ImageId(Deserialize::deserialize(deserializer)?))
|
|
||||||
}
|
|
||||||
}
|
|
||||||